repo
stringlengths 1
191
⌀ | file
stringlengths 23
351
| code
stringlengths 0
5.32M
| file_length
int64 0
5.32M
| avg_line_length
float64 0
2.9k
| max_line_length
int64 0
288k
| extension_type
stringclasses 1
value |
|---|---|---|---|---|---|---|
null |
VSRCollectiveControlViaSNCA-main/src/main/java/it/units/erallab/hmsrobots/viewers/Framer.java
|
/*
* Copyright (C) 2021 Eric Medvet <eric.medvet@gmail.com> (as Eric Medvet <eric.medvet@gmail.com>)
*
* This program is free software: you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package it.units.erallab.hmsrobots.viewers;
import it.units.erallab.hmsrobots.core.geometry.BoundingBox;
import it.units.erallab.hmsrobots.core.snapshots.Snapshot;
/**
 * Strategy for choosing the world-space bounding box (the "frame") to be rendered at a given time.
 *
 * @author Eric Medvet <eric.medvet@gmail.com>
 */
@FunctionalInterface
public interface Framer {
  //t: current simulation time; snapshot: current snapshot of the simulated world;
  //ratio: presumably the target width/height ratio of the viewport the frame is mapped onto
  //  (NOTE(review): confirm against callers, e.g. drawers using this interface)
  BoundingBox getFrame(double t, Snapshot snapshot, double ratio);
}
| 1,093
| 34.290323
| 98
|
java
|
null |
VSRCollectiveControlViaSNCA-main/src/main/java/it/units/erallab/hmsrobots/viewers/GridOnlineViewer.java
|
/*
* Copyright (C) 2021 Eric Medvet <eric.medvet@gmail.com> (as Eric Medvet <eric.medvet@gmail.com>)
*
* This program is free software: you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package it.units.erallab.hmsrobots.viewers;
import com.google.common.base.Stopwatch;
import it.units.erallab.hmsrobots.core.geometry.BoundingBox;
import it.units.erallab.hmsrobots.core.snapshots.Snapshot;
import it.units.erallab.hmsrobots.core.snapshots.SnapshotListener;
import it.units.erallab.hmsrobots.tasks.Task;
import it.units.erallab.hmsrobots.util.Grid;
import it.units.erallab.hmsrobots.viewers.drawers.Drawer;
import it.units.erallab.hmsrobots.viewers.drawers.Drawers;
import org.apache.commons.lang3.tuple.Pair;
import javax.swing.*;
import java.awt.*;
import java.awt.image.BufferStrategy;
import java.util.LinkedList;
import java.util.List;
import java.util.Queue;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.function.Function;
/**
 * Swing window that renders, online, a grid of concurrently running simulations.
 * <p>
 * Each grid cell receives {@link Snapshot}s through the {@link SnapshotListener} returned by
 * {@link #listener(int, int)}. A background task (started in the constructor) assembles, for each
 * frame time, one snapshot per cell and enqueues the assembled grid on {@code gridQueue}; a second,
 * periodic task (started by {@link #start(int)}) dequeues those grids and paints them on a
 * double-buffered canvas.
 *
 * @author Eric Medvet <eric.medvet@gmail.com>
 */
public class GridOnlineViewer extends JFrame implements GridSnapshotListener {

  //a snapshot paired with the simulation time it refers to
  private static class TimedSnapshot {
    private final double t;
    private final Snapshot snapshot;

    public TimedSnapshot(double t, Snapshot snapshot) {
      this.t = t;
      this.snapshot = snapshot;
    }
  }

  private final static int FRAME_RATE = 20; //frames per second of the rendered animation
  private final static int INIT_WIN_WIDTH = 1000; //initial window width, in pixels
  private final static int INIT_WIN_HEIGHT = 600; //initial window height, in pixels

  private final Grid<Drawer> drawersGrid; //per-cell drawers, each clipped to its cell's sub-rectangle of the unit square
  private final Queue<Grid<TimedSnapshot>> gridQueue; //assembled frames waiting to be painted; guarded by synchronized (gridQueue)
  private final Grid<Queue<TimedSnapshot>> queueGrid; //per-cell incoming snapshots; guarded by synchronized (queueGrid)
  private final Canvas canvas;
  private final ScheduledExecutorService executor;
  private double t; //current frame time; read/written only by the frame-assembling task
  private final boolean running; //always true: the assembling loop stops only when the executor is shut down

  /**
   * @throws IllegalArgumentException if the two grids do not have the same size
   */
  public GridOnlineViewer(Grid<String> namesGrid, Grid<Drawer> drawersGrid, ScheduledExecutorService executor) {
    super("World viewer");
    if (namesGrid.getW() != drawersGrid.getW() || namesGrid.getH() != drawersGrid.getH()) {
      throw new IllegalArgumentException("Names grid and drawers grid should have the same size");
    }
    //wrap each drawer so that it paints only inside its own cell of the unit square
    this.drawersGrid = Grid.create(
        namesGrid.getW(),
        namesGrid.getH(),
        (x, y) -> Drawer.clip(
            BoundingBox.of(
                (double) x / (double) namesGrid.getW(),
                (double) y / (double) namesGrid.getH(),
                (double) (x + 1) / (double) namesGrid.getW(),
                (double) (y + 1) / (double) namesGrid.getH()
            ),
            drawersGrid.get(x, y)
        )
    );
    this.executor = executor;
    //create things
    gridQueue = new LinkedList<>();
    queueGrid = Grid.create(namesGrid);
    //create one incoming snapshot queue per cell
    for (int x = 0; x < namesGrid.getW(); x++) {
      for (int y = 0; y < namesGrid.getH(); y++) {
        queueGrid.set(x, y, new LinkedList<>());
      }
    }
    //create/set ui components
    setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE);
    Dimension dimension = new Dimension(INIT_WIN_WIDTH, INIT_WIN_HEIGHT);
    canvas = new Canvas();
    canvas.setPreferredSize(dimension);
    canvas.setMinimumSize(dimension);
    canvas.setMaximumSize(dimension);
    getContentPane().add(canvas, BorderLayout.CENTER);
    //pack
    pack();
    //init time and grid
    t = 0d;
    running = true;
    //start consumer of single frames: repeatedly assembles one TimedSnapshot per cell for the
    //current frame time t and publishes the assembled grid on gridQueue
    executor.submit(() -> {
          while (running) {
            //for each cell, drop snapshots older than t and keep the first one at/after t
            Grid<TimedSnapshot> snapshotGrid = Grid.create(queueGrid);
            synchronized (queueGrid) {
              for (Grid.Entry<Queue<TimedSnapshot>> entry : queueGrid) {
                TimedSnapshot snapshot;
                while ((snapshot = entry.getValue().peek()) != null) {
                  if (snapshot.t < t) {
                    entry.getValue().poll();
                  } else {
                    break;
                  }
                }
                snapshotGrid.set(entry.getX(), entry.getY(), snapshot);
              }
            }
            //the frame is ready when every named cell has a snapshot at/after t
            boolean ready = true;
            for (Grid.Entry<Queue<TimedSnapshot>> entry : queueGrid) {
              ready = ready && ((namesGrid.get(entry.getX(), entry.getY()) == null) || (snapshotGrid.get(entry.getX(), entry.getY()) != null));
            }
            if (ready) {
              //update time
              t = t + 1d / FRAME_RATE;
              //render asynchronously: hand the frame over to the painting task (see start())
              synchronized (gridQueue) {
                gridQueue.offer(Grid.copy(snapshotGrid));
                gridQueue.notifyAll();
              }
            } else {
              //wait for new snapshots to arrive; listener() notifies on queueGrid
              synchronized (queueGrid) {
                try {
                  queueGrid.wait();
                } catch (InterruptedException ex) {
                  //ignore
                }
              }
            }
          }
        }
    );
  }

  /**
   * Shows the window and schedules the periodic painting task.
   *
   * @param delay delay, in seconds, before the first frame is painted
   */
  public void start(int delay) {
    setVisible(true);
    canvas.setIgnoreRepaint(true); //painting is driven by the scheduled task, not by AWT repaints
    canvas.createBufferStrategy(2); //double buffering
    //start consumer of composed frames
    Runnable drawer = new Runnable() {
      final Stopwatch stopwatch = Stopwatch.createUnstarted();

      @Override
      public void run() {
        if (!stopwatch.isRunning()) {
          stopwatch.start(); //wall-clock starts at the first actual execution
        }
        double currentTime = (double) stopwatch.elapsed(TimeUnit.MILLISECONDS) / 1000d;
        //take the most recent queued frame not later than the current wall-clock time
        Grid<TimedSnapshot> localSnapshotGrid = null;
        synchronized (gridQueue) {
          while (!gridQueue.isEmpty()) {
            localSnapshotGrid = gridQueue.poll();
            if (gridQueue.isEmpty() || (gridQueue.peek().get(0, 0).t > currentTime)) {
              break;
            }
          }
        }
        if (localSnapshotGrid != null) {
          try {
            renderFrame(localSnapshotGrid);
          } catch (Throwable t) { //NOTE(review): shadows field t; any rendering error terminates the JVM
            t.printStackTrace();
            System.exit(0);
          }
          //wake up the assembling task, in case it is waiting for queue space to drain
          synchronized (gridQueue) {
            gridQueue.notifyAll();
          }
        } else {
          //no frame available: wait until the assembling task publishes one
          synchronized (gridQueue) {
            try {
              gridQueue.wait();
            } catch (InterruptedException ex) {
              //ignore
            }
          }
        }
      }
    };
    executor.scheduleAtFixedRate(drawer, Math.round(delay * 1000d), Math.round(1000d / (double) FRAME_RATE), TimeUnit.MILLISECONDS);
  }

  /**
   * Returns the listener through which cell (lX,lY) feeds its snapshots; may be called from any thread.
   */
  @Override
  public SnapshotListener listener(final int lX, final int lY) {
    return (double t, Snapshot snapshot) -> {
      synchronized (queueGrid) {
        queueGrid.get(lX, lY).offer(new TimedSnapshot(t, snapshot));
        queueGrid.notifyAll(); //wake the frame-assembling task
      }
    };
  }

  //paints one assembled frame on the canvas buffer and flips it
  private void renderFrame(Grid<TimedSnapshot> localSnapshotGrid) {
    //get graphics
    Graphics2D g = (Graphics2D) canvas.getBufferStrategy().getDrawGraphics();
    g.setClip(0, 0, canvas.getWidth(), canvas.getHeight());
    //iterate over snapshot grid
    for (Grid.Entry<TimedSnapshot> entry : localSnapshotGrid) {
      if (entry.getValue() != null) {
        drawersGrid.get(entry.getX(), entry.getY()).draw(entry.getValue().t, entry.getValue().snapshot, g);
      }
    }
    //dispose and encode
    g.dispose();
    BufferStrategy strategy = canvas.getBufferStrategy();
    if (!strategy.contentsLost()) {
      strategy.show();
    }
    Toolkit.getDefaultToolkit().sync();
  }

  /**
   * Runs the task on every named solution of the grid and shows the result online.
   */
  public static <S> void run(Task<S, ?> task, Grid<Pair<String, S>> namedSolutions, Function<String, Drawer> drawerSupplier) {
    ScheduledExecutorService uiExecutor = Executors.newScheduledThreadPool(4);
    ExecutorService executor = Executors.newFixedThreadPool(Runtime.getRuntime().availableProcessors());
    GridOnlineViewer gridOnlineViewer = new GridOnlineViewer(
        Grid.create(namedSolutions, p -> p == null ? null : p.getLeft()),
        Grid.create(namedSolutions, p -> drawerSupplier.apply(p.getLeft())),
        uiExecutor
    );
    gridOnlineViewer.start(3); //3 s head start before painting begins
    GridEpisodeRunner<S> runner = new GridEpisodeRunner<>(
        namedSolutions,
        task,
        gridOnlineViewer,
        executor
    );
    runner.run();
  }

  //convenience overload with the default drawer
  public static <S> void run(Task<S, ?> task, Grid<Pair<String, S>> namedSolutions) {
    run(task, namedSolutions, Drawers::basicWithMiniWorld);
  }

  //lays the solutions out on a near-square grid, named by their index
  public static <S> void run(Task<S, ?> task, List<S> ss) {
    int nRows = (int) Math.ceil(Math.sqrt(ss.size()));
    int nCols = (int) Math.ceil((double) ss.size() / (double) nRows);
    Grid<Pair<String, S>> namedSolutions = Grid.create(nRows, nCols);
    for (int i = 0; i < ss.size(); i++) {
      namedSolutions.set(i % nRows, Math.floorDiv(i, nRows), Pair.of(Integer.toString(i), ss.get(i)));
    }
    run(task, namedSolutions);
  }

  //convenience overload for a single solution
  public static <S> void run(Task<S, ?> task, S s) {
    run(task, Grid.create(1, 1, Pair.of("", s)));
  }
}
| 9,370
| 33.836431
| 143
|
java
|
null |
VSRCollectiveControlViaSNCA-main/src/main/java/it/units/erallab/hmsrobots/viewers/AllRobotFollower.java
|
/*
* Copyright (C) 2021 Eric Medvet <eric.medvet@gmail.com> (as Eric Medvet <eric.medvet@gmail.com>)
*
* This program is free software: you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package it.units.erallab.hmsrobots.viewers;
import it.units.erallab.hmsrobots.core.geometry.BoundingBox;
import it.units.erallab.hmsrobots.core.geometry.Shape;
import it.units.erallab.hmsrobots.core.objects.Robot;
import it.units.erallab.hmsrobots.core.snapshots.Snapshot;
import it.units.erallab.hmsrobots.viewers.drawers.SubtreeDrawer;
import java.util.SortedMap;
import java.util.TreeMap;
/**
 * {@link Framer} that frames all the robots found in the snapshot, smoothing the frame over a
 * sliding time window and enforcing a target aspect ratio.
 *
 * @author Eric Medvet <eric.medvet@gmail.com>
 */
public class AllRobotFollower implements Framer {

  private final double sizeRelativeMargin; //multiplicative margin applied to the aggregated bounding box
  private final double windowT; //length, in simulated seconds, of the smoothing window
  private final SortedMap<Double, BoundingBox> boundingBoxes; //time -> bounding box of all robots at that time

  /**
   * @param sizeRelativeMargin multiplicative margin (e.g. 1.5 enlarges the frame by 50%)
   * @param windowT            smoothing window length, in simulated seconds
   */
  public AllRobotFollower(double sizeRelativeMargin, double windowT) {
    this.sizeRelativeMargin = sizeRelativeMargin;
    this.windowT = windowT;
    boundingBoxes = new TreeMap<>();
  }

  @Override
  public BoundingBox getFrame(double t, Snapshot snapshot, double ratio) {
    //get current bounding box of all robots, if any is present in the snapshot
    SubtreeDrawer.Extractor.matches(Shape.class, Robot.class, null).extract(snapshot).stream()
        .map(s -> ((Shape) s.getContent()).boundingBox())
        .reduce(BoundingBox::largest)
        .ifPresent(boundingBox -> boundingBoxes.put(t, boundingBox));
    //clean: drop all entries older than the window; the emptiness guard matters because the
    //extractor may have found no robot yet, and firstKey() on an empty map throws
    //NoSuchElementException; the while loop (instead of a single removal) evicts every stale
    //entry even when time advances by more than one step between calls
    while (!boundingBoxes.isEmpty() && boundingBoxes.firstKey() < (t - windowT)) {
      boundingBoxes.remove(boundingBoxes.firstKey());
    }
    //aggregate: union of all boxes in the window; unit square as a fallback when none is known
    BoundingBox aggregated = boundingBoxes.values().stream()
        .reduce(BoundingBox::largest)
        .orElse(BoundingBox.of(0, 0, 1, 1));
    //enlarge by the relative margin around the center
    double cx = (aggregated.min.x + aggregated.max.x) / 2d;
    double cy = (aggregated.min.y + aggregated.max.y) / 2d;
    double w = aggregated.max.x - aggregated.min.x;
    double h = aggregated.max.y - aggregated.min.y;
    BoundingBox enlarged = BoundingBox.of(
        cx - w / 2d * sizeRelativeMargin,
        cy - h / 2d * sizeRelativeMargin,
        cx + w / 2d * sizeRelativeMargin,
        cy + h / 2d * sizeRelativeMargin
    );
    //adjust to the requested width/height ratio by growing (never cropping) one dimension
    BoundingBox adjusted = enlarged;
    double fRatio = (enlarged.max.x - enlarged.min.x) / (enlarged.max.y - enlarged.min.y);
    if (fRatio > ratio) {
      //enlarge h
      adjusted = BoundingBox.of(
          enlarged.min.x,
          cy - h / 2d * sizeRelativeMargin * fRatio / ratio,
          enlarged.max.x,
          cy + h / 2d * sizeRelativeMargin * fRatio / ratio
      );
    } else if (fRatio < ratio) {
      //enlarge w
      adjusted = BoundingBox.of(
          cx - w / 2d * sizeRelativeMargin * ratio / fRatio,
          enlarged.min.y,
          cx + w / 2d * sizeRelativeMargin * ratio / fRatio,
          enlarged.max.y
      );
    }
    return adjusted;
  }
}
| 3,403
| 35.212766
| 98
|
java
|
null |
VSRCollectiveControlViaSNCA-main/src/main/java/it/units/erallab/hmsrobots/viewers/GridEpisodeRunner.java
|
/*
* Copyright (C) 2021 Eric Medvet <eric.medvet@gmail.com> (as Eric Medvet <eric.medvet@gmail.com>)
*
* This program is free software: you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package it.units.erallab.hmsrobots.viewers;
import it.units.erallab.hmsrobots.tasks.Task;
import it.units.erallab.hmsrobots.util.Grid;
import org.apache.commons.lang3.tuple.Pair;
import java.io.Flushable;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;
import java.util.logging.Level;
import java.util.logging.LogManager;
import java.util.logging.Logger;
/**
* @author Eric Medvet <eric.medvet@gmail.com>
*/
public class GridEpisodeRunner<S> implements Runnable {
static {
try {
LogManager.getLogManager().readConfiguration(GridEpisodeRunner.class.getClassLoader().getResourceAsStream("logging.properties"));
} catch (IOException ex) {
//ignore
} catch (SecurityException ex) {
//ignore
}
}
private final Grid<Pair<String, S>> namedSolutionGrid;
private final Task<S, ?> episode;
private final GridSnapshotListener gridSnapshotListener;
private final ExecutorService executor;
private static final Logger L = Logger.getLogger(GridEpisodeRunner.class.getName());
public GridEpisodeRunner(Grid<Pair<String, S>> namedSolutionGrid, Task<S, ?> episode, GridSnapshotListener gridSnapshotListener, ExecutorService executor) {
this.namedSolutionGrid = namedSolutionGrid;
this.episode = episode;
this.executor = executor;
this.gridSnapshotListener = gridSnapshotListener;
}
@Override
public void run() {
//start episodes
List<Future<?>> results = new ArrayList<>();
namedSolutionGrid.stream()
.filter(p -> p.getValue() != null && p.getValue().getRight() != null)
.forEach(entry -> {
results.add(executor.submit(() -> {
L.fine(String.format("Starting %s in position (%d,%d)", episode.getClass().getSimpleName(), entry.getX(), entry.getY()));
Object outcome = episode.apply(entry.getValue().getRight(), gridSnapshotListener.listener(entry.getX(), entry.getY()));
L.fine(String.format("Ended %s in position (%d,%d) with outcome %s", episode.getClass().getSimpleName(), entry.getX(), entry.getY(), outcome));
}));
});
//wait for results
for (Future<?> result : results) {
try {
result.get();
} catch (InterruptedException | ExecutionException ex) {
L.log(Level.SEVERE, String.format("Cannot obtain one result due to %s", ex), ex);
}
}
//flush and write
if (gridSnapshotListener instanceof Flushable) {
try {
L.finer(String.format("Flushing with %s", gridSnapshotListener.getClass().getSimpleName()));
((Flushable) gridSnapshotListener).flush();
L.finer("Flushed");
} catch (IOException e) {
L.log(Level.SEVERE, String.format("Cannot flush video due to %s", e), e);
}
}
}
}
| 3,660
| 36.357143
| 158
|
java
|
null |
VSRCollectiveControlViaSNCA-main/src/main/java/it/units/erallab/hmsrobots/viewers/GridFileWriter.java
|
/*
* Copyright (C) 2021 Eric Medvet <eric.medvet@gmail.com> (as Eric Medvet <eric.medvet@gmail.com>)
*
* This program is free software: you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package it.units.erallab.hmsrobots.viewers;
import it.units.erallab.hmsrobots.core.geometry.BoundingBox;
import it.units.erallab.hmsrobots.core.snapshots.Snapshot;
import it.units.erallab.hmsrobots.core.snapshots.SnapshotListener;
import it.units.erallab.hmsrobots.tasks.Task;
import it.units.erallab.hmsrobots.util.Grid;
import it.units.erallab.hmsrobots.viewers.drawers.Drawer;
import it.units.erallab.hmsrobots.viewers.drawers.Drawers;
import org.apache.commons.lang3.time.StopWatch;
import org.apache.commons.lang3.tuple.Pair;
import java.awt.*;
import java.awt.image.BufferedImage;
import java.io.File;
import java.io.Flushable;
import java.io.IOException;
import java.nio.file.Files;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.function.Function;
import java.util.logging.Logger;
/**
 * {@link GridSnapshotListener} that renders each cell's snapshots into shared video frames and,
 * on {@link #flush()}, encodes the accumulated frames into a single video file.
 *
 * @author Eric Medvet <eric.medvet@gmail.com>
 */
public class GridFileWriter implements Flushable, GridSnapshotListener {

  private final int w; //video width, in pixels
  private final int h; //video height, in pixels
  private final double startTime; //simulation time at which recording starts
  private final double frameRate; //frames per second of the produced video
  private final VideoUtils.EncoderFacility encoder;
  private final File file; //output video file
  private final Grid<Drawer> drawersGrid; //per-cell drawers, each clipped to its cell's sub-rectangle of the unit square
  private final Grid<List<Double>> timesGrid; //per-cell simulation times of the snapshots already drawn
  private final List<BufferedImage> images; //one image per video frame; guarded by synchronized (images)

  private static final Logger L = Logger.getLogger(GridFileWriter.class.getName());

  /**
   * @throws IllegalArgumentException if the two grids do not have the same size
   */
  public GridFileWriter(int w, int h, double startTime, double frameRate, VideoUtils.EncoderFacility encoder, File file, Grid<String> namesGrid, Grid<Drawer> drawersGrid) throws IOException {
    if (namesGrid.getW() != drawersGrid.getW() || namesGrid.getH() != drawersGrid.getH()) {
      throw new IllegalArgumentException("Names grid and drawers grid should have the same size");
    }
    //wrap each drawer so that it paints only inside its own cell of the unit square
    this.drawersGrid = Grid.create(
        namesGrid.getW(),
        namesGrid.getH(),
        (x, y) -> Drawer.clip(
            BoundingBox.of(
                (double) x / (double) namesGrid.getW(),
                (double) y / (double) namesGrid.getH(),
                (double) (x + 1) / (double) namesGrid.getW(),
                (double) (y + 1) / (double) namesGrid.getH()
            ),
            drawersGrid.get(x, y)
        )
    );
    this.w = w;
    this.h = h;
    this.startTime = startTime;
    this.frameRate = frameRate;
    this.encoder = encoder;
    this.file = file;
    images = new ArrayList<>();
    timesGrid = Grid.create(namesGrid.getW(), namesGrid.getH(), (x, y) -> new ArrayList<>());
  }

  /**
   * Returns the listener that paints cell (lX,lY)'s snapshots onto the shared frames;
   * listeners of different cells may be invoked from different threads.
   */
  @Override
  public SnapshotListener listener(final int lX, final int lY) {
    return (double t, Snapshot snapshot) -> {
      List<Double> times = timesGrid.get(lX, lY);
      double lastT = times.isEmpty() ? Double.NEGATIVE_INFINITY : times.get(times.size() - 1);
      //draw only snapshots after startTime, throttled to at most frameRate drawings per second
      if (t >= startTime && t - lastT >= 1d / frameRate) {
        int frameNumber = (int) Math.round((t - startTime) * frameRate);
        int lastFrameNumber = times.isEmpty() ? frameNumber : (int) Math.round((times.get(times.size() - 1) - startTime) * frameRate);
        synchronized (images) { //frames are shared among all cells' listeners
          times.add(t);
          //grow the frame list up to the current frame
          while (frameNumber >= images.size()) {
            images.add(new BufferedImage(w, h, BufferedImage.TYPE_3BYTE_BGR));
          }
          //paint this cell on every frame from the previously drawn one up to the current one,
          //so that a slowly-sampled cell leaves no blank frames behind
          for (int i = lastFrameNumber; i <= frameNumber; i++) {
            BufferedImage image = images.get(i);
            Graphics2D g = image.createGraphics();
            g.setClip(0, 0, image.getWidth(), image.getHeight());
            drawersGrid.get(lX, lY).draw(t, snapshot, g);
            g.dispose();
          }
        }
      }
    };
  }

  /**
   * Encodes the accumulated frames and writes them to the output file.
   *
   * @throws IOException if encoding or writing fails
   */
  @Override
  public void flush() throws IOException {
    L.fine(String.format("Saving video on %s", file));
    StopWatch stopWatch = StopWatch.createStarted();
    VideoUtils.encodeAndSave(images, frameRate, file, encoder);
    long millis = stopWatch.getTime(TimeUnit.MILLISECONDS);
    L.fine(String.format(
        "Video saved: %.1fMB written in %.2fs",
        Files.size(file.toPath()) / 1024f / 1024f,
        millis / 1000f
    ));
  }

  /**
   * Runs the task on every named solution of the grid and saves the resulting video.
   */
  public static <S> void save(Task<S, ?> task, Grid<Pair<String, S>> namedSolutions, int w, int h, double startTime, double frameRate, VideoUtils.EncoderFacility encoder, File file, Function<String, Drawer> drawerSupplier) throws IOException {
    ExecutorService executor = Executors.newFixedThreadPool(Runtime.getRuntime().availableProcessors());
    GridFileWriter gridFileWriter = new GridFileWriter(
        w, h, startTime, frameRate, encoder, file,
        Grid.create(namedSolutions, p -> p == null ? null : p.getLeft()),
        Grid.create(namedSolutions, p -> drawerSupplier.apply(p.getLeft()))
    );
    GridEpisodeRunner<S> runner = new GridEpisodeRunner<>(
        namedSolutions,
        task,
        gridFileWriter,
        executor
    );
    runner.run(); //blocks until all episodes end and the video is flushed
    executor.shutdownNow();
  }

  //convenience overload with the default drawer
  public static <S> void save(Task<S, ?> task, Grid<Pair<String, S>> namedSolutions, int w, int h, double startTime, double frameRate, VideoUtils.EncoderFacility encoder, File file) throws IOException {
    save(task, namedSolutions, w, h, startTime, frameRate, encoder, file, Drawers::basicWithMiniWorld);
  }

  //lays the solutions out on a near-square grid, named by their index
  public static <S> void save(Task<S, ?> task, List<S> ss, int w, int h, double startTime, double frameRate, VideoUtils.EncoderFacility encoder, File file) throws IOException {
    int nRows = (int) Math.ceil(Math.sqrt(ss.size()));
    int nCols = (int) Math.ceil((double) ss.size() / (double) nRows);
    Grid<Pair<String, S>> namedSolutions = Grid.create(nRows, nCols);
    for (int i = 0; i < ss.size(); i++) {
      namedSolutions.set(i % nRows, Math.floorDiv(i, nRows), Pair.of(Integer.toString(i), ss.get(i)));
    }
    save(task, namedSolutions, w, h, startTime, frameRate, encoder, file);
  }

  //convenience overload for a single solution
  public static <S> void save(Task<S, ?> task, S s, int w, int h, double startTime, double frameRate, VideoUtils.EncoderFacility encoder, File file) throws IOException {
    save(task, Grid.create(1, 1, Pair.of("solution", s)), w, h, startTime, frameRate, encoder, file);
  }

  //convenience overload for a single solution with a custom drawer
  public static <S> void save(Task<S, ?> task, S s, int w, int h, double startTime, double frameRate, VideoUtils.EncoderFacility encoder, File file, Function<String, Drawer> drawerSupplier) throws IOException {
    save(task, Grid.create(1, 1, Pair.of("solution", s)), w, h, startTime, frameRate, encoder, file, drawerSupplier);
  }
}
| 7,217
| 42.221557
| 243
|
java
|
null |
VSRCollectiveControlViaSNCA-main/src/main/java/it/units/erallab/hmsrobots/viewers/DrawingUtils.java
|
/*
* Copyright (C) 2021 Eric Medvet <eric.medvet@gmail.com> (as Eric Medvet <eric.medvet@gmail.com>)
*
* This program is free software: you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package it.units.erallab.hmsrobots.viewers;
import it.units.erallab.hmsrobots.core.geometry.Point2;
import it.units.erallab.hmsrobots.core.geometry.Poly;
import java.awt.*;
import java.awt.geom.Path2D;
/**
 * Miscellaneous 2D drawing helpers: color interpolation, polygon-to-path conversion and
 * scale-independent strokes.
 *
 * @author "Eric Medvet" on 2021/08/29 for 2dhmsr
 */
public class DrawingUtils {

  //default palette used by the drawers
  public static class Colors {
    public final static Color TEXT = Color.BLUE;
    public final static Color AXES = Color.BLACK;
    public final static Color DATA = Color.RED;
    public final static Color DATA_POSITIVE = Color.BLUE;
    public final static Color DATA_NEGATIVE = Color.YELLOW;
    public final static Color DATA_ZERO = Color.BLACK;
  }

  public enum Alignment {LEFT, CENTER, RIGHT}

  //utility class: not instantiable
  private DrawingUtils() {
  }

  /**
   * Piecewise-linear interpolation over three colors: c1 at x1, c2 at x2, c3 at x3.
   */
  public static Color linear(final Color c1, final Color c2, final Color c3, float x1, float x2, float x3, float x) {
    //delegate to the two-color interpolation on the half that contains x
    return (x < x2) ? linear(c1, c2, x1, x2, x) : linear(c2, c3, x2, x3, x);
  }

  /**
   * Linear interpolation between c1 (at min) and c2 (at max), clamped outside [min, max];
   * all four channels (RGBA) are interpolated.
   */
  public static Color linear(final Color c1, final Color c2, float min, float max, float x) {
    //normalized, clamped interpolation weight in [0, 1]
    float w = (x - min) / (max - min);
    w = Float.max(0f, Float.min(1f, w));
    return new Color(
        interpolate(c1.getRed() / 255f, c2.getRed() / 255f, w),
        interpolate(c1.getGreen() / 255f, c2.getGreen() / 255f, w),
        interpolate(c1.getBlue() / 255f, c2.getBlue() / 255f, w),
        interpolate(c1.getAlpha() / 255f, c2.getAlpha() / 255f, w)
    );
  }

  //linear interpolation between a and b with weight w in [0, 1]
  private static float interpolate(float a, float b, float w) {
    return a + (b - a) * w;
  }

  /**
   * Converts a polygon to a path, optionally closing it.
   */
  public static Path2D toPath(Poly poly, boolean close) {
    Path2D p = toPath(poly.getVertexes());
    if (close) {
      p.closePath();
    }
    return p;
  }

  /**
   * Converts a sequence of points (at least one) to an open path.
   */
  public static Path2D toPath(Point2... points) {
    Path2D p = new Path2D.Double();
    p.moveTo(points[0].x, points[0].y);
    int i = 1;
    while (i < points.length) {
      p.lineTo(points[i].x, points[i].y);
      i = i + 1;
    }
    return p;
  }

  /**
   * Returns a copy of the color with its alpha channel replaced.
   */
  public static Color alphaed(Color color, float alpha) {
    float red = color.getRed() / 255f;
    float green = color.getGreen() / 255f;
    float blue = color.getBlue() / 255f;
    return new Color(red, green, blue, alpha);
  }

  /**
   * Returns a stroke whose on-screen thickness stays constant under a graphics scale factor.
   */
  public static Stroke getScaleIndependentStroke(float thickness, float scale) {
    return new BasicStroke(thickness / scale);
  }
}
| 3,192
| 30.93
| 117
|
java
|
null |
VSRCollectiveControlViaSNCA-main/src/main/java/it/units/erallab/hmsrobots/viewers/drawers/TargetDrawer.java
|
/*
* Copyright (c) "Eric Medvet" 2021.
*
* This program is free software: you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package it.units.erallab.hmsrobots.viewers.drawers;
import it.units.erallab.hmsrobots.core.snapshots.Snapshot;
import java.awt.*;
import java.awt.geom.Line2D;
import java.awt.geom.Rectangle2D;
import java.util.List;
/**
 * Drawer that marks a list of target x-positions (the snapshot content) as vertical lines
 * spanning the full height of the current clip.
 *
 * @author "Eric Medvet" on 2021/10/03 for 2dhmsr
 */
public class TargetDrawer extends SubtreeDrawer {

  private final static Color COLOR = Color.RED; //default line color

  private final Color color;

  public TargetDrawer(Extractor extractor, Color color) {
    super(extractor);
    this.color = color;
  }

  //uses the default color
  public TargetDrawer(Extractor extractor) {
    this(extractor, COLOR);
  }

  @Override
  protected void innerDraw(double t, Snapshot snapshot, Graphics2D g) {
    @SuppressWarnings("unchecked")
    List<Double> targets = (List<Double>) snapshot.getContent();
    //vertical extent of each marker comes from the current clip
    Rectangle2D clip = (Rectangle2D) g.getClip();
    double yTop = clip.getMinY();
    double yBottom = clip.getMaxY();
    g.setColor(color);
    for (double targetX : targets) {
      g.draw(new Line2D.Double(targetX, yTop, targetX, yBottom));
    }
  }
}
| 1,651
| 27.482759
| 74
|
java
|
null |
VSRCollectiveControlViaSNCA-main/src/main/java/it/units/erallab/hmsrobots/viewers/drawers/MLPDrawer.java
|
/*
* Copyright (c) "Eric Medvet" 2021.
*
* This program is free software: you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package it.units.erallab.hmsrobots.viewers.drawers;
import it.units.erallab.hmsrobots.core.geometry.BoundingBox;
import it.units.erallab.hmsrobots.core.snapshots.MLPState;
import it.units.erallab.hmsrobots.core.snapshots.Snapshot;
import it.units.erallab.hmsrobots.viewers.DrawingUtils;
import org.apache.commons.lang3.ArrayUtils;
import org.apache.commons.math3.stat.descriptive.moment.Variance;
import java.awt.*;
import java.awt.geom.Line2D;
import java.awt.geom.Rectangle2D;
import java.util.List;
import java.util.*;
import java.util.function.Function;
import java.util.function.IntFunction;
import java.util.stream.Collectors;
import java.util.stream.DoubleStream;
import java.util.stream.IntStream;
/**
* @author "Eric Medvet" on 2021/09/10 for 2dhmsr
*/
public class MLPDrawer extends MemoryDrawer<MLPState> {
public enum Part {ACTIVATION_VALUES, WEIGHTS, USAGE, VARIANCE, VARIANCE_AND_WEIGHTS, LEGEND, T_AXIS, STRUCTURE_AXIS, HISTOGRAM}
private static final Set<Part> PLOTS = Set.of(Part.ACTIVATION_VALUES, Part.WEIGHTS, Part.USAGE, Part.VARIANCE, Part.VARIANCE_AND_WEIGHTS);
private final static double VARIANCE_WINDOW = 2d;
private final static double LEGEND_COLORS = 15;
private final static int N_OF_BINS = 15;
private final static double HISTOGRAM_PLOT_RATIO = 0.1;
private final static double HISTOGRAM_BLANK_RATIO = 0.9;
private final Set<Part> parts;
private final List<Part> plotParts;
private final BoundingBox[] boundingBoxes;
private final Color minColor;
private final Color zeroColor;
private final Color maxColor;
private final Color axesColor;
private final Color textColor;
public MLPDrawer(Extractor extractor, double windowT, Set<Part> parts, Color minColor, Color zeroColor, Color maxColor, Color axesColor, Color textColor) {
super(extractor, s -> (MLPState) s.getContent(), windowT);
this.parts = parts;
this.plotParts = parts.stream().filter(PLOTS::contains).sorted().collect(Collectors.toList());
this.boundingBoxes = new BoundingBox[plotParts.size()];
this.minColor = minColor;
this.zeroColor = zeroColor;
this.maxColor = maxColor;
this.axesColor = axesColor;
this.textColor = textColor;
}
public MLPDrawer(Extractor extractor, double windowT, Set<Part> parts) {
this(extractor, windowT, parts, DrawingUtils.Colors.DATA_NEGATIVE, DrawingUtils.Colors.DATA_ZERO, DrawingUtils.Colors.DATA_POSITIVE, DrawingUtils.Colors.AXES, DrawingUtils.Colors.TEXT);
}
public MLPDrawer(Extractor extractor, double windowT) {
this(extractor, windowT, EnumSet.of(Part.ACTIVATION_VALUES, Part.LEGEND, Part.T_AXIS, Part.STRUCTURE_AXIS));
}
/**
 * Renders the selected {@code parts} of the network state over the sliding time window.
 * <p>
 * The clip is split into: an optional histogram strip on the left ({@code hBB}), an optional
 * legend column, one horizontal band per plotted part ({@code boundingBoxes}), an optional
 * structure axis on the right, and an optional time axis at the bottom.
 */
@Override
protected void innerDraw(double t, Snapshot snapshot, SortedMap<Double, MLPState> memory, Graphics2D g) {
  MLPState current = memory.get(memory.lastKey());
  //prepare clips
  double textH = g.getFontMetrics().getMaxAscent();
  double textW = g.getFontMetrics().charWidth('m');
  // when the histogram is on, reserve a fraction of the clip width on the left for it
  double oBBOffset = parts.contains(Part.HISTOGRAM) ? HISTOGRAM_PLOT_RATIO * (g.getClip().getBounds2D().getMaxX() - g.getClip().getBounds2D().getX()) : 0;
  // oBB: overall area for legend + plots + axes (clip minus histogram strip)
  BoundingBox oBB = BoundingBox.of(
      g.getClip().getBounds2D().getX() + oBBOffset,
      g.getClip().getBounds2D().getY(),
      g.getClip().getBounds2D().getMaxX(),
      g.getClip().getBounds2D().getMaxY()
  );
  // hBB: histogram strip (empty-width when the histogram is off)
  BoundingBox hBB = BoundingBox.of(
      g.getClip().getBounds2D().getX(),
      oBB.min.y,
      oBB.min.x,
      oBB.max.y
  );
  // pBB: the plot area proper, shrunk by legend / structure-axis / time-axis margins
  BoundingBox pBB = BoundingBox.of(
      parts.contains(Part.LEGEND) ? (oBB.min.x + 6 * textW) : oBB.min.x,
      oBB.min.y,
      parts.contains(Part.STRUCTURE_AXIS) ? (oBB.max.x - 5 * textW) : oBB.max.x,
      parts.contains(Part.T_AXIS) ? (oBB.max.y - 3 * textH) : oBB.max.y
  );
  if (plotParts.size() == 1) {
    boundingBoxes[0] = pBB;
  }
  // stack one band per plotted part, separated by half a text height
  double size = (pBB.max.y - pBB.min.y - textH * (boundingBoxes.length - 1) / 2) / boundingBoxes.length;
  IntStream.range(0, boundingBoxes.length).forEach(i -> {
    double minY = i == 0 ? pBB.min.y : boundingBoxes[i - 1].max.y + textH / 2d;
    double maxY = (i == boundingBoxes.length - 1) ? pBB.max.y : minY + size;
    boundingBoxes[i] = BoundingBox.of(
        pBB.min.x,
        minY,
        pBB.max.x,
        maxY
    );
  });
  // time axis: baseline plus one labeled tick per unit of simulation time
  if (parts.contains(Part.T_AXIS)) {
    g.setColor(axesColor);
    g.draw(new Line2D.Double(pBB.min.x, pBB.max.y, pBB.max.x, pBB.max.y));
    double maxT = memory.lastKey();
    for (double tickT = Math.ceil(maxT - windowT); tickT < maxT; tickT++) {
      g.setColor(axesColor);
      double x = (tickT - maxT + windowT) / windowT * (pBB.max.x - pBB.min.x) + pBB.min.x;
      g.draw(new Line2D.Double(x, pBB.max.y, x, pBB.max.y + textH));
      g.setColor(textColor);
      String s = String.format("%.0f", tickT);
      g.drawString(s, (float) (x - g.getFontMetrics().stringWidth(s) / 2f), (float) (pBB.max.y + 2 * textH));
    }
  }
  // activation values band: domain bounds are used if finite, otherwise observed min/max
  if (parts.contains(Part.ACTIVATION_VALUES)) {
    double min = current.getActivationDomain().getMin() > Double.NEGATIVE_INFINITY ? current.getActivationDomain().getMin() : memory.values().stream()
        .mapToDouble(s -> min(s.getActivationValues()))
        .min().orElse(0d);
    double max = current.getActivationDomain().getMax() < Double.POSITIVE_INFINITY ? current.getActivationDomain().getMax() : memory.values().stream()
        .mapToDouble(s -> max(s.getActivationValues()))
        .max().orElse(0d);
    draw(t, memory, MLPState::getActivationValues, min, max, boundingBoxes[plotParts.indexOf(Part.ACTIVATION_VALUES)], g);
    if (parts.contains(Part.LEGEND)) {
      drawLegend(
          min, max,
          BoundingBox.of(
              oBB.min.x,
              boundingBoxes[plotParts.indexOf(Part.ACTIVATION_VALUES)].min.y + textH,
              boundingBoxes[plotParts.indexOf(Part.ACTIVATION_VALUES)].min.x - textW,
              boundingBoxes[plotParts.indexOf(Part.ACTIVATION_VALUES)].max.y - textH
          ),
          textW, g
      );
    }
    if (parts.contains(Part.STRUCTURE_AXIS)) {
      // one bracket per layer, labeled l0, l1, ...
      drawStructure(
          Arrays.stream(current.getActivationValues()).mapToInt(v -> v.length).toArray(),
          i -> "l" + i,
          BoundingBox.of(boundingBoxes[plotParts.indexOf(Part.ACTIVATION_VALUES)].max.x, boundingBoxes[plotParts.indexOf(Part.ACTIVATION_VALUES)].min.y, oBB.max.x, boundingBoxes[plotParts.indexOf(Part.ACTIVATION_VALUES)].max.y),
          textW, g
      );
    }
    if (parts.contains(Part.HISTOGRAM)) {
      drawHistogram(memory, MLPState::getActivationValues, min, max,
          BoundingBox.of(
              hBB.min.x,
              boundingBoxes[plotParts.indexOf(Part.ACTIVATION_VALUES)].min.y + textH,
              hBB.max.x,
              boundingBoxes[plotParts.indexOf(Part.ACTIVATION_VALUES)].max.y - textH
          ),
          g);
    }
  }
  // weights band: 3d weight tensor flattened to one row per layer
  if (parts.contains(Part.WEIGHTS)) {
    double min = memory.values().stream()
        .mapToDouble(s -> min(s.getWeights()))
        .min().orElse(0d);
    double max = memory.values().stream()
        .mapToDouble(s -> max(s.getWeights()))
        .max().orElse(0d);
    draw(t, memory, s -> flat(s.getWeights()), min, max, boundingBoxes[plotParts.indexOf(Part.WEIGHTS)], g);
    if (parts.contains(Part.LEGEND)) {
      drawLegend(
          min, max,
          BoundingBox.of(
              oBB.min.x,
              boundingBoxes[plotParts.indexOf(Part.WEIGHTS)].min.y + textH,
              boundingBoxes[plotParts.indexOf(Part.WEIGHTS)].min.x - textW,
              boundingBoxes[plotParts.indexOf(Part.WEIGHTS)].max.y - textH
          ),
          textW, g
      );
    }
    if (parts.contains(Part.STRUCTURE_AXIS)) {
      drawStructure(
          Arrays.stream(flat(current.getWeights())).mapToInt(v -> v.length).toArray(),
          i -> "w" + i + (i + 1),
          BoundingBox.of(boundingBoxes[plotParts.indexOf(Part.WEIGHTS)].max.x, boundingBoxes[plotParts.indexOf(Part.WEIGHTS)].min.y, oBB.max.x, boundingBoxes[plotParts.indexOf(Part.WEIGHTS)].max.y),
          textW, g
      );
    }
    if (parts.contains(Part.HISTOGRAM)) {
      drawHistogram(memory, s -> flat(s.getWeights()), min, max,
          BoundingBox.of(
              hBB.min.x,
              boundingBoxes[plotParts.indexOf(Part.WEIGHTS)].min.y + textH,
              hBB.max.x,
              boundingBoxes[plotParts.indexOf(Part.WEIGHTS)].max.y - textH
          ),
          g);
    }
  }
  // usage band: activation values projected onto the weight layout
  if (parts.contains(Part.USAGE)) {
    double min = memory.values().stream()
        .mapToDouble(s -> min(s.getActivationValues()))
        .min().orElse(0d);
    double max = memory.values().stream()
        .mapToDouble(s -> max(s.getActivationValues()))
        .max().orElse(0d);
    draw(t, memory, s -> flat(mapActivationValuesToWeights(s.getActivationValues(), s.getWeights())), min, max, boundingBoxes[plotParts.indexOf(Part.USAGE)], g);
    if (parts.contains(Part.LEGEND)) {
      drawLegend(
          min, max,
          BoundingBox.of(
              oBB.min.x,
              boundingBoxes[plotParts.indexOf(Part.USAGE)].min.y + textH,
              boundingBoxes[plotParts.indexOf(Part.USAGE)].min.x - textW,
              boundingBoxes[plotParts.indexOf(Part.USAGE)].max.y - textH
          ),
          textW, g
      );
    }
    if (parts.contains(Part.STRUCTURE_AXIS)) {
      drawStructure(
          Arrays.stream(flat(current.getWeights())).mapToInt(v -> v.length).toArray(),
          i -> "n-w" + i + (i + 1),
          BoundingBox.of(boundingBoxes[plotParts.indexOf(Part.USAGE)].max.x, boundingBoxes[plotParts.indexOf(Part.USAGE)].min.y, oBB.max.x, boundingBoxes[plotParts.indexOf(Part.USAGE)].max.y),
          textW, g
      );
    }
  }
  // variance band: same projection as USAGE, but smoothed over VARIANCE_WINDOW via draw(..., vT, ...)
  if (parts.contains(Part.VARIANCE)) {
    double min = memory.values().stream()
        .mapToDouble(s -> min(s.getActivationValues()))
        .min().orElse(0d);
    double max = memory.values().stream()
        .mapToDouble(s -> max(s.getActivationValues()))
        .max().orElse(0d);
    draw(t, VARIANCE_WINDOW, memory, s -> flat(mapActivationValuesToWeights(s.getActivationValues(), s.getWeights())), min, max, boundingBoxes[plotParts.indexOf(Part.VARIANCE)], g);
    if (parts.contains(Part.LEGEND)) {
      drawLegend(
          min, max,
          BoundingBox.of(
              oBB.min.x,
              boundingBoxes[plotParts.indexOf(Part.VARIANCE)].min.y + textH,
              boundingBoxes[plotParts.indexOf(Part.VARIANCE)].min.x - textW,
              boundingBoxes[plotParts.indexOf(Part.VARIANCE)].max.y - textH
          ),
          textW, g
      );
    }
    if (parts.contains(Part.STRUCTURE_AXIS)) {
      drawStructure(
          Arrays.stream(flat(current.getWeights())).mapToInt(v -> v.length).toArray(),
          i -> "n-w" + i + (i + 1),
          BoundingBox.of(boundingBoxes[plotParts.indexOf(Part.VARIANCE)].max.x, boundingBoxes[plotParts.indexOf(Part.VARIANCE)].min.y, oBB.max.x, boundingBoxes[plotParts.indexOf(Part.VARIANCE)].max.y),
          textW, g
      );
    }
  }
  // combined band: |weights| on the red channel, activation variance on the green channel
  if (parts.contains(Part.VARIANCE_AND_WEIGHTS)) {
    double minWeights = 0d;
    double maxWeights = memory.values().stream()
        .mapToDouble(s -> max(abs(flat(s.getWeights()))))
        .max().orElse(0d);
    // fixed variance range; NOTE(review): 2 looks like an empirical cap — confirm
    double minVariance = 0;
    double maxVariance = 2;
    drawDoubleChannel(t, 0d, VARIANCE_WINDOW, memory,
        s -> abs(flat(s.getWeights())), minWeights, maxWeights,
        s -> flat(mapActivationValuesToWeights(s.getActivationValues(), s.getWeights())), minVariance, maxVariance,
        boundingBoxes[plotParts.indexOf(Part.VARIANCE_AND_WEIGHTS)], g);
    if (parts.contains(Part.LEGEND)) {
      drawDoubleChannelLegend(minWeights, maxWeights, Color.RED, minVariance, maxVariance, Color.GREEN,
          BoundingBox.of(
              oBB.min.x,
              boundingBoxes[plotParts.indexOf(Part.VARIANCE_AND_WEIGHTS)].min.y,
              boundingBoxes[plotParts.indexOf(Part.VARIANCE_AND_WEIGHTS)].min.x - textW,
              boundingBoxes[plotParts.indexOf(Part.VARIANCE_AND_WEIGHTS)].max.y
          ),
          textW, textH, g);
    }
    if (parts.contains(Part.STRUCTURE_AXIS)) {
      drawStructure(
          Arrays.stream(flat(current.getWeights())).mapToInt(v -> v.length).toArray(),
          i -> "w-v" + i + (i + 1),
          BoundingBox.of(boundingBoxes[plotParts.indexOf(Part.VARIANCE_AND_WEIGHTS)].max.x, boundingBoxes[plotParts.indexOf(Part.VARIANCE_AND_WEIGHTS)].min.y, oBB.max.x, boundingBoxes[plotParts.indexOf(Part.VARIANCE_AND_WEIGHTS)].max.y),
          textW, g
      );
    }
  }
}
/**
 * Draws two stacked single-channel legends (black to {@code channel1} on top, black to
 * {@code channel2} below) for a plot rendered by {@code drawDoubleChannel}.
 */
private void drawDoubleChannelLegend(double min1, double max1, Color channel1,
                                     double min2, double max2, Color channel2,
                                     BoundingBox bb, double textW, double textH, Graphics2D g) {
  // both ramps start from black, matching the RGB composition used in drawDoubleChannel
  Color minColor = Color.BLACK;
  double legendMiddle = (bb.max.y + bb.min.y) / 2;
  drawLegend(
      min1, max1,
      BoundingBox.of(
          bb.min.x,
          bb.min.y + textH / 2,
          bb.max.x,
          legendMiddle - textH / 2
      ),
      textW, g, minColor, minColor, channel1
  );
  drawLegend(
      min2, max2,
      BoundingBox.of(
          bb.min.x,
          legendMiddle + textH / 2,
          bb.max.x,
          bb.max.y - textH / 2
      ),
      textW, g, minColor, minColor, channel2
  );
}
/**
 * Draws a vertical color-scale legend of {@code LEGEND_COLORS} cells in {@code bb}, with
 * numeric labels at the bottom value, at zero (if crossed), and at the top value.
 * <p>
 * Note: the color params shadow the homonymous fields on purpose, so the double-channel
 * legend can reuse this with its own ramps.
 */
private void drawLegend(double min, double max, BoundingBox bb, double textW, Graphics2D g, Color minColor, Color zeroColor, Color maxColor) {
  double deltaY = (bb.max.y - bb.min.y) / LEGEND_COLORS;
  double deltaV = (max - min) / LEGEND_COLORS;
  double colorX = bb.max.x - textW;
  // hoisted out of the loop: font metrics do not change between iterations
  double numberHeight = g.getFontMetrics().getHeight() / 2d;
  for (int i = 0; i < LEGEND_COLORS; i++) {
    double vMin = min + deltaV * i;
    double vMax = vMin + deltaV;
    double yMin = bb.min.y + deltaY * i;
    g.setColor(DrawingUtils.linear(minColor, zeroColor, maxColor, (float) min, 0f, (float) max, (float) vMin));
    g.fill(new Rectangle2D.Double(colorX, yMin, textW, deltaY));
    if (i == 0) {
      // label the bottom of the scale
      g.setColor(textColor);
      String s = String.format("%.1f", vMin);
      g.drawString(s,
          (float) (colorX - textW - g.getFontMetrics().stringWidth(s)),
          (float) (yMin + numberHeight / 2d));
    } else if (vMin <= 0 && vMax >= 0) {
      // label the cell that straddles zero
      g.setColor(textColor);
      String s = "0";
      g.drawString(s,
          (float) (colorX - textW - g.getFontMetrics().stringWidth(s)),
          (float) (yMin + deltaY / 2d + numberHeight / 2d));
    } else if (i == LEGEND_COLORS - 1) {
      // label the top of the scale (was `i >= LEGEND_COLORS - 1`, equivalent under the loop bound)
      g.setColor(textColor);
      String s = String.format("%.1f", vMax);
      g.drawString(s,
          (float) (colorX - textW - g.getFontMetrics().stringWidth(s)),
          (float) (yMin + deltaY + numberHeight / 2d));
    }
  }
}
/** Draws the legend with this drawer's configured color ramp. */
private void drawLegend(double min, double max, BoundingBox bb, double textW, Graphics2D g) {
  drawLegend(min, max, bb, textW, g, minColor, zeroColor, maxColor);
}
/**
 * Draws, on the right of a plot band, one bracket per group of rows (e.g. per layer), each
 * labeled by {@code namer}; {@code sizes[i]} is the number of rows in group {@code i}.
 */
private void drawStructure(int[] sizes, IntFunction<String> namer, BoundingBox bb, double textW, Graphics2D g) {
  double n = Arrays.stream(sizes).sum();
  double bbH = bb.max.y - bb.min.y;
  g.setColor(axesColor);
  double c = 0; // running count of rows already covered by previous groups
  for (int i = 0; i < sizes.length; i++) {
    double minY = bb.min.y + c / n * bbH;
    c = c + sizes[i];
    double maxY = bb.min.y + c / n * bbH;
    // bracket: two horizontal ticks joined by a vertical segment
    g.setColor(axesColor);
    g.draw(new Line2D.Double(bb.min.x, minY, bb.min.x + textW, minY));
    g.draw(new Line2D.Double(bb.min.x, maxY, bb.min.x + textW, maxY));
    g.draw(new Line2D.Double(bb.min.x + textW, minY, bb.min.x + textW, maxY));
    g.setColor(textColor);
    String s = namer.apply(i);
    g.drawString(s, (float) (bb.min.x + 2 * textW), (float) ((minY + maxY) / 2d + g.getFontMetrics().getHeight() / 2));
  }
}
/** Draws raw values over time (no variance smoothing, i.e. {@code vT = 0}). */
private void draw(double fT, SortedMap<Double, MLPState> states, Function<MLPState, double[][]> f, double min, double max, BoundingBox bb, Graphics2D g) {
  draw(fT, 0d, states, f, min, max, bb, g);
}
/**
 * Draws the values extracted by {@code f} as a time-vs-row heat map in {@code bb}.
 *
 * @param fT  current (final) time; the window covers {@code [fT - windowT, fT]}
 * @param vT  if positive, each cell shows the variance of the value over the last {@code vT}
 *            time units instead of the raw value
 */
private void draw(double fT, double vT, SortedMap<Double, MLPState> states, Function<MLPState, double[][]> f, double min, double max, BoundingBox bb, Graphics2D g) {
  Variance variance = new Variance();
  double bbW = bb.max.x - bb.min.x;
  double bbH = bb.max.y - bb.min.y;
  // average sampling period of the memory; fallback for a single sample
  double deltaT = states.size() == 1 ? (1d / 10d) : ((states.lastKey() - states.firstKey()) / (states.size() - 1));
  double[][] last = f.apply(states.get(states.lastKey()));
  double cellW = bbW * deltaT / windowT;
  double n = Arrays.stream(last).mapToInt(v -> v.length).sum(); // total number of rows
  double cellH = bbH / n;
  double iT = fT - windowT; // left edge of the window, in time
  states.forEach((t, state) -> {
    double c = 0;
    double x = bb.min.x + (t - iT) / windowT * bbW;
    // skip columns that would be (partially) painted left of the plot area
    if (x - 2 * cellW < bb.min.x) {
      return;
    }
    double[][] valuesToPlot = f.apply(state);
    if (vT > 0) {
      // replace each value with its variance over the trailing vT-long sub-window
      List<double[][]> valuesList = states.keySet().stream().filter(t1 -> t1 <= t && t1 >= t - vT).map(t1 -> f.apply(states.get(t1))).collect(Collectors.toList());
      IntStream.range(0, valuesToPlot.length).forEach(i -> IntStream.range(0, valuesToPlot[i].length).forEach(j -> {
        variance.clear();
        valuesToPlot[i][j] = variance.evaluate(valuesList.stream().mapToDouble(array -> array[i][j]).toArray());
      }));
    }
    for (double[] doubles : valuesToPlot) {
      for (double aDouble : doubles) {
        double y = bb.min.y + c / n * bbH;
        c = c + 1;
        g.setColor(DrawingUtils.linear(minColor, zeroColor, maxColor, (float) min, 0, (float) max, (float) aDouble));
        // cells are drawn double-width so consecutive columns overlap without gaps
        g.fill(new Rectangle2D.Double(x - 2 * cellW, y, 2 * cellW, cellH));
      }
    }
  });
}
/**
 * Draws two value series as one heat map: {@code f1} drives the red channel and {@code f2}
 * the green channel of each cell. {@code vT1}/{@code vT2}, when positive, replace the raw
 * values with their variance over a trailing window of that length.
 * <p>
 * Assumes {@code f1} and {@code f2} yield arrays of the same shape — TODO confirm at call sites.
 */
private void drawDoubleChannel(double fT, double vT1, double vT2, SortedMap<Double, MLPState> states,
                               Function<MLPState, double[][]> f1, double min1, double max1,
                               Function<MLPState, double[][]> f2, double min2, double max2,
                               BoundingBox bb, Graphics2D g) {
  Variance variance = new Variance();
  double bbW = bb.max.x - bb.min.x;
  double bbH = bb.max.y - bb.min.y;
  double deltaT = states.size() == 1 ? (1d / 10d) : ((states.lastKey() - states.firstKey()) / (states.size() - 1));
  double[][] last = f1.apply(states.get(states.lastKey()));
  double cellW = bbW * deltaT / windowT;
  double n = Arrays.stream(last).mapToInt(v -> v.length).sum();
  double cellH = bbH / n;
  double iT = fT - windowT;
  states.forEach((t, state) -> {
    double c = 0;
    double x = bb.min.x + (t - iT) / windowT * bbW;
    if (x - 2 * cellW < bb.min.x) {
      return;
    }
    double[][] valuesToPlot1 = f1.apply(state);
    if (vT1 > 0) {
      List<double[][]> valuesList = states.keySet().stream().filter(t1 -> t1 <= t && t1 >= t - vT1).map(t1 -> f1.apply(states.get(t1))).collect(Collectors.toList());
      IntStream.range(0, valuesToPlot1.length).forEach(i -> IntStream.range(0, valuesToPlot1[i].length).forEach(j -> {
        variance.clear();
        valuesToPlot1[i][j] = variance.evaluate(valuesList.stream().mapToDouble(array -> array[i][j]).toArray());
      }));
    }
    double[][] valuesToPlot2 = f2.apply(state);
    if (vT2 > 0) {
      List<double[][]> valuesList = states.keySet().stream().filter(t1 -> t1 <= t && t1 >= t - vT2).map(t1 -> f2.apply(states.get(t1))).collect(Collectors.toList());
      // fix: iterate over valuesToPlot2's own shape (was valuesToPlot1.length, a copy-paste
      // slip that breaks when the two extractors yield differently shaped arrays)
      IntStream.range(0, valuesToPlot2.length).forEach(i -> IntStream.range(0, valuesToPlot2[i].length).forEach(j -> {
        variance.clear();
        valuesToPlot2[i][j] = variance.evaluate(valuesList.stream().mapToDouble(array -> array[i][j]).toArray());
      }));
    }
    for (int i = 0; i < valuesToPlot1.length; i++) {
      for (int j = 0; j < valuesToPlot1[i].length; j++) {
        double y = bb.min.y + c / n * bbH;
        c = c + 1;
        // normalize each channel into [0,1] and compose the cell color
        float red = (float) ((valuesToPlot1[i][j] - min1) / (max1 - min1));
        float green = (float) ((valuesToPlot2[i][j] - min2) / (max2 - min2));
        g.setColor(new Color(red, green, 0f));
        g.fill(new Rectangle2D.Double(x - 2 * cellW, y, 2 * cellW, cellH));
      }
    }
  });
}
/**
 * Draws a horizontal histogram (bars growing leftwards from {@code bb.max.x}) of the current
 * values extracted by {@code f}, binned into {@code N_OF_BINS} bins over {@code [min, max]};
 * out-of-range values are clamped into the edge bins.
 */
private void drawHistogram(SortedMap<Double, MLPState> states, Function<MLPState, double[][]> f, double min, double max, BoundingBox bb, Graphics2D g) {
  double[][] values = f.apply(states.get(states.lastKey()));
  double binSize = (max - min) / N_OF_BINS;
  int[] nOfValuesPerBin = new int[N_OF_BINS];
  Arrays.stream(values).flatMapToDouble(Arrays::stream)
      .map(d -> Math.max(min, Math.min(d, max)) - min) // clamp, then shift to [0, max-min]
      .forEach(d ->
          nOfValuesPerBin[(int) Math.min(Math.floor(d / binSize), N_OF_BINS - 1)] += 1
      );
  int maxAmountPerBin = Arrays.stream(nOfValuesPerBin).max().orElse(10);
  // bar lengths scaled so the fullest bin occupies HISTOGRAM_BLANK_RATIO of the strip width
  double[] barSizes = Arrays.stream(nOfValuesPerBin).mapToDouble(i -> i * bb.width() * HISTOGRAM_BLANK_RATIO / maxAmountPerBin).toArray();
  double barHeight = (bb.max.y - bb.min.y) / N_OF_BINS;
  for (int i = 0; i < barSizes.length; i++) {
    double color = min + (0.5 + i) * binSize; // bin midpoint drives the bar color
    Rectangle2D bar = new Rectangle2D.Double(bb.max.x - barSizes[i], bb.min.y + barHeight * i, barSizes[i], barHeight);
    g.setColor(DrawingUtils.linear(minColor, zeroColor, maxColor, (float) min, 0, (float) max, (float) color));
    g.fill(bar);
  }
}
/** Max over row-wise maxima; an empty row contributes 0, as does an empty matrix. */
private static double max(double[][] v) {
  return Arrays.stream(v)
      .mapToDouble(row -> Arrays.stream(row).max().orElse(0d))
      .reduce(Math::max)
      .orElse(0d);
}
/** Min over row-wise minima; an empty row contributes 0, as does an empty matrix. */
private static double min(double[][] v) {
  return Arrays.stream(v)
      .mapToDouble(row -> Arrays.stream(row).min().orElse(0d))
      .reduce(Math::min)
      .orElse(0d);
}
/** Max over the per-matrix maxima of a 3d array (empty outer array yields 0). */
private static double max(double[][][] v) {
  return Arrays.stream(v)
      .mapToDouble(MLPDrawer::max)
      .reduce(Math::max)
      .orElse(0d);
}
/** Min over the per-matrix minima of a 3d array (empty outer array yields 0). */
private static double min(double[][][] v) {
  return Arrays.stream(v)
      .mapToDouble(MLPDrawer::min)
      .reduce(Math::min)
      .orElse(0d);
}
/** Flattens each 2d slice of {@code v3} into a single row by concatenating its rows in order. */
private static double[][] flat(double[][][] v3) {
  return Arrays.stream(v3)
      .map(v2 -> {
        int length = 0;
        for (double[] row : v2) {
          length = length + row.length;
        }
        double[] joined = new double[length]; // empty slice -> empty row, as before
        int offset = 0;
        for (double[] row : v2) {
          System.arraycopy(row, 0, joined, offset, row.length);
          offset = offset + row.length;
        }
        return joined;
      })
      .toArray(double[][]::new);
}
/** Returns a new matrix with the element-wise absolute values of {@code v}. */
private static double[][] abs(double[][] v) {
  return Arrays.stream(v)
      .map(row -> Arrays.stream(row).map(Math::abs).toArray())
      .toArray(double[][]::new);
}
/**
 * Projects activation values onto the shape of the weight tensor, choosing the mapping by
 * comparing row widths: wider weight rows than activation rows indicate the MLP layout,
 * otherwise the SNN layout — presumably because MLP weight rows include per-input entries
 * (TODO confirm against the MLPState producers).
 */
private static double[][][] mapActivationValuesToWeights(double[][] activationValues, double[][][] weights) {
  return weights[0][0].length > activationValues[0].length
      ? mapActivationValuesToWeightsForMLP(activationValues, weights)
      : mapActivationValuesToWeightsForSNN(activationValues, weights);
}
/**
 * SNN layout: entry {@code [l][s][d]} of the result takes the activation of destination
 * neuron {@code d} in layer {@code l + 1}, for every source neuron {@code s}.
 */
private static double[][][] mapActivationValuesToWeightsForSNN(double[][] activationValues, double[][][] weights) {
  double[][][] mapped = new double[weights.length][][];
  for (int layer = 0; layer < weights.length; layer++) {
    final int l = layer; // effectively-final copy for the lambdas below
    mapped[layer] = Arrays.stream(weights[layer])
        .map(outgoing -> IntStream.range(0, outgoing.length)
            .mapToDouble(dest -> activationValues[l + 1][dest])
            .toArray())
        .toArray(double[][]::new);
  }
  return mapped;
}
/**
 * MLP layout: each row {@code [l][d]} of the result is filled with the single activation of
 * destination neuron {@code d} in layer {@code l + 1}, replicated over its incoming weights.
 */
private static double[][][] mapActivationValuesToWeightsForMLP(double[][] activationValues, double[][][] weights) {
  double[][][] mapped = new double[weights.length][][];
  for (int layer = 0; layer < weights.length; layer++) {
    mapped[layer] = new double[weights[layer].length][];
    for (int dest = 0; dest < weights[layer].length; dest++) {
      double[] row = new double[weights[layer][dest].length];
      Arrays.fill(row, activationValues[layer + 1][dest]);
      mapped[layer][dest] = row;
    }
  }
  return mapped;
}
}
| 25,625
| 43.957895
| 239
|
java
|
null |
VSRCollectiveControlViaSNCA-main/src/main/java/it/units/erallab/hmsrobots/viewers/drawers/BoundingBoxDrawer.java
|
/*
* Copyright (C) 2021 Eric Medvet <eric.medvet@gmail.com> (as Eric Medvet <eric.medvet@gmail.com>)
*
* This program is free software: you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package it.units.erallab.hmsrobots.viewers.drawers;
import it.units.erallab.hmsrobots.core.geometry.BoundingBox;
import it.units.erallab.hmsrobots.core.geometry.Shape;
import it.units.erallab.hmsrobots.core.snapshots.Snapshot;
import it.units.erallab.hmsrobots.viewers.DrawingUtils;
import java.awt.*;
import java.awt.geom.Rectangle2D;
/**
 * Draws the bounding box of the extracted {@code Shape} as a filled, outlined rectangle.
 */
public class BoundingBoxDrawer extends SubtreeDrawer {

  private final static Color COLOR = Color.PINK;

  private final Color fillColor;
  private final Color strokeColor;

  /**
   * @param fillColor   color used to fill the rectangle
   * @param strokeColor color used for its outline
   * @param extractor   selects the subtree whose content is the {@code Shape} to frame
   */
  public BoundingBoxDrawer(Color fillColor, Color strokeColor, Extractor extractor) {
    super(extractor);
    this.fillColor = fillColor;
    this.strokeColor = strokeColor;
  }

  public BoundingBoxDrawer(Extractor extractor) {
    // translucent fill, solid stroke — consistent with PolyDrawer and
    // SensorReadingsSectorDrawer; the arguments were previously swapped, giving an
    // opaque fill with a translucent outline
    this(DrawingUtils.alphaed(COLOR, 0.5f), COLOR, extractor);
  }

  @Override
  protected void innerDraw(double t, Snapshot snapshot, Graphics2D g) {
    BoundingBox box = ((Shape) snapshot.getContent()).boundingBox();
    Rectangle2D rect = new Rectangle2D.Double(
        box.min.x,
        box.min.y,
        box.max.x - box.min.x,
        box.max.y - box.min.y
    );
    g.setColor(fillColor);
    g.fill(rect);
    g.setColor(strokeColor);
    g.draw(rect);
  }
}
| 1,973
| 31.360656
| 98
|
java
|
null |
VSRCollectiveControlViaSNCA-main/src/main/java/it/units/erallab/hmsrobots/viewers/drawers/SensorReadingsSectorDrawer.java
|
/*
* Copyright (C) 2021 Eric Medvet <eric.medvet@gmail.com> (as Eric Medvet <eric.medvet@gmail.com>)
*
* This program is free software: you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package it.units.erallab.hmsrobots.viewers.drawers;
import it.units.erallab.hmsrobots.core.geometry.Point2;
import it.units.erallab.hmsrobots.core.objects.SensingVoxel;
import it.units.erallab.hmsrobots.core.snapshots.ScopedReadings;
import it.units.erallab.hmsrobots.core.snapshots.Snapshot;
import it.units.erallab.hmsrobots.core.snapshots.VoxelPoly;
import it.units.erallab.hmsrobots.util.Domain;
import it.units.erallab.hmsrobots.viewers.DrawingUtils;
import java.awt.*;
import java.awt.geom.Path2D;
import java.util.List;
import java.util.stream.Collectors;
/**
 * Draws, inside each sensing voxel, a half-disc divided into one circular sector per sensor;
 * within a sector, each reading is a sub-sector whose radius encodes the normalized value.
 */
public class SensorReadingsSectorDrawer extends SubtreeDrawer {
  private final static Color COLOR = Color.BLACK;
  // total angular extent of the fan (half a turn)
  private final static float SPAN_ANGLE = (float) Math.PI;
  // angular step used to approximate arcs with line segments
  private final static float ANGLE_RESOLUTION = 0.01f * (float) Math.PI;
  // whether to stroke the outline of each sensor's sector
  private final static boolean SENSOR_FRAME = true;
  // whether the fan rotates with the voxel or stays axis-aligned
  private final static boolean ROTATED = true;
  private final Color fillColor;
  private final Color strokeColor;

  public SensorReadingsSectorDrawer(Color color) {
    super(Extractor.matches(VoxelPoly.class, SensingVoxel.class, null));
    this.fillColor = DrawingUtils.alphaed(color, 0.33f);
    this.strokeColor = color;
  }

  public SensorReadingsSectorDrawer() {
    this(COLOR);
  }

  /**
   * Builds a pie-slice path centered in {@code c} with radius {@code r}, spanning angles
   * {@code [a1, a2]} (radians), approximating the arc with short segments.
   */
  private static Path2D getSector(Point2 c, double r, double a1, double a2) {
    Path2D sector = new Path2D.Double();
    sector.moveTo(c.x, c.y);
    for (double a = a1; a < a2; a = a + ANGLE_RESOLUTION) {
      sector.lineTo(c.x + r * Math.cos(a), c.y + r * Math.sin(a));
    }
    // close exactly at a2, since the loop may stop short of it
    sector.lineTo(c.x + r * Math.cos(a2), c.y + r * Math.sin(a2));
    sector.closePath();
    return sector;
  }

  @Override
  protected void innerDraw(double t, Snapshot snapshot, Graphics2D g) {
    VoxelPoly voxelPoly = (VoxelPoly) snapshot.getContent();
    // collect the per-sensor readings published as children of the voxel snapshot
    List<ScopedReadings> readings = snapshot.getChildren().stream()
        .filter(s -> s.getContent() instanceof ScopedReadings)
        .map(s -> (ScopedReadings) s.getContent())
        .collect(Collectors.toList());
    if (readings.isEmpty()) {
      return;
    }
    // radius of a square of equal area; the voxel may be deformed
    double radius = Math.sqrt(voxelPoly.area()) / 2d;
    Point2 center = voxelPoly.center();
    // voxel orientation: mean slope of the two horizontal edges (vertexes 0-1 and 3-2)
    double voxelAngle = Math.atan2((voxelPoly.getVertexes()[1].y - voxelPoly.getVertexes()[0].y), (voxelPoly.getVertexes()[1].x - voxelPoly.getVertexes()[0].x)) / 2d +
        Math.atan2((voxelPoly.getVertexes()[2].y - voxelPoly.getVertexes()[3].y), (voxelPoly.getVertexes()[2].x - voxelPoly.getVertexes()[3].x)) / 2d;
    double angle = ROTATED ? voxelAngle : 0d;
    double sensorSliceAngle = SPAN_ANGLE / (double) readings.size();
    for (int i = 0; i < readings.size(); i++) {
      double sensorStartingAngle = angle + (double) i * sensorSliceAngle;
      double valueSliceAngle = sensorSliceAngle / (double) readings.get(i).getReadings().length;
      if (SENSOR_FRAME) {
        g.setColor(strokeColor);
        Path2D sector = getSector(center, radius, sensorStartingAngle, sensorStartingAngle + sensorSliceAngle);
        g.draw(sector);
      }
      g.setColor(fillColor);
      for (int j = 0; j < readings.get(i).getReadings().length; j++) {
        double value = readings.get(i).getReadings()[j];
        Domain d = readings.get(i).getDomains()[j];
        // normalize the reading into [0,1] within its domain, clamped
        double normalizedRadius = radius * Math.min(1d, Math.max(0d, (value - d.getMin()) / (d.getMax() - d.getMin())));
        double valueStartingAngle = sensorStartingAngle + (double) j * valueSliceAngle;
        double valueEndingAngle = valueStartingAngle + valueSliceAngle;
        Path2D sector = getSector(center, normalizedRadius, valueStartingAngle, valueEndingAngle);
        g.fill(sector);
      }
    }
  }
}
| 4,410
| 42.245098
| 167
|
java
|
null |
VSRCollectiveControlViaSNCA-main/src/main/java/it/units/erallab/hmsrobots/viewers/drawers/PolyDrawer.java
|
/*
* Copyright (C) 2021 Eric Medvet <eric.medvet@gmail.com> (as Eric Medvet <eric.medvet@gmail.com>)
*
* This program is free software: you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package it.units.erallab.hmsrobots.viewers.drawers;
import it.units.erallab.hmsrobots.core.geometry.Poly;
import it.units.erallab.hmsrobots.core.snapshots.Snapshot;
import it.units.erallab.hmsrobots.viewers.DrawingUtils;
import java.awt.*;
import java.awt.geom.Path2D;
import java.awt.image.BufferedImage;
/**
 * Draws the extracted {@code Poly} as a filled, outlined path; the fill can be a flat
 * translucent color or a texture.
 */
public class PolyDrawer extends SubtreeDrawer {
  private final static Color COLOR = Color.BLACK;
  private final static int TEXTURE_SIZE = 4;
  private final static Color TEXTURE_COLOR = Color.GRAY;
  public final static TexturePaint TEXTURE_PAINT = createTexturePaint();

  private final Color strokeColor;
  private final Color fillColor;
  // paint used when useTexture is set; defaults to TEXTURE_PAINT
  private final TexturePaint texturePaint;
  private final boolean useTexture;

  public PolyDrawer(Extractor extractor) {
    this(COLOR, extractor);
  }

  public PolyDrawer(Color color, Extractor extractor) {
    super(extractor);
    strokeColor = color;
    fillColor = DrawingUtils.alphaed(color, 0.25f);
    texturePaint = TEXTURE_PAINT; // unused while useTexture is false
    useTexture = false;
  }

  public PolyDrawer(TexturePaint texturePaint, Extractor extractor) {
    super(extractor);
    strokeColor = COLOR;
    fillColor = DrawingUtils.alphaed(COLOR, 0.25f);
    // fix: store and honor the caller-supplied paint; previously the parameter was
    // overwritten with TEXTURE_PAINT (a dead self-assignment) and then ignored
    this.texturePaint = texturePaint;
    useTexture = true;
  }

  @Override
  protected void innerDraw(double t, Snapshot snapshot, Graphics2D g) {
    if (!(snapshot.getContent() instanceof Poly)) {
      return;
    }
    Poly poly = (Poly) snapshot.getContent();
    Path2D path = DrawingUtils.toPath(poly, true);
    if (useTexture || fillColor != null) {
      if (useTexture) {
        g.setPaint(texturePaint);
      } else {
        g.setColor(fillColor);
      }
      g.fill(path);
    }
    if (strokeColor != null) {
      g.setColor(strokeColor);
      g.draw(path);
    }
  }

  /** Builds the default checkered gray texture (2x2 pixels, scaled to TEXTURE_SIZE). */
  private static TexturePaint createTexturePaint() {
    BufferedImage texture = new BufferedImage(2, 2, BufferedImage.TYPE_4BYTE_ABGR);
    Graphics2D g = texture.createGraphics();
    g.setColor(DrawingUtils.alphaed(TEXTURE_COLOR, 0.5f));
    g.fillRect(0, 0, 2, 2);
    g.setColor(DrawingUtils.alphaed(TEXTURE_COLOR, 0.75f));
    g.fillRect(1, 0, 1, 1);
    g.fillRect(0, 1, 1, 1);
    g.dispose();
    return new TexturePaint(texture, new Rectangle(0, 0, TEXTURE_SIZE, TEXTURE_SIZE));
  }
}
| 2,958
| 31.516484
| 98
|
java
|
null |
VSRCollectiveControlViaSNCA-main/src/main/java/it/units/erallab/hmsrobots/viewers/drawers/MemoryDrawer.java
|
/*
* Copyright (c) "Eric Medvet" 2021.
*
* This program is free software: you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package it.units.erallab.hmsrobots.viewers.drawers;
import it.units.erallab.hmsrobots.core.snapshots.Snapshot;
import java.awt.*;
import java.util.SortedMap;
import java.util.TreeMap;
import java.util.function.Function;
/**
* @author "Eric Medvet" on 2021/09/20 for 2dhmsr
*/
/**
 * Base drawer that keeps a sliding window of per-snapshot readings of type {@code K} and
 * hands the time-indexed window to subclasses for rendering.
 *
 * @param <K> type of the value extracted from each snapshot and remembered over time
 * @author "Eric Medvet" on 2021/09/20 for 2dhmsr
 */
public abstract class MemoryDrawer<K> extends SubtreeDrawer {
  // extracts the value to remember from each snapshot
  private final Function<Snapshot, K> function;
  // width, in simulation time, of the sliding window
  protected final double windowT;
  // readings keyed by the time they were taken, oldest first
  private final SortedMap<Double, K> memory;

  public MemoryDrawer(Extractor extractor, Function<Snapshot, K> function, double windowT) {
    super(extractor);
    this.function = function;
    this.windowT = windowT;
    memory = new TreeMap<>();
  }

  @Override
  protected void innerDraw(double t, Snapshot snapshot, Graphics2D g) {
    K currentReading = function.apply(snapshot);
    memory.put(t, currentReading);
    // evict readings older than the window; headMap(k) is the live view of keys < k,
    // so clearing it removes exactly what the previous while/remove loop did
    memory.headMap(t - windowT).clear();
    innerDraw(t, snapshot, memory, g);
  }

  /** Renders the current window; {@code memory} maps time to reading, oldest first. */
  protected abstract void innerDraw(double t, Snapshot snapshot, SortedMap<Double, K> memory, Graphics2D g);
}
| 1,791
| 31
| 108
|
java
|
null |
VSRCollectiveControlViaSNCA-main/src/main/java/it/units/erallab/hmsrobots/viewers/drawers/PostureDrawer.java
|
/*
* Copyright (c) "Eric Medvet" 2021.
*
* This program is free software: you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package it.units.erallab.hmsrobots.viewers.drawers;
import it.units.erallab.hmsrobots.behavior.BehaviorUtils;
import it.units.erallab.hmsrobots.core.geometry.BoundingBox;
import it.units.erallab.hmsrobots.core.snapshots.Snapshot;
import it.units.erallab.hmsrobots.util.Grid;
import it.units.erallab.hmsrobots.viewers.DrawingUtils;
import java.awt.*;
import java.awt.geom.Rectangle2D;
import java.util.Objects;
import java.util.SortedMap;
import java.util.stream.Collectors;
/**
* @author "Eric Medvet" on 2021/09/22 for 2dhmsr
*/
/**
 * Draws the robot posture as an {@code n x n} occupancy grid, averaged over the sliding
 * window: either thresholded to boolean cells or alpha-shaded by occupancy frequency.
 *
 * @author "Eric Medvet" on 2021/09/22 for 2dhmsr
 */
public class PostureDrawer extends MemoryDrawer<Grid<Boolean>> {
  // side of the posture grid, in cells
  private final int n;
  // true: draw cells occupied in >50% of the window; false: alpha-shade by frequency
  private final boolean isBoolean;
  private final Color dataColor;
  private final Color axesColor;

  public PostureDrawer(Extractor extractor, double windowT, int n, boolean isBoolean, Color dataColor, Color axesColor) {
    super(
        extractor,
        BehaviorUtils.voxelPolyGrid()
            .andThen(g -> BehaviorUtils.computePosture(g.values().stream().filter(Objects::nonNull).collect(Collectors.toList()), n)),
        windowT
    );
    this.n = n;
    this.isBoolean = isBoolean;
    this.dataColor = dataColor;
    this.axesColor = axesColor;
  }

  public PostureDrawer(Extractor extractor, double windowT, int n, boolean isBoolean) {
    this(extractor, windowT, n, isBoolean, DrawingUtils.Colors.DATA, DrawingUtils.Colors.AXES);
  }

  @Override
  protected void innerDraw(double t, Snapshot snapshot, SortedMap<Double, Grid<Boolean>> memory, Graphics2D g) {
    //compute aggregate
    // per-cell occupancy frequency over the window, in [0,1]
    Grid<Double> average = Grid.create(n, n, (x, y) -> memory.values().stream().mapToDouble(grid -> grid.get(x, y) ? 1d : 0d).average().orElse(0d));
    //prepare clips
    double textH = g.getFontMetrics().getMaxAscent();
    BoundingBox oBB = BoundingBox.of(
        g.getClip().getBounds2D().getX(),
        g.getClip().getBounds2D().getY(),
        g.getClip().getBounds2D().getMaxX(),
        g.getClip().getBounds2D().getMaxY()
    );
    // pBB: largest centered square that fits in the clip, with a textH margin
    BoundingBox pBB = (oBB.width() > oBB.height()) ? BoundingBox.of(
        oBB.min.x + (oBB.width() - oBB.height()) / 2d + textH,
        oBB.min.y + textH,
        oBB.max.x - (oBB.width() - oBB.height()) / 2d - textH,
        oBB.max.y - textH
    ) : BoundingBox.of(
        oBB.min.x + textH,
        oBB.min.y + (oBB.height() - oBB.width()) / 2d + textH,
        oBB.max.x - textH,
        oBB.max.y - (oBB.height() - oBB.width()) / 2d - textH
    );
    //draw data
    double l = pBB.width() / (double) n;
    average.forEach(e -> {
      if (!isBoolean || e.getValue() > 0.5d) {
        double minX = pBB.min.x + (double) e.getX() / (double) n * pBB.width();
        // y is flipped so grid row 0 is drawn at the bottom; NOTE(review): uses pBB.width()
        // for the vertical scale too — valid only because pBB is square by construction
        double minY = pBB.min.y + (n - (double) e.getY() - 1) / (double) n * pBB.width();
        g.setColor(isBoolean ? dataColor : DrawingUtils.alphaed(dataColor, e.getValue().floatValue()));
        g.fill(new Rectangle2D.Double(minX, minY, l, l));
      }
    });
    //draw box
    g.setColor(axesColor);
    g.draw(new Rectangle2D.Double(pBB.min.x, pBB.min.y, pBB.width(), pBB.height()));
  }
}
| 3,733
| 37.494845
| 148
|
java
|
null |
VSRCollectiveControlViaSNCA-main/src/main/java/it/units/erallab/hmsrobots/viewers/drawers/Drawer.java
|
/*
* Copyright (C) 2021 Eric Medvet <eric.medvet@gmail.com> (as Eric Medvet <eric.medvet@gmail.com>)
*
* This program is free software: you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package it.units.erallab.hmsrobots.viewers.drawers;
import it.units.erallab.hmsrobots.core.geometry.BoundingBox;
import it.units.erallab.hmsrobots.core.snapshots.Snapshot;
import it.units.erallab.hmsrobots.viewers.DrawingUtils;
import it.units.erallab.hmsrobots.viewers.Framer;
import java.awt.*;
import java.awt.geom.AffineTransform;
import java.awt.geom.Line2D;
import java.awt.geom.Rectangle2D;
import java.util.List;
/**
* @author "Eric Medvet" on 2021/08/13 for 2dhmsr
*/
public interface Drawer {

  //draws a representation of snapshot, taken at time t, on the given graphics context
  void draw(double t, Snapshot snapshot, Graphics2D g);

  //composes drawers: each one is invoked, in order, on the same graphics context
  static Drawer of(Drawer... drawers) {
    return of(List.of(drawers));
  }

  static Drawer of(List<Drawer> drawers) {
    return (t, snapshot, g) -> drawers.forEach(d -> d.draw(t, snapshot, g));
  }

  //draws the two diagonals of the current clip (mostly useful as a placeholder/debug drawer)
  static Drawer diagonals() {
    return diagonals(DrawingUtils.Colors.AXES);
  }

  static Drawer diagonals(Color color) {
    return (t, snapshot, g) -> {
      Rectangle2D r = (Rectangle2D) g.getClip();
      g.setColor(color);
      g.draw(new Line2D.Double(r.getX(), r.getY(), r.getMaxX(), r.getMaxY()));
      g.draw(new Line2D.Double(r.getX(), r.getMaxY(), r.getMaxX(), r.getY()));
    };
  }

  //writes a fixed string on the first text line of the clip, with the given alignment
  static Drawer text(String s) {
    return text(s, DrawingUtils.Alignment.CENTER);
  }

  static Drawer text(String s, DrawingUtils.Alignment alignment) {
    return text(s, alignment, DrawingUtils.Colors.TEXT);
  }

  static Drawer text(String s, DrawingUtils.Alignment alignment, Color color) {
    return (t, snapshot, g) -> {
      g.setColor(color);
      g.drawString(
          s,
          switch (alignment) {
            case LEFT -> g.getClipBounds().x + 1;
            case CENTER -> g.getClipBounds().x + g.getClipBounds().width / 2 - g.getFontMetrics().stringWidth(s) / 2;
            case RIGHT -> g.getClipBounds().x + g.getClipBounds().width - 1 - g.getFontMetrics().stringWidth(s);
          },
          g.getClipBounds().y + 1 + g.getFontMetrics().getMaxAscent());
    };
  }

  //fills the whole clip with the given color (white by default), i.e. clears it
  static Drawer clear() {
    return clear(Color.WHITE);
  }

  static Drawer clear(Color color) {
    return (t, snapshot, g) -> {
      g.setColor(color);
      g.fill(g.getClip());
    };
  }

  //restricts drawer to a portion of the current clip; boundingBox is expressed in coordinates
  //relative to the clip (i.e., in [0,1]x[0,1]); the original clip is restored after drawing
  static Drawer clip(BoundingBox boundingBox, Drawer drawer) {
    return (t, snapshot, g) -> {
      Shape shape = g.getClip();
      double clipX = shape.getBounds2D().getX();
      double clipY = shape.getBounds2D().getY();
      double clipW = shape.getBounds2D().getWidth();
      double clipH = shape.getBounds2D().getHeight();
      g.clip(new Rectangle2D.Double(
          clipX + boundingBox.min.x * clipW,
          clipY + boundingBox.min.y * clipH,
          clipW * boundingBox.width(),
          clipH * boundingBox.height()
      ));
      //draw
      drawer.draw(t, snapshot, g);
      //restore clip and transform
      g.setClip(shape);
    };
  }

  //renders drawer in world coordinates: asks the framer which world bounding box to show,
  //then maps it onto the clip with a uniform scale and a y-axis flip (world y points up);
  //the transform and a scale-independent stroke are applied before drawing and the original
  //Graphics2D transform and stroke are restored afterwards, so state does not leak
  static Drawer transform(Framer framer, Drawer drawer) {
    return (t, snapshot, g) -> {
      BoundingBox graphicsFrame = BoundingBox.of(
          g.getClip().getBounds2D().getX(),
          g.getClip().getBounds2D().getY(),
          g.getClip().getBounds2D().getMaxX(),
          g.getClip().getBounds2D().getMaxY()
      );
      BoundingBox worldFrame = framer.getFrame(t, snapshot, graphicsFrame.width() / graphicsFrame.height());
      //save original transform and stroke
      AffineTransform oAt = g.getTransform();
      Stroke oStroke = g.getStroke();
      //prepare transformation: uniform scale (min of the two ratios, so the whole world
      //frame fits), negated on y to flip the axis, then translated onto the clip origin
      double xRatio = graphicsFrame.width() / worldFrame.width();
      double yRatio = graphicsFrame.height() / worldFrame.height();
      double ratio = Math.min(xRatio, yRatio);
      AffineTransform at = new AffineTransform();
      at.translate(graphicsFrame.min.x, graphicsFrame.min.y);
      at.scale(ratio, -ratio);
      at.translate(-worldFrame.min.x, -worldFrame.max.y);
      //apply transform and stroke
      g.setTransform(at);
      g.setStroke(DrawingUtils.getScaleIndependentStroke(1, (float) ratio));
      //draw
      drawer.draw(t, snapshot, g);
      //restore transform
      g.setTransform(oAt);
      g.setStroke(oStroke);
    };
  }

}
| 4,848
| 32.673611
| 117
|
java
|
null |
VSRCollectiveControlViaSNCA-main/src/main/java/it/units/erallab/hmsrobots/viewers/drawers/VoxelDrawer.java
|
/*
* Copyright (C) 2021 Eric Medvet <eric.medvet@gmail.com> (as Eric Medvet <eric.medvet@gmail.com>)
*
* This program is free software: you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package it.units.erallab.hmsrobots.viewers.drawers;
import it.units.erallab.hmsrobots.core.geometry.Point2;
import it.units.erallab.hmsrobots.core.objects.BreakableVoxel;
import it.units.erallab.hmsrobots.core.objects.Voxel;
import it.units.erallab.hmsrobots.core.snapshots.Snapshot;
import it.units.erallab.hmsrobots.core.snapshots.VoxelPoly;
import it.units.erallab.hmsrobots.viewers.DrawingUtils;
import java.awt.*;
import java.awt.geom.Path2D;
/**
 * Draws a voxel as its polygon outline plus an optional fill whose color encodes either the
 * current/rest area ratio or the last applied control force; for breakable voxels, a thick
 * mark is added for each malfunctioning component.
 */
public class VoxelDrawer extends SubtreeDrawer {

  //how the voxel polygon is filled
  public enum FillType {APPLIED_FORCE, AREA_RATIO, NONE}

  private final static Color STROKE_COLOR = Color.BLUE;
  private final static Color REST_FILL_COLOR = DrawingUtils.alphaed(Color.YELLOW, 0.5f);
  private final static Color SHRUNK_FILL_COLOR = DrawingUtils.alphaed(Color.RED, 0.5f);
  private final static Color EXPANDED_FILL_COLOR = DrawingUtils.alphaed(Color.GREEN, 0.5f);
  private final static Color MALFUNCTION_COLOR = DrawingUtils.alphaed(Color.BLACK, 0.75f);
  private final static float MALFUNCTION_STROKE_WIDTH = 3f;
  //area ratios mapped onto the extreme fill colors
  private final static float SHRUNK_RATIO = 0.75f;
  private final static float EXPANDED_RATIO = 1.25f; //renamed: was EXPANDEND_RATIO (typo)

  private final FillType fillType;

  public VoxelDrawer(FillType fillType) {
    super(Extractor.matches(VoxelPoly.class, Voxel.class, null));
    this.fillType = fillType;
  }

  public VoxelDrawer() {
    this(FillType.AREA_RATIO);
  }

  @Override
  protected void innerDraw(double t, Snapshot snapshot, Graphics2D g) {
    VoxelPoly voxelPoly = (VoxelPoly) snapshot.getContent();
    Path2D path = DrawingUtils.toPath(voxelPoly, true);
    g.setColor(STROKE_COLOR);
    g.draw(path);
    switch (fillType) { //switch on enum instead of an equals() chain
      case AREA_RATIO -> {
        g.setColor(DrawingUtils.linear(
            SHRUNK_FILL_COLOR, REST_FILL_COLOR, EXPANDED_FILL_COLOR,
            SHRUNK_RATIO, 1f, EXPANDED_RATIO,
            (float) voxelPoly.getAreaRatio()
        ));
        g.fill(path);
      }
      case APPLIED_FORCE -> {
        g.setColor(DrawingUtils.linear(
            SHRUNK_FILL_COLOR, REST_FILL_COLOR, EXPANDED_FILL_COLOR,
            -1f, 0f, 1f,
            (float) voxelPoly.getLastAppliedForce()
        ));
        g.fill(path);
      }
      case NONE -> {
        //outline only, no fill
      }
    }
    if (BreakableVoxel.class.isAssignableFrom(snapshot.getSnapshottableClass())) {
      //save the stroke: previously the thick malfunction stroke was never restored and
      //leaked into every subsequent drawing on the same graphics context
      Stroke oStroke = g.getStroke();
      g.setColor(MALFUNCTION_COLOR);
      g.setStroke(new BasicStroke(MALFUNCTION_STROKE_WIDTH / (float) g.getTransform().getScaleX()));
      if (!voxelPoly.getMalfunctions().get(BreakableVoxel.ComponentType.ACTUATOR).equals(BreakableVoxel.MalfunctionType.NONE)) {
        g.draw(DrawingUtils.toPath(voxelPoly.getVertexes()[0], voxelPoly.getVertexes()[2]));
      }
      if (!voxelPoly.getMalfunctions().get(BreakableVoxel.ComponentType.SENSORS).equals(BreakableVoxel.MalfunctionType.NONE)) {
        g.draw(DrawingUtils.toPath(voxelPoly.getVertexes()[1], voxelPoly.getVertexes()[3]));
      }
      if (!voxelPoly.getMalfunctions().get(BreakableVoxel.ComponentType.STRUCTURE).equals(BreakableVoxel.MalfunctionType.NONE)) {
        g.draw(DrawingUtils.toPath(
            Point2.average(voxelPoly.getVertexes()[0], voxelPoly.getVertexes()[3]),
            Point2.average(voxelPoly.getVertexes()[1], voxelPoly.getVertexes()[2])
        ));
      }
      g.setStroke(oStroke);
    }
  }

}
| 3,973
| 41.731183
| 129
|
java
|
null |
VSRCollectiveControlViaSNCA-main/src/main/java/it/units/erallab/hmsrobots/viewers/drawers/SubtreeDrawer.java
|
/*
* Copyright (C) 2021 Eric Medvet <eric.medvet@gmail.com> (as Eric Medvet <eric.medvet@gmail.com>)
*
* This program is free software: you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package it.units.erallab.hmsrobots.viewers.drawers;
import it.units.erallab.hmsrobots.core.snapshots.Snapshot;
import it.units.erallab.hmsrobots.core.snapshots.Snapshottable;
import java.awt.*;
import java.util.ArrayList;
import java.util.List;
/**
* @author "Eric Medvet" on 2021/08/29 for 2dhmsr
*/
public abstract class SubtreeDrawer implements Drawer {

  //selects, from a snapshot tree, the snapshots this drawer has to render
  @FunctionalInterface
  public interface Extractor {

    //returns the snapshots, in the tree rooted at snapshot, selected by this extractor
    List<Snapshot> extract(Snapshot snapshot);

    //builds an extractor keeping snapshots whose content class and snapshottable class match
    //the given ones (a null criterion matches everything); if index is non-null, only the
    //index-th matching snapshot among the children of each parent is kept
    //NOTE(review): the match counter is reset for every parent, so index is per-parent, not
    //global across the whole tree — confirm this is the intended semantics
    static Extractor matches(Class<?> contentClass, Class<? extends Snapshottable> snapshottableClass, Integer index) {
      return snapshot -> {
        List<Snapshot> snapshots = new ArrayList<>();
        //the root is checked with position 0
        if (matches(snapshot, 0, contentClass, snapshottableClass, index)) {
          snapshots.add(snapshot);
        }
        extract(snapshots, snapshot, contentClass, snapshottableClass, index);
        return snapshots;
      };
    }

    //recursively visits the tree rooted at s, adding matching descendants to snapshots
    private static void extract(List<Snapshot> snapshots, Snapshot s, Class<?> contentClass, Class<? extends Snapshottable> snapshottableClass, Integer index) {
      int c = 0; //number of matching children of s seen so far (class criteria only)
      for (int i = 0; i < s.getChildren().size(); i++) {
        if (matches(s.getChildren().get(i), 0, contentClass, snapshottableClass, null)) {
          c = c + 1;
        }
        //c-1 is the 0-based position of this child among the matching siblings
        if (matches(s.getChildren().get(i), c - 1, contentClass, snapshottableClass, index)) {
          snapshots.add(s.getChildren().get(i));
        }
        extract(snapshots, s.getChildren().get(i), contentClass, snapshottableClass, index);
      }
    }

    //true iff snapshot matches the class criteria and (when index is given) i equals index
    private static boolean matches(Snapshot snapshot, int i, Class<?> contentClass, Class<? extends Snapshottable> snapshottableClass, Integer index) {
      return (contentClass == null || contentClass.isAssignableFrom(snapshot.getContent().
          getClass())) &&
          (snapshottableClass == null || snapshottableClass.isAssignableFrom(snapshot.getSnapshottableClass())) &&
          (index == null || index == i);
    }
  }

  private final Extractor extractor;

  public SubtreeDrawer(Extractor extractor) {
    this.extractor = extractor;
  }

  //applies innerDraw to every snapshot selected by the extractor
  @Override
  public void draw(double t, Snapshot snapshot, Graphics2D g) {
    extractor.extract(snapshot).forEach(s -> innerDraw(t, s, g));
  }

  //draws a single selected snapshot
  protected abstract void innerDraw(double t, Snapshot snapshot, Graphics2D g);

}
| 3,070
| 36
| 160
|
java
|
null |
VSRCollectiveControlViaSNCA-main/src/main/java/it/units/erallab/hmsrobots/viewers/drawers/SpectrumDrawer.java
|
/*
* Copyright (c) "Eric Medvet" 2021.
*
* This program is free software: you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package it.units.erallab.hmsrobots.viewers.drawers;
import it.units.erallab.hmsrobots.behavior.BehaviorUtils;
import it.units.erallab.hmsrobots.core.geometry.BoundingBox;
import it.units.erallab.hmsrobots.core.snapshots.Snapshot;
import it.units.erallab.hmsrobots.util.Domain;
import it.units.erallab.hmsrobots.viewers.DrawingUtils;
import java.awt.*;
import java.awt.geom.Line2D;
import java.awt.geom.Rectangle2D;
import java.util.SortedMap;
import java.util.function.Function;
/**
* @author "Eric Medvet" on 2021/09/17 for 2dhmsr
*/
/**
 * Draws, as a bar plot, the quantized spectrum (in [minF, maxF], with nBins bins) of a scalar
 * signal extracted from the snapshots stored in the last {@code windowT} simulated seconds.
 */
public class SpectrumDrawer extends MemoryDrawer<Double> {

  private final double minF;
  private final double maxF;
  private final int nBins;
  private final Color barFillColor;
  private final Color barLineColor;
  private final Color axesColor;
  private final Color textColor;

  public SpectrumDrawer(Extractor extractor, Function<Snapshot, Double> function, double windowT, double minF, double maxF, int nBins, Color barFillColor, Color barLineColor, Color axesColor, Color textColor) {
    super(extractor, function, windowT);
    this.minF = minF;
    this.maxF = maxF;
    this.nBins = nBins;
    this.barFillColor = barFillColor;
    this.barLineColor = barLineColor;
    this.axesColor = axesColor;
    this.textColor = textColor;
  }

  public SpectrumDrawer(Extractor extractor, Function<Snapshot, Double> function, double windowT, double minF, double maxF, int nBins) {
    this(extractor, function, windowT, minF, maxF, nBins, DrawingUtils.alphaed(DrawingUtils.Colors.DATA, .5f), DrawingUtils.Colors.DATA, DrawingUtils.Colors.AXES, DrawingUtils.Colors.TEXT);
  }

  @Override
  protected void innerDraw(double t, Snapshot snapshot, SortedMap<Double, Double> memory, Graphics2D g) {
    //compute spectrum
    SortedMap<Domain, Double> spectrum = BehaviorUtils.computeQuantizedSpectrum(memory, minF, maxF, nBins);
    double maxValue = spectrum.values().stream().mapToDouble(d -> d).max().orElse(0d);
    //guard against an empty/all-zero spectrum: dividing by 0 would give NaN bar heights;
    //the y-axis label still shows the true maxValue
    double scale = (maxValue > 0d) ? maxValue : 1d;
    //assumes the spectrum has (at least) nBins entries, one per bin
    Domain[] domains = spectrum.keySet().toArray(Domain[]::new);
    double[] values = spectrum.values().stream().mapToDouble(d -> d).toArray();
    //prepare clips: pBB is the plot area, inset to leave room for axis labels
    double textH = g.getFontMetrics().getMaxAscent();
    double textW = g.getFontMetrics().charWidth('m');
    BoundingBox oBB = BoundingBox.of(
        g.getClip().getBounds2D().getX(),
        g.getClip().getBounds2D().getY(),
        g.getClip().getBounds2D().getMaxX(),
        g.getClip().getBounds2D().getMaxY()
    );
    BoundingBox pBB = BoundingBox.of(
        oBB.min.x + 5 * textW,
        oBB.min.y + textH,
        oBB.max.x - textW,
        oBB.max.y - 3 * textH
    );
    double binW = pBB.width() / (double) nBins;
    //draw bars
    for (int i = 0; i < nBins; i++) {
      double minX = pBB.min.x + (double) i * binW;
      double barH = pBB.height() * values[i] / scale;
      Shape shape = new Rectangle2D.Double(minX, pBB.min.y + pBB.height() - barH, binW, barH);
      g.setColor(barFillColor);
      g.fill(shape);
      g.setColor(barLineColor);
      g.draw(shape);
    }
    //draw x-axis, with one tick and one frequency label per bin boundary
    g.setColor(axesColor);
    g.draw(new Line2D.Double(pBB.min.x, pBB.max.y, pBB.max.x, pBB.max.y));
    for (int i = 0; i <= nBins; i++) {
      double x = pBB.min.x + (double) i * binW;
      g.setColor(axesColor);
      g.draw(new Line2D.Double(x, pBB.max.y, x, pBB.max.y + textH));
      g.setColor(textColor);
      String s = String.format("%.1f", (i < nBins) ? domains[i].getMin() : domains[i - 1].getMax());
      g.drawString(s, (float) x - g.getFontMetrics().stringWidth(s) / 2f, (float) (pBB.max.y + 2 * textH));
    }
    //draw y-axis with 0 and maxValue labels
    g.setColor(axesColor);
    g.draw(new Line2D.Double(pBB.min.x, pBB.max.y, pBB.min.x, pBB.min.y));
    g.draw(new Line2D.Double(pBB.min.x - textW, pBB.max.y, pBB.min.x, pBB.max.y));
    g.draw(new Line2D.Double(pBB.min.x - textW, pBB.min.y, pBB.min.x, pBB.min.y));
    g.setColor(textColor);
    String s = String.format("%.1f", maxValue);
    g.drawString(s, (float) (pBB.min.x - 2d * textW - g.getFontMetrics().stringWidth(s)), (float) (pBB.min.y + textH / 2d));
    g.drawString("0", (float) (pBB.min.x - 2d * textW - g.getFontMetrics().stringWidth("0")), (float) (pBB.max.y + textH / 2d));
  }

}
| 4,889
| 41.155172
| 210
|
java
|
null |
VSRCollectiveControlViaSNCA-main/src/main/java/it/units/erallab/hmsrobots/viewers/drawers/FootprintDrawer.java
|
/*
* Copyright (c) "Eric Medvet" 2021.
*
* This program is free software: you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package it.units.erallab.hmsrobots.viewers.drawers;
import it.units.erallab.hmsrobots.behavior.BehaviorUtils;
import it.units.erallab.hmsrobots.behavior.Footprint;
import it.units.erallab.hmsrobots.core.geometry.BoundingBox;
import it.units.erallab.hmsrobots.core.snapshots.Snapshot;
import it.units.erallab.hmsrobots.viewers.DrawingUtils;
import java.awt.*;
import java.awt.geom.Line2D;
import java.awt.geom.Rectangle2D;
import java.util.Objects;
import java.util.SortedMap;
import java.util.stream.Collectors;
/**
* @author "Eric Medvet" on 2021/09/22 for 2dhmsr
*/
/**
 * Draws, as a plot scrolling leftwards, the footprint (which of the n ground sectors are
 * touched) of a robot over the stored time window, with a time axis below.
 */
public class FootprintDrawer extends MemoryDrawer<Footprint> {

  private final int n;
  private final Color touchColor;
  private final Color axesColor;
  private final Color textColor;

  public FootprintDrawer(Extractor extractor, double windowT, int n, Color touchColor, Color axesColor, Color textColor) {
    super(
        extractor,
        BehaviorUtils.voxelPolyGrid()
            .andThen(g -> BehaviorUtils.computeFootprint(g.values().stream().filter(Objects::nonNull).collect(Collectors.toList()), n)),
        windowT
    );
    this.n = n;
    this.touchColor = touchColor;
    this.axesColor = axesColor;
    this.textColor = textColor;
  }

  public FootprintDrawer(Extractor extractor, double windowT, int n) {
    this(extractor, windowT, n, DrawingUtils.Colors.DATA, DrawingUtils.Colors.AXES, DrawingUtils.Colors.TEXT);
  }

  @Override
  protected void innerDraw(double t, Snapshot snapshot, SortedMap<Double, Footprint> memory, Graphics2D g) {
    //compute the plot area, leaving room for the labels below and one text line above
    double fontH = g.getFontMetrics().getMaxAscent();
    Rectangle2D clip = g.getClip().getBounds2D();
    BoundingBox outer = BoundingBox.of(clip.getX(), clip.getY(), clip.getMaxX(), clip.getMaxY());
    BoundingBox plot = BoundingBox.of(
        outer.min.x,
        outer.min.y + fontH,
        outer.max.x,
        outer.max.y - 3 * fontH
    );
    //draw one filled rectangle per touched sector per stored sample
    g.setColor(touchColor);
    double[] times = memory.keySet().stream().mapToDouble(v -> v).toArray();
    Footprint[] prints = memory.values().toArray(Footprint[]::new);
    double rowH = plot.height() / (double) n;
    for (int k = 1; k < times.length; k++) {
      double xFrom = plot.max.x - (times[times.length - 1] - times[k - 1]) / windowT * plot.width();
      double xTo = plot.max.x - (times[times.length - 1] - times[k]) / windowT * plot.width();
      boolean[] touched = prints[k].getMask();
      for (int s = 0; s < n; s++) {
        if (!touched[s]) {
          continue;
        }
        double yTop = plot.min.y + (double) s / (double) n * plot.height();
        g.fill(new Rectangle2D.Double(xFrom, yTop, xTo - xFrom, rowH));
      }
    }
    //draw the time axis, with one tick and one label per simulated second
    g.setColor(axesColor);
    g.draw(new Line2D.Double(plot.min.x, plot.max.y, plot.max.x, plot.max.y));
    double lastT = memory.lastKey();
    for (double tick = Math.ceil(lastT - windowT); tick < lastT; tick = tick + 1d) {
      double x = plot.min.x + (tick - lastT + windowT) / windowT * plot.width();
      g.setColor(axesColor);
      g.draw(new Line2D.Double(x, plot.max.y, x, plot.max.y + fontH));
      g.setColor(textColor);
      String label = String.format("%.0f", tick);
      g.drawString(label, (float) (x - g.getFontMetrics().stringWidth(label) / 2f), (float) (plot.max.y + 2 * fontH));
    }
  }

}
| 3,995
| 36.345794
| 136
|
java
|
null |
VSRCollectiveControlViaSNCA-main/src/main/java/it/units/erallab/hmsrobots/viewers/drawers/StackedScopedReadingsDrawer.java
|
/*
* Copyright (C) 2021 Eric Medvet <eric.medvet@gmail.com> (as Eric Medvet <eric.medvet@gmail.com>)
*
* This program is free software: you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package it.units.erallab.hmsrobots.viewers.drawers;
import it.units.erallab.hmsrobots.core.snapshots.ScopedReadings;
import it.units.erallab.hmsrobots.core.snapshots.Snapshot;
import it.units.erallab.hmsrobots.core.snapshots.StackedScopedReadings;
import it.units.erallab.hmsrobots.viewers.DrawingUtils;
import java.awt.*;
import java.awt.geom.Rectangle2D;
import java.util.Arrays;
import java.util.Map;
import java.util.SortedMap;
/**
* @author "Eric Medvet" on 2021/09/07 for 2dhmsr
*/
public class StackedScopedReadingsDrawer extends MemoryDrawer<StackedScopedReadings> {

  private final Color minColor; //color for a reading at the minimum of its domain
  private final Color maxColor; //color for a reading at the maximum of its domain

  public StackedScopedReadingsDrawer(Extractor extractor, double windowT, Color minColor, Color maxColor) {
    super(extractor, s -> (StackedScopedReadings) s.getContent(), windowT);
    this.minColor = minColor;
    this.maxColor = maxColor;
  }

  public StackedScopedReadingsDrawer(Extractor extractor, double windowT) {
    this(extractor, windowT, DrawingUtils.Colors.DATA_POSITIVE, DrawingUtils.Colors.DATA_NEGATIVE);
  }

  //draws the stored readings as a heat map scrolling leftwards: one row per reading (with one
  //empty row between groups of readings), one column per stored sample; each reading is
  //normalized in [0,1] within its declared domain and mapped to a color between minColor and
  //maxColor
  //NOTE(review): assumes every domain has max>min, otherwise the normalization divides by 0;
  //also, cellH is computed over n rows while y positions span n+groups-1 rows — confirm the
  //slight vertical overlap is intended
  @Override
  protected void innerDraw(double t, Snapshot snapshot, SortedMap<Double, StackedScopedReadings> memory, Graphics2D g) { //TODO rewrite like MLPState
    StackedScopedReadings currentReading = memory.get(memory.lastKey());
    //plot
    double clipX = g.getClip().getBounds2D().getX();
    double clipY = g.getClip().getBounds2D().getY();
    double clipW = g.getClip().getBounds2D().getWidth();
    double clipH = g.getClip().getBounds2D().getHeight();
    //deltaT is the average sampling step of the stored window (fallback: one 60fps frame)
    double deltaT = memory.size() == 1 ? (1d / 60d) : ((memory.lastKey() - memory.firstKey()) / (memory.size() - 1));
    //n is the total number of readings across all groups
    double n = Arrays.stream(currentReading.getScopedReadings()).mapToInt(r -> r.getReadings().length).sum();
    double cellW = clipW * deltaT / windowT;
    double cellH = clipH / n;
    for (Map.Entry<Double, StackedScopedReadings> entry : memory.entrySet()) {
      //x is the horizontal position (0=left, 1=right) of this sample within the window
      double x = 1d - (t - entry.getKey()) / windowT;
      double c = 0; //row counter; incremented by one extra unit between groups (gap row)
      for (ScopedReadings scopedReadings : entry.getValue().getScopedReadings()) {
        for (int i = 0; i < scopedReadings.getReadings().length; i++) {
          //normalize the reading in [0,1] within its domain
          double v = (scopedReadings.getReadings()[i] - scopedReadings.getDomains()[i].getMin()) / (scopedReadings.getDomains()[i].getMax() - scopedReadings.getDomains()[i].getMin());
          double y = c / (n + currentReading.getScopedReadings().length - 1);
          c = c + 1;
          g.setColor(DrawingUtils.linear(minColor, maxColor, 0f, 1f, (float) v));
          g.fill(new Rectangle2D.Double(
              clipX + x * clipW - 2 * cellW, // 2* is for avoiding gaps in the plot
              clipY + y * clipH,
              2 * cellW, //2* is for avoiding gaps in the plot
              cellH
          ));
        }
        c = c + 1;
      }
    }
  }

}
| 3,574
| 42.597561
| 183
|
java
|
null |
VSRCollectiveControlViaSNCA-main/src/main/java/it/units/erallab/hmsrobots/viewers/drawers/SignalDrawer.java
|
/*
* Copyright (c) "Eric Medvet" 2021.
*
* This program is free software: you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package it.units.erallab.hmsrobots.viewers.drawers;
import it.units.erallab.hmsrobots.core.geometry.BoundingBox;
import it.units.erallab.hmsrobots.core.snapshots.Snapshot;
import it.units.erallab.hmsrobots.viewers.DrawingUtils;
import java.awt.*;
import java.awt.geom.Line2D;
import java.util.Arrays;
import java.util.SortedMap;
import java.util.function.Function;
/**
* @author "Eric Medvet" on 2021/09/17 for 2dhmsr
*/
/**
 * Draws, as a line chart scrolling leftwards, a scalar signal extracted from the snapshots
 * stored in the last {@code windowT} simulated seconds, together with time and value axes.
 */
public class SignalDrawer extends MemoryDrawer<Double> {

  private final Color signalColor;
  private final Color axesColor;
  private final Color textColor;

  public SignalDrawer(Extractor extractor, Function<Snapshot, Double> function, double windowT, Color signalColor, Color axesColor, Color textColor) {
    super(extractor, function, windowT);
    this.signalColor = signalColor;
    this.axesColor = axesColor;
    this.textColor = textColor;
  }

  public SignalDrawer(Extractor extractor, Function<Snapshot, Double> function, double windowT) {
    this(extractor, function, windowT, DrawingUtils.Colors.DATA, DrawingUtils.Colors.AXES, DrawingUtils.Colors.TEXT);
  }

  @Override
  protected void innerDraw(double t, Snapshot snapshot, SortedMap<Double, Double> memory, Graphics2D g) {
    //prepare clips: pBB is the plot area, inset to leave room for axis labels
    double textH = g.getFontMetrics().getMaxAscent();
    double textW = g.getFontMetrics().charWidth('m');
    BoundingBox oBB = BoundingBox.of(
        g.getClip().getBounds2D().getX(),
        g.getClip().getBounds2D().getY(),
        g.getClip().getBounds2D().getMaxX(),
        g.getClip().getBounds2D().getMaxY()
    );
    BoundingBox pBB = BoundingBox.of(
        oBB.min.x + 5 * textW,
        oBB.min.y + textH,
        oBB.max.x - textW,
        oBB.max.y - 3 * textH
    );
    //draw data
    g.setColor(signalColor);
    double[] ts = memory.keySet().stream().mapToDouble(v -> v).toArray();
    double[] vs = memory.values().stream().mapToDouble(v -> v).toArray();
    double minV = Arrays.stream(vs).min().orElse(0d);
    double maxV = Arrays.stream(vs).max().orElse(0d);
    //guard against a constant signal: maxV-minV==0 would make the y mapping NaN and the
    //polyline would silently disappear; with range=1 the signal is drawn as a flat line
    double range = (maxV > minV) ? (maxV - minV) : 1d;
    for (int i = 1; i < ts.length; i++) {
      double x1 = pBB.max.x - (ts[ts.length - 1] - ts[i - 1]) / windowT * pBB.width();
      double x2 = pBB.max.x - (ts[ts.length - 1] - ts[i]) / windowT * pBB.width();
      double y1 = pBB.max.y - (vs[i - 1] - minV) / range * pBB.height();
      double y2 = pBB.max.y - (vs[i] - minV) / range * pBB.height();
      g.draw(new Line2D.Double(x1, y1, x2, y2));
    }
    //draw x-axis with one tick and one label per simulated second
    g.setColor(axesColor);
    g.draw(new Line2D.Double(pBB.min.x, pBB.max.y, pBB.max.x, pBB.max.y));
    double maxT = memory.lastKey();
    for (double tickT = Math.ceil(maxT - windowT); tickT < maxT; tickT++) {
      g.setColor(axesColor);
      double x = (tickT - maxT + windowT) / windowT * (pBB.max.x - pBB.min.x) + pBB.min.x;
      g.draw(new Line2D.Double(x, pBB.max.y, x, pBB.max.y + textH));
      g.setColor(textColor);
      String s = String.format("%.0f", tickT);
      g.drawString(s, (float) (x - g.getFontMetrics().stringWidth(s) / 2f), (float) (pBB.max.y + 2 * textH));
    }
    //draw 0-line, if 0 falls strictly within the value range
    if (maxV > 0 && minV < 0) {
      g.setColor(axesColor);
      double y = pBB.max.y - (0 - minV) / range * pBB.height();
      g.draw(new Line2D.Double(pBB.min.x, y, pBB.max.x, y));
    }
    //draw y-axis with min/max labels
    g.setColor(axesColor);
    g.draw(new Line2D.Double(pBB.min.x, pBB.max.y, pBB.min.x, pBB.min.y));
    g.draw(new Line2D.Double(pBB.min.x - textW, pBB.max.y, pBB.min.x, pBB.max.y));
    g.draw(new Line2D.Double(pBB.min.x - textW, pBB.min.y, pBB.min.x, pBB.min.y));
    g.setColor(textColor);
    String s = String.format("%.1f", maxV);
    g.drawString(s, (float) (pBB.min.x - 2d * textW - g.getFontMetrics().stringWidth(s)), (float) (pBB.min.y + textH / 2d));
    s = String.format("%.1f", minV);
    g.drawString(s, (float) (pBB.min.x - 2d * textW - g.getFontMetrics().stringWidth(s)), (float) (pBB.max.y + textH / 2d));
  }

}
| 4,628
| 41.081818
| 150
|
java
|
null |
VSRCollectiveControlViaSNCA-main/src/main/java/it/units/erallab/hmsrobots/viewers/drawers/InfoDrawer.java
|
/*
* Copyright (C) 2021 Eric Medvet <eric.medvet@gmail.com> (as Eric Medvet <eric.medvet@gmail.com>)
*
* This program is free software: you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package it.units.erallab.hmsrobots.viewers.drawers;
import it.units.erallab.hmsrobots.core.geometry.Point2;
import it.units.erallab.hmsrobots.core.objects.Robot;
import it.units.erallab.hmsrobots.core.objects.Voxel;
import it.units.erallab.hmsrobots.core.snapshots.Snapshot;
import it.units.erallab.hmsrobots.core.snapshots.VoxelPoly;
import it.units.erallab.hmsrobots.viewers.DrawingUtils;
import java.awt.*;
import java.util.List;
import java.util.*;
import java.util.stream.Collectors;
/**
* @author "Eric Medvet" on 2021/08/27 for 2dhmsr
*/
/**
 * Writes textual info (a fixed header, the current time and, per robot, the center position
 * and the velocity averaged over a time window) in the top-left corner of the clip.
 */
public class InfoDrawer implements Drawer {

  public enum RobotInfo {CENTER_POSITION, CENTER_VELOCITY}

  private final String string;
  private final Set<RobotInfo> robotInfos;
  private final double windowT; //length, in simulated seconds, of the velocity-averaging window
  private final List<SortedMap<Double, Point2>> centerPositions; //per-robot history of centers

  public InfoDrawer(String string, Set<RobotInfo> robotInfos, double windowT) {
    this.string = string;
    this.robotInfos = robotInfos;
    this.windowT = windowT;
    centerPositions = new ArrayList<>();
  }

  public InfoDrawer(String string) {
    this(
        string,
        EnumSet.allOf(RobotInfo.class),
        5d
    );
  }

  public InfoDrawer() {
    this("");
  }

  @Override
  public void draw(double t, Snapshot snapshot, Graphics2D g) {
    //prepare string
    StringBuilder sb = new StringBuilder();
    if (!string.isEmpty()) {
      sb.append(string);
      sb.append("\n");
    }
    sb.append(String.format("t=%4.1f%n", t));
    //collect robots info
    if (!robotInfos.isEmpty()) {
      //get centers: for each robot, the average of the centers of its voxels
      List<Point2> currentCenterPositions = SubtreeDrawer.Extractor.matches(null, Robot.class, null).extract(snapshot).stream()
          .map(s -> Point2.average(
              s.getChildren().stream()
                  .filter(c -> Voxel.class.isAssignableFrom(c.getSnapshottableClass()))
                  .map(c -> Point2.average(((VoxelPoly) c.getContent()).getVertexes()))
                  .toArray(Point2[]::new))
          ).collect(Collectors.toList());
      //add to maps
      for (int i = 0; i < currentCenterPositions.size(); i++) {
        if (centerPositions.size() <= i) {
          centerPositions.add(new TreeMap<>());
        }
        centerPositions.get(i).put(t, currentCenterPositions.get(i));
      }
      //clean maps: discard entries older than the window; the emptiness guard protects maps
      //of robots that are no longer present in the snapshot (firstKey() throws on empty)
      centerPositions.forEach(m -> {
        while (!m.isEmpty() && m.firstKey() < (t - windowT)) {
          m.remove(m.firstKey());
        }
      });
      //print: iterate on current robots only (centerPositions never shrinks, so indexing it
      //by the old size could read past currentCenterPositions and throw)
      for (int i = 0; i < currentCenterPositions.size(); i++) {
        Point2 currentPos = currentCenterPositions.get(i);
        Point2 oldestPos = centerPositions.get(i).get(centerPositions.get(i).firstKey());
        sb.append(String.format("robot %d:", i));
        if (robotInfos.contains(RobotInfo.CENTER_POSITION)) {
          sb.append(String.format(" pos=(%5.1f,%5.1f)",
              currentPos.x,
              currentPos.y
          ));
        }
        if (robotInfos.contains(RobotInfo.CENTER_VELOCITY)) {
          sb.append(String.format(" vel[%.0f]=(%+5.1f,%+5.1f)",
              windowT,
              (currentPos.x - oldestPos.x) / windowT,
              (currentPos.y - oldestPos.y) / windowT
          ));
        }
        //terminate the robot line here: previously the newline was emitted only as part of
        //the velocity info, so with CENTER_POSITION alone all robot lines merged into one
        sb.append(String.format("%n"));
      }
    }
    //write, one line at a time
    g.setColor(DrawingUtils.Colors.TEXT);
    int relY = g.getClipBounds().y + 1;
    for (String line : sb.toString().split(String.format("%n"))) {
      g.drawString(line, g.getClipBounds().x + 1, relY + g.getFontMetrics().getMaxAscent());
      relY = relY + g.getFontMetrics().getMaxAscent() + 1;
    }
  }

}
| 4,287
| 33.304
| 127
|
java
|
null |
VSRCollectiveControlViaSNCA-main/src/main/java/it/units/erallab/hmsrobots/viewers/drawers/Drawers.java
|
/*
* Copyright (C) 2021 Eric Medvet <eric.medvet@gmail.com> (as Eric Medvet <eric.medvet@gmail.com>)
*
* This program is free software: you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package it.units.erallab.hmsrobots.viewers.drawers;
import it.units.erallab.hmsrobots.behavior.BehaviorUtils;
import it.units.erallab.hmsrobots.core.geometry.BoundingBox;
import it.units.erallab.hmsrobots.core.objects.Ground;
import it.units.erallab.hmsrobots.core.objects.Robot;
import it.units.erallab.hmsrobots.core.snapshots.MLPState;
import it.units.erallab.hmsrobots.core.snapshots.RobotShape;
import it.units.erallab.hmsrobots.core.snapshots.Snapshot;
import it.units.erallab.hmsrobots.core.snapshots.VoxelPoly;
import it.units.erallab.hmsrobots.viewers.AllRobotFollower;
import java.util.Set;
import java.util.function.Function;
import java.util.function.Supplier;
public class Drawers {
private Drawers() {
}
public static Drawer world() {
return Drawer.transform(
new AllRobotFollower(1.5d, 2),
Drawer.of(
new PolyDrawer(PolyDrawer.TEXTURE_PAINT, SubtreeDrawer.Extractor.matches(null, Ground.class, null)),
new VoxelDrawer(),
new SensorReadingsSectorDrawer(),
new LidarDrawer()
)
);
}
public static Drawer miniWorld() {
return Drawer.transform(
new AllRobotFollower(5, 4),
Drawer.of(
new PolyDrawer(SubtreeDrawer.Extractor.matches(null, Ground.class, null)),
new VoxelDrawer()
)
);
}
public static Drawer signalAndSpectrum(int robotIndex, double windowT, double minF, double maxF, int nBins, String title, Supplier<Function<Snapshot, Double>> functionSupplier) {
return Drawer.of(
Drawer.clip(
BoundingBox.of(0d, 0d, 1d, .5d),
Drawer.of(
Drawer.clear(),
new SignalDrawer(
SubtreeDrawer.Extractor.matches(RobotShape.class, Robot.class, robotIndex),
functionSupplier.get(),
windowT
),
Drawer.text(title)
)
),
Drawer.clip(
BoundingBox.of(0d, 0.5d, 1d, 1d),
Drawer.of(
Drawer.clear(),
new SpectrumDrawer(
SubtreeDrawer.Extractor.matches(RobotShape.class, Robot.class, robotIndex),
functionSupplier.get(),
windowT, minF, maxF, nBins
)
)
)
);
}
public static Drawer spectra(int robotIndex, double windowT, double minF, double maxF, int nBins) {
return Drawer.of(
Drawer.clip(
BoundingBox.of(0d, 0d, .333d, 1d),
signalAndSpectrum(
robotIndex, windowT, minF, maxF, nBins, "vx",
() -> BehaviorUtils.voxelPolyGrid()
.andThen(BehaviorUtils::getCentralElement)
.andThen(p -> p.getLinearVelocity().x)
)
),
Drawer.clip(
BoundingBox.of(0.333d, 0d, .666d, 1d),
signalAndSpectrum(
robotIndex, windowT, minF, maxF, nBins, "vy",
() -> BehaviorUtils.voxelPolyGrid()
.andThen(BehaviorUtils::getCentralElement)
.andThen(p -> p.getLinearVelocity().y)
)
),
Drawer.clip(
BoundingBox.of(0.666d, 0d, 1d, 1d),
signalAndSpectrum(
robotIndex, windowT, minF, maxF, nBins, "angle",
() -> BehaviorUtils.voxelPolyGrid()
.andThen(BehaviorUtils::getCentralElement)
.andThen(VoxelPoly::getAngle)
)
)
);
}
public static Drawer basic(String string) {
return Drawer.of(
Drawer.clear(),
world(),
new InfoDrawer(string)
);
}
public static Drawer basicWithMiniWorld(String string) {
return Drawer.of(
Drawer.clear(),
world(),
Drawer.clip(
BoundingBox.of(0.5d, 0.01d, 0.95d, 0.2d),
miniWorld()
),
new InfoDrawer(string)
);
}
public static Drawer footprintsAndPosture(int robotIndex, double windowT, int nFootprint, int nPosture) {
return Drawer.of(
Drawer.clip(
BoundingBox.of(0d, 0.0d, .666d, 1d),
new FootprintDrawer(
SubtreeDrawer.Extractor.matches(RobotShape.class, Robot.class, robotIndex),
windowT,
nFootprint
)
),
Drawer.clip(
BoundingBox.of(0.666d, 0.0d, 1d, 1d),
new PostureDrawer(
SubtreeDrawer.Extractor.matches(RobotShape.class, Robot.class, robotIndex),
windowT,
nPosture,
true
)
)
);
}
public static Drawer basicWithMiniWorldAndSpectra(String string) {
return Drawer.of(
Drawer.clear(),
Drawer.clip(
BoundingBox.of(0d, 0.0d, 1d, 0.5d),
Drawer.of(
world(),
Drawer.clip(
BoundingBox.of(0.5d, 0.01d, 0.95d, 0.2d),
miniWorld()
)
)
),
Drawer.clip(
BoundingBox.of(0d, 0.5d, 1d, 1d),
spectra(0, 5, 0, 2, 8)
),
new InfoDrawer(string)
);
}
public static Drawer basicWithMiniWorldAndFootprintsAndPosture(String string) {
return Drawer.of(
Drawer.clear(),
Drawer.clip(
BoundingBox.of(0d, 0.0d, 1d, 0.5d),
Drawer.of(
world(),
Drawer.clip(
BoundingBox.of(0.5d, 0.01d, 0.95d, 0.2d),
miniWorld()
)
)
),
Drawer.clip(
BoundingBox.of(0d, 0.5d, 1d, 1d),
footprintsAndPosture(0, 5, 4, 8)
),
new InfoDrawer(string)
);
}
public static Drawer basicWithMiniWorldAndBrainUsage(String string) {
return Drawer.of(
Drawer.clip(
BoundingBox.of(0d, 0d, 1d, 0.5d),
Drawers.basicWithMiniWorld(string)
),
Drawer.clip(
BoundingBox.of(0d, 0.5d, 1d, 1d),
Drawer.of(
Drawer.clear(),
new MLPDrawer(SubtreeDrawer.Extractor.matches(MLPState.class, null, null), 15d,
Set.of(MLPDrawer.Part.ACTIVATION_VALUES, MLPDrawer.Part.WEIGHTS, MLPDrawer.Part.VARIANCE_AND_WEIGHTS, MLPDrawer.Part.LEGEND, MLPDrawer.Part.T_AXIS, MLPDrawer.Part.STRUCTURE_AXIS, MLPDrawer.Part.HISTOGRAM)
)
)
)
);
}
public static Drawer basicWithMiniWorldAndBrain(String string) {
return Drawer.of(
Drawer.clip(
BoundingBox.of(0d, 0d, 1d, 0.5d),
Drawers.basicWithMiniWorld(string)
),
Drawer.clip(
BoundingBox.of(0d, 0.5d, 1d, 1d),
Drawer.of(
Drawer.clear(),
new MLPDrawer(SubtreeDrawer.Extractor.matches(MLPState.class, null, null), 15d,
Set.of(MLPDrawer.Part.ACTIVATION_VALUES, MLPDrawer.Part.WEIGHTS, MLPDrawer.Part.LEGEND, MLPDrawer.Part.T_AXIS, MLPDrawer.Part.STRUCTURE_AXIS, MLPDrawer.Part.HISTOGRAM)
)
)
)
);
}
public static Drawer basic() {
return basic("");
}
public static Drawer basicWithMiniWorld() {
return basicWithMiniWorld("");
}
}
| 8,121
| 31.618474
| 224
|
java
|
null |
VSRCollectiveControlViaSNCA-main/src/main/java/it/units/erallab/hmsrobots/viewers/drawers/LidarDrawer.java
|
/*
* Copyright (C) 2021 Eric Medvet <eric.medvet@gmail.com> (as Eric Medvet <eric.medvet@gmail.com>)
*
* This program is free software: you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package it.units.erallab.hmsrobots.viewers.drawers;
import it.units.erallab.hmsrobots.core.geometry.Point2;
import it.units.erallab.hmsrobots.core.objects.SensingVoxel;
import it.units.erallab.hmsrobots.core.snapshots.LidarReadings;
import it.units.erallab.hmsrobots.core.snapshots.Snapshot;
import it.units.erallab.hmsrobots.core.snapshots.VoxelPoly;
import it.units.erallab.hmsrobots.viewers.DrawingUtils;
import java.awt.*;
import java.awt.geom.Ellipse2D;
import java.util.List;
/**
 * Drawer of lidar sensors hosted by a sensing voxel: for each lidar ray it
 * draws the full ray from the voxel center and, when the reading is shorter
 * than the ray length (i.e., something was hit), a small circle at the hit
 * point.
 */
public class LidarDrawer extends SubtreeDrawer {
  private final static Color COLOR = Color.RED;
  //diameter (in world units) of the circle marking a ray hit
  private final static double CIRCLE_SIZE = 0.5d;
  private final static Extractor LIDAR_EXTRACTOR = Extractor.matches(LidarReadings.class, null, null);
  //color used for both rays and hit circles
  private final Color strokeColor;

  public LidarDrawer(Color strokeColor) {
    super(Extractor.matches(null, SensingVoxel.class, null));
    this.strokeColor = strokeColor;
  }

  public LidarDrawer() {
    this(COLOR);
  }

  @Override
  protected void innerDraw(double t, Snapshot snapshot, Graphics2D g) {
    List<Snapshot> lidarSnapshots = LIDAR_EXTRACTOR.extract(snapshot);
    if (lidarSnapshots.isEmpty()) {
      return;
    }
    VoxelPoly voxelPoly = (VoxelPoly) snapshot.getContent();
    Point2 center = voxelPoly.center();
    //the stroke color is loop-invariant: set it once instead of once per ray
    g.setColor(strokeColor);
    for (Snapshot lidarSnapshot : lidarSnapshots) {
      LidarReadings lidarReadings = (LidarReadings) lidarSnapshot.getContent();
      double angle = lidarReadings.getVoxelAngle();
      //max of the first domain is the ray length
      double rayLength = lidarReadings.getDomains()[0].getMax();
      double[] rayDirections = lidarReadings.getRayDirections();
      double[] rayHits = lidarReadings.getReadings();
      for (int rayIdx = 0; rayIdx < rayDirections.length; rayIdx++) {
        double direction = rayDirections[rayIdx];
        //ray directions are relative to the voxel: add the voxel rotation angle
        direction += angle;
        //draw the full ray from the voxel center towards the direction
        g.draw(DrawingUtils.toPath(
            center,
            Point2.of(
                center.x + rayLength * Math.cos(direction),
                center.y + rayLength * Math.sin(direction)
            )
        ));
        //draw a circle only where the ray actually hit something
        if (rayHits[rayIdx] < rayLength) {
          g.draw(new Ellipse2D.Double(
              center.x + rayHits[rayIdx] * Math.cos(direction) - CIRCLE_SIZE / 2d,
              center.y + rayHits[rayIdx] * Math.sin(direction) - CIRCLE_SIZE / 2d,
              CIRCLE_SIZE,
              CIRCLE_SIZE
          ));
        }
      }
    }
  }
}
| 3,298
| 36.067416
| 102
|
java
|
null |
VSRCollectiveControlViaSNCA-main/src/main/java/it/units/erallab/hmsrobots/behavior/Footprint.java
|
/*
* Copyright (c) "Eric Medvet" 2021.
*
* This program is free software: you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package it.units.erallab.hmsrobots.behavior;
import java.util.Arrays;
/**
* @author "Eric Medvet" on 2021/09/16 for 2dhmsr
*/
public class Footprint {
private final boolean[] mask;
public Footprint(boolean[] mask) {
this.mask = mask;
}
public boolean[] getMask() {
return mask;
}
public int length() {
return mask.length;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
Footprint footprint = (Footprint) o;
return Arrays.equals(mask, footprint.mask);
}
@Override
public int hashCode() {
return Arrays.hashCode(mask);
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
for (boolean b : mask) {
sb.append(b ? '_' : '.');
}
return sb.toString();
}
}
| 1,550
| 23.619048
| 74
|
java
|
null |
VSRCollectiveControlViaSNCA-main/src/main/java/it/units/erallab/hmsrobots/behavior/PoseUtils.java
|
/*
* Copyright (C) 2021 Eric Medvet <eric.medvet@gmail.com> (as Eric Medvet <eric.medvet@gmail.com>)
*
* This program is free software: you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package it.units.erallab.hmsrobots.behavior;
import it.units.erallab.hmsrobots.core.controllers.PosesController;
import it.units.erallab.hmsrobots.core.objects.ControllableVoxel;
import it.units.erallab.hmsrobots.core.objects.Robot;
import it.units.erallab.hmsrobots.tasks.FinalPosture;
import it.units.erallab.hmsrobots.util.Grid;
import it.units.erallab.hmsrobots.util.SerializationUtils;
import org.apache.commons.math3.ml.clustering.CentroidCluster;
import org.apache.commons.math3.ml.clustering.Clusterable;
import org.apache.commons.math3.ml.clustering.KMeansPlusPlusClusterer;
import org.apache.commons.math3.ml.distance.DistanceMeasure;
import org.apache.commons.math3.ml.distance.EuclideanDistance;
import org.apache.commons.math3.ml.distance.ManhattanDistance;
import org.apache.commons.math3.random.JDKRandomGenerator;
import java.util.*;
import java.util.stream.Collectors;
/**
* @author "Eric Medvet" on 2021/12/03 for 2dhmsr
*/
/**
 * Utilities for computing "poses" (sets of voxel grid keys to be contracted
 * together) for a given robot shape, either geometrically (cardinal regions)
 * or via k-means clustering on voxel positions or resulting postures.
 */
public class PoseUtils {
  //adapter making a grid key clusterable as its (x, y) coordinates
  private static class ClusterableGridKey implements Clusterable {
    private final Grid.Key key;
    public ClusterableGridKey(Grid.Key key) {
      this.key = key;
    }
    @Override
    public double[] getPoint() {
      return new double[]{key.getX(), key.getY()};
    }
  }
  //adapter pairing a pose with the posture it produces; the cluster-space point
  //is the posture grid flattened to a 0/1 vector
  private static class ClusterablePosture implements Clusterable {
    private final Set<Grid.Key> pose;
    private final Grid<Boolean> posture;
    public ClusterablePosture(Set<Grid.Key> pose, Grid<Boolean> posture) {
      this.pose = pose;
      this.posture = posture;
    }
    @Override
    public double[] getPoint() {
      return posture.values().stream().mapToDouble(b -> b ? 1d : 0d).toArray();
    }
  }
  private PoseUtils() {
  }
  /**
   * Splits the shape into four geometric poses: left quarter (x < W/4), right
   * quarter (x >= 3W/4), and the central half split at its mean y into two
   * parts ("top" is y <= mean; NOTE(review): whether low y is visually top or
   * bottom depends on the grid's y orientation — confirm against Grid).
   * Only filled (true) entries of the shape are included.
   *
   * @return the four poses in order: left, top, bottom, right
   */
  public static Set<Set<Grid.Key>> computeCardinalPoses(Grid<Boolean> shape) {
    Set<Grid.Key> left = shape.stream().filter(e -> e.getX() < shape.getW() / 4d).filter(Grid.Entry::getValue).collect(Collectors.toSet());
    Set<Grid.Key> right = shape.stream().filter(e -> e.getX() >= shape.getW() * 3d / 4d).filter(Grid.Entry::getValue).collect(Collectors.toSet());
    Set<Grid.Key> center = shape.stream().filter(e -> e.getX() >= shape.getW() / 4d && e.getX() < shape.getW() * 3d / 4d).filter(Grid.Entry::getValue).collect(Collectors.toSet());
    double midCenterY = center.stream().mapToDouble(Grid.Key::getY).average().orElse(0d);
    Set<Grid.Key> top = center.stream().filter(e -> e.getY() <= midCenterY).collect(Collectors.toSet());
    Set<Grid.Key> bottom = center.stream().filter(e -> e.getY() > midCenterY).collect(Collectors.toSet());
    return new LinkedHashSet<>(List.of(left, top, bottom, right));
  }
  /**
   * Simulates a robot with the given shape holding the given pose (via a
   * {@link PosesController} with a 0.5 s step) for {@code finalT} seconds and
   * returns its final posture as a {@code gridSize x gridSize} boolean grid.
   */
  public static Grid<Boolean> computeDynamicPosture(Grid<Boolean> shape, Set<Grid.Key> pose, ControllableVoxel voxelPrototype, double finalT, int gridSize) {
    Grid<ControllableVoxel> body = Grid.create(shape, b -> b ? SerializationUtils.clone(voxelPrototype) : null);
    PosesController controller = new PosesController(0.5d, List.of(pose));
    Robot<ControllableVoxel> robot = new Robot<>(controller, body);
    FinalPosture finalPosture = new FinalPosture(gridSize, finalT);
    return finalPosture.apply(robot);
  }
  /**
   * Builds all 2^k unions of the k starting poses (including the empty pose),
   * simulates the posture each union produces, clusters the postures into
   * {@code n} clusters (k-means++, Manhattan distance, seeded RNG), and returns
   * the pose of the medoid of each cluster.
   */
  public static Set<Set<Grid.Key>> computeClusteredByPosturePoses(Grid<Boolean> shape, Set<Set<Grid.Key>> startingPoses, int n, int seed, ControllableVoxel voxelPrototype, double finalT, int gridSize) {
    List<Set<Grid.Key>> sPoses = new ArrayList<>(startingPoses);
    List<Set<Grid.Key>> allPoses = new ArrayList<>((int) Math.pow(2, sPoses.size()));
    //build expanded poses (2^|poses.size|)
    for (int i = 0; i < Math.pow(2, sPoses.size()); i++) {
      Set<Grid.Key> combinedPose = new HashSet<>();
      for (int j = 0; j < sPoses.size(); j++) {
        //j-th bit of i decides whether the j-th starting pose is included
        int remainder = (i / (int) Math.pow(2, j)) % 2;
        if (remainder == 1) {
          combinedPose.addAll(sPoses.get(j));
        }
      }
      allPoses.add(combinedPose);
    }
    //compute all postures
    Collection<ClusterablePosture> points = allPoses.stream().map(p -> new ClusterablePosture(p, computeDynamicPosture(shape, p, voxelPrototype, finalT, gridSize))).collect(Collectors.toList());
    //cluster postures in nPoses clusters
    KMeansPlusPlusClusterer<ClusterablePosture> clusterer = new KMeansPlusPlusClusterer<>(n, -1, new ManhattanDistance(), new JDKRandomGenerator(seed));
    List<CentroidCluster<ClusterablePosture>> clusters = clusterer.cluster(points);
    //find representatives
    return clusters.stream().map(c -> center(c, new ManhattanDistance()).pose).collect(Collectors.toSet());
  }
  //medoid: the cluster point closest (w.r.t. d) to the cluster centroid
  private static <K extends Clusterable> K center(CentroidCluster<K> cluster, DistanceMeasure d) {
    return cluster.getPoints().stream().min(Comparator.comparingDouble(k -> d.compute(k.getPoint(), cluster.getCenter().getPoint()))).orElseThrow();
  }
  /**
   * Clusters the filled voxels of the shape into {@code n} poses by position
   * (k-means++, Euclidean distance, seeded RNG).
   */
  public static Set<Set<Grid.Key>> computeClusteredByPositionPoses(Grid<Boolean> shape, int n, int seed) {
    Collection<ClusterableGridKey> points = shape.stream().filter(Grid.Entry::getValue).map(ClusterableGridKey::new).collect(Collectors.toList());
    KMeansPlusPlusClusterer<ClusterableGridKey> clusterer = new KMeansPlusPlusClusterer<>(n, -1, new EuclideanDistance(), new JDKRandomGenerator(seed));
    List<CentroidCluster<ClusterableGridKey>> clusters = clusterer.cluster(points);
    return clusters.stream().map(c -> c.getPoints().stream().map(cgk -> cgk.key).collect(Collectors.toSet())).collect(Collectors.toSet());
  }
}
| 6,185
| 48.095238
| 202
|
java
|
null |
VSRCollectiveControlViaSNCA-main/src/main/java/it/units/erallab/hmsrobots/behavior/BehaviorUtils.java
|
/*
* Copyright (c) "Eric Medvet" 2021.
*
* This program is free software: you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package it.units.erallab.hmsrobots.behavior;
import com.google.common.collect.Range;
import it.units.erallab.hmsrobots.core.geometry.BoundingBox;
import it.units.erallab.hmsrobots.core.geometry.Point2;
import it.units.erallab.hmsrobots.core.geometry.Shape;
import it.units.erallab.hmsrobots.core.snapshots.RobotShape;
import it.units.erallab.hmsrobots.core.snapshots.Snapshot;
import it.units.erallab.hmsrobots.core.snapshots.VoxelPoly;
import it.units.erallab.hmsrobots.util.Domain;
import it.units.erallab.hmsrobots.util.Grid;
import org.apache.commons.math3.complex.Complex;
import org.apache.commons.math3.transform.DftNormalization;
import org.apache.commons.math3.transform.FastFourierTransformer;
import org.apache.commons.math3.transform.TransformType;
import java.util.*;
import java.util.function.Function;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import java.util.stream.Stream;
/**
* @author "Eric Medvet" on 2021/09/16 for 2dhmsr
*/
public class BehaviorUtils {
  private BehaviorUtils() {
  }
  /**
   * Returns the mean of the centers of the given shapes.
   * NOTE(review): an empty collection yields a 0/0 division (NaN coordinates) —
   * callers appear to always pass non-empty robots.
   */
  public static Point2 center(Collection<? extends Shape> shapes) {
    double x = 0d;
    double y = 0d;
    for (Shape shape : shapes) {
      Point2 center = shape.center();
      x = x + center.x;
      y = y + center.y;
    }
    return Point2.of(x / (double) shapes.size(), y / (double) shapes.size());
  }
  /**
   * Returns the element of the grid whose (x, y) index is closest (squared
   * Euclidean distance) to the mean index of all non-null entries.
   *
   * @throws IllegalArgumentException if all grid entries are null
   */
  public static <K> K getCentralElement(Grid<K> grid) {
    if (grid.values().stream().noneMatch(Objects::nonNull)) {
      throw new IllegalArgumentException("Cannot get central element of an empty grid");
    }
    //mean index of the non-null entries
    double mX = grid.stream().filter(e -> e.getValue() != null).mapToInt(Grid.Entry::getX).average().orElse(0d);
    double mY = grid.stream().filter(e -> e.getValue() != null).mapToInt(Grid.Entry::getY).average().orElse(0d);
    double minD = Double.MAX_VALUE;
    int closestX = 0;
    int closestY = 0;
    for (int x = 0; x < grid.getW(); x++) {
      for (int y = 0; y < grid.getH(); y++) {
        double d = (x - mX) * (x - mX) + (y - mY) * (y - mY);
        if (d < minD) {
          minD = d;
          closestX = x;
          closestY = y;
        }
      }
    }
    return grid.get(closestX, closestY);
  }
  /**
   * Rasterizes the bounding boxes of the given shapes onto an n-by-n boolean
   * grid covering the overall bounding box of the robot, after enlarging that
   * box to a square (so that x and y are scaled equally).
   */
  public static Grid<Boolean> computePosture(Collection<? extends Shape> shapes, int n) {
    Collection<BoundingBox> boxes = shapes.stream().map(Shape::boundingBox).collect(Collectors.toList());
    double robotMinX = boxes.stream().mapToDouble(b -> b.min.x).min().orElseThrow(() -> new IllegalArgumentException("Empty robot"));
    double robotMaxX = boxes.stream().mapToDouble(b -> b.max.x).max().orElseThrow(() -> new IllegalArgumentException("Empty robot"));
    double robotMinY = boxes.stream().mapToDouble(b -> b.min.y).min().orElseThrow(() -> new IllegalArgumentException("Empty robot"));
    double robotMaxY = boxes.stream().mapToDouble(b -> b.max.y).max().orElseThrow(() -> new IllegalArgumentException("Empty robot"));
    //adjust box to make it squared
    if ((robotMaxY - robotMinY) < (robotMaxX - robotMinX)) {
      double d = (robotMaxX - robotMinX) - (robotMaxY - robotMinY);
      robotMaxY = robotMaxY + d / 2;
      robotMinY = robotMinY - d / 2;
    } else if ((robotMaxY - robotMinY) > (robotMaxX - robotMinX)) {
      double d = (robotMaxY - robotMinY) - (robotMaxX - robotMinX);
      robotMaxX = robotMaxX + d / 2;
      robotMinX = robotMinX - d / 2;
    }
    Grid<Boolean> mask = Grid.create(n, n, false);
    for (BoundingBox b : boxes) {
      //map each box's corners to grid indices in [0, n-1] and fill the cells
      int minXIndex = (int) Math.round((b.min.x - robotMinX) / (robotMaxX - robotMinX) * (double) (n - 1));
      int maxXIndex = (int) Math.round((b.max.x - robotMinX) / (robotMaxX - robotMinX) * (double) (n - 1));
      int minYIndex = (int) Math.round((b.min.y - robotMinY) / (robotMaxY - robotMinY) * (double) (n - 1));
      int maxYIndex = (int) Math.round((b.max.y - robotMinY) / (robotMaxY - robotMinY) * (double) (n - 1));
      for (int x = minXIndex; x <= maxXIndex; x++) {
        for (int y = minYIndex; y <= maxYIndex; y++) {
          mask.set(x, y, true);
        }
      }
    }
    return mask;
  }
  /**
   * Computes the n-bin ground-contact footprint of the robot: each bin covers
   * an equal share of the robot's x extent and is true iff some voxel touching
   * the ground overlaps it.
   */
  public static Footprint computeFootprint(Collection<? extends VoxelPoly> polies, int n) {
    Collection<BoundingBox> boxes = polies.stream().map(Shape::boundingBox).collect(Collectors.toList());
    double robotMinX = boxes.stream().mapToDouble(b -> b.min.x).min().orElseThrow(() -> new IllegalArgumentException("Empty robot"));
    double robotMaxX = boxes.stream().mapToDouble(b -> b.max.x).max().orElseThrow(() -> new IllegalArgumentException("Empty robot"));
    //x extents of the voxels that are touching the ground
    List<Domain> contacts = polies.stream().filter(VoxelPoly::isTouchingGround).map(s -> Domain.of(s.boundingBox().min.x, s.boundingBox().max.x)).collect(Collectors.toList());
    boolean[] mask = new boolean[n];
    for (Domain contact : contacts) {
      int minIndex = (int) Math.round((contact.getMin() - robotMinX) / (robotMaxX - robotMinX) * (double) (n - 1));
      int maxIndex = (int) Math.round((contact.getMax() - robotMinX) / (robotMaxX - robotMinX) * (double) (n - 1));
      //clamp the upper index so rounding cannot go out of bounds
      for (int x = minIndex; x <= Math.min(maxIndex, n - 1); x++) {
        mask[x] = true;
      }
    }
    return new Footprint(mask);
  }
  /**
   * Cell-wise majority vote over a collection of equally-sized postures: a cell
   * is true iff it is true in strictly more than half of the postures.
   */
  public static Grid<Boolean> computeAveragePosture(Collection<Grid<Boolean>> postures) {
    int w = postures.iterator().next().getW();
    int h = postures.iterator().next().getH();
    return Grid.create(
        w,
        h,
        (x, y) -> postures.stream()
            .mapToDouble(p -> p.get(x, y) ? 1d : 0d)
            .average()
            .orElse(0d) > 0.5d
    );
  }
  /**
   * Computes the magnitude spectrum of a (time, value) signal, using the mean
   * sampling interval of the signal as dT.
   */
  public static SortedMap<Double, Double> computeSpectrum(SortedMap<Double, Double> signal) {
    List<Double> intervals = new ArrayList<>(signal.size() - 1);
    double previousT = Double.NaN;
    for (double t : signal.keySet()) {
      if (!Double.isNaN(previousT)) {
        intervals.add(t - previousT);
      }
      previousT = t;
    }
    return computeSpectrum(
        signal.values().stream().mapToDouble(d -> d).toArray(),
        intervals.stream().mapToDouble(d -> d).average().orElse(0d)
    );
  }
  /**
   * Computes the magnitude spectrum of a uniformly sampled signal: the signal
   * is zero-padded to the next power of two, FFT'd, and the magnitudes of the
   * first half of the bins are mapped to frequencies in [0, 1/(2 dT)] (up to
   * the Nyquist frequency).
   */
  public static SortedMap<Double, Double> computeSpectrum(double[] signal, double dT) {
    // pad
    int paddedSize = (int) Math.pow(2d, Math.ceil(Math.log(signal.length) / Math.log(2d)));
    if (paddedSize != signal.length) {
      double[] paddedSignal = new double[paddedSize];
      System.arraycopy(signal, 0, paddedSignal, 0, signal.length);
      signal = paddedSignal;
    }
    // compute fft
    FastFourierTransformer fft = new FastFourierTransformer(DftNormalization.STANDARD);
    List<Double> f = Stream.of(fft.transform(signal, TransformType.FORWARD))
        .map(Complex::abs)
        .collect(Collectors.toList())
        .subList(0, paddedSize / 2 + 1);
    SortedMap<Double, Double> spectrum = new TreeMap<>();
    for (int i = 0; i < f.size(); i++) {
      spectrum.put(
          1d / dT / 2d * (double) i / (double) f.size(),
          f.get(i)
      );
    }
    return spectrum;
  }
  /**
   * Quantizes the spectrum of the signal into nBins equal-width frequency bins
   * over [minF, maxF]; each bin value is the mean magnitude of the spectrum
   * entries falling in the (half-open) bin.
   */
  public static SortedMap<Domain, Double> computeQuantizedSpectrum(SortedMap<Double, Double> signal, double minF, double maxF, int nBins) {
    SortedMap<Double, Double> spectrum = computeSpectrum(signal);
    SortedMap<Domain, Double> qSpectrum = new TreeMap<>(Comparator.comparingDouble(Domain::getMin));
    double binSpan = (maxF - minF) / (double) nBins;
    for (int i = 0; i < nBins; i++) {
      double binMinF = minF + binSpan * (double) i;
      double binMaxF = minF + binSpan * ((double) i + 1d);
      qSpectrum.put(
          Domain.of(binMinF, binMaxF),
          spectrum.subMap(binMinF, binMaxF).values().stream()
              .mapToDouble(d -> d)
              .average()
              .orElse(0d)
      );
    }
    return qSpectrum;
  }
  /**
   * Detects gaits in the given time series of voxel polies and returns the one
   * with the longest total duration, or null if no gait is found.
   *
   * @param interval        footprint quantization interval
   * @param longestInterval longest gait sequence length, in seconds
   * @param n               number of footprint bins
   */
  public static Gait computeMainGait(double interval, double longestInterval, SortedMap<Double, Collection<? extends VoxelPoly>> polies, int n) {
    List<Gait> gaits = computeGaits(
        computeQuantizedFootprints(interval, polies, n),
        2,
        (int) Math.round(longestInterval / interval),
        interval
    );
    if (gaits.isEmpty()) {
      return null;
    }
    gaits.sort(Comparator.comparingDouble(Gait::getDuration).reversed());
    return gaits.get(0);
  }
  /**
   * Quantizes a time series of voxel polies into one footprint per interval:
   * within each interval, a bin is true iff it is true in strictly more than
   * half of the footprints observed in that interval (majority vote).
   */
  public static SortedMap<Double, Footprint> computeQuantizedFootprints(double interval, SortedMap<Double, Collection<? extends VoxelPoly>> polies, int n) {
    SortedMap<Double, Footprint> quantized = new TreeMap<>();
    for (double t = polies.firstKey(); t <= polies.lastKey(); t = t + interval) {
      List<Footprint> local = polies.subMap(t, t + interval).values().stream()
          .map(voxelPolies -> computeFootprint(voxelPolies, n))
          .collect(Collectors.toList());
      double[] counts = new double[n];
      double tot = local.size();
      for (int x = 0; x < n; x++) {
        final int finalX = x;
        counts[x] = local.stream().mapToDouble(f -> f.getMask()[finalX] ? 1d : 0d).sum();
      }
      boolean[] localFootprint = new boolean[n];
      for (int x = 0; x < n; x++) {
        if (counts[x] > tot / 2d) {
          localFootprint[x] = true;
        }
      }
      quantized.put(t, new Footprint(localFootprint));
    }
    return quantized;
  }
  /**
   * Detects gaits: finds repeated non-overlapping footprint subsequences of
   * length in [minSequenceLength, maxSequenceLength], computes the most common
   * repetition interval across all subsequences, and returns one {@link Gait}
   * per subsequence whose own mode interval equals the global one.
   */
  public static List<Gait> computeGaits(SortedMap<Double, Footprint> footprints, int minSequenceLength, int maxSequenceLength, double interval) {
    // compute subsequences
    Map<List<Footprint>, List<Range<Double>>> sequences = new HashMap<>();
    List<Footprint> footprintList = new ArrayList<>(footprints.values());
    List<Range<Double>> ranges = footprints.keySet().stream()
        .map(d -> Range.closedOpen(d, d + interval))
        .collect(Collectors.toList()); // list of range of each footprint
    for (int l = minSequenceLength; l <= maxSequenceLength; l++) {
      for (int i = l; i <= footprintList.size(); i++) {
        List<Footprint> sequence = footprintList.subList(i - l, i);
        List<Range<Double>> localRanges = sequences.getOrDefault(sequence, new ArrayList<>());
        // make sure there's no overlap
        if (localRanges.size() == 0 || localRanges.get(localRanges.size() - 1).upperEndpoint() <= ranges.get(i - l).lowerEndpoint()) {
          localRanges.add(Range.openClosed(
              ranges.get(i - l).lowerEndpoint(), // first t of the first footprint
              ranges.get(i - 1).upperEndpoint() // last t of the last footprint
          ));
        }
        sequences.put(sequence, localRanges);
      }
    }
    // compute median interval
    List<Double> allIntervals = sequences.values().stream()
        .map(l -> IntStream.range(0, l.size() - 1)
            .mapToObj(i -> l.get(i + 1).lowerEndpoint() - l.get(i).lowerEndpoint())
            .collect(Collectors.toList())
        ) // stream of List<Double>, each being a list of the intervals of that subsequence
        .reduce((l1, l2) -> Stream.concat(l1.stream(), l2.stream()).collect(Collectors.toList()))
        .orElse(List.of());
    if (allIntervals.isEmpty()) {
      return List.of();
    }
    double modeInterval = mode(allIntervals);
    // compute gaits
    return sequences.entrySet().stream()
        .filter(e -> e.getValue().size() > 1) // discard subsequences observed only once
        .map(e -> {
              //intervals between consecutive occurrences of this subsequence
              List<Double> intervals = IntStream.range(0, e.getValue().size() - 1)
                  .mapToObj(i -> e.getValue().get(i + 1).lowerEndpoint() - e.getValue().get(i).lowerEndpoint())
                  .collect(Collectors.toList());
              //fraction of each interval actually covered by the subsequence
              List<Double> coverages = IntStream.range(0, intervals.size())
                  .mapToObj(i -> (e.getValue().get(i).upperEndpoint() - e.getValue().get(i).lowerEndpoint()) / intervals.get(i))
                  .collect(Collectors.toList());
              double localModeInterval = mode(intervals);
              return new Gait(
                  e.getKey(),
                  localModeInterval,
                  coverages.stream().mapToDouble(d -> d).average().orElse(0d),
                  e.getValue().stream().mapToDouble(r -> r.upperEndpoint() - r.lowerEndpoint()).sum(),
                  (double) intervals.stream().filter(d -> d == localModeInterval).count() / (double) e.getValue().size()
              );
            }
        )
        .filter(g -> g.getModeInterval() == modeInterval)
        .collect(Collectors.toList());
  }
  //most frequent element of the collection (ties broken arbitrarily by max count)
  private static <K> K mode(Collection<K> collection) {
    return collection.stream()
        .collect(Collectors.groupingBy(Function.identity(), Collectors.counting()))
        .entrySet()
        .stream()
        .max(Map.Entry.comparingByValue())
        .orElseThrow()
        .getKey();
  }
  /**
   * Returns a function extracting the grid of voxel polies from a snapshot
   * whose content is a {@link RobotShape}.
   *
   * @throws IllegalArgumentException (from the returned function) if the
   *                                  snapshot content is not a RobotShape
   */
  public static Function<Snapshot, Grid<? extends VoxelPoly>> voxelPolyGrid() {
    return s -> {
      if (!RobotShape.class.isAssignableFrom(s.getContent().getClass())) {
        throw new IllegalArgumentException("Cannot extract voxel polies from a snapshots of a non robot");
      }
      return ((RobotShape) s.getContent()).getPolies();
    };
  }
}
| 13,695
| 42.897436
| 175
|
java
|
null |
VSRCollectiveControlViaSNCA-main/src/main/java/it/units/erallab/hmsrobots/behavior/Gait.java
|
/*
* Copyright (c) "Eric Medvet" 2021.
*
* This program is free software: you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package it.units.erallab.hmsrobots.behavior;
import java.util.List;
import java.util.stream.IntStream;
/**
* @author "Eric Medvet" on 2021/09/16 for 2dhmsr
*/
/**
 * Immutable descriptor of a detected gait: the repeated footprint sequence
 * together with statistics of its occurrences (mode repetition interval,
 * average coverage, total duration, purity).
 */
public class Gait {
  private final List<Footprint> footprints;
  private final double modeInterval;
  private final double coverage;
  private final double duration;
  private final double purity;

  public Gait(List<Footprint> footprints, double modeInterval, double coverage, double duration, double purity) {
    this.footprints = footprints;
    this.modeInterval = modeInterval;
    this.coverage = coverage;
    this.duration = duration;
    this.purity = purity;
  }

  public List<Footprint> getFootprints() {
    return footprints;
  }

  public double getModeInterval() {
    return modeInterval;
  }

  public double getCoverage() {
    return coverage;
  }

  public double getDuration() {
    return duration;
  }

  public double getPurity() {
    return purity;
  }

  /**
   * @return the mean, over the footprints of this gait, of the fraction of
   * mask bins in contact with the ground; 0 if there are no footprints
   */
  public double getAvgTouchArea() {
    return footprints.stream()
        .mapToDouble(Gait::touchedFraction)
        .average()
        .orElse(0d);
  }

  //fraction of the footprint's bins that are in contact with the ground
  private static double touchedFraction(Footprint footprint) {
    boolean[] mask = footprint.getMask();
    double touched = 0d;
    for (boolean cell : mask) {
      if (cell) {
        touched = touched + 1d;
      }
    }
    return touched / (double) mask.length;
  }

  @Override
  public String toString() {
    return String.format("Gait{footprints=%s, modeInterval=%.1fs, coverage=%.2f, duration=%.1fs, purity=%.2f}",
        footprints, modeInterval, coverage, duration, purity);
  }
}
| 2,129
| 26.662338
| 113
|
java
|
null |
VSRCollectiveControlViaSNCA-main/src/main/java/it/units/erallab/hmsrobots/tasks/FinalPosture.java
|
/*
* Copyright (C) 2021 Eric Medvet <eric.medvet@gmail.com> (as Eric Medvet <eric.medvet@gmail.com>)
*
* This program is free software: you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package it.units.erallab.hmsrobots.tasks;
import it.units.erallab.hmsrobots.behavior.BehaviorUtils;
import it.units.erallab.hmsrobots.core.objects.ControllableVoxel;
import it.units.erallab.hmsrobots.core.objects.Robot;
import it.units.erallab.hmsrobots.core.objects.WorldObject;
import it.units.erallab.hmsrobots.core.snapshots.SnapshotListener;
import it.units.erallab.hmsrobots.util.Grid;
import org.dyn4j.dynamics.Settings;
import org.dyn4j.dynamics.World;
import org.dyn4j.geometry.Vector2;
import java.util.ArrayList;
import java.util.List;
import java.util.Objects;
import java.util.stream.Collectors;
/**
 * Task that simulates a robot for a fixed amount of time and returns its final
 * posture as a square boolean grid.
 */
public class FinalPosture extends AbstractTask<Robot<?>, Grid<Boolean>> {
  private final int gridSize;
  private final double finalT;

  /**
   * @param gridSize side (in cells) of the square posture grid
   * @param finalT   simulated time after which the posture is taken
   */
  public FinalPosture(int gridSize, double finalT) {
    super(new Settings());
    this.gridSize = gridSize;
    this.finalT = finalT;
  }

  @Override
  public Grid<Boolean> apply(Robot<?> robot, SnapshotListener listener) {
    //set up the physics world; Vector2.create(magnitude, direction) with
    //magnitude 0 yields the zero vector, i.e., the posture is evaluated with
    //no gravity (NOTE(review): confirm against the dyn4j Vector2.create contract)
    World physicsWorld = new World();
    physicsWorld.setSettings(settings);
    physicsWorld.setGravity(Vector2.create(0d, Math.PI));
    //reset the robot and place it in the world
    robot.reset();
    List<WorldObject> objects = new ArrayList<>();
    robot.addTo(physicsWorld);
    objects.add(robot);
    //advance the simulation until finalT is reached
    for (double t = 0d; t < finalT; ) {
      t = AbstractTask.updateWorld(t, settings.getStepFrequency(), physicsWorld, objects, listener);
    }
    //rasterize the final voxel shapes into a gridSize x gridSize posture grid
    return BehaviorUtils.computePosture(
        robot.getVoxels().values().stream()
            .filter(Objects::nonNull)
            .map(ControllableVoxel::getVoxelPoly)
            .collect(Collectors.toList()),
        gridSize
    );
  }
}
| 2,446
| 32.520548
| 98
|
java
|
null |
VSRCollectiveControlViaSNCA-main/src/main/java/it/units/erallab/hmsrobots/tasks/Task.java
|
/*
* Copyright (C) 2021 Eric Medvet <eric.medvet@gmail.com> (as Eric Medvet <eric.medvet@gmail.com>)
*
* This program is free software: you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package it.units.erallab.hmsrobots.tasks;
import it.units.erallab.hmsrobots.core.snapshots.SnapshotListener;
import java.util.function.Function;
/**
 * A task that maps a solution of type {@code S} to a result of type {@code R},
 * optionally notifying a {@link SnapshotListener} while it runs.
 *
 * @param <S> the type of the solution being evaluated
 * @param <R> the type of the task result
 */
public interface Task<S, R> extends Function<S, R> {
  /**
   * Runs the task on {@code solution}, sending snapshots to {@code listener} (may be null).
   */
  R apply(S solution, SnapshotListener listener);
  // Function#apply: runs the task without snapshot listening.
  default R apply(S solution) {
    return apply(solution, null);
  }
}
| 1,080
| 32.78125
| 98
|
java
|
null |
VSRCollectiveControlViaSNCA-main/src/main/java/it/units/erallab/hmsrobots/tasks/AbstractTask.java
|
/*
* Copyright (C) 2021 Eric Medvet <eric.medvet@gmail.com> (as Eric Medvet <eric.medvet@gmail.com>)
*
* This program is free software: you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package it.units.erallab.hmsrobots.tasks;
import it.units.erallab.hmsrobots.core.Actionable;
import it.units.erallab.hmsrobots.core.objects.WorldObject;
import it.units.erallab.hmsrobots.core.snapshots.Snapshot;
import it.units.erallab.hmsrobots.core.snapshots.SnapshotListener;
import it.units.erallab.hmsrobots.core.snapshots.Snapshottable;
import org.dyn4j.dynamics.Settings;
import org.dyn4j.dynamics.World;
import java.util.List;
import java.util.stream.Collectors;
/**
* @author Eric Medvet <eric.medvet@gmail.com>
*/
public abstract class AbstractTask<T, R> implements Task<T, R> {

  protected final Settings settings;

  public AbstractTask(Settings settings) {
    this.settings = settings;
  }

  public Settings getSettings() {
    return settings;
  }

  /**
   * Advances the world by one physics step, lets every actionable object act at
   * the new time, and — if a listener is given — publishes a snapshot of all
   * snapshottable objects.
   *
   * @return the updated simulation time, i.e. {@code t + dT}
   */
  protected static double updateWorld(final double t, final double dT, final World world, final List<WorldObject> objects, final SnapshotListener listener) {
    final double updatedT = t + dT;
    world.step(1);
    //let each actionable object act at the new time (same iteration order as the list)
    for (WorldObject object : objects) {
      if (object instanceof Actionable) {
        ((Actionable) object).act(updatedT);
      }
    }
    //possibly notify the listener with a snapshot of the whole world
    if (listener != null) {
      List<Snapshot> snapshots = objects.stream()
          .filter(o -> o instanceof Snapshottable)
          .map(o -> ((Snapshottable) o).getSnapshot())
          .collect(Collectors.toList());
      listener.listen(updatedT, Snapshot.world(snapshots));
    }
    return updatedT;
  }
}
| 2,204
| 32.923077
| 157
|
java
|
null |
VSRCollectiveControlViaSNCA-main/src/main/java/it/units/erallab/hmsrobots/tasks/locomotion/Outcome.java
|
/*
* Copyright (C) 2021 Eric Medvet <eric.medvet@gmail.com> (as Eric Medvet <eric.medvet@gmail.com>)
*
* This program is free software: you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package it.units.erallab.hmsrobots.tasks.locomotion;
import it.units.erallab.hmsrobots.behavior.BehaviorUtils;
import it.units.erallab.hmsrobots.behavior.Footprint;
import it.units.erallab.hmsrobots.core.geometry.Point2;
import it.units.erallab.hmsrobots.core.snapshots.MLPState;
import it.units.erallab.hmsrobots.core.snapshots.VoxelPoly;
import it.units.erallab.hmsrobots.util.Domain;
import it.units.erallab.hmsrobots.util.Grid;
import java.util.*;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
public class Outcome {
public static class Observation {
private final double absSumOfWeights;
private final Grid<VoxelPoly> voxelPolies;
private final double terrainHeight;
private final double computationTime;
public Observation(MLPState mlpState, Grid<VoxelPoly> voxelPolies, double terrainHeight, double computationTime) {
absSumOfWeights = computeSumOfAbsoluteWeights(mlpState);
this.voxelPolies = voxelPolies;
this.terrainHeight = terrainHeight;
this.computationTime = computationTime;
}
public Observation(Grid<VoxelPoly> voxelPolies, double terrainHeight, double computationTime) {
this(null, voxelPolies, terrainHeight, computationTime);
}
public Grid<VoxelPoly> getVoxelPolies() {
return voxelPolies;
}
public double getTerrainHeight() {
return terrainHeight;
}
public double getComputationTime() {
return computationTime;
}
}
private final SortedMap<Double, Observation> observations;
public Outcome(Map<Double, Observation> observations) {
this.observations = Collections.unmodifiableSortedMap(new TreeMap<>(observations));
}
public double getComputationTime() {
return observations.get(observations.lastKey()).getComputationTime() - observations.get(observations.firstKey()).getComputationTime();
}
public double getDistance() {
Point2 initialCenter = BehaviorUtils.center(observations.get(observations.firstKey()).getVoxelPolies().values().stream().filter(Objects::nonNull).collect(Collectors.toList()));
Point2 finalCenter = BehaviorUtils.center(observations.get(observations.lastKey()).getVoxelPolies().values().stream().filter(Objects::nonNull).collect(Collectors.toList()));
return finalCenter.x - initialCenter.x;
}
public double getTime() {
return observations.lastKey() - observations.firstKey();
}
public double getControlEnergy() {
double initialEnergy = observations.get(observations.firstKey()).getVoxelPolies().values().stream()
.filter(Objects::nonNull)
.mapToDouble(VoxelPoly::getControlEnergy)
.sum();
double finalEnergy = observations.get(observations.lastKey()).getVoxelPolies().values().stream()
.filter(Objects::nonNull)
.mapToDouble(VoxelPoly::getControlEnergy)
.sum();
return finalEnergy - initialEnergy;
}
public double getAreaRatioEnergy() {
double initialEnergy = observations.get(observations.firstKey()).getVoxelPolies().values().stream()
.filter(Objects::nonNull)
.mapToDouble(VoxelPoly::getAreaRatioEnergy)
.sum();
double finalEnergy = observations.get(observations.lastKey()).getVoxelPolies().values().stream()
.filter(Objects::nonNull)
.mapToDouble(VoxelPoly::getAreaRatioEnergy)
.sum();
return finalEnergy - initialEnergy;
}
public double getControlPower() {
return getControlEnergy() / getTime();
}
public double getAreaRatioPower() {
return getAreaRatioEnergy() / getTime();
}
public SortedMap<Double, Observation> getObservations() {
return observations;
}
@Override
public String toString() {
return String.format("Outcome{computationTime=%.2fs, distance=%.2f, time=%.1fs, controlPower=%.1f, areaRatioPower=%.1f}",
getComputationTime(), getDistance(), getTime(), getControlPower(), getAreaRatioPower());
}
public double getVelocity() {
return getDistance() / getTime();
}
public double getCorrectedEfficiency() {
return getDistance() / (1d + getControlPower() * getTime());
}
public Outcome subOutcome(double startT, double endT) {
return new Outcome(observations.subMap(startT, endT));
}
public SortedMap<Domain, Double> getCenterXPositionSpectrum(double minF, double maxF, int nBins) {
SortedMap<Double, Double> signal = new TreeMap<>(
observations.entrySet().stream().collect(Collectors.toMap(
Map.Entry::getKey,
e -> BehaviorUtils.getCentralElement(e.getValue().getVoxelPolies()).center().x
)));
return BehaviorUtils.computeQuantizedSpectrum(signal, minF, maxF, nBins);
}
public SortedMap<Domain, Double> getCenterYPositionSpectrum(double minF, double maxF, int nBins) {
SortedMap<Double, Double> signal = new TreeMap<>(
observations.entrySet().stream().collect(Collectors.toMap(
Map.Entry::getKey,
e -> BehaviorUtils.getCentralElement(e.getValue().getVoxelPolies()).center().y
)));
return BehaviorUtils.computeQuantizedSpectrum(signal, minF, maxF, nBins);
}
public SortedMap<Domain, Double> getCenterXVelocitySpectrum(double minF, double maxF, int nBins) {
SortedMap<Double, Double> signal = new TreeMap<>(
observations.entrySet().stream().collect(Collectors.toMap(
Map.Entry::getKey,
e -> BehaviorUtils.getCentralElement(e.getValue().getVoxelPolies()).getLinearVelocity().x
)));
return BehaviorUtils.computeQuantizedSpectrum(signal, minF, maxF, nBins);
}
public SortedMap<Domain, Double> getCenterYVelocitySpectrum(double minF, double maxF, int nBins) {
SortedMap<Double, Double> signal = new TreeMap<>(
observations.entrySet().stream().collect(Collectors.toMap(
Map.Entry::getKey,
e -> BehaviorUtils.getCentralElement(e.getValue().getVoxelPolies()).getLinearVelocity().y
)));
return BehaviorUtils.computeQuantizedSpectrum(signal, minF, maxF, nBins);
}
public SortedMap<Domain, Double> getCenterAngleSpectrum(double minF, double maxF, int nBins) {
SortedMap<Double, Double> signal = new TreeMap<>(
observations.entrySet().stream().collect(Collectors.toMap(
Map.Entry::getKey,
e -> BehaviorUtils.getCentralElement(e.getValue().getVoxelPolies()).getAngle()
)));
return BehaviorUtils.computeQuantizedSpectrum(signal, minF, maxF, nBins);
}
public Grid<Boolean> getAveragePosture(int n) {
return BehaviorUtils.computeAveragePosture(
observations.values().stream()
.map(o -> BehaviorUtils.computePosture(
o.getVoxelPolies().values().stream()
.filter(Objects::nonNull)
.collect(Collectors.toList()),
n
))
.collect(Collectors.toList())
);
}
public List<SortedMap<Domain, Double>> getFootprintsSpectra(int n, double minF, double maxF, int nBins) {
SortedMap<Double, Footprint> footprints = new TreeMap<>(
observations.entrySet().stream().collect(Collectors.toMap(
Map.Entry::getKey,
e -> BehaviorUtils.computeFootprint(
e.getValue().getVoxelPolies().values().stream()
.filter(Objects::nonNull)
.collect(Collectors.toList()),
n
))));
return IntStream.range(0, n)
.mapToObj(i -> BehaviorUtils.computeQuantizedSpectrum(
new TreeMap<>(
footprints.entrySet().stream().collect(Collectors.toMap(
Map.Entry::getKey,
e -> e.getValue().getMask()[i] ? 1d : 0d
))),
minF, maxF, nBins
))
.collect(Collectors.toList());
}
public double getInitialSumOfAbsoluteWeights() {
return observations.get(observations.firstKey()).absSumOfWeights;
}
public double getAverageSumOfAbsoluteWeights() {
return observations.values().stream()
.mapToDouble(o -> o.absSumOfWeights)
.average()
.orElse(0d);
}
private static double computeSumOfAbsoluteWeights(MLPState mlpState) {
if (mlpState == null) {
return 0d;
}
return computeSumOfAbsoluteWeights(mlpState.getWeights());
}
private static double computeSumOfAbsoluteWeights(double[][][] weights) {
double s = 0;
for (double[][] initialWeight : weights) {
for (double[] doubles : initialWeight) {
s += Arrays.stream(doubles).map(Math::abs).sum();
}
}
return s;
}
}
| 9,325
| 36.757085
| 180
|
java
|
null |
VSRCollectiveControlViaSNCA-main/src/main/java/it/units/erallab/hmsrobots/tasks/locomotion/Locomotion.java
|
/*
* Copyright (C) 2021 Eric Medvet <eric.medvet@gmail.com> (as Eric Medvet <eric.medvet@gmail.com>)
*
* This program is free software: you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package it.units.erallab.hmsrobots.tasks.locomotion;
import it.units.erallab.hmsrobots.core.controllers.CentralizedSensing;
import it.units.erallab.hmsrobots.core.controllers.Controller;
import it.units.erallab.hmsrobots.core.controllers.StatefulNN;
import it.units.erallab.hmsrobots.core.controllers.TimedRealFunction;
import it.units.erallab.hmsrobots.core.geometry.BoundingBox;
import it.units.erallab.hmsrobots.core.objects.Ground;
import it.units.erallab.hmsrobots.core.objects.Robot;
import it.units.erallab.hmsrobots.core.objects.WorldObject;
import it.units.erallab.hmsrobots.core.snapshots.MLPState;
import it.units.erallab.hmsrobots.core.snapshots.SnapshotListener;
import it.units.erallab.hmsrobots.tasks.AbstractTask;
import it.units.erallab.hmsrobots.util.Grid;
import it.units.erallab.hmsrobots.util.Utils;
import org.apache.commons.lang3.time.StopWatch;
import org.dyn4j.dynamics.Settings;
import org.dyn4j.dynamics.World;
import org.dyn4j.geometry.Vector2;
import java.util.*;
import java.util.concurrent.TimeUnit;
import java.util.stream.IntStream;
/**
 * Locomotion task: drops a robot on a terrain, simulates it for {@code finalT}
 * seconds and collects an {@link Outcome} with one observation per step.
 * Change vs. previous revision: removed the unused local {@code initCenterX}.
 */
public class Locomotion extends AbstractTask<Robot<?>, Outcome> {
  public final static double INITIAL_PLACEMENT_X_GAP = 1d;
  public final static double INITIAL_PLACEMENT_Y_GAP = 1d;
  public final static double TERRAIN_BORDER_HEIGHT = 100d;
  public static final int TERRAIN_LENGTH = 2000;
  public static final double TERRAIN_BORDER_WIDTH = 10d;
  private final double finalT;
  private final double[][] groundProfile;
  private final double initialPlacement;
  public Locomotion(double finalT, double[][] groundProfile, Settings settings) {
    //default placement: just past the left terrain border
    this(finalT, groundProfile, groundProfile[0][1] + INITIAL_PLACEMENT_X_GAP, settings);
  }
  /**
   * @param finalT simulated duration
   * @param groundProfile terrain as {xs[], ys[]} polyline
   * @param initialPlacement x coordinate where the robot's left edge is placed
   */
  public Locomotion(double finalT, double[][] groundProfile, double initialPlacement, Settings settings) {
    super(settings);
    this.finalT = finalT;
    this.groundProfile = groundProfile;
    this.initialPlacement = initialPlacement;
  }
  @Override
  public Outcome apply(Robot<?> robot, SnapshotListener listener) {
    StopWatch stopWatch = StopWatch.createStarted();
    //init world
    World world = new World();
    world.setSettings(settings);
    List<WorldObject> worldObjects = new ArrayList<>();
    Ground ground = new Ground(groundProfile[0], groundProfile[1]);
    ground.addTo(world);
    worldObjects.add(ground);
    robot.reset();
    //position robot: translate on x
    BoundingBox boundingBox = robot.boundingBox();
    robot.translate(new Vector2(initialPlacement - boundingBox.min.x, 0));
    //translate on y so that the lowest voxel sits INITIAL_PLACEMENT_Y_GAP above the ground
    double minYGap = robot.getVoxels().values().stream()
        .filter(Objects::nonNull)
        .mapToDouble(v -> v.boundingBox().min.y - ground.yAt(v.getCenter().x))
        .min().orElse(0d);
    robot.translate(new Vector2(0, INITIAL_PLACEMENT_Y_GAP - minYGap));
    //add robot to world
    robot.addTo(world);
    worldObjects.add(robot);
    //run, recording one observation per step
    Map<Double, Outcome.Observation> observations = new HashMap<>((int) Math.ceil(finalT / settings.getStepFrequency()));
    double t = 0d;
    while (t < finalT) {
      t = AbstractTask.updateWorld(t, settings.getStepFrequency(), world, worldObjects, listener);
      observations.put(t, new Outcome.Observation(
          getMLPState(robot),
          Grid.create(robot.getVoxels(), v -> v == null ? null : v.getVoxelPoly()),
          ground.yAt(robot.getCenter().x),
          (double) stopWatch.getTime(TimeUnit.MILLISECONDS) / 1000d
      ));
    }
    stopWatch.stop();
    //prepare outcome
    return new Outcome(observations);
  }
  //returns the MLP state of the robot's controller, or null when the controller
  //is not a CentralizedSensing wrapping a StatefulNN
  private static MLPState getMLPState(Robot<?> robot) {
    Controller<?> controller = robot.getController();
    if (!(controller instanceof CentralizedSensing)) {
      return null;
    }
    TimedRealFunction controllerFunction = ((CentralizedSensing) controller).getFunction();
    if (!(controllerFunction instanceof StatefulNN)) {
      return null;
    }
    StatefulNN snn = (StatefulNN) controllerFunction;
    return snn.getState();
  }
  //NOTE(review): private and currently unreferenced within this class — dead-code
  //candidate; kept for now in case it is meant for future terrain kinds
  private static double[][] randomTerrain(int n, double length, double peak, double borderHeight, Random random) {
    double[] xs = new double[n + 2];
    double[] ys = new double[n + 2];
    xs[0] = 0d;
    xs[n + 1] = length;
    ys[0] = borderHeight;
    ys[n + 1] = borderHeight;
    for (int i = 1; i < n + 1; i++) {
      xs[i] = 1 + (double) (i - 1) * (length - 2d) / (double) n;
      ys[i] = random.nextDouble() * peak;
    }
    return new double[][]{xs, ys};
  }
  /**
   * Builds a terrain profile {xs[], ys[]} from a textual descriptor: one of
   * "flat", "flatWithStart-SEED", "hilly-H-W-SEED", "steppy-H-W-SEED",
   * "downhill-ANGLE", "uphill-ANGLE".
   *
   * @throws IllegalArgumentException if the name matches no known format
   */
  public static double[][] createTerrain(String name) {
    String flat = "flat";
    String flatWithStart = "flatWithStart-(?<seed>[0-9]+)";
    String hilly = "hilly-(?<h>[0-9]+(\\.[0-9]+)?)-(?<w>[0-9]+(\\.[0-9]+)?)-(?<seed>[0-9]+)";
    String steppy = "steppy-(?<h>[0-9]+(\\.[0-9]+)?)-(?<w>[0-9]+(\\.[0-9]+)?)-(?<seed>[0-9]+)";
    String downhill = "downhill-(?<angle>[0-9]+(\\.[0-9]+)?)";
    String uphill = "uphill-(?<angle>[0-9]+(\\.[0-9]+)?)";
    Map<String, String> params;
    if ((params = Utils.params(flat, name)) != null) {
      //params unused here: "flat" has no parameters
      return new double[][]{
          new double[]{0, TERRAIN_BORDER_WIDTH, TERRAIN_LENGTH - TERRAIN_BORDER_WIDTH, TERRAIN_LENGTH},
          new double[]{TERRAIN_BORDER_HEIGHT, 5, 5, TERRAIN_BORDER_HEIGHT}
      };
    }
    if ((params = Utils.params(flatWithStart, name)) != null) {
      Random random = new Random(Integer.parseInt(params.get("seed")));
      IntStream.range(0, random.nextInt(10) + 10).forEach(i -> random.nextDouble()); //it looks like that otherwise the 1st double of nextDouble() is always around 0.73...
      double angle = Math.PI / 18d * (random.nextDouble() * 2d - 1d);
      double startLength = it.units.erallab.hmsrobots.core.objects.Voxel.SIDE_LENGTH * 8d;
      return new double[][]{
          new double[]{
              0, TERRAIN_BORDER_WIDTH,
              TERRAIN_BORDER_WIDTH + startLength, TERRAIN_LENGTH - TERRAIN_BORDER_WIDTH, TERRAIN_LENGTH
          },
          new double[]{
              TERRAIN_BORDER_HEIGHT, 5 + startLength * Math.sin(angle),
              5, 5, TERRAIN_BORDER_HEIGHT
          }
      };
    }
    if ((params = Utils.params(hilly, name)) != null) {
      double h = Double.parseDouble(params.get("h"));
      double w = Double.parseDouble(params.get("w"));
      Random random = new Random(Integer.parseInt(params.get("seed")));
      List<Double> xs = new ArrayList<>(List.of(0d, TERRAIN_BORDER_WIDTH));
      List<Double> ys = new ArrayList<>(List.of(TERRAIN_BORDER_HEIGHT, 0d));
      while (xs.get(xs.size() - 1) < TERRAIN_LENGTH - TERRAIN_BORDER_WIDTH) {
        xs.add(xs.get(xs.size() - 1) + Math.max(1d, (random.nextGaussian() * 0.25 + 1) * w));
        ys.add(ys.get(ys.size() - 1) + random.nextGaussian() * h);
      }
      xs.addAll(List.of(xs.get(xs.size() - 1) + TERRAIN_BORDER_WIDTH));
      ys.addAll(List.of(TERRAIN_BORDER_HEIGHT));
      return new double[][]{
          xs.stream().mapToDouble(d -> d).toArray(),
          ys.stream().mapToDouble(d -> d).toArray()
      };
    }
    if ((params = Utils.params(steppy, name)) != null) {
      double h = Double.parseDouble(params.get("h"));
      double w = Double.parseDouble(params.get("w"));
      Random random = new Random(Integer.parseInt(params.get("seed")));
      List<Double> xs = new ArrayList<>(List.of(0d, TERRAIN_BORDER_WIDTH));
      List<Double> ys = new ArrayList<>(List.of(TERRAIN_BORDER_HEIGHT, 0d));
      while (xs.get(xs.size() - 1) < TERRAIN_LENGTH - TERRAIN_BORDER_WIDTH) {
        xs.add(xs.get(xs.size() - 1) + Math.max(1d, (random.nextGaussian() * 0.25 + 1) * w));
        xs.add(xs.get(xs.size() - 1) + 0.5d);
        ys.add(ys.get(ys.size() - 1));
        ys.add(ys.get(ys.size() - 1) + random.nextGaussian() * h);
      }
      xs.addAll(List.of(xs.get(xs.size() - 1) + TERRAIN_BORDER_WIDTH));
      ys.addAll(List.of(TERRAIN_BORDER_HEIGHT));
      return new double[][]{
          xs.stream().mapToDouble(d -> d).toArray(),
          ys.stream().mapToDouble(d -> d).toArray()
      };
    }
    if ((params = Utils.params(downhill, name)) != null) {
      double angle = Double.parseDouble(params.get("angle"));
      double dY = (TERRAIN_LENGTH - 2 * TERRAIN_BORDER_WIDTH) * Math.sin(angle / 180 * Math.PI);
      return new double[][]{
          new double[]{0, TERRAIN_BORDER_WIDTH, TERRAIN_LENGTH - TERRAIN_BORDER_WIDTH, TERRAIN_LENGTH},
          new double[]{TERRAIN_BORDER_HEIGHT + dY, 5 + dY, 5, TERRAIN_BORDER_HEIGHT}
      };
    }
    if ((params = Utils.params(uphill, name)) != null) {
      double angle = Double.parseDouble(params.get("angle"));
      double dY = (TERRAIN_LENGTH - 2 * TERRAIN_BORDER_WIDTH) * Math.sin(angle / 180 * Math.PI);
      return new double[][]{
          new double[]{0, TERRAIN_BORDER_WIDTH, TERRAIN_LENGTH - TERRAIN_BORDER_WIDTH, TERRAIN_LENGTH},
          new double[]{TERRAIN_BORDER_HEIGHT, 5, 5 + dY, TERRAIN_BORDER_HEIGHT + dY}
      };
    }
    throw new IllegalArgumentException(String.format("Unknown terrain name: %s", name));
  }
}
| 9,798
| 43.949541
| 171
|
java
|
EVCache
|
EVCache-master/evcache-client-sample/src/main/java/com/netflix/evcache/sample/EVCacheClientZipkinTracingSample.java
|
package com.netflix.evcache.sample;
import brave.Tracing;
import com.netflix.evcache.EVCache;
import com.netflix.evcache.EVCacheException;
import com.netflix.evcache.EVCacheTracingEventListener;
import com.netflix.evcache.pool.EVCacheClientPoolManager;
import zipkin2.Span;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Future;
/**
 * Sample app that performs EVCache set/get operations with a Zipkin tracing
 * listener attached, collecting finished spans in memory and printing them.
 */
public class EVCacheClientZipkinTracingSample {
  private final EVCache evCache;
  // Spans reported by the Brave tracer end up in this list (see constructor).
  private final List<Span> reportedSpans;
  private static boolean verboseMode = false;
  /**
   * Default constructor.
   *
   * <p>This tells the EVCache library to use the "simple node list provider" for EVCACHE_APP1 (by
   * setting the relevant system property), and then it copies the EVC_SAMPLE_DEPLOYMENT environment
   * variable to the EVCACHE_APP1-NODES system property.
   *
   * <p>If the environment variable isn't set, default memcached server is at localhost:11211.
   *
   * <p>Finally, this initializes "evCache" using EVCache.Builder, specifying the application name
   * "EVCACHE_APP1."
   */
  public EVCacheClientZipkinTracingSample() {
    String deploymentDescriptor = System.getenv("EVC_SAMPLE_DEPLOYMENT");
    if (deploymentDescriptor == null) {
      // No deployment descriptor in the environment, use a default.
      deploymentDescriptor = "SERVERGROUP1=localhost:11211";
    }
    // Properties must be set before the pool manager initializes the app below.
    System.setProperty("EVCACHE_APP1.use.simple.node.list.provider", "true");
    System.setProperty("EVCACHE_APP1-NODES", deploymentDescriptor);
    EVCacheClientPoolManager poolManager = EVCacheClientPoolManager.getInstance();
    poolManager.initEVCache("EVCACHE_APP1");
    reportedSpans = new ArrayList<>();
    // Route every finished span into the in-memory list for later printing.
    Tracing tracing = Tracing.newBuilder().spanReporter(reportedSpans::add).build();
    // NOTE(review): this reference is never read again; presumably the listener
    // registers itself with the pool manager in its constructor — confirm before removing.
    EVCacheTracingEventListener tracingEventListener =
        new EVCacheTracingEventListener(poolManager, tracing.tracer());
    evCache = new EVCache.Builder().setAppName("EVCACHE_APP1").build();
  }
  /**
   * Set a key in the cache.
   *
   * <p>See the memcached documentation for what "timeToLive" means. Zero means "never expires."
   * Small integers (under some threshold) mean "expires this many seconds from now." Large integers
   * mean "expires at this Unix timestamp" (seconds since 1/1/1970). Warranty expires 17-Jan 2038.
   */
  public void setKey(String key, String value, int timeToLive) throws Exception {
    try {
      Future<Boolean>[] _future = evCache.set(key, value, timeToLive);
      // Wait for all the Futures to complete.
      // In "verbose" mode, show the status for each.
      for (Future<Boolean> f : _future) {
        boolean didSucceed = f.get();
        if (verboseMode) {
          System.out.println("per-shard set success code for key " + key + " is " + didSucceed);
        }
      }
      if (!verboseMode) {
        // Not verbose. Just give one line of output per "set," without a success code
        System.out.println("finished setting key " + key);
      }
    } catch (EVCacheException e) {
      e.printStackTrace();
    }
  }
  /**
   * Get the data for a key from the cache. Returns null if the key could not be retrieved, whether
   * due to a cache miss or errors.
   */
  public String getKey(String key) {
    try {
      String _response = evCache.<String>get(key);
      return _response;
    } catch (Exception e) {
      e.printStackTrace();
      return null;
    }
  }
  /** Prints every Zipkin span collected so far to stdout. */
  public void printZipkinSpans() {
    System.out.println("--> " + reportedSpans.toString());
  }
  /** Main Program which does some simple sets and gets. */
  public static void main(String[] args) {
    // set verboseMode based on the environment variable
    verboseMode = ("true".equals(System.getenv("EVCACHE_SAMPLE_VERBOSE")));
    if (verboseMode) {
      System.out.println("To run this sample app without using Gradle:");
      System.out.println(
          "java -cp "
              + System.getProperty("java.class.path")
              + " com.netflix.evcache.sample.EVCacheClientZipkinTracingSample");
    }
    try {
      EVCacheClientZipkinTracingSample evCacheClientZipkinTracingSample =
          new EVCacheClientZipkinTracingSample();
      // Set ten keys to different values
      for (int i = 0; i < 10; i++) {
        String key = "key_" + i;
        String value = "data_" + i;
        // Set the TTL to 24 hours
        int ttl = 86400;
        evCacheClientZipkinTracingSample.setKey(key, value, ttl);
      }
      // Do a "get" for each of those same keys
      for (int i = 0; i < 10; i++) {
        String key = "key_" + i;
        String value = evCacheClientZipkinTracingSample.getKey(key);
        System.out.println("Get of " + key + " returned " + value);
      }
      // Print collected Zipkin Spans
      evCacheClientZipkinTracingSample.printZipkinSpans();
    } catch (Exception e) {
      e.printStackTrace();
    }
    // We have to call System.exit() now, because some background
    // threads were started without the "daemon" flag. This is
    // probably a mistake somewhere, but hey, this is only a sample app.
    System.exit(0);
  }
}
| 5,090
| 34.601399
| 100
|
java
|
EVCache
|
EVCache-master/evcache-client-sample/src/main/java/com/netflix/evcache/sample/EVCacheClientSample.java
|
package com.netflix.evcache.sample;
import com.netflix.evcache.EVCache;
import com.netflix.evcache.EVCacheException;
import java.util.concurrent.Future;
/**
* Created by senugula on 3/24/16.
* Updated by akpratt on 5/13/16.
*/
/**
* This standalone program demonstrates how to use EVCacheClient for
* set/get operations using memcached running on your local box.
*
* By default, this program expects there to be two memcached processes
* on the local host, on ports 11211 and 11212. They get used as two
* replicas of a single shard each.
*
* You can override this configuration by setting the environment
* variable EVC_SAMPLE_DEPLOYMENT to a string which describes your
* deployment. The format for that string is as described in the EVCache
* documentation for a simple node list provider. It would look like
* this for a two-replica deployment with two shards per replica:
*
* SERVERGROUP1=host1:port1,host2:port2;SERVERGROUP2=host3:port3,host4:port4
*/
public class EVCacheClientSample {

    private final EVCache evCache;
    private static boolean verboseMode = false;

    /**
     * Default constructor.
     *
     * Points the EVCache library at the "simple node list provider" for
     * EVCACHE_APP1 (via system property) and copies the EVC_SAMPLE_DEPLOYMENT
     * environment variable into the EVCACHE_APP1-NODES system property.
     *
     * Without the environment variable, the default deployment is two local
     * memcached processes (ports 11211 and 11212) acting as two replicas of
     * one shard each. The "evCache" handle is then built for app EVCACHE_APP1.
     */
    public EVCacheClientSample() {
        String deploymentDescriptor = System.getenv("EVC_SAMPLE_DEPLOYMENT");
        if (deploymentDescriptor == null) {
            // Fall back to two local memcached processes configured as two
            // replicas of one shard each.
            deploymentDescriptor = "SERVERGROUP1=localhost:11211;SERVERGROUP2=localhost:11212";
        }
        System.setProperty("EVCACHE_APP1.use.simple.node.list.provider", "true");
        System.setProperty("EVCACHE_APP1-NODES", deploymentDescriptor);
        evCache = new EVCache.Builder().setAppName("EVCACHE_APP1").build();
    }

    /**
     * Set a key in the cache.
     *
     * See the memcached documentation for "timeToLive": zero means "never
     * expires"; small integers mean "expires this many seconds from now";
     * large integers mean "expires at this Unix timestamp" (seconds since
     * 1/1/1970). Warranty expires 17-Jan 2038.
     */
    public void setKey(String key, String value, int timeToLive) throws Exception {
        try {
            Future<Boolean>[] futures = evCache.set(key, value, timeToLive);
            // Wait for every per-shard future; in verbose mode report each status.
            for (Future<Boolean> future : futures) {
                boolean didSucceed = future.get();
                if (verboseMode) {
                    System.out.println("per-shard set success code for key " + key + " is " + didSucceed);
                }
            }
            if (!verboseMode) {
                // Not verbose. Just give one line of output per "set," without a success code
                System.out.println("finished setting key " + key);
            }
        } catch (EVCacheException e) {
            e.printStackTrace();
        }
    }

    /**
     * Get the data for a key from the cache. Returns null if the key could
     * not be retrieved, whether due to a cache miss or errors.
     */
    public String getKey(String key) {
        try {
            return evCache.<String>get(key);
        } catch (Exception e) {
            e.printStackTrace();
            return null;
        }
    }

    /**
     * Main Program which does some simple sets and gets.
     */
    public static void main(String[] args) {
        // set verboseMode based on the environment variable
        verboseMode = "true".equals(System.getenv("EVCACHE_SAMPLE_VERBOSE"));
        if (verboseMode) {
            System.out.println("To run this sample app without using Gradle:");
            System.out.println("java -cp " + System.getProperty("java.class.path") + " com.netflix.evcache.sample.EVCacheClientSample");
        }
        try {
            EVCacheClientSample sample = new EVCacheClientSample();
            // Set ten keys to different values, with a TTL of 24 hours.
            int ttl = 86400;
            for (int i = 0; i < 10; i++) {
                sample.setKey("key_" + i, "data_" + i, ttl);
            }
            // Do a "get" for each of those same keys.
            for (int i = 0; i < 10; i++) {
                String key = "key_" + i;
                System.out.println("Get of " + key + " returned " + sample.getKey(key));
            }
        } catch (Exception e) {
            e.printStackTrace();
        }
        // We have to call System.exit() now, because some background
        // threads were started without the "daemon" flag. This is
        // probably a mistake somewhere, but hey, this is only a sample app.
        System.exit(0);
    }
}
| 5,562
| 36.33557
| 136
|
java
|
EVCache
|
EVCache-master/evcache-client/src/main/java/com/netflix/evcache/EVCacheModule.java
|
package com.netflix.evcache;
import javax.annotation.PostConstruct;
import javax.annotation.PreDestroy;
import com.google.inject.*;
import com.netflix.archaius.api.annotations.ConfigurationSource;
import com.netflix.evcache.connection.DIConnectionModule;
import com.netflix.evcache.connection.IConnectionBuilder;
import com.netflix.evcache.event.hotkey.HotKeyListener;
import com.netflix.evcache.event.throttle.ThrottleListener;
import com.netflix.evcache.pool.EVCacheClientPoolManager;
import com.netflix.evcache.pool.EVCacheNodeList;
import com.netflix.evcache.util.EVCacheConfig;
import com.netflix.evcache.pool.eureka.DIEVCacheNodeListProvider;
import com.netflix.evcache.version.VersionTracker;
@Singleton
@SuppressWarnings("deprecation")
public class EVCacheModule extends AbstractModule {
    public EVCacheModule() {
    }
    // Eagerly-created config loader; also installs the default connection
    // module when no IConnectionBuilder binding exists yet.
    @Singleton
    @ConfigurationSource("evcache")
    public static class EVCacheModuleConfigLoader {
        @Inject
        public EVCacheModuleConfigLoader(Injector injector, EVCacheModule module) {
            if(injector.getExistingBinding(Key.get(IConnectionBuilder.class)) == null) {
                module.install(new DIConnectionModule());
            }
        }
    }
    @Override
    protected void configure() {
        // Make sure connection factory provider Module is initialized in your Module when you init EVCacheModule
        bind(EVCacheModuleConfigLoader.class).asEagerSingleton();
        bind(EVCacheNodeList.class).toProvider(DIEVCacheNodeListProvider.class);
        bind(EVCacheClientPoolManager.class).asEagerSingleton();
        bind(HotKeyListener.class).asEagerSingleton();
        bind(ThrottleListener.class).asEagerSingleton();
        bind(VersionTracker.class).asEagerSingleton();
        requestStaticInjection(EVCacheModuleConfigLoader.class);
        requestStaticInjection(EVCacheConfig.class);
    }
    // Field-injected; may be null if injection did not happen (see fallbacks below).
    @Inject
    EVCacheClientPoolManager manager;
    // Lifecycle hook: start the pool manager, falling back to the static
    // singleton when no instance was injected.
    @PostConstruct
    public void init() {
        if(manager != null) {
            manager.initAtStartup();
        } else {
            EVCacheClientPoolManager.getInstance().initAtStartup();
        }
    }
    // Lifecycle hook: mirror of init() for shutdown.
    @PreDestroy
    public void shutdown() {
        if(manager != null) {
            manager.shutdown();
        } else {
            EVCacheClientPoolManager.getInstance().shutdown();
        }
    }
    // All EVCacheModule instances compare equal so Guice de-duplicates the
    // module if it is installed more than once.
    @Override
    public int hashCode() {
        return getClass().hashCode();
    }
    @Override
    public boolean equals(Object obj) {
        return (obj != null) && (obj.getClass() == getClass());
    }
}
| 2,567
| 29.571429
| 113
|
java
|
EVCache
|
EVCache-master/evcache-client/src/main/java/com/netflix/evcache/pool/DIEVCacheKetamaNodeLocatorConfiguration.java
|
package com.netflix.evcache.pool;
import com.netflix.appinfo.InstanceInfo;
import com.netflix.discovery.EurekaClient;
import com.netflix.discovery.shared.Application;
import net.spy.memcached.MemcachedNode;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.net.InetSocketAddress;
import java.net.SocketAddress;
import java.util.List;
/**
 * Ketama node locator configuration that derives each memcached node's hash key
 * from Eureka instance metadata ("publicHostname/privateIp:port"), falling back
 * to the socket address when discovery data is unavailable. Results are cached
 * per node in the inherited {@code socketAddresses} map.
 */
public class DIEVCacheKetamaNodeLocatorConfiguration extends EVCacheKetamaNodeLocatorConfiguration {
    private static final Logger log = LoggerFactory.getLogger(DIEVCacheKetamaNodeLocatorConfiguration.class);
    // Discovery client used to resolve a node's private IP; may be null, in which
    // case the key is built from the socket address alone.
    private final EurekaClient eurekaClient;
    public DIEVCacheKetamaNodeLocatorConfiguration(EVCacheClient client, EurekaClient eurekaClient) {
        super(client);
        this.eurekaClient = eurekaClient;
    }
    /**
     * Returns the hash key for a given MemcachedNode and ketama repetition.
     *
     * @param node - The MemcachedNode which we're interested in
     * @param repetition - ketama repetition index appended to the key
     * @return The socket address of the given node format is of the following
     *         format "publicHostname/privateIp:port" (ex -
     *         ec2-174-129-159-31.compute-1.amazonaws.com/10.125.47.114:11211)
     */
    @Override
    public String getKeyForNode(MemcachedNode node, int repetition) {
        // Cache hit avoids the Eureka application scan below.
        String result = socketAddresses.get(node);
        if(result == null) {
            final SocketAddress socketAddress = node.getSocketAddress();
            if(socketAddress instanceof InetSocketAddress) {
                final InetSocketAddress isa = (InetSocketAddress)socketAddress;
                if(eurekaClient != null ) {
                    // Match the node's hostname against the app's Eureka instances to get its IP.
                    final Application app = eurekaClient.getApplication(client.getAppName());
                    if(app != null) {
                        final List<InstanceInfo> instances = app.getInstances();
                        for(InstanceInfo info : instances) {
                            final String hostName = info.getHostName();
                            if(hostName.equalsIgnoreCase(isa.getHostName())) {
                                final String ip = info.getIPAddr();
                                // NOTE(review): port is hard-coded to 11211; changing this string
                                // format moves every node on the ketama ring — any fix must be
                                // rollout-aware across all clients of the same cluster.
                                result = hostName + '/' + ip + ":11211";
                                break;
                            }
                        }
                    } else {
                        result = ((InetSocketAddress)socketAddress).getHostName() + '/' + ((InetSocketAddress)socketAddress).getAddress().getHostAddress() + ":11211";
                    }
                } else {
                    result = isa.getHostName() + '/' + isa.getAddress().getHostAddress() + ":11211";
                }
            } else {
                // Non-inet address: use its string form, stripped of a leading '/'.
                result=String.valueOf(socketAddress);
                if (result.startsWith("/")) {
                    result = result.substring(1);
                }
            }
            socketAddresses.put(node, result);
        }
        if(log.isDebugEnabled()) log.debug("Returning : " + (result + "-" + repetition));
        return result + "-" + repetition;
    }
    @Override
    public String toString() {
        return "DIEVCacheKetamaNodeLocatorConfiguration [" + super.toString() + ", EurekaClient=" + eurekaClient + "]";
    }
}
| 3,153
| 41.621622
| 166
|
java
|
EVCache
|
EVCache-master/evcache-client/src/main/java/com/netflix/evcache/pool/eureka/EurekaNodeListProvider.java
|
package com.netflix.evcache.pool.eureka;
import com.google.common.net.InetAddresses;
import com.netflix.appinfo.AmazonInfo;
import com.netflix.appinfo.ApplicationInfoManager;
import com.netflix.appinfo.DataCenterInfo;
import com.netflix.appinfo.InstanceInfo;
import com.netflix.appinfo.InstanceInfo.InstanceStatus;
import com.netflix.archaius.api.Property;
import com.netflix.archaius.api.PropertyRepository;
import com.netflix.discovery.EurekaClient;
import com.netflix.discovery.shared.Application;
import com.netflix.evcache.metrics.EVCacheMetricsFactory;
import com.netflix.evcache.pool.EVCacheClientPool;
import com.netflix.evcache.pool.EVCacheNodeList;
import com.netflix.evcache.pool.EVCacheServerGroupConfig;
import com.netflix.evcache.pool.ServerGroup;
import com.netflix.spectator.api.BasicTag;
import com.netflix.spectator.api.Tag;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
/**
 * Discovers memcached nodes for an EVCache app via Eureka, grouping usable
 * instances by {@link ServerGroup} (availability zone + ASG). Only AWS
 * instances that are UP, in an enabled ASG, and not on the ignore list are
 * returned.
 */
public class EurekaNodeListProvider implements EVCacheNodeList {
    private static final Logger log = LoggerFactory.getLogger(EurekaNodeListProvider.class);
    private final EurekaClient _eurekaClient;
    private PropertyRepository props;
    // Info about the application *this* client runs in; used to decide whether
    // we are in-cloud and to compare zones with the discovered instances.
    private final ApplicationInfoManager applicationInfoManager;
    @SuppressWarnings("rawtypes") // Archaius2 PropertyRepository does not support ParameterizedTypes
    private Property<Set> ignoreHosts = null;
    public EurekaNodeListProvider(ApplicationInfoManager applicationInfoManager, EurekaClient eurekaClient, PropertyRepository props) {
        this.applicationInfoManager = applicationInfoManager;
        this._eurekaClient = eurekaClient;
        this.props = props;
    }
    /*
     * (non-Javadoc)
     *
     * @see com.netflix.evcache.pool.EVCacheNodeList#discoverInstances()
     */
    @Override
    public Map<ServerGroup, EVCacheServerGroupConfig> discoverInstances(String _appName) throws IOException {
        // Bail out early when this application is itself marked DOWN in Eureka,
        // unless explicitly overridden via evcache.ignoreAppEurekaStatus.
        final Property<Boolean> ignoreAppEurekaStatus = props.get("evcache.ignoreAppEurekaStatus", Boolean.class).orElse(false);
        if (ignoreAppEurekaStatus.get())
            log.info("Not going to consider the eureka status of the application, to initialize evcache client.");
        if (!ignoreAppEurekaStatus.get() && (applicationInfoManager.getInfo().getStatus() == InstanceStatus.DOWN)) {
            log.info("Not initializing evcache client as application eureka status is DOWN. " +
                    "One can override this behavior by setting evcache.ignoreAppEurekaStatus property to true, scoped to your application.");
            return Collections.<ServerGroup, EVCacheServerGroupConfig> emptyMap();
        }
        /* Get a list of EVCACHE instances from the DiscoveryManager */
        final Application app = _eurekaClient.getApplication(_appName);
        if (app == null) return Collections.<ServerGroup, EVCacheServerGroupConfig> emptyMap();
        final List<InstanceInfo> appInstances = app.getInstances();
        final Map<ServerGroup, EVCacheServerGroupConfig> instancesSpecific = new HashMap<ServerGroup, EVCacheServerGroupConfig>();
        /* Iterate all the discovered instances to find usable ones */
        for (InstanceInfo iInfo : appInstances) {
            final DataCenterInfo dcInfo = iInfo.getDataCenterInfo();
            if (dcInfo == null) {
                if (log.isErrorEnabled()) log.error("Data Center Info is null for appName - " + _appName);
                continue;
            }
            /* Only AWS instances are usable; bypass all others */
            if (DataCenterInfo.Name.Amazon != dcInfo.getName() || !(dcInfo instanceof AmazonInfo)) {
                log.error("This is not an AWSDataCenter. You will not be able to use Discovery Nodelist Provider. Cannot proceed. " +
                        "DataCenterInfo : {}; appName - {}. Please use SimpleNodeList provider and specify the server groups manually.",
                        dcInfo, _appName);
                continue;
            }
            final AmazonInfo amznInfo = (AmazonInfo) dcInfo;
            // We checked above if this instance is Amazon so no need to do a instanceof check
            final String zone = amznInfo.get(AmazonInfo.MetaDataKey.availabilityZone);
            if(zone == null) {
                // Count (and skip) instances that publish no availability zone.
                final List<Tag> tagList = new ArrayList<Tag>(3);
                EVCacheMetricsFactory.getInstance().addAppNameTags(tagList, _appName);
                tagList.add(new BasicTag(EVCacheMetricsFactory.CONFIG_NAME, EVCacheMetricsFactory.NULL_ZONE));
                EVCacheMetricsFactory.getInstance().increment(EVCacheMetricsFactory.CONFIG, tagList);
                continue;
            }
            final String asgName = iInfo.getASGName();
            if(asgName == null) {
                // Count (and skip) instances without an ASG — the server group key needs one.
                final List<Tag> tagList = new ArrayList<Tag>(3);
                EVCacheMetricsFactory.getInstance().addAppNameTags(tagList, _appName);
                tagList.add(new BasicTag(EVCacheMetricsFactory.CONFIG_NAME, EVCacheMetricsFactory.NULL_SERVERGROUP));
                EVCacheMetricsFactory.getInstance().increment(EVCacheMetricsFactory.CONFIG, tagList);
                continue;
            }
            // Whole ASGs can be disabled via the "<asg>.enabled" fast property.
            final Property<Boolean> asgEnabled = props.get(asgName + ".enabled", Boolean.class).orElse(true);
            if (!asgEnabled.get()) {
                if(log.isDebugEnabled()) log.debug("ASG " + asgName + " is disabled so ignoring it");
                continue;
            }
            // Port comes from instance metadata, switching to the secure port when
            // ".use.secure" is set at ASG, app, or global scope.
            final Map<String, String> metaInfo = iInfo.getMetadata();
            final int evcachePort = Integer.parseInt((metaInfo != null && metaInfo.containsKey("evcache.port")) ?
                    metaInfo.get("evcache.port") : EVCacheClientPool.DEFAULT_PORT);
            int port = evcachePort;
            final Property<Boolean> isSecure = props.get(asgName + ".use.secure", Boolean.class)
                    .orElseGet(_appName + ".use.secure")
                    .orElseGet("evcache.use.secure")
                    .orElse(false);
            if(isSecure.get()) {
                port = Integer.parseInt((metaInfo != null && metaInfo.containsKey("evcache.secure.port")) ?
                        metaInfo.get("evcache.secure.port") : EVCacheClientPool.DEFAULT_SECURE_PORT);
            }
            // Accumulate addresses per (zone, ASG) server group.
            final ServerGroup serverGroup = new ServerGroup(zone, asgName);
            final Set<InetSocketAddress> instances;
            final EVCacheServerGroupConfig config;
            if (instancesSpecific.containsKey(serverGroup)) {
                config = instancesSpecific.get(serverGroup);
                instances = config.getInetSocketAddress();
            } else {
                instances = new HashSet<InetSocketAddress>();
                config = new EVCacheServerGroupConfig(serverGroup, instances);
                instancesSpecific.put(serverGroup, config);
                //EVCacheMetricsFactory.getInstance().getRegistry().gauge(EVCacheMetricsFactory.getInstance().getRegistry().createId(_appName + "-port", "ServerGroup", asgName, "APP", _appName), Long.valueOf(port));
            }
            /* Don't try to use downed instances */
            final InstanceStatus status = iInfo.getStatus();
            if (status == null || InstanceStatus.OUT_OF_SERVICE == status || InstanceStatus.DOWN == status) {
                if (log.isDebugEnabled()) log.debug("The Status of the instance in Discovery is " + status + ". App Name : " + _appName + "; Zone : " + zone
                        + "; Host : " + iInfo.getHostName() + "; Instance Id - " + iInfo.getId());
                continue;
            }
            // Work out whether *this* client is running in the cloud (ec2 public
            // hostname, or a VPC id, or ip != instance id) — affects which IP we dial.
            final InstanceInfo myInfo = applicationInfoManager.getInfo();
            final DataCenterInfo myDC = myInfo.getDataCenterInfo();
            final AmazonInfo myAmznDC = (myDC instanceof AmazonInfo) ? (AmazonInfo) myDC : null;
            final String myInstanceId = myInfo.getInstanceId();
            final String myIp = myInfo.getIPAddr();
            final String myPublicHostName = (myAmznDC != null) ? myAmznDC.get(AmazonInfo.MetaDataKey.publicHostname) : null;
            boolean isInCloud = false;
            if (myPublicHostName != null) {
                isInCloud = myPublicHostName.startsWith("ec2");
            }
            if (!isInCloud) {
                if (myAmznDC != null && myAmznDC.get(AmazonInfo.MetaDataKey.vpcId) != null) {
                    isInCloud = true;
                } else {
                    if (myIp.equals(myInstanceId)) {
                        isInCloud = false;
                    }
                }
            }
            final String myZone = (myAmznDC != null) ? myAmznDC.get(AmazonInfo.MetaDataKey.availabilityZone) : null;
            final String myRegion = (myZone != null) ? myZone.substring(0, myZone.length() - 1) : null;
            final String region = (zone != null) ? zone.substring(0, zone.length() - 1) : null;
            final String host = amznInfo.get(AmazonInfo.MetaDataKey.publicHostname);
            InetSocketAddress address = null;
            final String vpcId = amznInfo.get(AmazonInfo.MetaDataKey.vpcId);
            final String localIp = amznInfo.get(AmazonInfo.MetaDataKey.localIpv4);
            if (log.isDebugEnabled()) log.debug("myZone - " + myZone + "; zone : " + zone + "; myRegion : " + myRegion + "; region : " + region + "; host : " + host + "; vpcId : " + vpcId);
            // Lazily resolve the per-app host ignore list, then skip ignored instances.
            if(ignoreHosts == null) ignoreHosts = props.get(_appName + ".ignore.hosts", Set.class).orElse(Collections.emptySet());
            if(localIp != null && ignoreHosts.get().contains(localIp)) continue;
            if(host != null && ignoreHosts.get().contains(host)) continue;
            if (vpcId != null) {
                // VPC instance: always dial the private IP.
                // NOTE(review): assumes localIpv4 metadata is always present when vpcId is
                // set — InetAddresses.forString(null) would throw NPE; confirm Eureka
                // guarantees this for VPC instances.
                final InetAddress add = InetAddresses.forString(localIp);
                final InetAddress inetAddress = InetAddress.getByAddress(localIp, add.getAddress());
                address = new InetSocketAddress(inetAddress, port);
                if (log.isDebugEnabled()) log.debug("VPC : localIp - " + localIp + " ; add : " + add + "; inetAddress : " + inetAddress + "; address - " + address
                        + "; App Name : " + _appName + "; Zone : " + zone + "; myZone - " + myZone + "; Host : " + iInfo.getHostName() + "; Instance Id - " + iInfo.getId());
            } else {
                if(host != null && host.startsWith("ec2")) {
                    // EC2-classic with a public hostname: prefer the private IP when known.
                    final InetAddress inetAddress = (localIp != null) ? InetAddress.getByAddress(host, InetAddresses.forString(localIp).getAddress()) : InetAddress.getByName(host);
                    address = new InetSocketAddress(inetAddress, port);
                    if (log.isDebugEnabled()) log.debug("myZone - " + myZone + ". host : " + host
                            + "; inetAddress : " + inetAddress + "; address - " + address + "; App Name : " + _appName
                            + "; Zone : " + zone + "; Host : " + iInfo.getHostName() + "; Instance Id - " + iInfo.getId());
                } else {
                    // No public hostname: in-cloud clients use the private IP, external
                    // clients the public one.
                    final String ipToUse = (isInCloud) ? localIp : amznInfo.get(AmazonInfo.MetaDataKey.publicIpv4);
                    final InetAddress add = InetAddresses.forString(ipToUse);
                    final InetAddress inetAddress = InetAddress.getByAddress(ipToUse, add.getAddress());
                    address = new InetSocketAddress(inetAddress, port);
                    if (log.isDebugEnabled()) log.debug("CLASSIC : IPToUse - " + ipToUse + " ; add : " + add + "; inetAddress : " + inetAddress + "; address - " + address
                            + "; App Name : " + _appName + "; Zone : " + zone + "; myZone - " + myZone + "; Host : " + iInfo.getHostName() + "; Instance Id - " + iInfo.getId());
                }
            }
            instances.add(address);
        }
        return instancesSpecific;
    }
}
| 12,052
| 55.586854
| 215
|
java
|
EVCache
|
EVCache-master/evcache-client/src/main/java/com/netflix/evcache/pool/eureka/DIEVCacheNodeListProvider.java
|
package com.netflix.evcache.pool.eureka;
import javax.inject.Inject;
import javax.inject.Provider;
import com.netflix.archaius.api.PropertyRepository;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.netflix.appinfo.ApplicationInfoManager;
import com.netflix.discovery.EurekaClient;
import com.netflix.evcache.pool.EVCacheNodeList;
import com.netflix.evcache.pool.SimpleNodeListProvider;
/**
 * Guice provider that selects the node-list implementation: the simple
 * (property-driven) provider when "evcache.use.simple.node.list.provider" is
 * true, otherwise the Eureka-backed discovery provider.
 */
public class DIEVCacheNodeListProvider implements Provider<EVCacheNodeList> {

    private static final Logger log = LoggerFactory.getLogger(DIEVCacheNodeListProvider.class);
    private final EurekaClient eurekaClient;
    private PropertyRepository props;
    private final ApplicationInfoManager applicationInfoManager;

    @Inject
    public DIEVCacheNodeListProvider(ApplicationInfoManager applicationInfoManager, EurekaClient eurekaClient, PropertyRepository props) {
        this.applicationInfoManager = applicationInfoManager;
        this.eurekaClient = eurekaClient;
        this.props = props;
    }

    @Override
    public EVCacheNodeList get() {
        final boolean useSimple = props.get("evcache.use.simple.node.list.provider", Boolean.class).orElse(false).get();
        final EVCacheNodeList provider = useSimple
                ? new SimpleNodeListProvider()
                : new EurekaNodeListProvider(applicationInfoManager, eurekaClient, props);
        if(log.isDebugEnabled()) log.debug("EVCache Node List Provider : " + provider);
        return provider;
    }
}
| 1,513
| 35.047619
| 138
|
java
|
EVCache
|
EVCache-master/evcache-client/src/main/java/com/netflix/evcache/connection/DIAsciiConnectionFactory.java
|
package com.netflix.evcache.connection;
import java.util.List;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.netflix.archaius.api.Property;
import com.netflix.discovery.EurekaClient;
import com.netflix.evcache.metrics.EVCacheMetricsFactory;
import com.netflix.evcache.pool.DIEVCacheKetamaNodeLocatorConfiguration;
import com.netflix.evcache.pool.EVCacheClient;
import com.netflix.evcache.pool.EVCacheNodeLocator;
import net.spy.memcached.DefaultHashAlgorithm;
import net.spy.memcached.MemcachedNode;
import net.spy.memcached.NodeLocator;
public class DIAsciiConnectionFactory extends BaseAsciiConnectionFactory {
    // Fix: logger made final — it is a constant, matching the convention used by
    // every other logger in this codebase (private static final).
    private static final Logger log = LoggerFactory.getLogger(DIAsciiConnectionFactory.class);
    private final EurekaClient eurekaClient;

    /**
     * Creates an ASCII-protocol connection factory whose node locator resolves
     * node hash keys through Eureka metadata.
     *
     * @param client           the EVCache client this factory builds connections for
     * @param eurekaClient     Eureka discovery client used by the node locator
     * @param len              maximum operation queue length
     * @param operationTimeout dynamic operation timeout property (millis)
     * @param opMaxBlockTime   max time to block waiting for queue space (millis)
     */
    DIAsciiConnectionFactory(EVCacheClient client, EurekaClient eurekaClient, int len, Property<Integer> operationTimeout, long opMaxBlockTime) {
        super(client, len, operationTimeout, opMaxBlockTime);
        client.addTag(EVCacheMetricsFactory.CONNECTION, "ASCII");
        this.eurekaClient = eurekaClient;
        if(log.isInfoEnabled()) log.info("Using ASCII Connection Factory!!!");
    }

    /** Builds the ketama node locator for the given nodes using Eureka-derived keys. */
    @Override
    public NodeLocator createLocator(List<MemcachedNode> list) {
        this.locator = new EVCacheNodeLocator(client, list, DefaultHashAlgorithm.KETAMA_HASH, new DIEVCacheKetamaNodeLocatorConfiguration(client, eurekaClient));
        return locator;
    }
}
| 1,447
| 37.105263
| 162
|
java
|
EVCache
|
EVCache-master/evcache-client/src/main/java/com/netflix/evcache/connection/DIConnectionFactoryBuilderProvider.java
|
package com.netflix.evcache.connection;
import com.netflix.archaius.api.Property;
import com.netflix.archaius.api.PropertyRepository;
import com.netflix.discovery.EurekaClient;
import com.netflix.evcache.pool.EVCacheClient;
import com.netflix.evcache.util.EVCacheConfig;
import net.spy.memcached.ConnectionFactory;
import javax.inject.Inject;
import javax.inject.Provider;
/**
 * DI-aware connection factory builder: exposes per-app tuning properties and
 * constructs a binary or ASCII connection factory depending on the
 * "evcache.use.binary.protocol" flag.
 */
public class DIConnectionFactoryBuilderProvider extends ConnectionFactoryBuilder implements Provider<IConnectionBuilder> {
    private final EurekaClient eurekaClient;
    private final PropertyRepository props;

    @Inject
    public DIConnectionFactoryBuilderProvider(EurekaClient eurekaClient, PropertyRepository props) {
        this.eurekaClient = eurekaClient;
        this.props = props;
    }

    @Override
    public ConnectionFactoryBuilder get() {
        return this;
    }

    /** Maximum operation queue length for the given app. */
    public int getMaxQueueLength(String appName) {
        return props.get(appName + ".max.queue.length", Integer.class).orElse(16384).get();
    }

    /** Max millis to block when the operation queue is full. */
    public int getOPQueueMaxBlockTime(String appName) {
        return props.get(appName + ".operation.QueueMaxBlockTime", Integer.class).orElse(10).get();
    }

    /** Dynamic operation timeout (millis) for the given app. */
    public Property<Integer> getOperationTimeout(String appName) {
        return props.get(appName + ".operation.timeout", Integer.class).orElse(2500);
    }

    /** Whether to use the binary memcached protocol (default true). */
    public boolean useBinaryProtocol() {
        // Fix: read through the injected PropertyRepository, consistent with the
        // other accessors in this class, instead of reaching through the
        // EVCacheConfig singleton (same key and default; presumably the singleton
        // wraps the same repository — this also makes the class testable).
        return props.get("evcache.use.binary.protocol", Boolean.class).orElse(true).get();
    }

    public EurekaClient getEurekaClient() {
        return eurekaClient;
    }

    public PropertyRepository getProps() {
        return props;
    }

    @Override
    public ConnectionFactory getConnectionFactory(EVCacheClient client) {
        final String appName = client.getAppName();
        if(useBinaryProtocol())
            return new DIConnectionFactory(client, eurekaClient, getMaxQueueLength(appName), getOperationTimeout(appName), getOPQueueMaxBlockTime(appName));
        else return new DIAsciiConnectionFactory(client, eurekaClient, getMaxQueueLength(appName), getOperationTimeout(appName), getOPQueueMaxBlockTime(appName));
    }
}
| 2,188
| 33.746032
| 163
|
java
|
EVCache
|
EVCache-master/evcache-client/src/main/java/com/netflix/evcache/connection/DIConnectionFactory.java
|
package com.netflix.evcache.connection;
import com.netflix.archaius.api.Property;
import com.netflix.discovery.EurekaClient;
import com.netflix.evcache.metrics.EVCacheMetricsFactory;
import com.netflix.evcache.pool.DIEVCacheKetamaNodeLocatorConfiguration;
import com.netflix.evcache.pool.EVCacheClient;
import com.netflix.evcache.pool.EVCacheNodeLocator;
import net.spy.memcached.DefaultHashAlgorithm;
import net.spy.memcached.MemcachedNode;
import net.spy.memcached.NodeLocator;
import java.util.List;
/**
 * Binary-protocol connection factory whose node locator resolves node hash
 * keys through Eureka metadata.
 */
public class DIConnectionFactory extends BaseConnectionFactory {

    private final EurekaClient eurekaClient;

    DIConnectionFactory(EVCacheClient client, EurekaClient eurekaClient, int len, Property<Integer> operationTimeout, long opMaxBlockTime) {
        super(client, len, operationTimeout, opMaxBlockTime);
        // Tag connection metrics so binary-protocol traffic can be distinguished.
        client.addTag(EVCacheMetricsFactory.CONNECTION, "BINARY");
        this.eurekaClient = eurekaClient;
    }

    @Override
    public NodeLocator createLocator(List<MemcachedNode> list) {
        final DIEVCacheKetamaNodeLocatorConfiguration locatorConfig =
                new DIEVCacheKetamaNodeLocatorConfiguration(client, eurekaClient);
        this.locator = new EVCacheNodeLocator(client, list, DefaultHashAlgorithm.KETAMA_HASH, locatorConfig);
        return locator;
    }
}
| 1,212
| 36.90625
| 162
|
java
|
EVCache
|
EVCache-master/evcache-client/src/main/java/com/netflix/evcache/connection/DIConnectionModule.java
|
package com.netflix.evcache.connection;
import com.google.inject.AbstractModule;
import com.google.inject.Singleton;
@Singleton
public class DIConnectionModule extends AbstractModule {

    public DIConnectionModule() {
    }

    @Override
    protected void configure() {
        // The connection builder is supplied lazily by its DI-aware provider.
        bind(IConnectionBuilder.class).toProvider(DIConnectionFactoryBuilderProvider.class);
    }

    @Override
    public boolean equals(Object obj) {
        return obj != null && obj.getClass() == getClass();
    }

    @Override
    public int hashCode() {
        return getClass().hashCode();
    }
}
| 595
| 20.285714
| 92
|
java
|
EVCache
|
EVCache-master/evcache-client/src/main/java/com/netflix/evcache/version/VersionTracker.java
|
package com.netflix.evcache.version;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.netflix.evcache.metrics.EVCacheMetricsFactory;
import com.netflix.evcache.pool.EVCacheClientPoolManager;
import com.netflix.spectator.api.BasicTag;
import com.netflix.spectator.api.Tag;
import javax.inject.Inject;
import javax.inject.Singleton;
@Singleton
public class VersionTracker implements Runnable {
    private static final Logger log = LoggerFactory.getLogger(VersionTracker.class);
    // Gauge is created lazily on the first run() and then re-asserted every 30s.
    private AtomicLong versionGauge;
    private final EVCacheClientPoolManager poolManager;

    /**
     * Schedules this tracker to publish the evcache-client version as a gauge.
     *
     * @param poolManager pool manager providing the shared scheduled executor
     */
    @Inject
    public VersionTracker(EVCacheClientPoolManager poolManager) {
        this.poolManager = poolManager;
        // NOTE(review): scheduling "this" from the constructor leaks a partially
        // constructed instance; tolerable only because run() depends on no state
        // initialized after this call — confirm before adding fields.
        poolManager.getEVCacheScheduledExecutor().schedule(this, 30, TimeUnit.SECONDS);
    }

    public void run() {
        // init the version information
        if(versionGauge == null) {
            final Package pkg = this.getClass().getPackage();
            final String implVersion = pkg.getImplementationVersion();
            final String fullVersion = (implVersion != null) ? implVersion : "unknown";
            // Bug fix: the original tested getImplementationVersion() here too, so
            // jarName could be null whenever the manifest title was absent but the
            // version was present; test the title itself.
            final String implTitle = pkg.getImplementationTitle();
            final String jarName = (implTitle != null) ? implTitle : "unknown";
            if(log.isInfoEnabled()) log.info("fullVersion : " + fullVersion + "; jarName : " + jarName);
            final List<Tag> tagList = new ArrayList<Tag>(3);
            tagList.add(new BasicTag("version", fullVersion));
            tagList.add(new BasicTag("jarName", jarName));
            versionGauge = EVCacheMetricsFactory.getInstance().getLongGauge("evcache-client", tagList);
        }
        versionGauge.set(1L);
        // Re-schedule ourselves; the gauge must be refreshed to stay reported.
        poolManager.getEVCacheScheduledExecutor().schedule(this, 30, TimeUnit.SECONDS);
    }

    @Override
    public int hashCode() {
        return getClass().hashCode();
    }

    @Override
    public boolean equals(Object obj) {
        return (obj != null) && (obj.getClass() == getClass());
    }
}
| 2,316
| 32.1
| 104
|
java
|
EVCache
|
EVCache-master/evcache-client/test/com/netflix/evcache/test/EVCacheTestDI.java
|
package com.netflix.evcache.test;
import static org.testng.Assert.assertNotNull;
import static org.testng.Assert.assertTrue;
import java.util.Map;
import java.util.Properties;
import java.util.*;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import com.netflix.evcache.*;
import com.netflix.evcache.pool.EVCacheClient;
import com.netflix.evcache.pool.ServerGroup;
import com.netflix.evcache.util.KeyHasher;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.testng.annotations.Test;
import com.netflix.evcache.operation.EVCacheOperationFuture;
import rx.schedulers.Schedulers;
import static org.testng.Assert.*;
public class EVCacheTestDI extends DIBase implements EVCacheGetOperationListener<String> {
private static final Logger log = LoggerFactory.getLogger(EVCacheTestDI.class);
private int loops = 1;
private Map<String, String> propertiesToSet;
private String appName = "EVCACHE_TEST";
public static void main(String args[]) {
try {
EVCacheTestDI test = new EVCacheTestDI();
test.testAll();
} catch(Throwable t) {
log.error(t.getMessage(), t);
}
}
public EVCacheTestDI() {
propertiesToSet = new HashMap<>();
propertiesToSet.putIfAbsent(appName + ".us-east-1d.EVCacheClientPool.writeOnly", "false");
propertiesToSet.putIfAbsent(appName + ".EVCacheClientPool.poolSize", "1");
propertiesToSet.putIfAbsent(appName + ".ping.servers", "false");
propertiesToSet.putIfAbsent(appName + ".cid.throw.exception", "true");
propertiesToSet.putIfAbsent(appName + ".EVCacheClientPool.readTimeout", "500");
propertiesToSet.putIfAbsent(appName + ".EVCacheClientPool.bulkReadTimeout", "500");
propertiesToSet.putIfAbsent(appName + ".max.read.queue.length", "20");
propertiesToSet.putIfAbsent("EVCacheClientPoolManager.log.apps", appName);
propertiesToSet.putIfAbsent(appName + ".fallback.zone", "true");
propertiesToSet.putIfAbsent(appName + ".enable.throttling", "false");
propertiesToSet.putIfAbsent(appName + ".throttle.time", "0");
propertiesToSet.putIfAbsent(appName + ".throttle.percent", "0");
propertiesToSet.putIfAbsent(appName + ".log.operation", "1000");
propertiesToSet.putIfAbsent(appName + ".EVCacheClientPool.validate.input.queue", "true");
propertiesToSet.putIfAbsent("evcache.use.binary.protocol", "false");
}
protected Properties getProps() {
Properties props = super.getProps();
propertiesToSet.entrySet().forEach(entry -> props.setProperty(entry.getKey(), entry.getValue()));
return props;
}
@Test
public void testEVCache() {
this.evCache = getNewBuilder().setAppName(appName).setCachePrefix("cid").enableRetry().build();
assertNotNull(evCache);
}
@Test(dependsOnMethods = { "testEVCache" })
public void testKeySizeCheck() throws Exception {
final String key = "This is an invalid key";
boolean exceptionThrown = false;
for (int i = 0; i < loops; i++) {
try {
if (log.isDebugEnabled()) log.debug("Check key : " + key );
evCache.<String>get(key);
} catch(Exception e) {
exceptionThrown = true;
if (log.isDebugEnabled()) log.debug("Check key : " + key + ": INVALID");
}
assertTrue(exceptionThrown);
}
final String longKey = "This_is_an_a_very_long_key.This_is_an_a_very_long_key.This_is_an_a_very_long_key.This_is_an_a_very_long_key.This_is_an_a_very_long_key.This_is_an_a_very_long_key.This_is_an_a_very_long_key.This_is_an_a_very_long_key.This_is_an_a_very_long_key.This_is_an_a_very_long_key.This_is_an_a_very_long_key.This_is_an_a_very_long_key.";
exceptionThrown = false;
for (int i = 0; i < loops; i++) {
try {
if (log.isDebugEnabled()) log.debug("Check key length : " + longKey );
evCache.<String>get(longKey);
} catch(Exception e) {
exceptionThrown = true;
if (log.isDebugEnabled()) log.debug("Check key length: " + longKey + ": INVALID");
}
assertTrue(exceptionThrown);
}
}
@Test(dependsOnMethods = { "testKeySizeCheck" })
public void testTouch() throws Exception {
for (int i = 0; i < loops; i++) {
touch(i, evCache);
}
}
@Test(dependsOnMethods = { "testTouch" })
public void testDelete() throws Exception {
for (int i = 0; i < loops; i++) {
delete(i, evCache);
}
}
@Test(dependsOnMethods = { "testDelete" })
public void testAdd() throws Exception {
for (int i = 0; i < loops; i++) {
add(i, evCache);
}
}
@Test(dependsOnMethods = { "testAdd" })
public void testInsertBinary() throws Exception {
for (int i = 0; i < loops; i++) {
assertTrue(insertBytes(i, evCache));
}
}
private boolean insertBytes(int i, EVCache gCache) throws Exception {
byte[] val = ("val_" + i).getBytes();
String key = "key_b_" + i;
Future<Boolean>[] status = gCache.set(key, val, 24 * 60 * 60);
for (Future<Boolean> s : status) {
if (log.isDebugEnabled()) log.debug("SET BYTES : key : " + key + "; success = " + s.get() + "; Future = " + s.toString());
if (s.get() == Boolean.FALSE) return false;
}
return true;
}
@Test(dependsOnMethods = { "testInsertBinary" })
public void testGetBytes() throws Exception {
for (int i = 0; i < loops; i++) {
String key = "key_b_" + i;
byte[] value = evCache.<byte[]> get(key);
if(value != null) {
if (log.isDebugEnabled()) log.debug("get : key : " + key + " val length = " + value.length);
}
assertNotNull(value);
}
}
@Test(dependsOnMethods = { "testGetBytes" })
public void testInsert() throws Exception {
for (int i = 0; i < loops; i++) {
assertTrue(insert(i, evCache));
}
}
@Test(dependsOnMethods = { "testInsert" })
public void testGet() throws Exception {
for (int i = 0; i < loops; i++) {
final String val = get(i, evCache);
assertNotNull(val);
assertTrue(val.equals("val_" + i));
}
}
@Test(dependsOnMethods = { "testGet" })
public void testGetAndTouch() throws Exception {
for (int i = 0; i < loops; i++) {
final String val = getAndTouch(i, evCache);
assertNotNull(val);
assertTrue(val.equals("val_" + i));
}
}
@Test(dependsOnMethods = { "testGetAndTouch" })
public void testBulk() throws Exception {
final String[] keys = new String[loops];
for (int i = 0; i < loops; i++) {
keys[i] = "key_" + i;
}
Map<String, String> vals = getBulk(keys, evCache);
assertNotNull(vals);
for (int i = 0; i < keys.length; i++) {
String key = keys[i];
String val = vals.get(key);
if (val == null) {
if (log.isDebugEnabled()) log.debug("key " + key + " returned null");
} else {
assertTrue(val.equals("val_" + i));
}
}
}
@Test(dependsOnMethods = { "testBulk" })
public void testBulkAndTouch() throws Exception {
final String[] keys = new String[loops];
for (int i = 0; i < loops; i++) {
keys[i] = "key_" + i;
}
Map<String, String> vals = getBulkAndTouch(keys, evCache, 24 * 60 * 60);
assertNotNull(vals);
for (int i = 0; i < vals.size(); i++) {
String key = "key_" + i;
String val = vals.get(key);
if (val == null) {
if (log.isDebugEnabled()) log.debug("key " + key + " returned null");
} else {
assertTrue(val.equals("val_" + i));
}
}
}
@Test(dependsOnMethods = { "testInsert" })
public void testGetObservable() throws Exception {
for (int i = 0; i < loops; i++) {
final String val = getObservable(i, evCache, Schedulers.computation());
assertNotNull(val);
assertTrue(val.equals("val_" + i));
}
}
@Test(dependsOnMethods = { "testGetObservable" })
public void testGetAndTouchObservable() throws Exception {
for (int i = 0; i < loops; i++) {
final String val = getAndTouchObservable(i, evCache, Schedulers.computation());
assertNotNull(val);
assertTrue(val.equals("val_" + i));
}
}
@Test(dependsOnMethods = { "testGetAndTouchObservable" })
public void waitForCallbacks() throws Exception {
Thread.sleep(1000);
}
@Test(dependsOnMethods = { "waitForCallbacks" })
public void testReplace() throws Exception {
for (int i = 0; i < 10; i++) {
replace(i, evCache);
}
}
@Test(dependsOnMethods = { "testReplace" })
public void testAppendOrAdd() throws Exception {
for (int i = 0; i < loops; i++) {
assertTrue(appendOrAdd(i, evCache));
}
}
private void refreshEVCache() {
setupEnv();
testEVCache();
}
/**
 * Runs the functional suite under every key-hashing configuration: no
 * hashing, app-level hashing, auto-hashing triggered by oversized keys,
 * and hashing configured per server group. Configuration is toggled by
 * mutating {@code propertiesToSet} and re-bootstrapping via refreshEVCache().
 */
@Test(dependsOnMethods = {"testAppendOrAdd"})
public void functionalTestsWithAppLevelAndASGLevelHashingScenarios() throws Exception {
    refreshEVCache();
    // no hashing
    assertFalse(manager.getEVCacheConfig().getPropertyRepository().get(appName + ".hash.key", Boolean.class).orElse(false).get());
    doFunctionalTests(false);
    // hashing at app level
    propertiesToSet.put(appName + ".hash.key", "true");
    refreshEVCache();
    assertTrue(manager.getEVCacheConfig().getPropertyRepository().get(appName + ".hash.key", Boolean.class).orElse(false).get());
    doFunctionalTests(true);
    propertiesToSet.remove(appName + ".hash.key");
    // hashing at app level due to auto hashing as a consequence of a large key
    propertiesToSet.put(appName + ".auto.hash.keys", "true");
    refreshEVCache();
    assertTrue(manager.getEVCacheConfig().getPropertyRepository().get(appName + ".auto.hash.keys", Boolean.class).orElse(false).get());
    assertFalse(manager.getEVCacheConfig().getPropertyRepository().get(appName + ".hash.key", Boolean.class).orElse(false).get());
    testWithLargeKey();
    // negative scenario
    propertiesToSet.remove(appName + ".auto.hash.keys");
    refreshEVCache();
    assertFalse(manager.getEVCacheConfig().getPropertyRepository().get(appName + ".auto.hash.keys", Boolean.class).orElse(false).get());
    assertFalse(manager.getEVCacheConfig().getPropertyRepository().get(appName + ".hash.key", Boolean.class).orElse(false).get());
    // with auto-hashing off, the oversized key is expected to be rejected
    assertThrows(IllegalArgumentException.class, () -> {
        testWithLargeKey();
    });
    // hashing at app level by choice AND different hashing at each asg
    Map<String, KeyHasher.HashingAlgorithm> hashingAlgorithmsByServerGroup = new HashMap<>();
    propertiesToSet.put(appName + ".hash.key", "true");
    refreshEVCache();
    assertTrue(manager.getEVCacheConfig().getPropertyRepository().get(appName + ".hash.key", Boolean.class).orElse(false).get());
    // get server group names, to be used to configure the ASG level hashing properties
    Map<ServerGroup, List<EVCacheClient>> clientsByServerGroup = manager.getEVCacheClientPool(appName).getAllInstancesByServerGroup();
    int i = 0;
    KeyHasher.HashingAlgorithm hashingAlgorithm = KeyHasher.HashingAlgorithm.values()[0];
    for (ServerGroup serverGroup : clientsByServerGroup.keySet()) {
        // use below logic to have different hashing per asg once the code supports. Currently the code caches the value that it uses for all the asgs
        // KeyHasher.HashingAlgorithm hashingAlgorithm = KeyHasher.HashingAlgorithm.values()[i++ % KeyHasher.HashingAlgorithm.values().length];
        hashingAlgorithmsByServerGroup.put(serverGroup.getName(), hashingAlgorithm);
        propertiesToSet.put(serverGroup.getName() + ".hash.key", "true");
        propertiesToSet.put(serverGroup.getName() + ".hash.algo", hashingAlgorithm.name());
    }
    refreshEVCache();
    clientsByServerGroup = manager.getEVCacheClientPool(appName).getAllInstancesByServerGroup();
    // validate hashing properties of asgs
    for (ServerGroup serverGroup : clientsByServerGroup.keySet()) {
        assertEquals(clientsByServerGroup.get(serverGroup).get(0).getHashingAlgorithm(), hashingAlgorithmsByServerGroup.get(serverGroup.getName()));
    }
    doFunctionalTests(true);
    // cleanup: drop the per-server-group overrides added above
    for (ServerGroup serverGroup : clientsByServerGroup.keySet()) {
        propertiesToSet.remove(serverGroup.getName());
    }
}
// Exercises a deliberately oversized key (100 timestamp strings concatenated)
// so the client's large-key/auto-hashing path is triggered; the value must
// still round-trip through set and get.
private void testWithLargeKey() throws Exception {
    final StringBuilder keyBuilder = new StringBuilder();
    int chunk = 0;
    while (chunk < 100) {
        keyBuilder.append(Long.toString(System.currentTimeMillis()));
        chunk++;
    }
    final String key = keyBuilder.toString();
    final String value = UUID.randomUUID().toString();
    // set
    final EVCacheLatch latch = evCache.set(key, value, EVCacheLatch.Policy.ALL);
    latch.await(1000, TimeUnit.MILLISECONDS);
    // get
    assertEquals(evCache.get(key), value);
}
/**
 * One pass of functional checks covering set, replace, add,
 * appendOrAdd/append, incr, decr, delete and TTL expiry.
 *
 * NOTE(review): key1..key6 are all derived from System.currentTimeMillis(),
 * so two keys generated within the same millisecond would collide -- confirm
 * whether that is acceptable for this test.
 *
 * @param isHashingEnabled when true, append-style operations are expected to
 *                         throw EVCacheException instead of succeeding
 * @throws Exception on unexpected cache failures
 */
private void doFunctionalTests(boolean isHashingEnabled) throws Exception {
    String key1 = Long.toString(System.currentTimeMillis());
    String value1 = UUID.randomUUID().toString();
    // set
    EVCacheLatch latch = evCache.set(key1, value1, EVCacheLatch.Policy.ALL);
    latch.await(1000, TimeUnit.MILLISECONDS);
    // get
    assertEquals(evCache.get(key1), value1);
    // replace
    value1 = UUID.randomUUID().toString();
    latch = evCache.replace(key1, value1, EVCacheLatch.Policy.ALL);
    latch.await(1000, TimeUnit.MILLISECONDS);
    // get
    assertEquals(evCache.get(key1), value1);
    // add a key
    String key2 = Long.toString(System.currentTimeMillis());
    String value2 = UUID.randomUUID().toString();
    latch = evCache.add(key2, value2, null, 1000, EVCacheLatch.Policy.ALL);
    latch.await(1000, TimeUnit.MILLISECONDS);
    // get
    assertEquals(evCache.get(key2), value2);
    // appendoradd - append case
    String value3 = UUID.randomUUID().toString();
    if (isHashingEnabled) {
        assertThrows(EVCacheException.class, () -> {
            evCache.appendOrAdd(key2, value3, null, 1000, EVCacheLatch.Policy.ALL);
        });
    } else {
        latch = evCache.appendOrAdd(key2, value3, null, 1000, EVCacheLatch.Policy.ALL);
        latch.await(3000, TimeUnit.MILLISECONDS);
        assertEquals(evCache.get(key2), value2 + value3);
    }
    // appendoradd - add case
    String key3 = Long.toString(System.currentTimeMillis());
    String value4 = UUID.randomUUID().toString();
    if (isHashingEnabled) {
        assertThrows(EVCacheException.class, () -> {
            evCache.appendOrAdd(key3, value4, null, 1000, EVCacheLatch.Policy.ALL);
        });
    } else {
        latch = evCache.appendOrAdd(key3, value4, null, 1000, EVCacheLatch.Policy.ALL);
        latch.await(3000, TimeUnit.MILLISECONDS);
        // get
        assertEquals(evCache.get(key3), value4);
    }
    // append
    String value5 = UUID.randomUUID().toString();
    if (isHashingEnabled) {
        assertThrows(EVCacheException.class, () -> {
            evCache.append(key3, value5, 1000);
        });
    } else {
        Future<Boolean> futures[] = evCache.append(key3, value5, 1000);
        for (Future future : futures) {
            assertTrue((Boolean) future.get());
        }
        // get
        assertEquals(evCache.get(key3), value4 + value5);
    }
    // incr: first call seeds the default (10), second adds 10 on top
    String key4 = Long.toString(System.currentTimeMillis());
    assertEquals(evCache.incr(key4, 1, 10, 1000), 10);
    assertEquals(evCache.incr(key4, 10, 10, 1000), 20);
    // decr
    String key5 = Long.toString(System.currentTimeMillis());
    assertEquals(evCache.decr(key5, 1, 10, 1000), 10);
    assertEquals(evCache.decr(key5, 20, 10, 1000), 0);
    // delete
    latch = evCache.delete(key1, EVCacheLatch.Policy.ALL);
    latch.await(1000, TimeUnit.MILLISECONDS);
    latch = evCache.delete(key2, EVCacheLatch.Policy.ALL);
    latch.await(1000, TimeUnit.MILLISECONDS);
    latch = evCache.delete(key3, EVCacheLatch.Policy.ALL);
    latch.await(1000, TimeUnit.MILLISECONDS);
    latch = evCache.delete(key4, EVCacheLatch.Policy.ALL);
    latch.await(1000, TimeUnit.MILLISECONDS);
    latch = evCache.delete(key5, EVCacheLatch.Policy.ALL);
    latch.await(1000, TimeUnit.MILLISECONDS);
    // test expiry
    String key6 = Long.toString(System.currentTimeMillis());
    assertEquals(evCache.incr(key6, 1, 10, 5), 10);
    Thread.sleep(5000);
    assertNull(evCache.get(key6));
    assertNull(evCache.get(key1));
    assertNull(evCache.get(key2));
    assertNull(evCache.get(key3));
    assertNull(evCache.get(key4));
    assertNull(evCache.get(key5));
}
/**
 * Standalone stress driver: bootstraps the environment, then repeatedly runs
 * the individual test methods for loops*1000 iterations, and terminates the
 * JVM with System.exit(0) when done. Intended to be invoked directly rather
 * than through the TestNG suite.
 */
public void testAll() {
    try {
        setupEnv();
        testEVCache();
        testDelete();
        testAdd();
        Thread.sleep(500);
        // testInsertBinary();
        testInsert();
        int i = 0;
        while (i++ < loops*1000) {
            try {
                testInsert();
                testGet();
                testGetAndTouch();
                testBulk();
                testBulkAndTouch();
                testGetObservable();
                testGetAndTouchObservable();
                waitForCallbacks();
                testAppendOrAdd();
                testTouch();
                testDelete();
                testInsert();
                if(i % 2 == 0) testDelete();
                testAdd();
                Thread.sleep(100);
            } catch (Throwable e) {
                // Log and continue: a single failed iteration must not stop the stress loop.
                log.error(e.getMessage(), e);
            }
        }
        if (log.isDebugEnabled()) log.debug("All Done!!!. Will exit.");
        System.exit(0);
    } catch (Exception e) {
        e.printStackTrace();
        log.error(e.getMessage(), e);
    }
}
/**
 * Completion callback that logs the key and resolved value of a finished
 * EVCache get operation.
 *
 * @param future completed operation future to report
 * @throws Exception if reading the future's value fails
 */
public void onComplete(EVCacheOperationFuture<String> future) throws Exception {
    if (log.isDebugEnabled()) log.debug("getl : key : " + future.getKey() + ", val = " + future.get());
}
}
| 19,099
| 38.70894
| 358
|
java
|
EVCache
|
EVCache-master/evcache-client/test/com/netflix/evcache/test/DIBase.java
|
package com.netflix.evcache.test;
import com.google.inject.Injector;
import com.netflix.appinfo.ApplicationInfoManager;
import com.netflix.archaius.config.MapConfig;
import com.netflix.archaius.guice.ArchaiusModule;
import com.netflix.discovery.guice.EurekaClientModule;
import com.netflix.evcache.EVCache;
import com.netflix.evcache.EVCacheLatch;
import com.netflix.evcache.EVCacheModule;
import com.netflix.evcache.EVCacheLatch.Policy;
import com.netflix.evcache.connection.DIConnectionModule;
import com.netflix.evcache.operation.EVCacheLatchImpl;
import com.netflix.evcache.pool.EVCacheClient;
import com.netflix.evcache.pool.EVCacheClientPoolManager;
import com.netflix.governator.guice.LifecycleInjector;
import com.netflix.governator.guice.LifecycleInjectorBuilder;
import com.netflix.governator.lifecycle.LifecycleManager;
import com.netflix.spectator.nflx.SpectatorModule;
import java.util.Arrays;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.testng.annotations.AfterSuite;
import org.testng.annotations.BeforeSuite;
import rx.Scheduler;
/**
 * TestNG base class for EVCache integration tests bootstrapped through
 * Guice/Governator dependency injection. Manages the injector lifecycle
 * around the suite (setupEnv/shutdownEnv) and provides helper methods that
 * exercise individual cache operations (set/get/add/replace/delete, touch,
 * latched writes, bulk reads, and Rx observables) against keys of the form
 * "key_&lt;i&gt;".
 */
@SuppressWarnings("unused")
public abstract class DIBase {
    private static final Logger log = LoggerFactory.getLogger(DIBase.class);
    // Shared handles populated by setupEnv() and used by subclasses.
    protected EVCache evCache = null;
    protected Injector injector = null;
    protected LifecycleManager lifecycleManager = null;
    protected EVCacheClientPoolManager manager = null;
    /**
     * Builds the Eureka/Archaius bootstrap properties. Desktop settings are
     * used when EC2_HOSTNAME is unset; cloud settings otherwise. Also forces
     * region/environment system properties and debug logging levels.
     */
    protected Properties getProps() {
        String hostname = System.getenv("EC2_HOSTNAME");
        Properties props = new Properties();
        if(hostname == null) {
            props.setProperty("eureka.datacenter", "datacenter");//change to ndc while running on desktop
            props.setProperty("eureka.validateInstanceId","false");
            props.setProperty("eureka.mt.connect_timeout","1");
            props.setProperty("eureka.mt.read_timeout","1");
        } else {
            props.setProperty("eureka.datacenter", "cloud");
            props.setProperty("eureka.validateInstanceId","true");
        }
        System.setProperty("@region", "us-east-1");
        System.setProperty("@environment", "test");
        System.setProperty("eureka.region", "us-east-1");
        System.setProperty("eureka.environment", "test");
        props.setProperty("eureka.environment", "test");
        props.setProperty("eureka.region", "us-east-1");
        props.setProperty("eureka.appid", "clatency");
        props.setProperty("eureka.serviceUrl.default","http://${eureka.region}.discovery${eureka.environment}.netflix.net:7001/discovery/v2/");
        props.setProperty("log4j.rootLogger", "DEBUG");
        System.setProperty("log4j.rootLogger", "DEBUG");
        props.setProperty("log4j.logger.com.netflix.evcache.test.DIBase", "DEBUG");
        props.setProperty("log4j.logger.com.netflix.evcache.test.EVCacheTestDI", "DEBUG");
        props.setProperty("log4j.logger.com.netflix.evcache.pool.EVCacheNodeLocator", "ERROR");
        props.setProperty("log4j.logger.com.netflix.evcache.pool.EVCacheClientUtil", "DEBUG");
        return props;
    }
    // Hook for subclasses to adjust properties before the suite starts; no-op here.
    public void setupTest(Properties props) {
    }
    /**
     * Builds the Governator injector with the Eureka/EVCache/Spectator modules
     * (overriding Archaius config with getProps()), starts the lifecycle, and
     * resolves the shared EVCacheClientPoolManager. Errors are logged rather
     * than rethrown.
     */
    @BeforeSuite
    public void setupEnv() {
        Properties props = getProps();
        try {
            LifecycleInjectorBuilder builder = LifecycleInjector.builder();
            builder.withModules(
                    new EurekaClientModule(),
                    new EVCacheModule(),
                    new DIConnectionModule(),
                    new SpectatorModule(),
                    new ArchaiusModule() {
                        protected void configureArchaius() {
                            bindApplicationConfigurationOverride().toInstance(MapConfig.from(props));
                        };
                    }
                    );
            injector = builder.build().createInjector();
            lifecycleManager = injector.getInstance(LifecycleManager.class);
            lifecycleManager.start();
            injector.getInstance(ApplicationInfoManager.class);
            final EVCacheModule lib = injector.getInstance(EVCacheModule.class);
            manager = injector.getInstance(EVCacheClientPoolManager.class);
        } catch (Throwable e) {
            e.printStackTrace();
            log.error(e.getMessage(), e);
        }
    }
    // Stops the Governator lifecycle after the suite.
    @AfterSuite
    public void shutdownEnv() {
        lifecycleManager.close();
    }
    // Resolves a fresh EVCache.Builder from the injector.
    protected EVCache.Builder getNewBuilder() {
        final EVCache.Builder evCacheBuilder = injector.getInstance(EVCache.Builder.class);
        if(log.isDebugEnabled()) log.debug("evCacheBuilder : " + evCacheBuilder);
        return evCacheBuilder;
    }
    /**
     * Appends ";APP_&lt;i&gt;" to "key_&lt;i&gt;" on every replica.
     *
     * @return true only if every replica reports success
     */
    protected boolean append(int i, EVCache gCache) throws Exception {
        String val = ";APP_" + i;
        String key = "key_" + i;
        Future<Boolean>[] status = gCache.append(key, val, 60 * 60);
        for (Future<Boolean> s : status) {
            if (log.isDebugEnabled()) log.debug("APPEND : key : " + key + "; success = " + s.get() + "; Future = " + s.toString());
            if (s.get() == Boolean.FALSE) return false;
        }
        return true;
    }
    // appendOrAdd with the default 1-hour TTL.
    protected boolean appendOrAdd(int i, EVCache gCache) throws Exception {
        return appendOrAdd(i, gCache, 60 * 60);
    }
    /**
     * Appends "val_aa_&lt;i&gt;" to "key_&lt;i&gt;" (adding it if absent) and waits on
     * the latch.
     *
     * NOTE(review): the latch result is computed and logged but the method
     * always returns true -- confirm whether the result should be propagated.
     */
    protected boolean appendOrAdd(int i, EVCache gCache, int ttl) throws Exception {
        String val = "val_aa_" + i;
        String key = "key_" + i;
        EVCacheLatch latch = gCache.appendOrAdd(key, val, null, ttl, Policy.ALL_MINUS_1);
        if(log.isDebugEnabled()) log.debug("AppendOrAdd : key : " + key + "; Latch = " + latch);
        boolean status = latch.await(2000, TimeUnit.MILLISECONDS);
        if(log.isDebugEnabled()) log.debug("AppendOrAdd : key : " + key + "; success = " + status);
        return true;
    }
    // Adds "val_add_<i>" under "key_<i>" with a 1-hour TTL; returns the client's status.
    public boolean add(int i, EVCache gCache) throws Exception {
        //String val = "This is a very long value that should work well since we are going to use compression on it. blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah val_"+i;
        String val = "val_add_"+i;
        String key = "key_" + i;
        boolean status = gCache.add(key, val, null, 60 * 60);
        if(log.isDebugEnabled()) log.debug("ADD : key : " + key + "; success = " + status);
        return status;
    }
    /**
     * Sets "key_&lt;i&gt;" to "val_&lt;i&gt;" with a 1-hour TTL.
     *
     * @return true only if every replica write succeeds
     */
    public boolean insert(int i, EVCache gCache) throws Exception {
        //String val = "This is a very long value that should work well since we are going to use compression on it. blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah val_"+i;
        String val = "val_"+i;
        String key = "key_" + i;
        Future<Boolean>[] status = gCache.set(key, val, 60 * 60);
        for(Future<Boolean> s : status) {
            if(log.isDebugEnabled()) log.debug("SET : key : " + key + "; success = " + s.get() + "; Future = " + s.toString());
            if(s.get() == Boolean.FALSE) return false;
        }
        return true;
    }
    // replace with the default 1-hour TTL.
    protected boolean replace(int i, EVCache gCache) throws Exception {
        return replace(i, gCache, 60 * 60);
    }
    /**
     * Replaces "key_&lt;i&gt;" with "val_replaced_&lt;i&gt;" and waits up to 1s.
     *
     * @return true if at least one replica reported success
     */
    protected boolean replace(int i, EVCache gCache, int ttl) throws Exception {
        String val = "val_replaced_" + i;
        String key = "key_" + i;
        EVCacheLatch status = gCache.replace(key, val, null, ttl, Policy.ALL);
        boolean opStatus = status.await(1000, TimeUnit.MILLISECONDS);
        if (log.isDebugEnabled()) log.debug("REPLACE : key : " + key + "; success = " + opStatus + "; EVCacheLatch = " + status);
        return status.getSuccessCount() > 0;
    }
    /**
     * Deletes "key_&lt;i&gt;" on every replica.
     *
     * @return true only if every replica reports success
     */
    public boolean delete(int i, EVCache gCache) throws Exception {
        String key = "key_" + i;
        Future<Boolean>[] status = gCache.delete(key);
        for(Future<Boolean> s : status) {
            if(log.isDebugEnabled()) log.debug("DELETE : key : " + key + "; success = " + s.get() + "; Future = " + s.toString());
            if(s.get() == Boolean.FALSE) return false;
        }
        return true;
    }
    // touch with the default 1-hour TTL.
    protected boolean touch(int i, EVCache gCache) throws Exception {
        return touch(i, gCache, 60 * 60);
    }
    /**
     * Refreshes the TTL of "key_&lt;i&gt;" on every replica.
     *
     * @return true only if every replica reports success
     */
    protected boolean touch(int i, EVCache gCache, int ttl) throws Exception {
        String key = "key_" + i;
        Future<Boolean>[] status = gCache.touch(key, ttl);
        for (Future<Boolean> s : status) {
            if (log.isDebugEnabled()) log.debug("TOUCH : key : " + key + "; success = " + s.get() + "; Future = " + s.toString());
            if (s.get() == Boolean.FALSE) return false;
        }
        return true;
    }
    /**
     * Writes "key_&lt;i&gt;" directly through every write-client using a shared
     * latch (Policy.ALL) and waits up to 1s for completion.
     */
    @SuppressWarnings("deprecation")
    protected boolean insertUsingLatch(int i, String app) throws Exception {
        String val = "val_" + i;
        String key = "key_" + i;
        long start = System.currentTimeMillis();
        final EVCacheClient[] clients = manager.getEVCacheClientPool(app).getEVCacheClientForWrite();
        final EVCacheLatch latch = new EVCacheLatchImpl(EVCacheLatch.Policy.ALL, clients.length, app);
        for (EVCacheClient client : clients) {
            client.set(key, val, 60 * 60, latch);
        }
        boolean success = latch.await(1000, TimeUnit.MILLISECONDS);
        if (log.isDebugEnabled()) log.debug("SET LATCH : key : " + key + "; Finished in " + (System.currentTimeMillis() - start) + " msec");
        return success;
    }
    /**
     * Deletes "key_&lt;i&gt;" directly through every write-client using a shared
     * latch. Always returns true; the latch outcome is only logged.
     */
    protected boolean deleteLatch(int i, String appName) throws Exception {
        long start = System.currentTimeMillis();
        String key = "key_" + i;
        final EVCacheClient[] clients = manager.getEVCacheClientPool(appName).getEVCacheClientForWrite();
        final EVCacheLatch latch = new EVCacheLatchImpl(Policy.ALL, clients.length, appName);
        for (EVCacheClient client : clients) {
            client.delete(key, latch);
        }
        latch.await(1000, TimeUnit.MILLISECONDS);
        if (log.isDebugEnabled()) log.debug("DELETE LATCH : key : " + key + "; Finished in " + (System.currentTimeMillis() - start) + " msec" + "; Latch : " + latch);
        return true;
    }
    // Reads "key_<i>" and returns its value (null if absent).
    public String get(int i, EVCache gCache) throws Exception {
        String key = "key_" + i;
        String value = gCache.<String>get(key);
        if(log.isDebugEnabled()) log.debug("get : key : " + key + " val = " + value);
        return value;
    }
    // Reads "key_<i>" and refreshes its TTL to 1 hour in the same call.
    public String getAndTouch(int i, EVCache gCache) throws Exception {
        String key = "key_" + i;
        String value = gCache.<String>getAndTouch(key, 60 * 60);
        if(log.isDebugEnabled()) log.debug("getAndTouch : key : " + key + " val = " + value);
        return value;
    }
    // Bulk-reads the given keys in one call.
    public Map<String, String> getBulk(String keys[], EVCache gCache) throws Exception {
        final Map<String, String> value = gCache.<String>getBulk(keys);
        if(log.isDebugEnabled()) log.debug("getBulk : keys : " + Arrays.toString(keys) + "; values = " + value);
        return value;
    }
    // Bulk-reads the given keys and refreshes each TTL in the same call.
    public Map<String, String> getBulkAndTouch(String keys[], EVCache gCache, int ttl) throws Exception {
        final Map<String, String> value = gCache.<String>getBulkAndTouch(Arrays.asList(keys), null, ttl);
        if(log.isDebugEnabled()) log.debug("getBulk : keys : " + Arrays.toString(keys) + "; values = " + value);
        return value;
    }
    // Reads "key_<i>" through the Rx observable API, blocking for the value.
    public String getObservable(int i, EVCache gCache, Scheduler scheduler) throws Exception {
        String key = "key_" + i;
        String value = gCache.<String>get(key, scheduler).toBlocking().value();
        if(log.isDebugEnabled()) log.debug("get : key : " + key + " val = " + value);
        return value;
    }
    // Observable variant of getAndTouch, blocking for the value.
    public String getAndTouchObservable(int i, EVCache gCache, Scheduler scheduler) throws Exception {
        String key = "key_" + i;
        String value = gCache.<String>getAndTouch(key, 60 * 60, scheduler).toBlocking().value();
        if(log.isDebugEnabled()) log.debug("getAndTouch : key : " + key + " val = " + value);
        return value;
    }
    /**
     * Runnable that hammers the cache with 100 insert/get/delete cycles;
     * intended to be run on a separate thread to simulate a remote caller.
     */
    class RemoteCaller implements Runnable {
        EVCache gCache;
        public RemoteCaller(EVCache c) {
            this.gCache = c;
        }
        public void run() {
            try {
                // NOTE(review): 'count' is never used -- leftover from an earlier version?
                int count = 1;
                for(int i = 0; i < 100; i++) {
                    insert(i, gCache);
                    get(i, gCache);
                    delete(i, gCache);
                }
            } catch (Exception e) {
                log.error(e.getMessage(), e);
            }
        }
    }
}
| 12,604
| 41.87415
| 215
|
java
|
EVCache
|
EVCache-master/evcache-zipkin-tracing/src/test/java/com/netflix/evcache/EVCacheTracingEventListenerUnitTests.java
|
package com.netflix.evcache;
import brave.Tracing;
import com.netflix.evcache.event.EVCacheEvent;
import com.netflix.evcache.pool.EVCacheClient;
import com.netflix.evcache.pool.EVCacheClientPoolManager;
import net.spy.memcached.CachedData;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
import org.testng.Assert;
import org.testng.annotations.BeforeMethod;
import org.testng.annotations.Test;
import zipkin2.Span;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import static org.mockito.Mockito.*;
/**
 * Unit tests for {@link EVCacheTracingEventListener}: drives the listener with
 * mocked EVCache events and verifies the Zipkin spans it reports.
 */
public class EVCacheTracingEventListenerUnitTests {
  List<zipkin2.Span> reportedSpans;
  EVCacheTracingEventListener tracingListener;
  EVCacheClient mockEVCacheClient;
  EVCacheEvent mockEVCacheEvent;

  @BeforeMethod
  public void resetMocks() {
    mockEVCacheClient = mock(EVCacheClient.class);
    when(mockEVCacheClient.getServerGroupName()).thenReturn("dummyServerGroupName");

    // Stub a GET event with fixed app/cache/key/status/latency metadata.
    mockEVCacheEvent = mock(EVCacheEvent.class);
    when(mockEVCacheEvent.getClients()).thenReturn(Arrays.asList(mockEVCacheClient));
    when(mockEVCacheEvent.getCall()).thenReturn(EVCache.Call.GET);
    when(mockEVCacheEvent.getAppName()).thenReturn("dummyAppName");
    when(mockEVCacheEvent.getCacheName()).thenReturn("dummyCacheName");
    when(mockEVCacheEvent.getEVCacheKeys())
        .thenReturn(Arrays.asList(new EVCacheKey("dummyAppName", "dummyKey", "dummyCanonicalKey", null, null, null, null)));
    when(mockEVCacheEvent.getStatus()).thenReturn("success");
    when(mockEVCacheEvent.getDurationInMillis()).thenReturn(1L);
    when(mockEVCacheEvent.getTTL()).thenReturn(0);
    when(mockEVCacheEvent.getCachedData())
        .thenReturn(new CachedData(1, "dummyData".getBytes(), 255));

    // Back the event's attribute accessors with a plain in-memory map.
    final Map<String, Object> attributes = new HashMap<>();
    doAnswer(invocation -> {
          final Object[] args = invocation.getArguments();
          attributes.put((String) args[0], args[1]);
          return null;
        })
        .when(mockEVCacheEvent)
        .setAttribute(any(), any());
    doAnswer(invocation -> attributes.get((String) invocation.getArguments()[0]))
        .when(mockEVCacheEvent)
        .getAttribute(any());

    // Capture every reported span into reportedSpans for assertions.
    reportedSpans = new ArrayList<>();
    final Tracing tracing = Tracing.newBuilder().spanReporter(reportedSpans::add).build();
    tracingListener =
        new EVCacheTracingEventListener(mock(EVCacheClientPoolManager.class), tracing.tracer());
  }

  /** Asserts exactly one CLIENT span named "evcache" carrying every required tag. */
  public void verifyCommonTags(List<zipkin2.Span> spans) {
    Assert.assertEquals(spans.size(), 1, "Number of expected spans are not matching");
    final zipkin2.Span span = spans.get(0);
    Assert.assertEquals(span.kind(), Span.Kind.CLIENT, "Span Kind are not equal");
    Assert.assertEquals(
        span.name(), EVCacheTracingEventListener.EVCACHE_SPAN_NAME, "Cache name are not equal");
    final Map<String, String> tags = span.tags();
    // Table of (tag key, label used in the failure message).
    final String[][] requiredTags = {
        {EVCacheTracingTags.APP_NAME, "APP_NAME"},
        {EVCacheTracingTags.CACHE_NAME_PREFIX, "CACHE_NAME_PREFIX"},
        {EVCacheTracingTags.CALL, "CALL"},
        {EVCacheTracingTags.SERVER_GROUPS, "SERVER_GROUPS"},
        {EVCacheTracingTags.CANONICAL_KEYS, "CANONICAL_KEYS"},
        {EVCacheTracingTags.STATUS, "STATUS"},
        {EVCacheTracingTags.LATENCY, "LATENCY"},
        {EVCacheTracingTags.DATA_TTL, "DATA_TTL"},
        {EVCacheTracingTags.DATA_SIZE, "DATA_SIZE"},
    };
    for (String[] required : requiredTags) {
      Assert.assertTrue(tags.containsKey(required[0]), required[1] + " tag is missing");
    }
  }

  /** Asserts the first span carries the ERROR tag. */
  public void verifyErrorTags(List<zipkin2.Span> spans) {
    final Map<String, String> tags = spans.get(0).tags();
    Assert.assertTrue(tags.containsKey(EVCacheTracingTags.ERROR), "ERROR tag is missing");
  }

  @Test
  public void testEVCacheListenerOnComplete() {
    // Successful round trip: start then complete should emit one tagged span.
    tracingListener.onStart(mockEVCacheEvent);
    tracingListener.onComplete(mockEVCacheEvent);
    verifyCommonTags(reportedSpans);
  }

  @Test
  public void testEVCacheListenerOnError() {
    // Failed round trip: the span must additionally carry the ERROR tag.
    tracingListener.onStart(mockEVCacheEvent);
    tracingListener.onError(mockEVCacheEvent, new RuntimeException("Unexpected Error"));
    verifyCommonTags(reportedSpans);
    verifyErrorTags(reportedSpans);
  }
}
| 5,064
| 38.88189
| 124
|
java
|
EVCache
|
EVCache-master/evcache-zipkin-tracing/src/main/java/com/netflix/evcache/EVCacheTracingEventListener.java
|
package com.netflix.evcache;
import brave.Span;
import brave.Tracer;
import com.netflix.evcache.event.EVCacheEvent;
import com.netflix.evcache.event.EVCacheEventListener;
import com.netflix.evcache.pool.EVCacheClient;
import com.netflix.evcache.pool.EVCacheClientPoolManager;
import net.spy.memcached.CachedData;
import org.apache.commons.lang3.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.List;
import java.util.stream.Collectors;
/**
 * Adds tracing tags for EvCache calls: an {@link EVCacheEventListener} that
 * starts a Brave CLIENT span when an EVCache operation begins and finishes it
 * (tagged with status, latency, TTL and payload size) when the operation
 * completes or errors. The in-flight span is carried on the event's
 * attributes because write operations complete asynchronously.
 */
public class EVCacheTracingEventListener implements EVCacheEventListener {
  /** Name assigned to every EVCache client span. (Made final: constants must not be reassignable.) */
  public static final String EVCACHE_SPAN_NAME = "evcache";
  private static final Logger logger = LoggerFactory.getLogger(EVCacheTracingEventListener.class);
  /** EVCacheEvent attribute key under which the in-flight span is stashed between start and finish. */
  private static final String CLIENT_SPAN_ATTRIBUTE_KEY = "clientSpanAttributeKey";
  private final Tracer tracer;

  /**
   * Creates the listener and self-registers with the pool manager so it
   * receives events for all EVCache operations.
   *
   * @param poolManager pool manager to register this listener with
   * @param tracer Brave tracer used to create client spans
   */
  public EVCacheTracingEventListener(EVCacheClientPoolManager poolManager, Tracer tracer) {
    poolManager.addEVCacheEventListener(this);
    this.tracer = tracer;
  }

  /** Starts a client span for the event and tags app/cache/call/server-group/key metadata. */
  @Override
  public void onStart(EVCacheEvent e) {
    try {
      Span clientSpan =
          this.tracer.nextSpan().kind(Span.Kind.CLIENT).name(EVCACHE_SPAN_NAME).start();
      // Return if tracing has been disabled
      if(clientSpan.isNoop()){
        return;
      }
      String appName = e.getAppName();
      this.safeTag(clientSpan, EVCacheTracingTags.APP_NAME, appName);
      String cacheNamePrefix = e.getCacheName();
      this.safeTag(clientSpan, EVCacheTracingTags.CACHE_NAME_PREFIX, cacheNamePrefix);
      String call = e.getCall().name();
      this.safeTag(clientSpan, EVCacheTracingTags.CALL, call);
      /**
       * Note - e.getClients() returns a list of clients associated with the EVCacheEvent.
       *
       * <p>Read operation will have only 1 EVCacheClient as reading from just 1 instance of cache
       * is sufficient. Write operations will have appropriate number of clients as each client will
       * attempt to write to its cache instance.
       */
      String serverGroup;
      List<String> serverGroups = new ArrayList<>();
      for (EVCacheClient client : e.getClients()) {
        serverGroup = client.getServerGroupName();
        if (StringUtils.isNotBlank(serverGroup)) {
          serverGroups.add("\"" + serverGroup + "\"");
        }
      }
      clientSpan.tag(EVCacheTracingTags.SERVER_GROUPS, serverGroups.stream().collect(Collectors.joining(",", "[", "]")));
      /**
       * Note - EVCache client creates a hash key if the given canonical key size exceeds 255
       * characters.
       *
       * <p>There have been cases where canonical key size exceeded few megabytes. As caching client
       * creates a hash of such canonical keys and optimizes the storage in the cache servers, it is
       * safe to annotate hash key instead of canonical key in such cases.
       */
      String hashKey;
      List<String> hashKeys = new ArrayList<>();
      List<String> canonicalKeys = new ArrayList<>();
      for (EVCacheKey keyObj : e.getEVCacheKeys()) {
        hashKey = keyObj.getHashKey();
        if (StringUtils.isNotBlank(hashKey)) {
          hashKeys.add("\"" + hashKey + "\"");
        } else {
          canonicalKeys.add("\"" + keyObj.getCanonicalKey() + "\"");
        }
      }
      if(hashKeys.size() > 0) {
        this.safeTag(clientSpan, EVCacheTracingTags.HASH_KEYS,
            hashKeys.stream().collect(Collectors.joining(",", "[", "]")));
      }
      if(canonicalKeys.size() > 0) {
        this.safeTag(clientSpan, EVCacheTracingTags.CANONICAL_KEYS,
            canonicalKeys.stream().collect(Collectors.joining(",", "[", "]")));
      }
      /**
       * Note - tracer.spanInScope(...) method stores Spans in the thread local object.
       *
       * <p>As EVCache write operations are asynchronous and quorum based, we are avoiding attaching
       * clientSpan with tracer.spanInScope(...) method. Instead, we are storing the clientSpan as
       * an object in the EVCacheEvent's attributes.
       */
      e.setAttribute(CLIENT_SPAN_ATTRIBUTE_KEY, clientSpan);
    } catch (Exception exception) {
      // NOTE(review): if this fires after the span was started but before it was
      // stashed on the event, that span is never finished -- confirm acceptable.
      logger.error("onStart exception", exception);
    }
  }

  /** Finishes the span for a successful operation. */
  @Override
  public void onComplete(EVCacheEvent e) {
    try {
      this.onFinishHelper(e, null);
    } catch (Exception exception) {
      logger.error("onComplete exception", exception);
    }
  }

  /** Finishes the span for a failed operation, tagging the error. */
  @Override
  public void onError(EVCacheEvent e, Throwable t) {
    try {
      this.onFinishHelper(e, t);
    } catch (Exception exception) {
      logger.error("onError exception", exception);
    }
  }

  /**
   * On throttle is not a trace event, but it is used to decide whether to throttle. We don't want
   * to interfere so always return false.
   */
  @Override
  public boolean onThrottle(EVCacheEvent e) throws EVCacheException {
    return false;
  }

  /**
   * Retrieves the span stashed by onStart, tags outcome metadata (error,
   * status, latency, TTL, payload size), and always finishes the span.
   *
   * @param e the finished event
   * @param t failure cause, or null on success
   */
  private void onFinishHelper(EVCacheEvent e, Throwable t) {
    Object clientSpanObj = e.getAttribute(CLIENT_SPAN_ATTRIBUTE_KEY);
    // Return if the previously saved Client Span is null
    if (clientSpanObj == null) {
      return;
    }
    Span clientSpan = (Span) clientSpanObj;
    try {
      if (t != null) {
        this.safeTag(clientSpan, EVCacheTracingTags.ERROR, t.toString());
      }
      String status = e.getStatus();
      this.safeTag(clientSpan, EVCacheTracingTags.STATUS, status);
      long latency = this.getDurationInMicroseconds(e.getDurationInMillis());
      clientSpan.tag(EVCacheTracingTags.LATENCY, String.valueOf(latency));
      int ttl = e.getTTL();
      clientSpan.tag(EVCacheTracingTags.DATA_TTL, String.valueOf(ttl));
      CachedData cachedData = e.getCachedData();
      if (cachedData != null) {
        int cachedDataSize = cachedData.getData().length;
        clientSpan.tag(EVCacheTracingTags.DATA_SIZE, String.valueOf(cachedDataSize));
      }
    } finally {
      // Always close the span, even if tagging throws.
      clientSpan.finish();
    }
  }

  /** Tags the span only when the value is non-blank (Brave rejects null/empty values). */
  private void safeTag(Span span, String key, String value) {
    if (StringUtils.isNotBlank(value)) {
      span.tag(key, value);
    }
  }

  /**
   * Converts a millisecond duration to microseconds, passing through the
   * -1 sentinel used when the event has no end time.
   */
  private long getDurationInMicroseconds(long durationInMillis) {
    // EVCacheEvent returns durationInMillis as -1 if endTime is not available.
    if(durationInMillis == -1){
      return durationInMillis;
    } else {
      // Since the underlying EVCacheEvent returns duration in milliseconds we already
      // lost the required precision for conversion to microseconds. Multiplication
      // by 1000 should suffice here.
      return durationInMillis * 1000;
    }
  }
}
| 6,573
| 32.886598
| 121
|
java
|
EVCache
|
EVCache-master/evcache-zipkin-tracing/src/main/java/com/netflix/evcache/EVCacheTracingTags.java
|
package com.netflix.evcache;
/**
 * Tag keys used when annotating tracing spans for EVCache operations.
 * All fields are string constants; they were previously mutable
 * {@code public static} fields and are now {@code final} so they cannot be
 * reassigned at runtime.
 */
public class EVCacheTracingTags {
  public static final String CACHE_NAME_PREFIX = "evcache.cache_name_prefix";
  public static final String APP_NAME = "evcache.app_name";
  public static final String STATUS = "evcache.status";
  public static final String LATENCY = "evcache.latency";
  public static final String CALL = "evcache.call";
  public static final String SERVER_GROUPS = "evcache.server_groups";
  public static final String HASH_KEYS = "evcache.hash_keys";
  public static final String CANONICAL_KEYS = "evcache.canonical_keys";
  public static final String DATA_TTL = "evcache.data_ttl";
  public static final String DATA_SIZE = "evcache.data_size";
  public static final String ERROR = "evcache.error";
}
| 706
| 43.1875
| 73
|
java
|
EVCache
|
EVCache-master/evcacheproxy/src/main/java/com/netflix/evcservice/service/StatusPage.java
|
package com.netflix.evcservice.service;
import com.google.inject.Singleton;
import java.io.PrintWriter;
import com.netflix.server.base.BaseStatusPage;
/**
* Created by senugula on 03/22/15.
*/
/**
 * Admin status page servlet for the EVCache proxy service. Delegates to
 * {@link BaseStatusPage} and provides a hook for service-specific details.
 */
@Singleton
public class StatusPage extends BaseStatusPage {
    private static final long serialVersionUID = 1L;
    /**
     * Writes status details to the response; currently only the base output.
     *
     * @param out     writer receiving the status body
     * @param htmlize presumably whether output should be HTML-formatted --
     *                confirm against BaseStatusPage
     */
    @Override
    protected void getDetails(PrintWriter out, boolean htmlize) {
        super.getDetails(out, htmlize);
        // Add any extra status info here
    }
}
| 473
| 22.7
| 64
|
java
|
EVCache
|
EVCache-master/evcacheproxy/src/main/java/com/netflix/evcache/service/EVCacheServiceModule.java
|
package com.netflix.evcache.service;
import com.google.inject.AbstractModule;
import com.netflix.appinfo.ApplicationInfoManager;
import com.netflix.config.ConfigurationManager;
import com.netflix.discovery.guice.EurekaModule;
import com.netflix.evcache.EVCacheModule;
import com.netflix.evcache.connection.ConnectionModule;
import com.netflix.evcache.service.resources.EVCacheRESTService;
import com.netflix.governator.ShutdownHookModule;
import com.netflix.spectator.nflx.SpectatorModule;
import com.sun.jersey.guice.JerseyServletModule;
import com.sun.jersey.guice.spi.container.servlet.GuiceContainer;
import netflix.adminresources.resources.KaryonWebAdminModule;
/**
 * Guice module for the EVCache proxy service. The entire binding set is
 * currently commented out, so installing this module is a no-op.
 *
 * NOTE(review): confirm whether the disabled wiring below is still the
 * intended configuration or should be removed; it is retained here verbatim
 * for reference.
 */
public class EVCacheServiceModule extends AbstractModule {
    @Override
    protected void configure() {
        // Intentionally empty: the Governator/Jersey wiring below is disabled.
//        try {
//            ConfigurationManager.loadAppOverrideProperties("evcacheproxy");
//            final String env = ConfigurationManager.getConfigInstance().getString("eureka.environment", "test");
//            if(env != null && env.length() > 0) {
//                ConfigurationManager.loadAppOverrideProperties("evcacheproxy-"+env);
//            }
//        } catch (Exception e) {
//            e.printStackTrace();
//        }
//
//
//        install(new ShutdownHookModule());
//        install(new EurekaModule());
//        install(new SpectatorModule());
//        install(new ConnectionModule());
//        install(new EVCacheModule());
//        install(new KaryonWebAdminModule());
//        install(new JerseyServletModule() {
//            protected void configureServlets() {
//                serve("/*").with(GuiceContainer.class);
//                binder().bind(GuiceContainer.class).asEagerSingleton();
//                bind(EVCacheRESTService.class).asEagerSingleton();
//                bind(HealthCheckHandlerImpl.class).asEagerSingleton();
//            }
//        });
    }
}
| 1,903
| 37.857143
| 114
|
java
|
EVCache
|
EVCache-master/evcacheproxy/src/main/java/com/netflix/evcache/service/HealthCheckHandlerImpl.java
|
package com.netflix.evcache.service;
import com.google.inject.Singleton;
import com.netflix.server.base.BaseHealthCheckServlet;
/**
* Created by senugula on 03/22/15.
*/
/**
 * Health-check servlet for the EVCache proxy service.
 * Currently always reports healthy (HTTP 200); see the TODO below.
 */
@Singleton
public class HealthCheckHandlerImpl extends BaseHealthCheckServlet {
    // Always healthy for now; a real check is still to be implemented.
    public int getStatus() {
        return 200; // TODO
    }
}
| 320
| 20.4
| 68
|
java
|
EVCache
|
EVCache-master/evcacheproxy/src/main/java/com/netflix/evcache/service/StartServer.java
|
package com.netflix.evcache.service;
import java.util.HashMap;
import java.util.Map;
import javax.servlet.ServletContextEvent;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.inject.Injector;
import com.google.inject.servlet.ServletModule;
import com.netflix.evcache.EVCacheClientLibrary;
import com.netflix.evcache.service.resources.EVCacheRESTService;
import com.netflix.evcservice.service.StatusPage;
import com.netflix.server.base.BaseHealthCheckServlet;
import com.netflix.server.base.BaseStatusPage;
import com.netflix.server.base.NFFilter;
import com.netflix.server.base.lifecycle.BaseServerLifecycleListener;
import com.sun.jersey.api.core.ResourceConfig;
import com.sun.jersey.api.core.PackagesResourceConfig;
import com.sun.jersey.guice.JerseyServletModule;
import com.sun.jersey.guice.spi.container.servlet.GuiceContainer;
/**
 * Servlet-context lifecycle listener that bootstraps the EVCache proxy:
 * eagerly initializes the EVCache client library and wires the Jersey/Guice
 * servlet stack (filters, status and health-check pages, REST resources).
 */
public class StartServer extends BaseServerLifecycleListener
{
    private static final Logger logger = LoggerFactory.getLogger(StartServer.class);
    private static final String APP_NAME = "evcacheproxy";
    private static final String CONFIG_NAME = "evcacheproxy";

    /**
     * Creates a new StartServer object.
     */
    public StartServer() {
        super(CONFIG_NAME, APP_NAME, null);
    }

    /**
     * Eagerly forces creation of the EVCache client library so connections
     * are established at startup rather than on the first request.
     */
    @Override
    protected void initialize(ServletContextEvent sce) throws Exception {
        Injector injector = getInjector();
        injector.getInstance(EVCacheClientLibrary.class);
    }

    /**
     * Builds the Jersey servlet module. Registration order matters: the more
     * specific {@code /healthcheck} and {@code /status} servlets must be bound
     * before the catch-all {@code /*} Guice container.
     */
    @Override
    protected ServletModule getServletModule() {
        return new JerseyServletModule() {
            @Override
            protected void configureServlets() {
                logger.info("########## CONFIGURING SERVLETS ##########");
                // initialize NFFilter
                Map<String, String> initParams = new HashMap<String,String>();
                // initParams.put(ServletContainer.JSP_TEMPLATES_BASE_PATH, "/WEB-INF/jsp");
                // initParams.put(ServletContainer.FEATURE_FILTER_FORWARD_ON_404, "true");
                // initParams.put("requestId.accept", "true");
                // initParams.put("requestId.require", "true");
                // Disable WADL generation and point Jersey at the resource package.
                initParams.put(ResourceConfig.FEATURE_DISABLE_WADL, "true");
                initParams.put(PackagesResourceConfig.PROPERTY_PACKAGES, "com.netflix.evcache.service.resources");
                filter("/*").through(NFFilter.class, initParams);
                filter("/healthcheck", "/status").through(NFFilter.class, initParams);
                serve("/Status", "/status").with(BaseStatusPage.class);
                serve("/healthcheck", "/Healthcheck").with(BaseHealthCheckServlet.class);
                serve("/*").with(GuiceContainer.class, initParams);
                bind(EVCacheRESTService.class).asEagerSingleton();
                binder().bind(GuiceContainer.class).asEagerSingleton();
                install(new EVCacheServiceModule());
            }
        };
    }
}
| 2,968
| 39.121622
| 114
|
java
|
EVCache
|
EVCache-master/evcacheproxy/src/main/java/com/netflix/evcache/service/transcoder/RESTServiceTranscoder.java
|
package com.netflix.evcache.service.transcoder;
import net.spy.memcached.CachedData;
import net.spy.memcached.transcoders.SerializingTranscoder;
/**
 * Transcoder used by the REST proxy: values are shuttled through as raw
 * {@link CachedData} so the proxy never (de)serializes application objects.
 * The only transformation applied is transparent decompression on read when
 * the compression flag bit is set.
 */
public class RESTServiceTranscoder extends SerializingTranscoder {

    /** Flag bit set by SerializingTranscoder when the payload was compressed. */
    static final int COMPRESSED = 2;

    public RESTServiceTranscoder() {
    }

    /** Decoding is at most a decompress, so never schedule it asynchronously. */
    public boolean asyncDecode(CachedData d) {
        return false;
    }

    /**
     * Returns the cached payload, decompressing it when the COMPRESSED flag
     * bit is present; otherwise the data is passed through untouched.
     */
    public CachedData decode(CachedData d) {
        if ((d.getFlags() & COMPRESSED) != 0) {
            // Fixed: MAX_SIZE is a static field; reference it via the class
            // rather than through the instance (d.MAX_SIZE).
            d = new CachedData(d.getFlags(), super.decompress(d.getData()), CachedData.MAX_SIZE);
        }
        return d;
    }

    /** Payloads arrive pre-encoded from the caller, so encode is the identity. */
    public CachedData encode(CachedData o) {
        return o;
    }

    public int getMaxSize() {
        return CachedData.MAX_SIZE;
    }
}
| 777
| 21.228571
| 88
|
java
|
EVCache
|
EVCache-master/evcacheproxy/src/main/java/com/netflix/evcache/service/resources/EVCacheRESTService.java
|
package com.netflix.evcache.service.resources;
import com.google.inject.Inject;
import com.google.inject.Singleton;
import com.netflix.evcache.EVCache;
import com.netflix.evcache.EVCacheException;
import com.netflix.evcache.EVCacheLatch;
import com.netflix.evcache.EVCacheLatch.Policy;
import com.netflix.evcache.service.transcoder.RESTServiceTranscoder;
import net.spy.memcached.CachedData;
import org.apache.commons.io.IOUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.ws.rs.*;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
import java.io.InputStream;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
/**
* Created by senugula on 3/22/16.
*/
/**
 * JAX-RS facade exposing basic EVCache operations (set/get/delete) over HTTP
 * under {@code /evcrest/v1.0/{appId}/{key}}. Payloads are treated as opaque
 * octet streams; an optional {@code flag} query parameter carries memcached
 * client flags through unchanged.
 *
 * <p>This bean is a Guice {@code @Singleton}, so it is shared across request
 * threads — see {@link #getEVCache(String)} for the required synchronization.
 */
@Singleton
@Path("/evcrest/v1.0")
public class EVCacheRESTService {

    private static final Logger logger = LoggerFactory.getLogger(EVCacheRESTService.class);

    private final EVCache.Builder builder;
    // Lazily-populated cache of per-app EVCache clients; guarded by getEVCache().
    private final Map<String, EVCache> evCacheMap;
    private final RESTServiceTranscoder evcacheTranscoder = new RESTServiceTranscoder();

    @Inject
    public EVCacheRESTService(EVCache.Builder builder) {
        this.builder = builder;
        this.evCacheMap = new HashMap<>();
    }

    @POST
    @Path("{appId}/{key}")
    @Consumes({MediaType.APPLICATION_OCTET_STREAM})
    @Produces(MediaType.TEXT_PLAIN)
    public Response setOperation(final InputStream in, @PathParam("appId") String pAppId, @PathParam("key") String key,
                                 @QueryParam("ttl") String ttl, @DefaultValue("") @QueryParam("flag") String flag) {
        return writeOperation(in, pAppId, key, ttl, flag);
    }

    @PUT
    @Path("{appId}/{key}")
    @Consumes({MediaType.APPLICATION_OCTET_STREAM})
    @Produces(MediaType.TEXT_PLAIN)
    public Response putOperation(final InputStream in, @PathParam("appId") String pAppId, @PathParam("key") String key,
                                 @QueryParam("ttl") String ttl, @DefaultValue("") @QueryParam("flag") String flag) {
        return writeOperation(in, pAppId, key, ttl, flag);
    }

    /**
     * Shared body of POST and PUT (they were byte-for-byte duplicates): reads
     * the request payload and stores it, mapping any failure to a 500.
     */
    private Response writeOperation(final InputStream in, String pAppId, String key, String ttl, String flag) {
        try {
            final String appId = pAppId.toUpperCase();
            final byte[] bytes = IOUtils.toByteArray(in);
            return setData(appId, ttl, flag, key, bytes);
        } catch (EVCacheException e) {
            e.printStackTrace();
            return Response.serverError().build();
        } catch (Throwable t) {
            return Response.serverError().build();
        }
    }

    /**
     * Stores {@code bytes} under {@code key} with the given TTL, waiting up to
     * 2.5s for all-minus-one replicas to acknowledge.
     *
     * @return 200 on (possibly partial) success, 400 when ttl is missing,
     *         500 when no replica succeeded
     */
    private Response setData(String appId, String ttl, String flag, String key, byte[] bytes) throws EVCacheException, InterruptedException {
        final EVCache evcache = getEVCache(appId);
        if (ttl == null) {
            return Response.status(400).type("text/plain").entity("Please specify ttl for the key " + key + " as query parameter \n").build();
        }
        final int timeToLive = Integer.parseInt(ttl);
        EVCacheLatch latch;
        if (flag != null && flag.length() > 0) {
            // Caller supplied memcached flags: wrap the payload so the flags
            // are stored verbatim alongside the data.
            final CachedData cd = new CachedData(Integer.parseInt(flag), bytes, Integer.MAX_VALUE);
            latch = evcache.set(key, cd, timeToLive, Policy.ALL_MINUS_1);
        } else {
            latch = evcache.set(key, bytes, timeToLive, Policy.ALL_MINUS_1);
        }
        if (latch != null) {
            final boolean status = latch.await(2500, TimeUnit.MILLISECONDS);
            if (status) {
                return Response.ok("Set Operation for Key - " + key + " was successful. \n").build();
            }
            // Timed out: report partial success if at least one server group
            // completed successfully, otherwise fall through to a 500.
            if (latch.getCompletedCount() > 0 && latch.getSuccessCount() > 0) {
                return Response.ok("Set Operation for Key - " + key + " was successful in " + latch.getSuccessCount() + " Server Groups. \n").build();
            }
        }
        return Response.serverError().build();
    }

    /**
     * Fetches the raw bytes stored under {@code key}.
     *
     * @return 200 with the payload, 404 when the key is absent, 500 on error
     */
    @GET
    @Path("{appId}/{key}")
    @Produces({MediaType.APPLICATION_OCTET_STREAM})
    public Response getOperation(@PathParam("appId") String appId,
                                 @PathParam("key") String key) {
        appId = appId.toUpperCase();
        if (logger.isDebugEnabled()) logger.debug("Get for application " + appId + " for Key " + key);
        try {
            final EVCache evCache = getEVCache(appId);
            CachedData cachedData = (CachedData) evCache.get(key, evcacheTranscoder);
            if (cachedData == null) {
                return Response.status(404).type("text/plain").entity("Key " + key + " Not Found in cache " + appId + "\n").build();
            }
            byte[] bytes = cachedData.getData();
            if (bytes == null) {
                return Response.status(404).type("text/plain").entity("Key " + key + " Not Found in cache " + appId + "\n").build();
            } else {
                return Response.status(200).type("application/octet-stream").entity(bytes).build();
            }
        } catch (EVCacheException e) {
            e.printStackTrace();
            return Response.serverError().build();
        }
    }

    /**
     * Deletes {@code key} from every replica, waiting up to 2.5s per future.
     *
     * @return 200 when all replicas acknowledged the delete, 500 otherwise
     */
    @DELETE
    @Path("{appId}/{key}")
    @Consumes(MediaType.APPLICATION_JSON)
    @Produces("text/plain")
    public Response deleteOperation(@PathParam("appId") String appId, @PathParam("key") String key) {
        // Fixed log message: this is a delete, not a get.
        if (logger.isDebugEnabled()) logger.debug("Delete for application " + appId + " for Key " + key);
        appId = appId.toUpperCase();
        final EVCache evCache = getEVCache(appId);
        try {
            final Future<Boolean>[] futures = evCache.delete(key);
            // Bug fix: the old code evaluated _future.equals(Boolean.TRUE) on the
            // Future[] itself, which can never be true, so per-replica results
            // were silently ignored. Inspect each future instead.
            for (Future<Boolean> future : futures) {
                if (future.get(2500, TimeUnit.MILLISECONDS) == Boolean.FALSE) {
                    return Response.serverError().build();
                }
            }
            if (logger.isDebugEnabled()) logger.debug("delete key is successful");
            return Response.ok("Deleted Operation for Key - " + key + " was successful. \n").build();
        } catch (EVCacheException e) {
            e.printStackTrace();
            return Response.serverError().build();
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt(); // preserve interrupt status
            return Response.serverError().build();
        } catch (Exception e) {
            // ExecutionException / TimeoutException from Future.get()
            return Response.serverError().build();
        }
    }

    /**
     * Returns the EVCache client for {@code appId}, lazily building and caching
     * it on first use. Synchronized because this singleton's backing HashMap is
     * shared across request threads (unsynchronized check-then-act on a
     * HashMap is not thread-safe).
     */
    private synchronized EVCache getEVCache(String appId) {
        EVCache evCache = evCacheMap.get(appId);
        if (evCache != null) return evCache;
        evCache = builder.setAppName(appId).build();
        evCacheMap.put(appId, evCache);
        return evCache;
    }
}
| 6,689
| 38.585799
| 146
|
java
|
EVCache
|
EVCache-master/evcache-core/src/test/java/com/netflix/evcache/test/SimpleEVCacheTest.java
|
package com.netflix.evcache.test;
import java.util.Arrays;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.Future;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import com.netflix.evcache.EVCacheSerializingTranscoder;
import net.spy.memcached.CachedData;
import net.spy.memcached.transcoders.SerializingTranscoder;
import org.apache.log4j.BasicConfigurator;
import org.apache.log4j.ConsoleAppender;
import org.apache.log4j.Level;
import org.apache.log4j.LogManager;
import org.apache.log4j.Logger;
import org.apache.log4j.PatternLayout;
import org.testng.annotations.BeforeSuite;
import org.testng.annotations.Test;
import com.netflix.evcache.EVCache;
import com.netflix.evcache.EVCacheImpl;
import com.netflix.evcache.EVCacheLatch.Policy;
import com.netflix.evcache.pool.EVCacheClient;
import com.netflix.evcache.pool.EVCacheClientPool;
import com.netflix.evcache.pool.EVCacheClientPoolManager;
import rx.schedulers.Schedulers;
import static org.testng.Assert.*;
/**
 * TestNG-driven integration/soak test for the EVCache client. Can be launched
 * standalone via {@link #main(String[])}, in which case {@link #testAll()}
 * loops forever exercising set/get/transcoder paths against a live cluster.
 * Test ordering is enforced via {@code dependsOnMethods}.
 */
@SuppressWarnings({"unused","deprecation"})
public class SimpleEVCacheTest extends Base {
    private static final Logger log = LogManager.getLogger(SimpleEVCacheTest.class);
    private static final String APP_NAME = "EVCACHE_TEST";
    private static final String ALIAS_APP_NAME = "EVCACHE";
    // Executor used only to run StatusChecker tasks for async set operations.
    private ThreadPoolExecutor pool = null;
    public static void main(String args[]) {
        SimpleEVCacheTest test = new SimpleEVCacheTest();
        test.setProps();
        test.setupEnv();
        test.testAll();
    }
    /**
     * Configures log4j and the system properties the EVCache client pool reads
     * (timeouts, queue lengths, alias handling), then pre-starts the worker pool.
     */
    @BeforeSuite
    public void setProps() {
        BasicConfigurator.resetConfiguration();
        BasicConfigurator.configure(new ConsoleAppender(new PatternLayout("%d{HH:mm:ss,SSS} [%t] %p %c %x - %m%n")));
        Logger.getRootLogger().setLevel(Level.INFO);
        Logger.getLogger(SimpleEVCacheTest.class).setLevel(Level.DEBUG);
        Logger.getLogger(Base.class).setLevel(Level.DEBUG);
        Logger.getLogger(EVCacheImpl.class).setLevel(Level.DEBUG);
        Logger.getLogger(EVCacheClient.class).setLevel(Level.DEBUG);
        Logger.getLogger(EVCacheClientPool.class).setLevel(Level.DEBUG);
        final Properties props = getProps();
        props.setProperty(APP_NAME + ".EVCacheClientPool.zoneAffinity", "false");
        props.setProperty(APP_NAME + ".use.simple.node.list.provider", "true");
        props.setProperty(APP_NAME + ".EVCacheClientPool.readTimeout", "1000");
        props.setProperty(APP_NAME + ".EVCacheClientPool.bulkReadTimeout", "1000");
        props.setProperty(APP_NAME + ".max.read.queue.length", "100");
        props.setProperty(APP_NAME + ".operation.timeout", "10000");
        props.setProperty(APP_NAME + ".throw.exception", "false");
        // Setting properties here for testing how we can disable aliases. If there are test case
        // that requires aliases, these properties should go under a special condition.
        props.setProperty("EVCacheClientPoolManager." + APP_NAME + ".alias", ALIAS_APP_NAME);
        props.setProperty("EVCacheClientPoolManager." + APP_NAME + ".ignoreAlias", "true");
        // End alias properties
        int maxThreads = 2;
        final BlockingQueue<Runnable> queue = new LinkedBlockingQueue<Runnable>(100000);
        pool = new ThreadPoolExecutor(maxThreads * 4, maxThreads * 4, 30, TimeUnit.SECONDS, queue);
        pool.prestartAllCoreThreads();
    }
    public SimpleEVCacheTest() {
    }
    @BeforeSuite(dependsOnMethods = { "setProps" })
    public void setupClusterDetails() {
        manager = EVCacheClientPoolManager.getInstance();
    }
    @Test public void testDisablingAlias()
    {
        // Ensure alias is disabled, we see "EVCACHE_TEST" instead of "EVCACHE" as we have set above.
        EVCacheClientPool pool = EVCacheClientPoolManager.getInstance().getEVCacheClientPool(APP_NAME);
        assertEquals(pool.getAppName(), APP_NAME);
    }
    /**
     * Standalone soak-test entry point (used by main()).
     * NOTE(review): {@code flag} is never cleared, so this loops forever —
     * presumably intentional for soak testing; confirm before reusing.
     */
    public void testAll() {
        try {
            EVCacheClientPoolManager.getInstance().initEVCache(APP_NAME);
            testDisablingAlias();
            testEVCache();
            int i = 1;
            boolean flag = true;
            while (flag) {
                try {
                    // testAdd();
                    testInsert();
                    // testAppend();
                    testGet();
                    testGetWithPolicy();
                    testEVCacheTranscoder();
                    // testGetObservable();
                    // testGetAndTouch();
                    // testBulk();
                    // testBulkAndTouch();
                    // testAppendOrAdd();
                    // testCompletableFutureGet();
                    // testCompletableFutureBulk();
                    // if(i++ % 5 == 0) testDelete();
                    //Thread.sleep(3000);
                } catch (Exception e) {
                    log.error(e);
                }
                //Thread.sleep(3000);
            }
        } catch (Exception e) {
            log.error(e);
        }
    }
    public void testGetForKey(String key) throws Exception {
        String value = evCache.<String>get(key);
        if(log.isDebugEnabled()) log.debug("get : key : " + key + " val = " + value);
    }
    @BeforeSuite
    public void setupEnv() {
        super.setupEnv();
    }
    // Shadows Base.evCache on purpose? NOTE(review): this hides the inherited
    // field of the same name — confirm that is intended.
    protected EVCache evCache = null;
    @Test
    public void testEVCache() {
        this.evCache = (new EVCache.Builder()).setAppName("EVCACHE_TEST").setCachePrefix(null).enableRetry().build();
        assertNotNull(evCache);
    }
    @Test(dependsOnMethods = { "testEVCache" })
    public void testAdd() throws Exception {
        for (int i = 0; i < 10; i++) {
            add(i, evCache);
        }
    }
    @Test(dependsOnMethods = { "testAdd" })
    public void testInsert() throws Exception {
        for (int i = 0; i < 10; i++) {
            assertTrue(insert(i, evCache), "SET : Following Index failed - " + i + " for evcache - " + evCache);
            //insert(i, evCache);
        }
    }
    @Test(dependsOnMethods = { "testInsert" })
    public void testAppend() throws Exception {
        for (int i = 0; i < 10; i++) {
            assertTrue(append(i, evCache), "APPEND : Following Index failed - " + i + " for evcache - " + evCache);
        }
    }
    @Test(dependsOnMethods = { "testAppend" })
    public void testGet() throws Exception {
        for (int i = 0; i < 10; i++) {
            final String val = get(i, evCache);
            assertNotNull(val);
        }
    }
    @Test(dependsOnMethods = { "testInsert" })
    public void testCompletableFutureGet() throws Exception {
        for (int i = 0; i < 1000; i++) {
            final String val = completableFutureGet(i, evCache);
            //assertNotNull(val);
        }
    }
    @Test(dependsOnMethods = { "testGet" })
    public void testGetWithPolicy() throws Exception {
        for (int i = 0; i < 10; i++) {
            final String val = getWithPolicy(i, evCache, Policy.QUORUM);
            assertNotNull(val);
        }
    }
    @Test(dependsOnMethods = { "testGetWithPolicy" })
    public void testGetAndTouch() throws Exception {
        for (int i = 0; i < 10; i++) {
            final String val = getAndTouch(i, evCache);
            assertNotNull(val);
        }
    }
    @Test(dependsOnMethods = { "testGetAndTouch" })
    public void testBulk() throws Exception {
        final String[] keys = new String[12];
        for (int i = 0; i < keys.length; i++) {
            keys[i] = "key_" + i;
        }
        Map<String, String> vals = getBulk(keys, evCache);
        assertTrue(!vals.isEmpty());
        for (int i = 0; i < vals.size(); i++) {
            String key = "key_" + i;
            String val = vals.get(key);
        }
    }
    @Test(dependsOnMethods = { "testGetAndTouch" })
    public void testCompletableFutureBulk() throws Exception {
        final String[] keys = new String[12];
        for (int i = 0; i < keys.length; i++) {
            keys[i] = "key_" + i;
        }
        Map<String, String> vals = getAsyncBulk(keys, evCache);
        assertTrue(!vals.isEmpty());
        for (int i = 0; i < vals.size(); i++) {
            String key = "key_" + i;
            String val = vals.get(key);
        }
    }
    @Test(dependsOnMethods = { "testBulk" })
    public void testBulkAndTouch() throws Exception {
        final String[] keys = new String[10];
        for (int i = 0; i < 10; i++) {
            keys[i] = "key_" + i;
        }
        Map<String, String> vals = getBulkAndTouch(keys, evCache, 60 * 60);
        assertTrue(!vals.isEmpty());
        for (int i = 0; i < vals.size(); i++) {
            String key = "key_" + i;
            String val = vals.get(key);
        }
    }
    public void testAppendOrAdd() throws Exception {
        for (int i = 0; i < 10; i++) {
            assertTrue(appendOrAdd(i, evCache));
        }
    }
    @Test(dependsOnMethods = { "testBulkAndTouch" })
    public void testReplace() throws Exception {
        for (int i = 0; i < 10; i++) {
            replace(i, evCache);
        }
    }
    @Test(dependsOnMethods = { "testReplace" })
    public void testDelete() throws Exception {
        for (int i = 0; i < 10; i++) {
            assertTrue(delete(i, evCache), "DELETE : Following Index failed - " + i + " for evcache - " + evCache);
        }
    }
    @Test(dependsOnMethods = { "testDelete" })
    public void testInsertAsync() throws Exception {
        for (int i = 0; i < 10; i++) {
            boolean flag = insertAsync(i, evCache);
            assertTrue(flag, "SET ASYNC : Following Index failed - " + i + " for evcache - " + evCache);
        }
    }
    @Test(dependsOnMethods = { "testInsertAsync" })
    public void testTouch() throws Exception {
        for (int i = 0; i < 10; i++) {
            touch(i, evCache, 1000);
            String val = get(i, evCache);
            assertTrue(val != null);
        }
    }
    /**
     * Fires an async set and verifies every per-replica future; also queues a
     * StatusChecker that logs the statuses from the worker pool.
     */
    public boolean insertAsync(int i, EVCache gCache) throws Exception {
        // String val = "This is a very long value that should work well since we are going to use compression on it. This is a very long value that should work well since we are going to use compression on it. blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah.This is a very long value that should work well since we are going to use compression on it. This is a very long value that should work well since we are going to use compression on it. blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah .This is a very long value that should work well since we are going to use compression on it. This is a very long value that should work well since we are going to use compression on it. blah blah blah blah blah blah blah
        // blah blah blah blah blah blah blah blah blah blah blah val_"
        // + i;
        String val = "val_" + i;
        String key = "key_" + i;
        Future<Boolean>[] statuses = gCache.set(key, val, 24 * 60 * 60);
        for(Future<Boolean> status : statuses) {
            assertTrue(status.get(), "SET ASYNC : Following Index failed - " + i + " for evcache - " + evCache);
        }
        pool.submit(new StatusChecker(key, statuses));
        return true;
    }
    @Test(dependsOnMethods = { "testTouch" })
    public void testInsertLatch() throws Exception {
        for (int i = 0; i < 10; i++) {
            assertTrue(insertUsingLatch(i, "EVCACHE"));
        }
    }
    @Test(dependsOnMethods = { "testInsertLatch" })
    public void testDeleteLatch() throws Exception {
        for (int i = 0; i < 10; i++) {
            deleteLatch(i, "EVCACHE");
        }
    }
    public void testGetObservable() throws Exception {
        for (int i = 0; i < 10; i++) {
            final String val = getObservable(i, evCache, Schedulers.computation());
            // Observable<String> obs = evCache.<String> observeGet(key);
            // obs.doOnNext(new OnNextHandler(key)).doOnError(new OnErrorHandler(key)).subscribe();
        }
    }
    /**
     * Verifies EVCacheSerializingTranscoder produces byte-identical output to
     * spymemcached's SerializingTranscoder (value long enough to trigger
     * compression).
     */
    @Test(dependsOnMethods = { "testInsert" })
    public void testEVCacheTranscoder() throws Exception {
        EVCacheSerializingTranscoder evcacheTranscoder = new EVCacheSerializingTranscoder();
        SerializingTranscoder serializingTranscoder = new SerializingTranscoder();
        // long string to trigger compression
        String val = "val_01234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789";
        CachedData evCachedData = evcacheTranscoder.encode(val);
        CachedData serializingCachedData = serializingTranscoder.encode(val);
        assertTrue(Arrays.equals(evCachedData.getData(), serializingCachedData.getData()), "cacheData same" + evCachedData.toString());
        if(log.isDebugEnabled()) log.debug("EVCacheTranscoder result equal to SerializingTranscoder: " + Arrays.equals(evCachedData.getData(), serializingCachedData.getData()));
    }
    /** Logs the completion status of each future from an async set. */
    class StatusChecker implements Runnable {
        Future<Boolean>[] status;
        String key;
        public StatusChecker(String key, Future<Boolean>[] status) {
            this.status = status;
            this.key = key;
        }
        public void run() {
            try {
                for (Future<Boolean> s : status) {
                    if (log.isDebugEnabled()) log.debug("SET : key : " + key + "; success = " + s.get());
                }
            } catch (Exception e) {
                log.error(e);
            }
        }
    }
}
| 13,769
| 37.144044
| 798
|
java
|
EVCache
|
EVCache-master/evcache-core/src/test/java/com/netflix/evcache/test/Base.java
|
package com.netflix.evcache.test;
import java.util.Arrays;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Properties;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import org.apache.log4j.Appender;
import org.apache.log4j.BasicConfigurator;
import org.apache.log4j.ConsoleAppender;
import org.apache.log4j.Layout;
import org.apache.log4j.Level;
import org.apache.log4j.PatternLayout;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.testng.annotations.AfterSuite;
import org.testng.annotations.BeforeSuite;
import com.netflix.evcache.EVCache;
import com.netflix.evcache.EVCacheLatch;
import com.netflix.evcache.EVCacheLatch.Policy;
import com.netflix.evcache.operation.EVCacheLatchImpl;
import com.netflix.evcache.pool.EVCacheClient;
import com.netflix.evcache.pool.EVCacheClientPoolManager;
import rx.Scheduler;
/**
 * Shared scaffolding for EVCache integration tests: sets Eureka/EVCache system
 * properties and provides thin wrappers around every EVCache operation
 * (set/add/append/replace/get/bulk/touch/delete, sync, async, latch-based and
 * Observable variants) keyed by the convention {@code key_<i>}.
 */
@SuppressWarnings("unused")
public abstract class Base {
    // One-time log4j bootstrap for all subclasses.
    static {
        BasicConfigurator.configure();
        final Layout LAYOUT = new PatternLayout("%d{ISO8601} %-5p [%c{1}:%M:%L] %m%n");
        final Appender STDOUT = new ConsoleAppender(LAYOUT, ConsoleAppender.SYSTEM_OUT);
        final org.apache.log4j.Logger ROOT_LOGGER = org.apache.log4j.Logger.getRootLogger();
        ROOT_LOGGER.removeAllAppenders();
        ROOT_LOGGER.setLevel(Level.WARN);
        ROOT_LOGGER.addAppender(STDOUT);
    }
    private static final Logger log = LoggerFactory.getLogger(Base.class);
    protected EVCache evCache = null;
    protected EVCacheClientPoolManager manager = null;
    // Lazily-initialized property set shared by getProps()/setupEnv().
    protected Properties props = null;
    protected Properties getProps() {
        if(props != null) return props;
        props = new Properties();
        initProps();
        return props;
    }
    /**
     * Populates Eureka-related properties; EC2_HOSTNAME decides between
     * desktop ("datacenter") and cloud configuration.
     */
    protected void initProps() {
        String hostname = System.getenv("EC2_HOSTNAME");
        if(hostname == null) {
            props.setProperty("eureka.datacenter", "datacenter");//change to ndc while running on desktop
            props.setProperty("eureka.validateInstanceId","false");
            props.setProperty("eureka.mt.connect_timeout","1");
            props.setProperty("eureka.mt.read_timeout","1");
        } else {
            props.setProperty("eureka.datacenter", "cloud");
            props.setProperty("eureka.validateInstanceId","true");
        }
        props.setProperty("eureka.environment", "test");
        props.setProperty("eureka.region", "us-east-1");
        props.setProperty("eureka.appid", "clatency");
        props.setProperty("log4j.logger.com.netflix.evcache.pool.EVCacheNodeLocator", "ERROR");
        props.setProperty("log4j.logger.com.netflix.evcache.pool.EVCacheClientUtil", "ERROR");
    }
    /** Copies every configured property into the JVM's system properties. */
    @BeforeSuite
    public void setupEnv() {
        Properties props = getProps();
        try {
            for(Entry<Object, Object> prop : props.entrySet()) {
                System.setProperty(prop.getKey().toString(), prop.getValue().toString());
            }
        } catch (Throwable e) {
            e.printStackTrace();
            log.error(e.getMessage(), e);
        }
    }
    @AfterSuite
    public void shutdown() {
        manager.shutdown();
    }
    protected EVCache.Builder getNewBuilder() {
        final EVCache.Builder evCacheBuilder = new EVCache.Builder();
        if(log.isDebugEnabled()) log.debug("evCacheBuilder : " + evCacheBuilder);
        return evCacheBuilder;
    }
    /** Appends ";APP_<i>" to key_<i>; true only if every replica succeeded. */
    protected boolean append(int i, EVCache gCache) throws Exception {
        String val = ";APP_" + i;
        String key = "key_" + i;
        Future<Boolean>[] status = gCache.append(key, val, 60 * 60);
        for (Future<Boolean> s : status) {
            if (log.isDebugEnabled()) log.debug("APPEND : key : " + key + "; success = " + s.get() + "; Future = " + s.toString());
            if (s.get() == Boolean.FALSE) return false;
        }
        return true;
    }
    protected boolean appendOrAdd(int i, EVCache gCache) throws Exception {
        return appendOrAdd(i, gCache, 60 * 60);
    }
    // NOTE(review): always returns true regardless of the latch outcome —
    // confirm whether the await() result should be propagated.
    protected boolean appendOrAdd(int i, EVCache gCache, int ttl) throws Exception {
        String val = "val_aa_" + i;
        String key = "key_" + i;
        EVCacheLatch latch = gCache.appendOrAdd(key, val, null, ttl, Policy.ALL_MINUS_1);
        if(log.isDebugEnabled()) log.debug("AppendOrAdd : key : " + key + "; Latch = " + latch);
        boolean status = latch.await(2000, TimeUnit.MILLISECONDS);
        if(log.isDebugEnabled()) log.debug("AppendOrAdd : key : " + key + "; success = " + status);
        return true;
    }
    public boolean add(int i, EVCache gCache) throws Exception {
        //String val = "This is a very long value that should work well since we are going to use compression on it. blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah val_"+i;
        String val = "val_add_"+System.currentTimeMillis();
        String key = "key_" + i;
        boolean status = gCache.add(key, val, null, 60 * 60);
        if(log.isDebugEnabled()) log.debug("ADD : key : " + key + "; success = " + status);
        return status;
    }
    /** Sets a long (compression-triggering) value; true iff all replicas succeeded. */
    public boolean insert(int i, EVCache gCache) throws Exception {
        //String val = "This is a very long value that should work well since we are going to use compression on it. blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah val_"+i;
        String val = "val_01234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789"+i;
        String key = "key_" + i;
        Future<Boolean>[] status = gCache.set(key, val, 60 * 60);
        for(Future<Boolean> s : status) {
            if(log.isDebugEnabled()) log.debug("SET : key : " + key + "; success = " + s.get() + "; Future = " + s.toString());
            if(s.get() == Boolean.FALSE) return false;
        }
        return true;
    }
    protected boolean replace(int i, EVCache gCache) throws Exception {
        return replace(i, gCache, 60 * 60);
    }
    protected boolean replace(int i, EVCache gCache, int ttl) throws Exception {
        String val = "val_replaced_" + i;
        String key = "key_" + i;
        EVCacheLatch status = gCache.replace(key, val, null, ttl, Policy.ALL);
        boolean opStatus = status.await(1000, TimeUnit.MILLISECONDS);
        if (log.isDebugEnabled()) log.debug("REPLACE : key : " + key + "; success = " + opStatus + "; EVCacheLatch = " + status);
        return status.getSuccessCount() > 0;
    }
    public boolean delete(int i, EVCache gCache) throws Exception {
        String key = "key_" + i;
        Future<Boolean>[] status = gCache.delete(key);
        for(Future<Boolean> s : status) {
            if(log.isDebugEnabled()) log.debug("DELETE : key : " + key + "; success = " + s.get() + "; Future = " + s.toString());
            if(s.get() == Boolean.FALSE) return false;
        }
        return true;
    }
    protected boolean touch(int i, EVCache gCache) throws Exception {
        return touch(i, gCache, 60 * 60);
    }
    protected boolean touch(int i, EVCache gCache, int ttl) throws Exception {
        String key = "key_" + i;
        Future<Boolean>[] status = gCache.touch(key, ttl);
        for (Future<Boolean> s : status) {
            if (log.isDebugEnabled()) log.debug("TOUCH : key : " + key + "; success = " + s.get() + "; Future = " + s.toString());
            if (s.get() == Boolean.FALSE) return false;
        }
        return true;
    }
    /** Writes directly through each client of the pool, gated by a shared latch. */
    @SuppressWarnings("deprecation")
    protected boolean insertUsingLatch(int i, String app) throws Exception {
        String val = "val_" + i;
        String key = "key_" + i;
        long start = System.currentTimeMillis();
        final EVCacheClient[] clients = manager.getEVCacheClientPool(app).getEVCacheClientForWrite();
        final EVCacheLatch latch = new EVCacheLatchImpl(EVCacheLatch.Policy.ALL, clients.length, app);
        for (EVCacheClient client : clients) {
            client.set(key, val, 60 * 60, latch);
        }
        boolean success = latch.await(1000, TimeUnit.MILLISECONDS);
        if (log.isDebugEnabled()) log.debug("SET LATCH : key : " + key + "; Finished in " + (System.currentTimeMillis() - start) + " msec");
        return success;
    }
    protected boolean deleteLatch(int i, String appName) throws Exception {
        long start = System.currentTimeMillis();
        String key = "key_" + i;
        final EVCacheClient[] clients = manager.getEVCacheClientPool(appName).getEVCacheClientForWrite();
        final EVCacheLatch latch = new EVCacheLatchImpl(Policy.ALL, clients.length, appName);
        for (EVCacheClient client : clients) {
            client.delete(key, latch);
        }
        latch.await(1000, TimeUnit.MILLISECONDS);
        if (log.isDebugEnabled()) log.debug("DELETE LATCH : key : " + key + "; Finished in " + (System.currentTimeMillis() - start) + " msec" + "; Latch : " + latch);
        return true;
    }
    public String get(int i, EVCache gCache) throws Exception {
        String key = "key_" + i;
        String value = gCache.<String>get(key);
        if(log.isDebugEnabled()) log.debug("get : key : " + key + " val = " + value);
        return value;
    }
    // Fire-and-forget async get: the handle() callback prints the value; the
    // method itself always returns null (the synchronous path is commented out).
    public String completableFutureGet(int i, EVCache gCache) throws Exception {
        String key = "key_" + i;
        gCache.<String>getAsync(key).handle((data, ex) -> {
            System.out.println(data);
            return data;
        });
        /*
        String val = value.get();
        if(log.isDebugEnabled()) log.debug("get : key : " + key
                + " completableFuture value = " + value
                + " actual value = " + val);
        return val;
        */
        return null;
    }
    public String getWithPolicy(int i, EVCache gCache, Policy policy) throws Exception {
        String key = "key_" + i;
        String value = gCache.<String>get(key, null, policy);
        if(log.isDebugEnabled()) log.debug("get with Policy : key : " + key + " val = " + value);
        return value;
    }
    public String getAndTouch(int i, EVCache gCache) throws Exception {
        String key = "key_" + i;
        String value = gCache.<String>getAndTouch(key, 60 * 60);
        if(log.isDebugEnabled()) log.debug("getAndTouch : key : " + key + " val = " + value);
        return value;
    }
    public Map<String, String> getBulk(String keys[], EVCache gCache) throws Exception {
        final Map<String, String> value = gCache.<String>getBulk(keys);
        if(log.isDebugEnabled()) log.debug("getBulk : keys : " + Arrays.toString(keys) + "; values = " + value);
        return value;
    }
    public Map<String, String> getAsyncBulk(String keys[], EVCache gCache) throws Exception {
        final CompletableFuture<Map<String, String>> value = gCache.<String>getAsyncBulk(keys);
        if(log.isDebugEnabled()) log.debug("getBulk : keys : " + Arrays.toString(keys) + "; values = " + value);
        return value.get();
    }
    public Map<String, String> getBulkAndTouch(String keys[], EVCache gCache, int ttl) throws Exception {
        final Map<String, String> value = gCache.<String>getBulkAndTouch(Arrays.asList(keys), null, ttl);
        if(log.isDebugEnabled()) log.debug("getBulk : keys : " + Arrays.toString(keys) + "; values = " + value);
        return value;
    }
    // Blocking wrapper over the Observable-based get.
    public String getObservable(int i, EVCache gCache, Scheduler scheduler) throws Exception {
        String key = "key_" + i;
        String value = gCache.<String>get(key, scheduler).toBlocking().value();
        if(log.isDebugEnabled()) log.debug("get : key : " + key + " val = " + value);
        return value;
    }
    public String getAndTouchObservable(int i, EVCache gCache, Scheduler scheduler) throws Exception {
        String key = "key_" + i;
        String value = gCache.<String>getAndTouch(key, 60 * 60, scheduler).toBlocking().value();
        if(log.isDebugEnabled()) log.debug("getAndTouch : key : " + key + " val = " + value);
        return value;
    }
    /** Runnable that hammers insert/get/delete for 100 keys against one cache. */
    class RemoteCaller implements Runnable {
        EVCache gCache;
        public RemoteCaller(EVCache c) {
            this.gCache = c;
        }
        public void run() {
            try {
                int count = 1;
                for(int i = 0; i < 100; i++) {
                    insert(i, gCache);
                    get(i, gCache);
                    delete(i, gCache);
                }
            } catch (Exception e) {
                log.error(e.getMessage(), e);
            }
        }
    }
}
| 12,670
| 40.544262
| 215
|
java
|
EVCache
|
EVCache-master/evcache-core/src/test/java/com/netflix/evcache/test/MockEVCacheTest.java
|
package com.netflix.evcache.test;
import static org.mockito.Matchers.anyCollection;
import static org.mockito.Matchers.anyInt;
import static org.mockito.Matchers.anyObject;
import static org.mockito.Matchers.anyString;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import static org.testng.Assert.assertNotNull;
import static org.testng.Assert.assertTrue;
import java.util.Arrays;
import java.util.Collections;
import java.util.Map;
import java.util.concurrent.Future;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.testng.annotations.Test;
import com.netflix.evcache.EVCache;
import com.netflix.evcache.EVCacheException;
import com.netflix.evcache.operation.EVCacheOperationFuture;
import rx.functions.Action1;
/**
 * Exercises the EVCache client API surface (set/get/getAndTouch/bulk/delete)
 * against a Mockito-backed dummy cache, so no real memcached cluster is needed.
 * Test ordering is enforced with TestNG {@code dependsOnMethods}.
 */
public class MockEVCacheTest {
    // The cache under test; populated by testEVCache() before dependent tests run.
    protected EVCache evCache = null;
    private static final Logger log = LoggerFactory.getLogger(MockEVCacheTest.class);
    // Number of keys each test iterates over.
    private int loops = 10;
    public MockEVCacheTest() {
    }
    /** Creates the mock-backed cache; all other tests depend (transitively) on this. */
    @Test
    public void testEVCache() {
        this.evCache = new DummyEVCacheImpl().getDummyCache();
        assertNotNull(evCache);
    }
    /** Sets "key_i" -> "val_i" with a 24h TTL; returns false if any replica reports failure. */
    public boolean insert(int i, EVCache gCache) throws Exception {
        String val = "val_"+i;
        String key = "key_" + i;
        Future<Boolean>[] status = gCache.set(key, val, 24 * 60 * 60);
        for(Future<Boolean> s : status) {
            if(log.isDebugEnabled()) log.debug("SET : key : " + key + "; success = " + s.get() + "; Future = " + s.toString());
            if(s.get() == Boolean.FALSE) return false;
        }
        return true;
    }
    /** Deletes "key_i"; returns false if any replica reports failure. */
    public boolean delete(int i, EVCache gCache) throws Exception {
        String key = "key_" + i;
        Future<Boolean>[] status = gCache.delete(key);
        for(Future<Boolean> s : status) {
            if(log.isDebugEnabled()) log.debug("DELETE : key : " + key + "; success = " + s.get() + "; Future = " + s.toString());
            if(s.get() == Boolean.FALSE) return false;
        }
        return true;
    }
    /** Touches "key_i" with a 24h TTL. */
    protected boolean touch(int i, EVCache gCache) throws Exception {
        return touch(i, gCache, 24 * 60 * 60);
    }
    /** Touches "key_i" with the given TTL; returns false if any replica reports failure. */
    protected boolean touch(int i, EVCache gCache, int ttl) throws Exception {
        String key = "key_" + i;
        Future<Boolean>[] status = gCache.touch(key, ttl);
        for (Future<Boolean> s : status) {
            if (log.isDebugEnabled()) log.debug("TOUCH : key : " + key + "; success = " + s.get() + "; Future = " + s.toString());
            if (s.get() == Boolean.FALSE) return false;
        }
        return true;
    }
    /** Reads the value stored under "key_i". */
    public String get(int i, EVCache gCache) throws Exception {
        String key = "key_" + i;
        String value = gCache.<String>get(key);
        if(log.isDebugEnabled()) log.debug("get : key : " + key + " val = " + value);
        return value;
    }
    /** Reads "key_i" and refreshes its TTL to 24h. */
    public String getAndTouch(int i, EVCache gCache) throws Exception {
        String key = "key_" + i;
        String value = gCache.<String>getAndTouch(key, 24 * 60 * 60);
        if(log.isDebugEnabled()) log.debug("getAndTouch : key : " + key + " val = " + value);
        return value;
    }
    /** Bulk-reads all given keys in one call. */
    public Map<String, String> getBulk(String keys[], EVCache gCache) throws Exception {
        final Map<String, String> value = gCache.<String>getBulk(keys);
        if(log.isDebugEnabled()) log.debug("getBulk : keys : " + Arrays.toString(keys) + "; values = " + value);
        return value;
    }
    /** Bulk-reads all given keys and refreshes their TTL. */
    public Map<String, String> getBulkAndTouch(String keys[], EVCache gCache, int ttl) throws Exception {
        final Map<String, String> value = gCache.<String>getBulkAndTouch(Arrays.asList(keys), null, ttl);
        if(log.isDebugEnabled()) log.debug("getBulk : keys : " + Arrays.toString(keys) + "; values = " + value);
        return value;
    }
    @Test(dependsOnMethods = { "testEVCache" })
    public void testInsert() throws Exception {
        for (int i = 0; i < loops; i++) {
            assertTrue(insert(i, evCache));
        }
    }
    @Test(dependsOnMethods = { "testInsert" })
    public void testGet() throws Exception {
        for (int i = 0; i < loops; i++) {
            final String val = get(i, evCache);
            assertNotNull(val);
        }
    }
    @Test(dependsOnMethods = { "testGet" })
    public void testGetAndTouch() throws Exception {
        for (int i = 0; i < loops; i++) {
            final String val = getAndTouch(i, evCache);
            assertNotNull(val);
        }
    }
    @Test(dependsOnMethods = { "testGetAndTouch" })
    public void testBulk() throws Exception {
        final String[] keys = new String[loops];
        for (int i = 0; i < loops; i++) {
            keys[i] = "key_" + i;
        }
        Map<String, String> vals = getBulk(keys, evCache);
        assertNotNull(vals);
        for (int i = 0; i < keys.length; i++) {
            String key = keys[i];
            String val = vals.get(key);
            if (log.isDebugEnabled()) log.debug("key " + key + " returned val " + val);
        }
    }
    @Test(dependsOnMethods = { "testBulk" })
    public void testBulkAndTouch() throws Exception {
        final String[] keys = new String[loops];
        for (int i = 0; i < loops; i++) {
            keys[i] = "key_" + i;
        }
        Map<String, String> vals = getBulkAndTouch(keys, evCache, 24 * 60 * 60);
        assertNotNull(vals);
        // NOTE(review): iterates by vals.size(), not keys.length — tolerates missing keys
        // since absent entries yield null and are only logged.
        for (int i = 0; i < vals.size(); i++) {
            String key = "key_" + i;
            String val = vals.get(key);
            if (val == null) {
                if (log.isDebugEnabled()) log.debug("key " + key + " returned null");
            } else {
                assertTrue(val.equals("val_" + i));
            }
        }
    }
    @Test(dependsOnMethods = { "testBulkAndTouch" })
    public void testDelete() throws Exception {
        for (int i = 0; i < loops; i++) {
            delete(i, evCache);
        }
    }
    /** Completion callback used with future-based gets; logs key and resolved value. */
    public void onComplete(EVCacheOperationFuture<String> future) throws Exception {
        if (log.isDebugEnabled()) log.debug("getl : key : " + future.getKey() + ", val = " + future.get());
    }
    /** rx error handler that logs failures for a specific key. */
    static class OnErrorHandler implements Action1<Throwable> {
        private final String key;
        public OnErrorHandler(String key) {
            this.key = key;
        }
        @Override
        public void call(Throwable t1) {
            if (log.isDebugEnabled()) log.debug("Could not get value for key: " + key + "; Exception is ", t1);
        }
    }
    /** rx success handler that logs the emitted value for a specific key. */
    static class OnNextHandler implements Action1<String> {
        private final String key;
        public OnNextHandler(String key) {
            this.key = key;
        }
        @Override
        public void call(String val) {
            if (log.isDebugEnabled()) log.debug("Observable : key " + key + "; val = " + val);
        }
    }
    /**
     * Dummy Cache used for debugging purpose (simple way to disable cache).
     * Mockito stubs return empty futures / empty strings for the common operations.
     */
    private static class DummyEVCacheImpl {
        private final EVCache cache;
        @SuppressWarnings("unchecked")
        public DummyEVCacheImpl() {
            cache = mock(EVCache.class);
            try {
                when(cache.set(anyString(), anyObject(), anyInt())).thenReturn(new Future[0]);
                when(cache.get(anyString())).thenReturn("");
                when(cache.getAndTouch(anyString(), anyInt())).thenReturn("");
                when(cache.getBulk(anyCollection())).thenReturn(Collections.emptyMap());
                when(cache.delete(anyString())).thenReturn(new Future[0]);
            } catch (EVCacheException e) {
                log.error("Unable to create mock EVCache", e);
            }
        }
        public EVCache getDummyCache() {
            return cache;
        }
    }
}
| 7,762
| 33.811659
| 130
|
java
|
EVCache
|
EVCache-master/evcache-core/src/test/java/com/netflix/evcache/test/SimpleEurekaEVCacheTest.java
|
package com.netflix.evcache.test;
import static org.testng.Assert.assertNotNull;
import static org.testng.Assert.assertTrue;
import java.util.Map;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.Future;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import org.apache.log4j.Level;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.testng.annotations.AfterSuite;
import org.testng.annotations.BeforeSuite;
import org.testng.annotations.Test;
import com.netflix.evcache.EVCache;
import com.netflix.evcache.EVCacheImpl;
import com.netflix.evcache.pool.EVCacheClient;
import com.netflix.evcache.pool.EVCacheClientPool;
import com.netflix.evcache.pool.EVCacheClientPoolManager;
import rx.schedulers.Schedulers;
@SuppressWarnings({"unused","deprecation"})
/**
 * Integration test that exercises the EVCache client against the "EVCACHE_AB"
 * app using the simple (non-Eureka) node list provider. Can be run standalone
 * via {@link #main(String[])}; TestNG annotations drive ordering otherwise.
 * Inherits the individual operation helpers (insert/get/delete/...) from Base.
 */
public class SimpleEurekaEVCacheTest extends Base {
    private static final Logger log = LoggerFactory.getLogger(SimpleEurekaEVCacheTest.class);
    // Pool used to check async write statuses off the test thread.
    private ThreadPoolExecutor pool = null;
    public static void main(String args[]) {
        SimpleEurekaEVCacheTest test = new SimpleEurekaEVCacheTest();
        test.setProps();
        test.testAll();
    }
    /** Configures log levels, client system properties and the status-checker pool. */
    @BeforeSuite
    public void setProps() {
        org.apache.log4j.Logger.getLogger(SimpleEurekaEVCacheTest.class).setLevel(Level.DEBUG);
        org.apache.log4j.Logger.getLogger(Base.class).setLevel(Level.DEBUG);
        org.apache.log4j.Logger.getLogger(EVCacheImpl.class).setLevel(Level.ERROR);
        org.apache.log4j.Logger.getLogger(EVCacheClient.class).setLevel(Level.ERROR);
        org.apache.log4j.Logger.getLogger(EVCacheClientPool.class).setLevel(Level.ERROR);
        System.setProperty("evcache.use.simple.node.list.provider", "true");
        System.setProperty("EVCACHE_AB.EVCacheClientPool.readTimeout", "100000");
        System.setProperty("EVCACHE_AB.EVCacheClientPool.bulkReadTimeout", "10000");
        System.setProperty("EVCACHE_AB.max.read.queue.length", "100");
        System.setProperty("EVCACHE_AB.operation.timeout", "10000");
        System.setProperty("EVCACHE_AB.throw.exception", "false");
        System.setProperty("EVCACHE_AB.chunk.data", "false");
        System.setProperty("NETFLIX_ENVIRONMENT", "test");
        System.setProperty("EC2_REGION", "us-east-1");
        System.setProperty("evcache.thread.daemon", "true");
        int maxThreads = 2;
        final BlockingQueue<Runnable> queue = new LinkedBlockingQueue<Runnable>(100000);
        pool = new ThreadPoolExecutor(maxThreads * 4, maxThreads * 4, 30, TimeUnit.SECONDS, queue);
        pool.prestartAllCoreThreads();
    }
    public SimpleEurekaEVCacheTest() {
    }
    @BeforeSuite(dependsOnMethods = { "setProps" })
    public void setupClusterDetails() {
        // 'manager' is inherited from Base.
        manager = EVCacheClientPoolManager.getInstance();
    }
    /**
     * Standalone driver (used by main): initializes the pool and loops the full
     * operation suite up to 100 times, deleting every 5th pass, then shuts down.
     */
    public void testAll() {
        try {
            setupClusterDetails();
            EVCacheClientPoolManager.getInstance().initEVCache("EVCACHE_AB");
            testEVCache();
            int i = 1;
            boolean flag = true;
            while (flag) {
                try {
                    testAdd();
                    testInsert();
                    testInsertAsync();
                    //// testAppend();
                    testGet();
                    testGetObservable();
                    testGetAndTouch();
                    testBulk();
                    testBulkAndTouch();
                    testAppendOrAdd();
                    testCompletableFutureGet();
                    testCompletableFutureBulk();
                    if(i++ % 5 == 0) testDelete();
                    Thread.sleep(1000);
                    if (i > 100) break;
                } catch (Exception e) {
                    log.error("Exception", e);
                }
                //Thread.sleep(3000);
            }
            Thread.sleep(100);
        } catch (Exception e) {
            log.error("Exception", e);
        }
        shutdown();
    }
    /** Debug helper: reads and logs the value for an arbitrary key. */
    public void testGetForKey(String key) throws Exception {
        String value = evCache.<String>get(key);
        if(log.isDebugEnabled()) log.debug("get : key : " + key + " val = " + value);
    }
    @BeforeSuite
    public void setupEnv() {
    }
    // Cache under test; built by testEVCache().
    protected EVCache evCache = null;
    @Test
    public void testEVCache() {
        this.evCache = (new EVCache.Builder()).setAppName("EVCACHE_AB").setCachePrefix(null).enableRetry().build();
        assertNotNull(evCache);
    }
    @Test(dependsOnMethods = { "testEVCache" })
    public void testAdd() throws Exception {
        for (int i = 0; i < 10; i++) {
            add(i, evCache);
        }
    }
    @Test(dependsOnMethods = { "testAdd" })
    public void testInsert() throws Exception {
        for (int i = 0; i < 10; i++) {
            assertTrue(insert(i, evCache), "SET : Following Index failed - " + i + " for evcache - " + evCache);
            insert(i, evCache);
        }
    }
    @Test(dependsOnMethods = { "testInsert" })
    public void testAppend() throws Exception {
        for (int i = 0; i < 10; i++) {
            assertTrue(append(i, evCache), "APPEND : Following Index failed - " + i + " for evcache - " + evCache);
        }
    }
    @Test(dependsOnMethods = { "testAppend" })
    public void testGet() throws Exception {
        for (int i = 0; i < 10; i++) {
            final String val = get(i, evCache);
            // assertNotNull(val);
        }
    }
    @Test(dependsOnMethods = { "testInsert" })
    public void testCompletableFutureGet() throws Exception {
        for (int i = 0; i < 1000; i++) {
            final String val = completableFutureGet(i, evCache);
            assertNotNull(val);
        }
    }
    @Test(dependsOnMethods = { "testGetAndTouch" })
    public void testCompletableFutureBulk() throws Exception {
        final String[] keys = new String[12];
        for (int i = 0; i < keys.length; i++) {
            keys[i] = "key_" + i;
        }
        Map<String, String> vals = getAsyncBulk(keys, evCache);
        assertTrue(!vals.isEmpty());
        for (int i = 0; i < vals.size(); i++) {
            String key = "key_" + i;
            String val = vals.get(key);
        }
    }
    @Test(dependsOnMethods = { "testGet" })
    public void testGetAndTouch() throws Exception {
        for (int i = 0; i < 10; i++) {
            final String val = getAndTouch(i, evCache);
            assertNotNull(val);
        }
    }
    @Test(dependsOnMethods = { "testGetAndTouch" })
    public void testBulk() throws Exception {
        final String[] keys = new String[12];
        for (int i = 0; i < keys.length; i++) {
            keys[i] = "key_" + i;
        }
        Map<String, String> vals = getBulk(keys, evCache);
        assertTrue(!vals.isEmpty());
        for (int i = 0; i < vals.size(); i++) {
            String key = "key_" + i;
            String val = vals.get(key);
        }
    }
    @Test(dependsOnMethods = { "testBulk" })
    public void testBulkAndTouch() throws Exception {
        final String[] keys = new String[10];
        for (int i = 0; i < 10; i++) {
            keys[i] = "key_" + i;
        }
        Map<String, String> vals = getBulkAndTouch(keys, evCache, 60 * 60);
        assertTrue(!vals.isEmpty());
        for (int i = 0; i < vals.size(); i++) {
            String key = "key_" + i;
            String val = vals.get(key);
        }
    }
    public void testAppendOrAdd() throws Exception {
        for (int i = 0; i < 10; i++) {
            assertTrue(appendOrAdd(i, evCache));
        }
    }
    @Test(dependsOnMethods = { "testBulkAndTouch" })
    public void testReplace() throws Exception {
        for (int i = 0; i < 10; i++) {
            replace(i, evCache);
        }
    }
    @Test(dependsOnMethods = { "testReplace" })
    public void testDelete() throws Exception {
        for (int i = 0; i < 10; i++) {
            assertTrue(delete(i, evCache), "DELETE : Following Index failed - " + i + " for evcache - " + evCache);
        }
    }
    @Test(dependsOnMethods = { "testDelete" })
    public void testInsertAsync() throws Exception {
        for (int i = 0; i < 10; i++) {
            boolean flag = insertAsync(i, evCache);
            if(log.isDebugEnabled()) log.debug("SET : async : i: " + i + " flag = " + flag);
            assertTrue(flag, "SET ASYNC : Following Index failed - " + i + " for evcache - " + evCache);
        }
    }
    @Test(dependsOnMethods = { "testInsertAsync" })
    public void testTouch() throws Exception {
        for (int i = 0; i < 10; i++) {
            touch(i, evCache, 1000);
            String val = get(i, evCache);
            assertTrue(val != null);
        }
    }
    /**
     * Fire-and-forget set: the returned futures are deliberately not awaited,
     * so this always reports success. (A multi-hundred-character test value,
     * useful for exercising compression, was previously kept here in a
     * commented-out block; use a large literal if that path needs coverage.
     * A commented-out StatusChecker submission showed how to await statuses.)
     */
    public boolean insertAsync(int i, EVCache gCache) throws Exception {
        String val = "val_" + i;
        String key = "key_" + i;
        Future<Boolean>[] statuses = gCache.set(key, val, 24 * 60 * 60);
        return true;
    }
    @Test(dependsOnMethods = { "testTouch" })
    public void testInsertLatch() throws Exception {
        for (int i = 0; i < 10; i++) {
            assertTrue(insertUsingLatch(i, "EVCACHE"));
        }
    }
    @Test(dependsOnMethods = { "testInsertLatch" })
    public void testDeleteLatch() throws Exception {
        for (int i = 0; i < 10; i++) {
            deleteLatch(i, "EVCACHE");
        }
    }
    /** Reads 10 keys through the scheduler-based Observable get path. */
    public void testGetObservable() throws Exception {
        for (int i = 0; i < 10; i++) {
            final String val = getObservable(i, evCache, Schedulers.computation());
            // Observable<String> obs = evCache.<String> observeGet(key);
            // obs.doOnNext(new OnNextHandler(key)).doOnError(new OnErrorHandler(key)).subscribe();
        }
    }
    /** Awaits and logs the per-replica statuses of an async write. */
    class StatusChecker implements Runnable {
        Future<Boolean>[] status;
        String key;
        public StatusChecker(String key, Future<Boolean>[] status) {
            this.status = status;
            this.key = key;
        }
        public void run() {
            try {
                for (Future<Boolean> s : status) {
                    if (log.isDebugEnabled()) log.debug("SET : key : " + key + "; success = " + s.get());
                }
            } catch (Exception e) {
                log.error("Exception", e);
            }
        }
    }
    @AfterSuite
    public void shutdown() {
        pool.shutdown();
        super.shutdown();
    }
}
| 11,595
| 35.2375
| 798
|
java
|
EVCache
|
EVCache-master/evcache-core/src/main/java/com/netflix/evcache/EVCacheTranscoder.java
|
package com.netflix.evcache;
import com.netflix.evcache.util.EVCacheConfig;
import net.spy.memcached.CachedData;
/**
 * Default transcoder for EVCache values. Extends the serializing transcoder
 * with configurable maximum data size and compression threshold, and passes
 * already-encoded {@link CachedData} payloads through {@link #encode} untouched.
 */
public class EVCacheTranscoder extends EVCacheSerializingTranscoder {

    /** Uses the app-wide default max data size (20 MiB unless overridden by property). */
    public EVCacheTranscoder() {
        this(EVCacheConfig.getInstance().getPropertyRepository().get("default.evcache.max.data.size", Integer.class).orElse(20 * 1024 * 1024).get());
    }

    /**
     * @param max maximum serialized value size in bytes; the compression
     *            threshold falls back to the app-wide default (120 bytes).
     */
    public EVCacheTranscoder(int max) {
        this(max, EVCacheConfig.getInstance().getPropertyRepository().get("default.evcache.compression.threshold", Integer.class).orElse(120).get());
    }

    /**
     * @param max                  maximum serialized value size in bytes
     * @param compressionThreshold values at or above this many bytes are compressed
     */
    public EVCacheTranscoder(int max, int compressionThreshold) {
        super(max);
        setCompressionThreshold(compressionThreshold);
    }

    @Override
    public boolean asyncDecode(CachedData d) {
        return super.asyncDecode(d);
    }

    @Override
    public Object decode(CachedData d) {
        return super.decode(d);
    }

    @Override
    public CachedData encode(Object o) {
        // instanceof is already null-safe, so the former explicit null check was redundant;
        // pre-encoded payloads are returned as-is to avoid double encoding.
        if (o instanceof CachedData) return (CachedData) o;
        return super.encode(o);
    }
}
| 1,090
| 26.974359
| 149
|
java
|
EVCache
|
EVCache-master/evcache-core/src/main/java/com/netflix/evcache/EVCacheInternal.java
|
package com.netflix.evcache;
import com.netflix.evcache.operation.EVCacheItem;
import com.netflix.evcache.operation.EVCacheItemMetaData;
import com.netflix.evcache.pool.EVCacheClientPoolManager;
import net.spy.memcached.CachedData;
import net.spy.memcached.MemcachedNode;
import net.spy.memcached.transcoders.Transcoder;
import java.util.List;
import java.util.Map;
import java.util.concurrent.Future;
/**
 * Internal extension of {@link EVCache} exposing meta-protocol operations
 * (meta-get/meta-debug), per-client variants, hashed-key aware delete, and
 * targeted addOrSet writes. Not intended for general application use.
 */
public interface EVCacheInternal extends EVCache {
    /** Meta-get for a single key; set {@code isOriginalKeyHashed} when the caller pre-hashed the key. */
    EVCacheItem<CachedData> metaGet(String key, Transcoder<CachedData> tc, boolean isOriginalKeyHashed) throws EVCacheException;
    /** Meta-get fanned out per memcached node, keyed by the node that answered. */
    Map<MemcachedNode, CachedValues> metaGetPerClient(String key, Transcoder<CachedData> tc, boolean isOriginalKeyHashed) throws EVCacheException;
    /** Meta-debug for a single key (item metadata only, no value). */
    EVCacheItemMetaData metaDebug(String key, boolean isOriginalKeyHashed) throws EVCacheException;
    /** Meta-debug fanned out per memcached node. */
    Map<MemcachedNode, EVCacheItemMetaData> metaDebugPerClient(String key, boolean isOriginalKeyHashed) throws EVCacheException;
    /** Delete that can accept an already-hashed key; returns one future per replica. */
    Future<Boolean>[] delete(String key, boolean isOriginalKeyHashed) throws EVCacheException;
    /** Add-or-set (replace when {@code replaceItem}) routed only to write-only server groups. */
    EVCacheLatch addOrSetToWriteOnly(boolean replaceItem, String key, CachedData value, int timeToLive, EVCacheLatch.Policy policy) throws EVCacheException;
    /** Add-or-set restricted to the named server groups. */
    EVCacheLatch addOrSet(boolean replaceItem, String key, CachedData value, int timeToLive, EVCacheLatch.Policy policy, List<String> serverGroups) throws EVCacheException;
    /** Add-or-set restricted to a single named server group. */
    EVCacheLatch addOrSet(boolean replaceItem, String key, CachedData value, int timeToLive, EVCacheLatch.Policy policy, String serverGroup) throws EVCacheException;
    /** Add-or-set restricted to specific destination IPs within a server group. */
    EVCacheLatch addOrSet(boolean replaceItem, String key, CachedData value, int timeToLive, EVCacheLatch.Policy policy, String serverGroupName, List<String> destinationIps) throws EVCacheException;
    /** Reports whether keys for the given app/server group are hashed (MAYBE when indeterminate). */
    KeyHashedState isKeyHashed(String appName, String serverGroup);
    public enum KeyHashedState {
        YES,
        NO,
        MAYBE
    }
    /** Value object pairing a key with its cached bytes and meta-protocol item metadata. */
    public static class CachedValues {
        private final String key;
        private final CachedData data;
        private EVCacheItemMetaData itemMetaData;
        public CachedValues(String key, CachedData data, EVCacheItemMetaData itemMetaData) {
            this.key = key;
            this.data = data;
            this.itemMetaData = itemMetaData;
        }
        public String getKey() {
            return key;
        }
        public CachedData getData() {
            return data;
        }
        public EVCacheItemMetaData getEVCacheItemMetaData() {
            return itemMetaData;
        }
    }
    /** Builder that produces {@code EVCacheInternalImpl} instances instead of the public impl. */
    public class Builder extends EVCache.Builder {
        public Builder() {
            super();
        }
        @Override
        protected EVCache newImpl(String appName, String cachePrefix, int ttl, Transcoder<?> transcoder, boolean serverGroupRetry, boolean enableExceptionThrowing, EVCacheClientPoolManager poolManager) {
            return new EVCacheInternalImpl(appName, cachePrefix, ttl, transcoder, serverGroupRetry, enableExceptionThrowing, poolManager);
        }
    }
}
| 3,023
| 38.272727
| 203
|
java
|
EVCache
|
EVCache-master/evcache-core/src/main/java/com/netflix/evcache/EVCacheClientPoolConfigurationProperties.java
|
package com.netflix.evcache;
import java.time.Duration;
/**
 * Mutable configuration holder for an EVCache client pool.
 * Defaults: empty key prefix, 900-second TTL, retry enabled,
 * exception throwing disabled.
 */
public class EVCacheClientPoolConfigurationProperties {
    /**
     * Prefix to be applied to keys.
     */
    private String keyPrefix = "";
    /**
     * Time-to-live in seconds.
     */
    private Duration timeToLive = Duration.ofSeconds(900);
    /**
     * Whether or not retry is to be enabled.
     */
    private Boolean retryEnabled = true;
    /**
     * Whether or not exception throwing is to be enabled.
     */
    private Boolean exceptionThrowingEnabled = false;
    // Defaults now live solely on the field initializers; the original explicit
    // constructor re-assigned the same values redundantly, so the implicit
    // no-arg constructor is sufficient and behaviorally identical.
    public String getKeyPrefix() {
        return keyPrefix;
    }
    public void setKeyPrefix(String keyPrefix) {
        this.keyPrefix = keyPrefix;
    }
    public Duration getTimeToLive() {
        return timeToLive;
    }
    public void setTimeToLive(Duration timeToLive) {
        this.timeToLive = timeToLive;
    }
    public Boolean getRetryEnabled() {
        return retryEnabled;
    }
    public void setRetryEnabled(Boolean retryEnabled) {
        this.retryEnabled = retryEnabled;
    }
    public Boolean getExceptionThrowingEnabled() {
        return exceptionThrowingEnabled;
    }
    public void setExceptionThrowingEnabled(Boolean exceptionThrowingEnabled) {
        this.exceptionThrowingEnabled = exceptionThrowingEnabled;
    }
}
| 1,386
| 20.015152
| 77
|
java
|
EVCache
|
EVCache-master/evcache-core/src/main/java/com/netflix/evcache/EVCacheKey.java
|
package com.netflix.evcache;
import java.util.HashMap;
import java.util.Map;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.netflix.archaius.api.Property;
import com.netflix.evcache.util.KeyHasher;
import com.netflix.evcache.util.KeyHasher.HashingAlgorithm;
/**
 * Wraps an application cache key together with its canonical form, its
 * duet-prefixed canonical form ({@code appName:canonicalKey}), and lazily
 * computed, cached hashed variants per hashing configuration.
 */
public class EVCacheKey {
    private static final Logger log = LoggerFactory.getLogger(EVCacheKey.class);
    private final String appName;
    private final HashingAlgorithm hashingAlgorithmAtAppLevel;
    private final Property<Boolean> shouldEncodeHashKeyAtAppLevel;
    private final Property<Integer> maxDigestBytesAtAppLevel;
    private final Property<Integer> maxHashLengthAtAppLevel;
    private final String key;
    private final String canonicalKey;
    private String canonicalKeyForDuet;
    // Hashed keys are cached per full hashing configuration (algorithm, digest
    // bytes, hash length, encoder) as combined by buildHashCacheKey below.
    private final Map<String, String> hashedKeysByAlgorithm;
    private final Map<String, String> hashedKeysByAlgorithmForDuet;
    private final String encoder;

    public EVCacheKey(String appName, String key, String canonicalKey, HashingAlgorithm hashingAlgorithmAtAppLevel, Property<Boolean> shouldEncodeHashKeyAtAppLevel, Property<Integer> maxDigestBytesAtAppLevel, Property<Integer> maxHashLengthAtAppLevel) {
        this(appName, key, canonicalKey, hashingAlgorithmAtAppLevel, shouldEncodeHashKeyAtAppLevel, maxDigestBytesAtAppLevel, maxHashLengthAtAppLevel, null);
    }

    public EVCacheKey(String appName, String key, String canonicalKey, HashingAlgorithm hashingAlgorithmAtAppLevel, Property<Boolean> shouldEncodeHashKeyAtAppLevel, Property<Integer> maxDigestBytesAtAppLevel, Property<Integer> maxHashLengthAtAppLevel, String encoder) {
        super();
        this.appName = appName;
        this.key = key;
        this.canonicalKey = canonicalKey;
        this.hashingAlgorithmAtAppLevel = hashingAlgorithmAtAppLevel;
        this.shouldEncodeHashKeyAtAppLevel = shouldEncodeHashKeyAtAppLevel;
        this.maxDigestBytesAtAppLevel = maxDigestBytesAtAppLevel;
        this.maxHashLengthAtAppLevel = maxHashLengthAtAppLevel;
        this.encoder = encoder;
        hashedKeysByAlgorithm = new HashMap<>();
        hashedKeysByAlgorithmForDuet = new HashMap<>();
    }

    /** Returns the original (application-supplied) key. */
    public String getKey() {
        return key;
    }

    @Deprecated
    public String getCanonicalKey() {
        return canonicalKey;
    }

    /** Returns the canonical key; when {@code isDuet}, prefixed with the app name. */
    public String getCanonicalKey(boolean isDuet) {
        return isDuet ? getCanonicalKeyForDuet() : canonicalKey;
    }

    // Lazily builds and caches "appName:canonicalKey". Not thread-safe, but the
    // computation is idempotent so a race only repeats work.
    private String getCanonicalKeyForDuet() {
        if (null == canonicalKeyForDuet) {
            final int duetKeyLength = appName.length() + 1 + canonicalKey.length();
            canonicalKeyForDuet = new StringBuilder(duetKeyLength).append(appName).append(':').append(canonicalKey).toString();
            if (log.isDebugEnabled()) log.debug("canonicalKeyForDuet : " + canonicalKeyForDuet);
        }
        return canonicalKeyForDuet;
    }

    @Deprecated
    public String getHashKey() {
        return getHashKey(hashingAlgorithmAtAppLevel, null == shouldEncodeHashKeyAtAppLevel ? null : shouldEncodeHashKeyAtAppLevel.get(), null == maxDigestBytesAtAppLevel ? null : maxDigestBytesAtAppLevel.get(), null == maxHashLengthAtAppLevel ? null : maxHashLengthAtAppLevel.get(), encoder);
    }

    /**
     * Returns the hashed key, overlaying client-level hashing settings on the
     * app-level defaults; {@code null} when hashing is explicitly disabled.
     */
    public String getHashKey(boolean isDuet, HashingAlgorithm hashingAlgorithm, Boolean shouldEncodeHashKey, Integer maxDigestBytes, Integer maxHashLength, String baseEnoder) {
        if (hashingAlgorithm == HashingAlgorithm.NO_HASHING) {
            return null;
        }
        if (null == hashingAlgorithm) {
            hashingAlgorithm = hashingAlgorithmAtAppLevel;
        }
        if (null == shouldEncodeHashKey) {
            shouldEncodeHashKey = this.shouldEncodeHashKeyAtAppLevel.get();
        }
        if (null == maxDigestBytes) {
            maxDigestBytes = this.maxDigestBytesAtAppLevel.get();
        }
        if (null == maxHashLength) {
            maxHashLength = this.maxHashLengthAtAppLevel.get();
        }
        if(null == baseEnoder) {
            baseEnoder = encoder;
        }
        final String rKey = isDuet ? getHashKeyForDuet(hashingAlgorithm, shouldEncodeHashKey, maxDigestBytes, maxHashLength, baseEnoder) : getHashKey(hashingAlgorithm, shouldEncodeHashKey, maxDigestBytes, maxHashLength, baseEnoder);
        if (log.isDebugEnabled()) log.debug("Key : " + rKey);
        return rKey;
    }

    /**
     * Returns the key actually sent to the server: the hashed key when a
     * hashing algorithm is in effect, else the canonical key.
     */
    public String getDerivedKey(boolean isDuet, HashingAlgorithm hashingAlgorithm, Boolean shouldEncodeHashKey, Integer maxDigestBytes, Integer maxHashLength, String baseEnoder) {
        // this overlay of hashingAlgorithm helps determine if there at all needs to be hashing performed, otherwise, will return canonical key
        if (null == hashingAlgorithm) {
            hashingAlgorithm = hashingAlgorithmAtAppLevel;
        }
        final String derivedKey = null == hashingAlgorithm || hashingAlgorithm == HashingAlgorithm.NO_HASHING ? getCanonicalKey(isDuet) : getHashKey(isDuet, hashingAlgorithm, shouldEncodeHashKey, maxDigestBytes, maxHashLength, baseEnoder);
        if (log.isDebugEnabled()) log.debug("derivedKey : " + derivedKey);
        return derivedKey;
    }

    /**
     * Builds the lookup key for the hashed-key caches from the full hashing
     * configuration. BUGFIX: the original expression chained unparenthesized
     * ternaries, so by operator precedence it evaluated as
     * {@code ((algorithm + maxDigestBytes) != null) ? maxDigestBytes.toString() : ...}
     * — collapsing the cache key to just the digest size (wrong cache hits
     * across algorithms/lengths/encoders) and throwing a NullPointerException
     * when maxDigestBytes was null. Each component is now parenthesized.
     */
    private static String buildHashCacheKey(HashingAlgorithm hashingAlgorithm, Integer maxDigestBytes, Integer maxHashLength, String encoder) {
        return hashingAlgorithm.toString()
                + (maxDigestBytes != null ? maxDigestBytes.toString() : "-")
                + (maxHashLength != null ? maxHashLength.toString() : "-")
                + (encoder != null ? encoder : "-");
    }

    private String getHashKey(HashingAlgorithm hashingAlgorithm, Boolean shouldEncodeHashKey, Integer maxDigestBytes, Integer maxHashLength, String encoder) {
        if (null == hashingAlgorithm) {
            return null;
        }
        final String cacheKey = buildHashCacheKey(hashingAlgorithm, maxDigestBytes, maxHashLength, encoder);
        String val = hashedKeysByAlgorithm.get(cacheKey);
        if(val == null) {
            val = KeyHasher.getHashedKeyEncoded(getCanonicalKey(false), hashingAlgorithm, maxDigestBytes, maxHashLength, encoder);
            hashedKeysByAlgorithm.put(cacheKey, val);
        }
        if (log.isDebugEnabled()) log.debug("getHashKey : " + val);
        // TODO: Once the issue around passing hashedKey in bytes[] is figured, we will start using (nullable) shouldEncodeHashKey, and call KeyHasher.getHashedKeyInBytes() accordingly
        return val;
    }

    private String getHashKeyForDuet(HashingAlgorithm hashingAlgorithm, Boolean shouldEncodeHashKey, Integer maxDigestBytes, Integer maxHashLength, String encoder) {
        if (null == hashingAlgorithm) {
            return null;
        }
        final String cacheKey = buildHashCacheKey(hashingAlgorithm, maxDigestBytes, maxHashLength, encoder);
        String val = hashedKeysByAlgorithmForDuet.get(cacheKey);
        if(val == null) {
            val = KeyHasher.getHashedKeyEncoded(getCanonicalKeyForDuet(), hashingAlgorithm, maxDigestBytes, maxHashLength, encoder);
            hashedKeysByAlgorithmForDuet.put(cacheKey, val);
        }
        if (log.isDebugEnabled()) log.debug("getHashKeyForDuet : " + val);
        // TODO: Once the issue around passing hashedKey in bytes[] is figured, we will start using (nullable) shouldEncodeHashKey, and call KeyHasher.getHashedKeyInBytes() accordingly
        return val;
    }

    @Override
    public int hashCode() {
        final int prime = 31;
        int result = 1;
        result = prime * result + ((canonicalKey == null) ? 0 : canonicalKey.hashCode());
        result = prime * result + ((canonicalKeyForDuet == null) ? 0 : canonicalKeyForDuet.hashCode());
        result = prime * result + ((key == null) ? 0 : key.hashCode());
        return result;
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj)
            return true;
        if (obj == null)
            return false;
        if (getClass() != obj.getClass())
            return false;
        EVCacheKey other = (EVCacheKey) obj;
        if (canonicalKey == null) {
            if (other.canonicalKey != null)
                return false;
        } else if (!canonicalKey.equals(other.canonicalKey))
            return false;
        if (canonicalKeyForDuet == null) {
            if (other.canonicalKeyForDuet != null)
                return false;
        } else if (!canonicalKeyForDuet.equals(other.canonicalKeyForDuet))
            return false;
        if (key == null) {
            if (other.key != null)
                return false;
        } else if (!key.equals(other.key))
            return false;
        return true;
    }

    @Override
    public String toString() {
        return "EVCacheKey [key=" + key + ", canonicalKey=" + canonicalKey + ", canonicalKeyForDuet=" + canonicalKeyForDuet + (hashedKeysByAlgorithm.size() > 0 ? ", hashedKeysByAlgorithm=" + hashedKeysByAlgorithm.toString() : "") + (hashedKeysByAlgorithmForDuet.size() > 0 ? ", hashedKeysByAlgorithmForDuet=" + hashedKeysByAlgorithmForDuet.toString() + "]" : "]");
    }
}
| 9,449
| 47.71134
| 364
|
java
|
EVCache
|
EVCache-master/evcache-core/src/main/java/com/netflix/evcache/EVCache.java
|
package com.netflix.evcache;
import java.time.Duration;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.Future;
import javax.annotation.Nullable;
import javax.inject.Inject;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.annotations.VisibleForTesting;
import com.netflix.evcache.EVCacheLatch.Policy;
import com.netflix.evcache.operation.EVCacheItem;
import com.netflix.evcache.operation.EVCacheItemMetaData;
import com.netflix.evcache.pool.EVCacheClientPoolManager;
import net.spy.memcached.transcoders.Transcoder;
import rx.Scheduler;
import rx.Single;
/**
* An abstract interface for interacting with an Ephemeral Volatile Cache.
*
* <h3>Example</h3>
* <p>
* To create an instance of EVCache with AppName="EVCACHE", cachePrefix="Test"
* and DefaultTTL="3600"
*
* <b>Dependency Injection (Guice) Approach</b> <blockquote>
*
* <pre>
* {@literal @}Inject
* public MyClass(EVCache.Builder builder,....) {
* EVCache myCache = builder.setAppName("EVCACHE").setCachePrefix("Test").setDefaultTTL(3600).build();
* }
* </pre>
*
* </blockquote>
*
* Below is an example to set value="John Doe" for key="name" <blockquote>
*
* <pre>
* myCache.set("name", "John Doe");
* </pre>
*
* </blockquote>
*
*
* To read the value for key="name" <blockquote>
*
* <pre>
* String value = myCache.get("name");
* </pre>
*
* </blockquote>
*
* </p>
*
* @author smadappa
*/
public interface EVCache {
    // TODO: Remove Async methods (Project rx) and rename COMPLETABLE_* with ASYNC_*
    /**
     * Identifies the type of cache operation being performed. The constants
     * mirror the operations exposed by this interface (get, set, delete,
     * bulk, meta, etc.). NOTE(review): do not rely on {@code ordinal()} of
     * these constants; new operations are appended over time.
     */
    public static enum Call {
        GET, GETL, GET_AND_TOUCH, ASYNC_GET, BULK, SET, DELETE, INCR, DECR, TOUCH, APPEND, PREPEND, REPLACE, ADD, APPEND_OR_ADD, GET_ALL, META_GET, META_SET, META_DEBUG,
        COMPLETABLE_FUTURE_GET, COMPLETABLE_FUTURE_GET_BULK
    };
    /**
     * Set an object in the EVCACHE (using the default Transcoder) regardless of
     * any existing value. The expiration used is the default TTL configured for
     * this cache.
     *
     * @param <T>
     *            the type of the value being stored
     * @param key
     *            the key under which this object should be added. Ensure the
     *            key is properly encoded and does not contain whitespace or
     *            control characters. The max length of the key (including
     *            prefix) is 250 characters.
     * @param value
     *            the object to store
     * @return Array of futures representing the processing of this operation
     *         across all replicas
     * @throws EVCacheException
     *             in the rare circumstance where queue is too full to accept
     *             any more requests, or on issues serializing the value, or on
     *             any IO related issues
     */
    <T> Future<Boolean>[] set(String key, T value) throws EVCacheException;

    /**
     * Set an object in the EVCACHE (using the default Transcoder) regardless of
     * any existing value, with the given expiration.
     *
     * The <code>timeToLive</code> value is passed to memcached exactly as
     * given, and will be processed per the memcached protocol specification:
     *
     * <blockquote> The actual value sent may either be Unix time a.k.a EPOC
     * time (number of seconds since January 1, 1970, as a 32-bit int value), or
     * a number of seconds starting from current time. In the latter case, this
     * number of seconds may not exceed 60*60*24*30 (number of seconds in 30
     * days); if the number sent by a client is larger than that, the server
     * will consider it to be real Unix time value rather than an offset from
     * current time. </blockquote>
     *
     * @param <T>
     *            the type of the value being stored
     * @param key
     *            the key under which this object should be added. Ensure the
     *            key is properly encoded and does not contain whitespace or
     *            control characters. The max length of the key (including
     *            prefix) is 250 characters.
     * @param value
     *            the object to store
     * @param timeToLive
     *            the expiration of this object i.e. less than 30 days in
     *            seconds or the exact expiry time as UNIX time
     * @return Array of futures representing the processing of this operation
     *         across all the replicas
     * @throws EVCacheException
     *             in the rare circumstance where queue is too full to accept
     *             any more requests, or on issues serializing the value, or on
     *             any IO related issues
     */
    <T> Future<Boolean>[] set(String key, T value, int timeToLive) throws EVCacheException;
    /**
     * Set an object in the EVCACHE using the given Transcoder regardless of any
     * existing value. The expiration used is the default TTL configured for
     * this cache (this overload takes no explicit timeToLive).
     *
     * @param <T>
     *            the type of the value being stored
     * @param key
     *            the key under which this object should be added. Ensure the
     *            key is properly encoded and does not contain whitespace or
     *            control characters. The max length of the key (including
     *            prefix) is 250 characters.
     * @param value
     *            the object to store
     * @param tc
     *            the Transcoder to serialize the data
     * @return Array of futures representing the processing of this operation
     *         across all the replicas
     * @throws EVCacheException
     *             in the rare circumstance where queue is too full to accept
     *             any more requests, or on issues serializing the value, or on
     *             any IO related issues
     */
    <T> Future<Boolean>[] set(String key, T value, Transcoder<T> tc) throws EVCacheException;
    /**
     * Set an object in the EVCACHE regardless of any existing value, using the
     * default TTL and the default Transcoder, and return a latch to await
     * completion.
     *
     * @param <T>
     *            the type of the value being stored
     * @param key
     *            the key under which this object should be added. Ensure the
     *            key is properly encoded and does not contain whitespace or
     *            control characters. The max length of the key (including
     *            prefix) is 250 characters.
     * @param value
     *            the object to store
     * @param policy
     *            The Latch will be returned based on the Policy. The Latch can
     *            then be used to await until the count down has reached 0 or
     *            the specified time has elapsed.
     * @return EVCacheLatch which encompasses the operation across all replicas
     * @throws EVCacheException
     *             in the rare circumstance where queue is too full to accept
     *             any more requests, or on issues serializing the value, or on
     *             any IO related issues
     */
    <T> EVCacheLatch set(String key, T value, EVCacheLatch.Policy policy) throws EVCacheException;

    /**
     * Set an object in the EVCACHE regardless of any existing value, with the
     * given TTL (default Transcoder), and return a latch to await completion.
     *
     * The <code>timeToLive</code> value is passed to memcached exactly as
     * given, and will be processed per the memcached protocol specification
     * (an offset in seconds up to 30 days, or an absolute UNIX time beyond
     * that).
     *
     * @param <T>
     *            the type of the value being stored
     * @param key
     *            the key under which this object should be added; max length
     *            (including prefix) is 250 characters
     * @param value
     *            the object to store
     * @param timeToLive
     *            the expiration of this object i.e. less than 30 days in
     *            seconds or the exact expiry time as UNIX time
     * @param policy
     *            The Latch will be returned based on the Policy. The Latch can
     *            then be used to await until the count down has reached 0 or
     *            the specified time has elapsed.
     * @return EVCacheLatch which encompasses the operation across all replicas
     * @throws EVCacheException
     *             in the rare circumstance where queue is too full to accept
     *             any more requests, or on issues serializing the value, or on
     *             any IO related issues
     */
    <T> EVCacheLatch set(String key, T value, int timeToLive, EVCacheLatch.Policy policy) throws EVCacheException;

    /**
     * Set an object in the EVCACHE regardless of any existing value, using the
     * given Transcoder and the default TTL, and return a latch to await
     * completion.
     *
     * @param <T>
     *            the type of the value being stored
     * @param key
     *            the key under which this object should be added; max length
     *            (including prefix) is 250 characters
     * @param value
     *            the object to store
     * @param tc
     *            the Transcoder to serialize the data
     * @param policy
     *            The Latch will be returned based on the Policy. The Latch can
     *            then be used to await until the count down has reached 0 or
     *            the specified time has elapsed.
     * @return EVCacheLatch which encompasses the operation across all replicas
     * @throws EVCacheException
     *             in the rare circumstance where queue is too full to accept
     *             any more requests, or on issues serializing the value, or on
     *             any IO related issues
     */
    <T> EVCacheLatch set(String key, T value, Transcoder<T> tc, EVCacheLatch.Policy policy) throws EVCacheException;
    /**
     * Set an object in the EVCACHE using the given Transcoder regardless of any
     * existing value, with the given TTL, and return a latch to await
     * completion.
     *
     * The <code>timeToLive</code> value is passed to memcached exactly as
     * given, and will be processed per the memcached protocol specification:
     *
     * <blockquote> The actual value sent may either be Unix time aka EPOC time
     * (number of seconds since January 1, 1970, as a 32-bit int value), or a
     * number of seconds starting from current time. In the latter case, this
     * number of seconds may not exceed 60*60*24*30 (number of seconds in 30
     * days); if the number sent by a client is larger than that, the server
     * will consider it to be real Unix time value rather than an offset from
     * current time. </blockquote>
     *
     * @param <T>
     *            the type of the value being stored
     * @param key
     *            the key under which this object should be added. Ensure the
     *            key is properly encoded and does not contain whitespace or
     *            control characters. The max length of the key (including
     *            prefix) is 250 characters.
     * @param value
     *            the object to store
     * @param tc
     *            the Transcoder to serialize the data
     * @param timeToLive
     *            the expiration of this object i.e. less than 30 days in
     *            seconds or the exact expiry time as UNIX time
     * @param policy
     *            The Latch will be returned based on the Policy. The Latch can
     *            then be used to await until the count down has reached 0 or
     *            the specified time has elapsed.
     * @return EVCacheLatch which encompasses the operation across all replicas
     * @throws EVCacheException
     *             in the rare circumstance where queue is too full to accept
     *             any more requests, or on issues serializing the value, or on
     *             any IO related issues
     */
    <T> EVCacheLatch set(String key, T value, Transcoder<T> tc, int timeToLive, EVCacheLatch.Policy policy)
            throws EVCacheException;
    /**
     * Replace an existing object in the EVCACHE using the default Transcoder
     * and default TTL. If the object does not exist in EVCACHE then the value
     * is not replaced.
     *
     * @param <T>
     *            the type of the value being stored
     * @param key
     *            the key under which this object should be replaced. Ensure the
     *            key is properly encoded and does not contain whitespace or
     *            control characters. The max length of the key (including
     *            prefix) is 250 characters.
     * @param value
     *            the replacement object
     * @param policy
     *            The Latch will be returned based on the Policy. The Latch can
     *            then be used to await until the count down has reached 0 or
     *            the specified time has elapsed.
     *
     * @return EVCacheLatch which encompasses the Operation. You can block
     *         on the Operation based on the policy to ensure the required
     *         criteria is met. The Latch can also be queried to get details on
     *         status of the operations
     *
     * @throws EVCacheException
     *             in the rare circumstance where queue is too full to accept
     *             any more requests, or on issues serializing the value, or on
     *             any IO related issues
     */
    <T> EVCacheLatch replace(String key, T value, EVCacheLatch.Policy policy) throws EVCacheException;

    /**
     * Replace an existing object in the EVCACHE using the given Transcoder and
     * the default TTL (this overload takes no explicit timeToLive). If the
     * object does not exist in EVCACHE then the value is not replaced.
     *
     * @param <T>
     *            the type of the value being stored
     * @param key
     *            the key under which this object should be replaced; max
     *            length (including prefix) is 250 characters
     * @param value
     *            the replacement object
     * @param tc
     *            the Transcoder to serialize the data
     * @param policy
     *            The Latch will be returned based on the Policy. The Latch can
     *            then be used to await until the count down has reached 0 or
     *            the specified time has elapsed.
     *
     * @return EVCacheLatch which encompasses the Operation. You can block
     *         on the Operation based on the policy to ensure the required
     *         criteria is met. The Latch can also be queried to get details on
     *         status of the operations
     *
     * @throws EVCacheException
     *             in the rare circumstance where queue is too full to accept
     *             any more requests, or on issues serializing the value, or on
     *             any IO related issues
     */
    <T> EVCacheLatch replace(String key, T value, Transcoder<T> tc, EVCacheLatch.Policy policy) throws EVCacheException;
    /**
     * Replace an existing object in the EVCACHE using the given Transcoder and
     * the given TTL. If the object does not exist in EVCACHE then the value is
     * not replaced.
     *
     * The <code>timeToLive</code> value is passed to memcached exactly as
     * given, and will be processed per the memcached protocol specification:
     *
     * <blockquote> The actual value sent may either be Unix time aka EPOC time
     * (number of seconds since January 1, 1970, as a 32-bit int value), or a
     * number of seconds starting from current time. In the latter case, this
     * number of seconds may not exceed 60*60*24*30 (number of seconds in 30
     * days); if the number sent by a client is larger than that, the server
     * will consider it to be real Unix time value rather than an offset from
     * current time. </blockquote>
     *
     * @param <T>
     *            the type of the value being stored
     * @param key
     *            the key under which this object should be replaced. Ensure the
     *            key is properly encoded and does not contain whitespace or
     *            control characters. The max length of the key (including
     *            prefix) is 250 characters.
     * @param value
     *            the replacement object
     * @param tc
     *            the Transcoder to serialize the data
     * @param timeToLive
     *            the expiration of this object i.e. less than 30 days in
     *            seconds or the exact expiry time as UNIX time
     * @param policy
     *            The Latch will be returned based on the Policy. The Latch can
     *            then be used to await until the count down has reached 0 or
     *            the specified time has elapsed.
     *
     * @return EVCacheLatch which encompasses the Operation. You can block
     *         on the Operation based on the policy to ensure the required
     *         criteria is met. The Latch can also be queried to get details on
     *         status of the operations
     *
     * @throws EVCacheException
     *             in the rare circumstance where queue is too full to accept
     *             any more requests, or on issues serializing the value, or on
     *             any IO related issues
     */
    <T> EVCacheLatch replace(String key, T value, Transcoder<T> tc, int timeToLive, EVCacheLatch.Policy policy)
            throws EVCacheException;
    /**
     * Set an object in the EVCACHE using the given {@link Transcoder}
     * regardless of any existing value, with the given TTL.
     *
     * The <code>timeToLive</code> value is passed to memcached exactly as
     * given, and will be processed per the memcached protocol specification:
     *
     * <blockquote> The actual value sent may either be Unix time aka EPOC time
     * (number of seconds since January 1, 1970, as a 32-bit int value), or a
     * number of seconds starting from current time. In the latter case, this
     * number of seconds may not exceed 60*60*24*30 (number of seconds in 30
     * days); if the number sent by a client is larger than that, the server
     * will consider it to be real Unix time value rather than an offset from
     * current time. </blockquote>
     *
     * @param <T>
     *            the type of the value being stored
     * @param key
     *            the key under which this object should be added. Ensure the
     *            key is properly encoded and does not contain whitespace or
     *            control characters. The max length of the key (including
     *            prefix) is 250 characters.
     * @param value
     *            the object to store
     * @param tc
     *            the Transcoder to serialize the data
     * @param timeToLive
     *            the expiration of this object i.e. less than 30 days in
     *            seconds or the exact expiry time as UNIX time
     * @return Array of futures representing the processing of this operation
     *         across all the replicas
     * @throws EVCacheException
     *             in the rare circumstance where queue is too full to accept
     *             any more requests, or on issues serializing the value, or on
     *             any IO related issues
     */
    <T> Future<Boolean>[] set(String key, T value, Transcoder<T> tc, int timeToLive) throws EVCacheException;
    /**
     * Remove a current key value relation from the Cache.
     *
     * @param key
     *            the non-null key corresponding to the relation to be removed.
     *            Ensure the key is properly encoded and does not contain
     *            whitespace or control characters. The max length of the key
     *            (including prefix) is 250 characters.
     * @return Array of futures representing the processing of this operation
     *         across all the replicas. If a future returns true then the key
     *         was deleted from Cache; if false then the key was not found thus
     *         not deleted (in effect the outcome was what was desired).
     *         If null is returned then the operation timed out and
     *         probably the key was not deleted. In such a scenario retry the
     *         operation.
     * @throws EVCacheException
     *             in the rare circumstance where queue is too full to accept
     *             any more requests or any IO related issues
     */
    Future<Boolean>[] delete(String key) throws EVCacheException;

    /**
     * Remove a current key value relation from the Cache, returning a latch to
     * await completion.
     *
     * @param key
     *            the non-null key corresponding to the relation to be removed;
     *            max length (including prefix) is 250 characters
     * @param policy
     *            The Latch will be returned based on the Policy. The Latch can
     *            then be used to await until the count down has reached 0 or
     *            the specified time has elapsed.
     *
     * @return EVCacheLatch which encompasses the Operation. You can block
     *         on the Operation based on the policy to ensure the required
     *         criteria is met. The Latch can also be queried to get details on
     *         status of the operations
     *
     * @throws EVCacheException
     *             in the rare circumstance where queue is too full to accept
     *             any more requests or any IO related issues
     */
    <T> EVCacheLatch delete(String key, EVCacheLatch.Policy policy) throws EVCacheException;
    /**
     * Retrieve the value for the given key using the default Transcoder.
     *
     * @param <T>
     *            the expected type of the cached value
     * @param key
     *            key to get. Ensure the key is properly encoded and does not
     *            contain whitespace or control characters. The max length of
     *            the key (including prefix) is 250 characters.
     * @return the Value for the given key from the cache (null if there is
     *         none).
     * @throws EVCacheException
     *             in the rare circumstance where queue is too full to accept
     *             any more requests, or on issues during deserialization, or on
     *             any IO related issues
     *
     *             Note: If the data is replicated by zone, then we can get the
     *             value from the zone local to the client. If we cannot find
     *             this value then null is returned. This is transparent to the
     *             users.
     */
    <T> T get(String key) throws EVCacheException;

    /**
     * Asynchronously retrieve the value for the given key using the default
     * Transcoder.
     *
     * @param <T>
     *            the expected type of the cached value
     * @param key
     *            key to get; max length (including prefix) is 250 characters
     * @return a CompletableFuture yielding the value for the given key from
     *         the cache (null if there is none).
     * @throws EVCacheException
     *             in the rare circumstance where queue is too full to accept
     *             any more requests, or on issues during deserialization, or on
     *             any IO related issues
     */
    <T> CompletableFuture<T> getAsync(String key) throws EVCacheException;
    /**
     * Retrieve the value for the given key as a reactive {@link Single}, using
     * the default Transcoder.
     *
     * @param <T>
     *            the expected type of the cached value
     * @param key
     *            key to get. Ensure the key is properly encoded and does not
     *            contain whitespace or control characters. The max length of
     *            the key (including prefix) is 250 characters.
     * @param scheduler
     *            the {@link Scheduler} to perform subscription actions on
     * @return a Single emitting the value for the given key from the cache
     *         (null if there is none).
     */
    <T> Single<T> get(String key, Scheduler scheduler);
    /**
     * Retrieve the value for the given key using the specified Transcoder for
     * deserialization.
     *
     * @param <T>
     *            the expected type of the cached value
     * @param key
     *            key to get. Ensure the key is properly encoded and does not
     *            contain whitespace or control characters. The max length of
     *            the key (including prefix) is 250 characters.
     * @param tc
     *            the Transcoder to deserialize the data
     * @return the Value for the given key from the cache (null if there is
     *         none).
     * @throws EVCacheException
     *             in the rare circumstance where queue is too full to accept
     *             any more requests, or on issues during deserialization, or on
     *             any IO related issues
     *
     *             Note: If the data is replicated by zone, then we can get the
     *             value from the zone local to the client. If we cannot find
     *             this value then null is returned. This is transparent to the
     *             users.
     */
    <T> T get(String key, Transcoder<T> tc) throws EVCacheException;

    /**
     * Asynchronously retrieve the value for the given key using the specified
     * Transcoder for deserialization.
     *
     * @param <T>
     *            the expected type of the cached value
     * @param key
     *            key to get; max length (including prefix) is 250 characters
     * @param tc
     *            the Transcoder to deserialize the data
     * @return a CompletableFuture yielding the value for the given key from
     *         the cache (null if there is none).
     * @throws EVCacheException
     *             in the rare circumstance where queue is too full to accept
     *             any more requests, or on issues during deserialization, or on
     *             any IO related issues
     */
    <T> CompletableFuture<T> getAsync(String key, Transcoder<T> tc) throws EVCacheException;
    /**
     * Retrieve the metadata for the given key.
     *
     * @param key
     *            key to get. Ensure the key is properly encoded and does not
     *            contain whitespace or control characters. The max length of
     *            the key (including prefix) is 250 characters.
     * @return the metadata for the given key from the cache (null if there is
     *         none).
     * @throws EVCacheException
     *             in the rare circumstance where queue is too full to accept
     *             any more requests or on any IO related issues. The default
     *             implementation always throws; implementations of this
     *             interface must override this method.
     *
     *             Note: If the data is replicated by zone, then we get the
     *             metadata from the zone local to the client. If we cannot
     *             find the value then we try other zones; if all are
     *             unsuccessful then null is returned.
     */
    default EVCacheItemMetaData metaDebug(String key) throws EVCacheException {
        throw new EVCacheException("Default implementation. If you are implementing EVCache interface you need to implement this method.");
    }

    /**
     * Retrieve the value and its metadata for the given key using the
     * specified Transcoder for deserialization.
     *
     * @param <T>
     *            the expected type of the cached value
     * @param key
     *            key to get; max length (including prefix) is 250 characters
     * @param tc
     *            the Transcoder to deserialize the data
     * @return the value for the given key (null if there is none) and its
     *         metadata, encapsulated in an {@link EVCacheItem}.
     * @throws EVCacheException
     *             in the rare circumstance where queue is too full to accept
     *             any more requests, or on issues during deserialization, or on
     *             any IO related issues. The default implementation always
     *             throws; implementations of this interface must override this
     *             method.
     *
     *             Note: If the data is replicated by zone, then we can get the
     *             value from the zone local to the client. If we cannot find
     *             this value we retry other zones; if still not found, null is
     *             returned.
     */
    default <T> EVCacheItem<T> metaGet(String key, Transcoder<T> tc) throws EVCacheException {
        throw new EVCacheException("Default implementation. If you are implementing EVCache interface you need to implement this method.");
    }
    /**
     * Retrieve the value for the given key using the specified Transcoder for
     * deserialization and the given latch {@link Policy}.
     *
     * @param <T>
     *            the expected type of the cached value
     * @param key
     *            key to get. Ensure the key is properly encoded and does not
     *            contain whitespace or control characters. The max length of
     *            the key (including prefix) is 250 characters.
     * @param tc
     *            the Transcoder to deserialize the data
     * @param policy
     *            the Policy governing how many replica responses must be
     *            awaited before returning
     *
     * @return the Value for the given key from the cache (null if there is
     *         none).
     * @throws EVCacheException
     *             in the rare circumstance where queue is too full to accept
     *             any more requests, or on issues during deserialization, or on
     *             any IO related issues
     *
     *             Note: If the data is replicated by zone, then we can get the
     *             value from the zone local to the client. If we cannot find
     *             this value then null is returned. This is transparent to the
     *             users.
     */
    <T> T get(String key, Transcoder<T> tc, Policy policy) throws EVCacheException;
    /**
     * Retrieve the value for the given key as a reactive {@link Single}, using
     * the specified Transcoder for deserialization.
     *
     * @param <T>
     *            the expected type of the cached value
     * @param key
     *            key to get. Ensure the key is properly encoded and does not
     *            contain whitespace or control characters. The max length of
     *            the key (including prefix) is 250 characters.
     * @param tc
     *            the Transcoder to deserialize the data
     * @param scheduler
     *            the {@link Scheduler} to perform subscription actions on
     * @return a Single emitting the value for the given key from the cache
     *         (null if there is none).
     */
    <T> Single<T> get(String key, Transcoder<T> tc, Scheduler scheduler);
    /**
     * Retrieve the value for the given key using the default Transcoder for
     * deserialization and reset its expiration to the passed timeToLive,
     * as a reactive {@link Single}.
     *
     * @param <T>
     *            the expected type of the cached value
     * @param key
     *            key to get. Ensure the key is properly encoded and does not
     *            contain whitespace or control characters. The max length of
     *            the key (including prefix) is 250 characters.
     * @param timeToLive
     *            the new expiration of this object i.e. less than 30 days in
     *            seconds or the exact expiry time as UNIX time
     * @param scheduler
     *            the {@link Scheduler} to perform subscription actions on
     * @return a Single emitting the value for the given key from the cache
     *         (null if there is none).
     */
    <T> Single<T> getAndTouch(String key, int timeToLive, Scheduler scheduler);

    /**
     * Retrieve the value for the given key using the specified Transcoder for
     * deserialization and reset its expiration to the passed timeToLive,
     * as a reactive {@link Single}.
     *
     * @param <T>
     *            the expected type of the cached value
     * @param key
     *            key to get; max length (including prefix) is 250 characters
     * @param timeToLive
     *            the new expiration of this object i.e. less than 30 days in
     *            seconds or the exact expiry time as UNIX time
     * @param tc
     *            the Transcoder to deserialize the data
     * @param scheduler
     *            the {@link Scheduler} to perform subscription actions on
     * @return a Single emitting the value for the given key from the cache
     *         (null if there is none).
     */
    <T> Single<T> getAndTouch(String key, int timeToLive, Transcoder<T> tc, Scheduler scheduler);
    /**
     * Get a single key and reset its expiration, using the default Transcoder.
     *
     * @param <T>
     *            the expected type of the cached value
     * @param key
     *            the key to get. Ensure the key is properly encoded and does
     *            not contain whitespace or control characters. The max length
     *            of the key (including prefix) is 250 characters.
     * @param timeToLive
     *            the new expiration of this object i.e. less than 30 days in
     *            seconds or the exact expiry time as UNIX time
     * @return the result from the cache (null if there is none)
     * @throws EVCacheException
     *             in the rare circumstance where queue is too full to accept
     *             any more requests, or on issues during deserialization, or on
     *             any IO related issues
     */
    <T> T getAndTouch(String key, int timeToLive) throws EVCacheException;

    /**
     * Get a single key and reset its expiration, using the specified Transcoder
     * for deserialization.
     *
     * @param <T>
     *            the expected type of the cached value
     * @param key
     *            the key to get; max length (including prefix) is 250
     *            characters
     * @param timeToLive
     *            the new expiration of this object i.e. less than 30 days in
     *            seconds or the exact expiry time as UNIX time
     * @param tc
     *            the Transcoder to deserialize the data
     * @return the result from the cache (null if there is none)
     * @throws EVCacheException
     *             in the rare circumstance where queue is too full to accept
     *             any more requests, or on issues during deserialization, or on
     *             any IO related issues
     */
    <T> T getAndTouch(String key, int timeToLive, Transcoder<T> tc) throws EVCacheException;
    /**
     * Retrieve the values for a set of keys using the default Transcoder.
     *
     * @param <T>
     *            the expected type of the cached values
     * @param keys
     *            the keys for which we need the values. Ensure each key is
     *            properly encoded and does not contain whitespace or control
     *            characters. The max length of each key (including prefix) is
     *            250 characters.
     * @return a map of the values (for each value that exists). If the
     *         returned map contains a key but its value is null then the key
     *         does not exist in the cache. If a key is missing from the map
     *         then we were not able to retrieve the data for that key due to
     *         some exception.
     * @throws EVCacheException
     *             in the rare circumstance where queue is too full to accept
     *             any more requests, or on issues during deserialization, or on
     *             any IO related issues
     */
    <T> Map<String, T> getBulk(String... keys) throws EVCacheException;

    /**
     * Asynchronously retrieve the values for a set of keys using the default
     * Transcoder.
     *
     * @param <T>
     *            the expected type of the cached values
     * @param keys
     *            the keys for which we need the values; each key max length
     *            (including prefix) is 250 characters
     * @return a CompletableFuture yielding a map of the values (for each value
     *         that exists). If the returned map contains a key but its value
     *         is null then the key does not exist in the cache. If a key is
     *         missing then we were not able to retrieve the data for that key
     *         due to some exception.
     */
    <T> CompletableFuture<Map<String, T>> getAsyncBulk(String... keys);
    /**
     * Retrieve the values for a set of keys, using the specified Transcoder
     * for deserialization.
     *
     * @param <T>
     *            the expected type of the cached values
     * @param tc
     *            the Transcoder to use for deserialization
     * @param keys
     *            the keys for which we need the values. Ensure each key is
     *            properly encoded and does not contain whitespace or control
     *            characters. The max length of each key (including prefix) is
     *            250 characters.
     * @return a map of the values (for each value that exists). If the
     *         returned map contains a key but its value is null then the key
     *         does not exist in the cache. If a key is missing from the map
     *         then we were not able to retrieve the data for that key due to
     *         some exception.
     * @throws EVCacheException
     *             in the rare circumstance where queue is too full to accept
     *             any more requests, or on issues during deserialization, or on
     *             any IO related issues
     */
    <T> Map<String, T> getBulk(Transcoder<T> tc, String... keys) throws EVCacheException;

    /**
     * Asynchronously retrieve the values for a set of keys, using the
     * specified Transcoder for deserialization. In Beta testing (to be used by
     * gateway team).
     *
     * @param <T>
     *            the expected type of the cached values
     * @param keys
     *            the keys for which we need the values; each key max length
     *            (including prefix) is 250 characters
     * @param tc
     *            the Transcoder to use for deserialization
     * @return a CompletableFuture yielding a map of the values (for each value
     *         that exists). If the returned map contains a key but its value
     *         is null then the key does not exist in the cache. If a key is
     *         missing then we were not able to retrieve the data for that key
     *         due to some exception.
     */
    <T> CompletableFuture<Map<String, T>> getAsyncBulk(Collection<String> keys, Transcoder<T> tc);
    /**
     * Retrieve the values for a collection of keys, using the default
     * Transcoder for deserialization.
     *
     * @param <T>
     *            the expected type of the cached values
     * @param keys
     *            the collection of keys for which we need the values. Ensure
     *            each key is properly encoded and does not contain whitespace
     *            or control characters. The max length of each key (including
     *            prefix) is 250 characters.
     * @return a map of the values (for each value that exists). If the
     *         returned map contains a key but its value is null then the key
     *         does not exist in the cache. If a key is missing from the map
     *         then we were not able to retrieve the data for that key due to
     *         some exception.
     * @throws EVCacheException
     *             in the rare circumstance where queue is too full to accept
     *             any more requests, or on issues during deserialization, or on
     *             any IO related issues
     */
    <T> Map<String, T> getBulk(Collection<String> keys) throws EVCacheException;

    /**
     * Retrieve the values for a collection of keys, using the specified
     * Transcoder for deserialization.
     *
     * @param <T>
     *            the expected type of the cached values
     * @param keys
     *            the collection of keys for which we need the values; each key
     *            max length (including prefix) is 250 characters
     * @param tc
     *            the Transcoder to use for deserialization
     * @return a map of the values (for each value that exists). If the
     *         returned map contains a key but its value is null then the key
     *         does not exist in the cache. If a key is missing from the map
     *         then we were not able to retrieve the data for that key due to
     *         some exception.
     * @throws EVCacheException
     *             in the rare circumstance where queue is too full to accept
     *             any more requests, or on issues during deserialization, or on
     *             any IO related issues
     */
    <T> Map<String, T> getBulk(Collection<String> keys, Transcoder<T> tc) throws EVCacheException;
/**
* Retrieve the value for the collection of keys, using the specified
* Transcoder for deserialization.
*
* @param keys
* The collection of keys for which we need the values. Ensure each key is properly encoded and does
* not contain whitespace or control characters. The max length of the key (including prefix)
* is 200 characters.
* @param tc
* the transcoder to use for deserialization
* @param timeToLive
* the new expiration of this object i.e. less than 30 days in
* seconds or the exact expiry time as UNIX time
* @return a map of the values (for each value that exists). If the value of
* the given key does not exist then null is returned. Only the keys
* whose value are not null and exist in the returned map are set to
* the new TTL as specified in timeToLive.
* @throws EVCacheException
* in the rare circumstance where queue is too full to accept
* any more requests or issues during deserialization or any IO
* Related issues
*/
<T> Map<String, T> getBulkAndTouch(Collection<String> keys, Transcoder<T> tc, int timeToLive)
throws EVCacheException;
/**
 * Get the value for the given key asynchronously and deserialize it with the
 * default transcoder.
 *
 * @param key
 *            the key for which we need the value. Ensure the key is
 *            properly encoded and does not contain whitespace or control
 *            characters. The max length of the key (including prefix)
 *            is 200 characters.
 * @return the Future containing the value, or null.
 * @throws EVCacheException
 *             in the circumstance where the queue is too full to accept any
 *             more requests, or on deserialization issues, a timeout
 *             retrieving the value, or any IO related issues
 *
 * @deprecated This is a sub-optimal operation that does not support Retries, Fast Failures, FIT, GC Detection, etc.
 *             Will be removed in a subsequent release
 */
<T> Future<T> getAsynchronous(String key) throws EVCacheException;
/**
 * Get the value for the given key asynchronously and deserialize it with the
 * given transcoder.
 *
 * @param key
 *            the key for which we need the value. Ensure the key is
 *            properly encoded and does not contain whitespace or control
 *            characters. The max length of the key (including prefix)
 *            is 200 characters.
 * @param tc
 *            the transcoder to use for deserialization
 * @return the Future containing the value, or null.
 * @throws EVCacheException
 *             in the circumstance where the queue is too full to accept any
 *             more requests, or on deserialization issues, a timeout
 *             retrieving the value, or any IO related issues
 *
 * @deprecated This is a sub-optimal operation that does not support Retries, Fast Failures, FIT, GC Detection, etc.
 *             Will be removed in a subsequent release
 */
<T> Future<T> getAsynchronous(String key, Transcoder<T> tc) throws EVCacheException;
/**
 * Increment the given counter, returning the new value.
 *
 * @param key
 *            the key. Ensure the key is
 *            properly encoded and does not contain whitespace or control
 *            characters. The max length of the key (including prefix)
 *            is 200 characters.
 * @param by
 *            the amount to increment
 * @param def
 *            the default value (used if the counter does not exist)
 * @param exp
 *            the expiration of this object
 * @return the new value, or -1 if we were unable to increment or add
 * @throws EVCacheException
 *             in the circumstance where the timeout is exceeded or the queue
 *             is full
 *
 */
public long incr(String key, long by, long def, int exp) throws EVCacheException;
/**
 * Decrement the given counter, returning the new value.
 *
 * @param key
 *            the key. Ensure the key is
 *            properly encoded and does not contain whitespace or control
 *            characters. The max length of the key (including prefix)
 *            is 200 characters.
 * @param by
 *            the amount to decrement
 * @param def
 *            the default value (used if the counter does not exist)
 * @param exp
 *            the expiration of this object
 * @return the new value, or -1 if we were unable to decrement or add
 * @throws EVCacheException
 *             in the circumstance where the timeout is exceeded or the queue
 *             is full
 *
 */
public long decr(String key, long by, long def, int exp) throws EVCacheException;
/**
 * Append the given value to the existing value in EVCache. The append fails
 * if the key does not exist in EVCache. If the value has not changed
 * then false will be returned.
 *
 * @param key
 *            the key under which this object should be appended. Ensure the
 *            key is properly encoded and does not contain whitespace or
 *            control characters. The max length of the key (including prefix)
 *            is 200 characters.
 * @param value
 *            the value to be appended
 * @param tc
 *            the transcoder that will be used for serialization
 * @param timeToLive
 *            the expiration of this object i.e. less than 30 days in
 *            seconds or the exact expiry time as UNIX time
 *
 * @return Array of futures representing the processing of this operation
 *         across all the replicas
 * @throws EVCacheException
 *             in the circumstance where the queue is too full to accept any
 *             more requests, or on issues serializing the value or any IO
 *             related issues
 */
<T> Future<Boolean>[] append(String key, T value, Transcoder<T> tc, int timeToLive) throws EVCacheException;
/**
 * Append the given value to the existing value in EVCache. The append fails
 * if the key does not exist in EVCache. If the value has not changed
 * or does not exist then false will be returned.
 *
 * @param key
 *            the key under which this object should be appended. Ensure the
 *            key is properly encoded and does not contain whitespace or
 *            control characters. The max length of the key (including prefix)
 *            is 200 characters.
 * @param value
 *            the value to be appended
 * @param timeToLive
 *            the expiration of this object i.e. less than 30 days in
 *            seconds or the exact expiry time as UNIX time
 *
 * @return Array of futures representing the processing of this operation
 *         across all the replicas
 * @throws EVCacheException
 *             in the circumstance where the queue is too full to accept any
 *             more requests, or on issues serializing the value or any IO
 *             related issues
 */
<T> Future<Boolean>[] append(String key, T value, int timeToLive) throws EVCacheException;
/**
 * @deprecated Please use {@link #add(String, Object, Transcoder, int, EVCacheLatch.Policy)}
 *
 * Add the given value to EVCache. The add fails if the key already exists in EVCache.
 *
 * @param key
 *            the key under which this object should be added. Ensure the
 *            key is properly encoded and does not contain whitespace or
 *            control characters. The max length of the key (including prefix)
 *            is 200 characters.
 * @param value
 *            the value to be added
 * @param tc
 *            the transcoder that will be used for serialization
 * @param timeToLive
 *            the expiration of this object i.e. less than 30 days in
 *            seconds or the exact expiry time as UNIX time
 *
 * @return boolean which indicates if the add was successful or not.
 *         The operation will fail with a false response if the data already exists in EVCache.
 *
 * @throws EVCacheException
 *             in the rare circumstance where the queue is too full to accept
 *             any more requests, or on issues serializing the value or any IO
 *             related issues
 */
<T> boolean add(String key, T value, Transcoder<T> tc, int timeToLive) throws EVCacheException;
/**
 * Add the given value to EVCache. The add fails if the key already exists in EVCache.
 *
 * @param key
 *            the key under which this object should be added. Ensure the
 *            key is properly encoded and does not contain whitespace or
 *            control characters. The max length of the key (including prefix)
 *            is 200 characters.
 * @param value
 *            the value to be added
 * @param tc
 *            the transcoder that will be used for serialization
 * @param timeToLive
 *            the expiration of this object i.e. less than 30 days in
 *            seconds or the exact expiry time as UNIX time
 * @param policy
 *            The Latch will be returned based on the Policy. The Latch can then be used to await until the count down has reached 0 or the specified time has elapsed.
 *
 *
 * @return EVCacheLatch which encompasses the Operation. You can block
 *         on the Operation to ensure all adds are successful. If there is any partial success
 *         the client will try and fix the data.
 *
 *
 * @throws EVCacheException
 *             in the rare circumstance where the queue is too full to accept
 *             any more requests, or on issues serializing the value or any IO
 *             related issues
 */
<T> EVCacheLatch add(String key, T value, Transcoder<T> tc, int timeToLive, Policy policy) throws EVCacheException;
/**
 * Touch the given key and reset its expiration time.
 *
 * @param key
 *            the key to touch. Ensure the
 *            key is properly encoded and does not contain whitespace or
 *            control characters. The max length of the key (including prefix)
 *            is 200 characters.
 * @param ttl
 *            the new expiration time in seconds
 *
 * @return Array of futures representing the processing of this operation
 *         across all the replicas
 * @throws EVCacheException
 *             in the rare circumstance where the queue is too full to accept
 *             any more requests, or on issues serializing the value or any IO
 *             related issues
 */
<T> Future<Boolean>[] touch(String key, int ttl) throws EVCacheException;
/**
 * Touch the given key and reset its expiration time.
 *
 * @param key
 *            the key to touch. Ensure the
 *            key is properly encoded and does not contain whitespace or
 *            control characters. The max length of the key (including prefix)
 *            is 200 characters.
 * @param ttl
 *            the new expiration time in seconds
 *
 * @param policy
 *            The Latch will be returned based on the Policy. The Latch can
 *            then be used to await until the count down has reached 0 or
 *            the specified time has elapsed.
 *
 * @return EVCacheLatch which encompasses the Operation. You can block
 *         on the Operation based on the policy to ensure the required
 *         criteria is met. The Latch can also be queried to get details on
 *         the status of the operations
 *
 * @throws EVCacheException
 *             in the rare circumstance where the queue is too full to accept
 *             any more requests, or any IO related issues
 */
<T> EVCacheLatch touch(String key, int ttl, EVCacheLatch.Policy policy) throws EVCacheException;
/**
 * Append the given value to the existing value in EVCache. If the key does not exist then the key will be added.
 *
 *
 * @param key
 *            the key under which this object should be appended or added. Ensure the
 *            key is properly encoded and does not contain whitespace or
 *            control characters. The max length of the key (including prefix)
 *            is 200 characters.
 * @param value
 *            the value to be appended
 * @param tc
 *            the transcoder that will be used for serialization
 * @param timeToLive
 *            the expiration of this object i.e. less than 30 days in
 *            seconds or the exact expiry time as UNIX time
 *
 * @return Array of futures representing the processing of this operation
 *         across all the replicas
 * @throws EVCacheException
 *             in the circumstance where the queue is too full to accept any
 *             more requests, or on issues serializing the value or any IO
 *             related issues
 */
<T> Future<Boolean>[] appendOrAdd(String key, T value, Transcoder<T> tc, int timeToLive) throws EVCacheException;
/**
 * Append the given value to the existing value in EVCache. If the key does not exist then the key will be added.
 *
 *
 * @param key
 *            the key under which this object should be appended or added. Ensure the
 *            key is properly encoded and does not contain whitespace or
 *            control characters. The max length of the key (including prefix)
 *            is 200 characters.
 * @param value
 *            the value to be appended
 * @param tc
 *            the transcoder that will be used for serialization
 * @param timeToLive
 *            the expiration of this object i.e. less than 30 days in
 *            seconds or the exact expiry time as UNIX time
 *
 * @param policy
 *            The Latch will be returned based on the Policy. The Latch can then be used to await until the count down has reached 0 or the specified time has elapsed.
 *
 * @return EVCacheLatch which encompasses the Operation. You can block
 *         on the Operation based on the policy to ensure the required
 *         criteria is met. The Latch can also be queried to get details on
 *         the status of the operations
 *
 * @throws EVCacheException
 *             in the circumstance where the queue is too full to accept any
 *             more requests, or on issues serializing the value or any IO
 *             related issues
 */
<T> EVCacheLatch appendOrAdd(String key, T value, Transcoder<T> tc, int timeToLive, Policy policy) throws EVCacheException;
/**
 * Returns the {@code appName} used by this {@code EVCache}.
 *
 * @return the name of the EVCache App cluster
 */
String getAppName();
/**
 * Returns the {@code cachePrefix} used by this {@code EVCache}, or null if
 * no prefix was configured.
 *
 * @return the cache prefix prepended to every key, or null
 */
String getCachePrefix();
/**
* A Builder that builds an EVCache based on the specified App Name, cache
* Name, TTl and Transcoder.
*
* @author smadappa
*/
/**
 * A Builder that builds an {@link EVCache} based on the specified App Name,
 * cache prefix, default TTL and Transcoder.
 *
 * <p>Not thread-safe: configure from a single thread and call {@link #build()}.
 *
 * @author smadappa
 */
public class Builder {
    // NOTE(review): logger is intentionally keyed on EVCacheImpl.class (as in the
    // original code) so existing log categories are unchanged.
    private static final Logger logger = LoggerFactory.getLogger(EVCacheImpl.class);

    private String _appName;
    private String _cachePrefix = null;
    private int _ttl = 900; // default Time To Live, in seconds
    private Transcoder<?> _transcoder = null;
    private boolean _serverGroupRetry = true;
    private boolean _enableExceptionThrowing = false;
    private List<Customizer> _customizers = new ArrayList<>();

    @Inject
    private EVCacheClientPoolManager _poolManager;

    /**
     * Customizers allow post-processing of the Builder. This affords a way for
     * libraries to perform customization.
     */
    @FunctionalInterface
    public interface Customizer {
        void customize(final String cacheName, final Builder builder);
    }

    /** Factory indirection so frameworks can create app-specific Builders. */
    public static class Factory {
        public Builder createInstance(String appName) {
            return Builder.forApp(appName);
        }
    }

    /**
     * Creates a Builder pre-configured with the given app name.
     *
     * @param appName the name of the EVCache App cluster
     * @return a new {@code Builder} with the app name set
     */
    public static Builder forApp(final String appName) {
        return new Builder().setAppName(appName);
    }

    public Builder() {
    }

    /**
     * Applies the key prefix, TTL, retry and exception-throwing settings from
     * the given configuration properties object.
     *
     * @param configurationProperties the source configuration
     * @return this {@code Builder} object
     */
    public Builder withConfigurationProperties(
            final EVCacheClientPoolConfigurationProperties configurationProperties) {
        return this
                .setCachePrefix(configurationProperties.getKeyPrefix())
                .setDefaultTTL(configurationProperties.getTimeToLive())
                .setRetry(configurationProperties.getRetryEnabled())
                .setExceptionThrowing(configurationProperties.getExceptionThrowingEnabled());
    }

    /**
     * The {@code appName} that will be used by this {@code EVCache}.
     * The name is upper-cased; a warning is logged if it does not start with
     * "EVCACHE".
     *
     * @param appName the name of the EVCache App cluster; must not be null
     * @return this {@code Builder} object
     * @throws IllegalArgumentException if {@code appName} is null
     */
    public Builder setAppName(String appName) {
        if (appName == null) throw new IllegalArgumentException("param appName cannot be null.");
        this._appName = appName.toUpperCase(Locale.US);
        if (!_appName.startsWith("EVCACHE")) logger.warn("Make sure the app you are connecting to is EVCache App");
        return this;
    }

    /**
     * Adds {@code cachePrefix} to the key. This ensures there are no cache
     * collisions if the same EVCache app is used across multiple use cases.
     * If the cache is not shared we recommend setting this to
     * <code>null</code>. Default is <code>null</code>.
     *
     * @param cachePrefix the prefix; must not contain a colon (':') character
     * @return this {@code Builder} object
     * @throws IllegalArgumentException if the prefix contains ':'
     */
    public Builder setCachePrefix(String cachePrefix) {
        // BUG FIX: the original validated the previously stored _cachePrefix field
        // instead of the incoming parameter, so an invalid prefix containing ':'
        // was silently accepted on the first call.
        if (cachePrefix != null && cachePrefix.indexOf(':') != -1) throw new IllegalArgumentException(
                "param cacheName cannot contain ':' character.");
        this._cachePrefix = cachePrefix;
        return this;
    }

    /**
     * @deprecated Please use {@link #setCachePrefix(String)}
     * @see #setCachePrefix(String)
     *
     * Adds {@code cacheName} to the key. This ensures there are no
     * cache collisions if the same EVCache app is used across
     * multiple use cases.
     *
     * @param cacheName the prefix to prepend to every key
     * @return this {@code Builder} object
     */
    public Builder setCacheName(String cacheName) {
        return setCachePrefix(cacheName);
    }

    /**
     * The default Time To Live (TTL) for items in {@link EVCache} in
     * seconds. You can override the value by passing the desired TTL with
     * {@link EVCache#set(String, Object, int)} operations.
     *
     * @param ttl the TTL in seconds; default is 900 seconds; must be >= 0
     * @return this {@code Builder} object
     * @throws IllegalArgumentException if {@code ttl} is negative
     */
    public Builder setDefaultTTL(int ttl) {
        if (ttl < 0) throw new IllegalArgumentException("Time to Live cannot be less than 0.");
        this._ttl = ttl;
        return this;
    }

    /**
     * The default Time To Live (TTL) for items in {@link EVCache}. A null
     * duration leaves the current TTL unchanged.
     *
     * @param ttl the TTL as a {@link Duration}; default is 900 seconds
     * @return this {@code Builder} object
     */
    public Builder setDefaultTTL(@Nullable final Duration ttl) {
        if (ttl == null) {
            return this;
        }
        return setDefaultTTL((int) ttl.getSeconds());
    }

    @VisibleForTesting
    Transcoder<?> getTranscoder() {
        return this._transcoder;
    }

    /**
     * The default {@link Transcoder} to be used for serializing and
     * de-serializing items in {@link EVCache}.
     *
     * @param transcoder the transcoder to use
     * @return this {@code Builder} object
     */
    public <T> Builder setTranscoder(Transcoder<T> transcoder) {
        this._transcoder = transcoder;
        return this;
    }

    /**
     * @deprecated Please use {@link #enableRetry()}
     *
     * Will enable retries across Zone (Server Group).
     *
     * @return this {@code Builder} object
     */
    public <T> Builder enableZoneFallback() {
        this._serverGroupRetry = true;
        return this;
    }

    /**
     * Will enable or disable retry across Server Groups for cache misses and
     * exceptions if there are multiple Server Groups for the given EVCache App
     * and data is replicated across them. This ensures the Hit Rate continues
     * to be unaffected whenever a server group loses instances.
     *
     * By default retry is enabled.
     *
     * @param enableRetry whether retries are to be enabled
     * @return this {@code Builder} object
     */
    public Builder setRetry(boolean enableRetry) {
        this._serverGroupRetry = enableRetry;
        return this;
    }

    /**
     * Will enable retry across Server Groups for cache misses and exceptions
     * if there are multiple Server Groups for the given EVCache App and
     * data is replicated across them.
     *
     * By default retry is enabled.
     *
     * @return this {@code Builder} object
     */
    public <T> Builder enableRetry() {
        this._serverGroupRetry = true;
        return this;
    }

    /**
     * Will disable retry across Server Groups. This means if the data is
     * not found in one server group null is returned.
     *
     * @return this {@code Builder} object
     */
    public <T> Builder disableRetry() {
        this._serverGroupRetry = false;
        return this;
    }

    /**
     * @deprecated Please use {@link #disableRetry()}
     *
     * Will disable retry across Zone (Server Group).
     *
     * @return this {@code Builder} object
     */
    public <T> Builder disableZoneFallback() {
        this._serverGroupRetry = false;
        return this;
    }

    /**
     * By default exceptions are not propagated and null values are
     * returned. By enabling exception propagation we return the
     * {@link EVCacheException} whenever the operations experience them.
     *
     * @param enableExceptionThrowing whether exception throwing is to be enabled
     * @return this {@code Builder} object
     */
    public Builder setExceptionThrowing(boolean enableExceptionThrowing) {
        this._enableExceptionThrowing = enableExceptionThrowing;
        return this;
    }

    /**
     * By default exceptions are not propagated and null values are
     * returned. By enabling exception propagation we return the
     * {@link EVCacheException} whenever the operations experience them.
     *
     * @return this {@code Builder} object
     */
    public <T> Builder enableExceptionPropagation() {
        this._enableExceptionThrowing = true;
        return this;
    }

    /**
     * Adds customizers to be applied by {@code customize}. A null list is
     * treated as "no customizers" (the parameter is declared {@code @Nullable}).
     *
     * @param customizers list of {@code Customizer}s, may be null
     * @return this {@code Builder} object
     */
    public Builder addCustomizers(@Nullable final List<Customizer> customizers) {
        // BUG FIX: the parameter is @Nullable but the original called addAll
        // unconditionally, throwing NullPointerException on a null argument.
        if (customizers != null) {
            this._customizers.addAll(customizers);
        }
        return this;
    }

    /**
     * Applies all {@code Customizer}s added through {@code addCustomizers} to
     * this builder, in insertion order.
     *
     * @return this {@code Builder} object
     */
    public Builder customize() {
        _customizers.forEach(this::customizeWith);
        return this;
    }

    /**
     * Customizes this builder with the given {@code customizer}.
     *
     * @param customizer {@code Customizer} to be applied to {@code this}
     * @return this {@code Builder} object
     */
    public Builder customizeWith(final Customizer customizer) {
        customizer.customize(this._appName, this);
        return this;
    }

    /** Extension point: subclasses may return a different EVCache implementation. */
    protected EVCache newImpl(String appName, String cachePrefix, int ttl, Transcoder<?> transcoder, boolean serverGroupRetry, boolean enableExceptionThrowing, EVCacheClientPoolManager poolManager) {
        return new EVCacheImpl(appName, cachePrefix, ttl, transcoder, serverGroupRetry, enableExceptionThrowing, poolManager);
    }

    /**
     * Returns a newly created {@code EVCache} based on the contents of the
     * {@code Builder}. Falls back to the singleton pool manager when none was
     * injected, validates the app name and cache prefix, then applies any
     * registered customizers.
     *
     * @throws IllegalArgumentException if the app name is unset or the cache
     *             prefix contains whitespace
     */
    @SuppressWarnings("deprecation")
    public EVCache build() {
        if (_poolManager == null) {
            _poolManager = EVCacheClientPoolManager.getInstance();
            if (logger.isDebugEnabled()) logger.debug("_poolManager - " + _poolManager + " through getInstance");
        }
        if (_appName == null) {
            throw new IllegalArgumentException("param appName cannot be null.");
        }
        if (_cachePrefix != null) {
            for (int i = 0; i < _cachePrefix.length(); i++) {
                if (Character.isWhitespace(_cachePrefix.charAt(i))) {
                    throw new IllegalArgumentException("Cache Prefix ``" + _cachePrefix + "`` contains invalid character at position " + i );
                }
            }
        }
        customize();
        return newImpl(_appName, _cachePrefix, _ttl, _transcoder, _serverGroupRetry, _enableExceptionThrowing, _poolManager);
    }
}
}
| 68,640
| 44.638963
| 462
|
java
|
EVCache
|
EVCache-master/evcache-core/src/main/java/com/netflix/evcache/EVCacheImplMBean.java
|
package com.netflix.evcache;
/**
 * JMX MBean interface for {@link EVCacheImpl}; exposes the full {@link EVCache}
 * contract via JMX (EVCacheImpl registers itself under
 * "com.netflix.evcache:Group=&lt;appName&gt;,SubGroup=Impl").
 */
public interface EVCacheImplMBean extends EVCache {
}
| 84
| 16
| 51
|
java
|
EVCache
|
EVCache-master/evcache-core/src/main/java/com/netflix/evcache/EVCacheImpl.java
|
package com.netflix.evcache;
import static com.netflix.evcache.util.Sneaky.sneakyThrow;
import java.lang.management.ManagementFactory;
import java.time.Duration;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.function.Function;
import javax.management.MBeanServer;
import javax.management.ObjectName;
import com.netflix.evcache.dto.KeyMapDto;
import com.netflix.evcache.util.EVCacheBulkDataDto;
import com.netflix.evcache.util.KeyHasher;
import com.netflix.evcache.util.RetryCount;
import com.netflix.evcache.util.Sneaky;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.netflix.archaius.api.Property;
import com.netflix.archaius.api.PropertyRepository;
import com.netflix.evcache.EVCacheInMemoryCache.DataNotFoundException;
import com.netflix.evcache.EVCacheLatch.Policy;
import com.netflix.evcache.event.EVCacheEvent;
import com.netflix.evcache.event.EVCacheEventListener;
import com.netflix.evcache.metrics.EVCacheMetricsFactory;
import com.netflix.evcache.operation.EVCacheFuture;
import com.netflix.evcache.operation.EVCacheItem;
import com.netflix.evcache.operation.EVCacheItemMetaData;
import com.netflix.evcache.operation.EVCacheLatchImpl;
import com.netflix.evcache.operation.EVCacheOperationFuture;
import com.netflix.evcache.pool.ChunkTranscoder;
import com.netflix.evcache.pool.EVCacheClient;
import com.netflix.evcache.pool.EVCacheClientPool;
import com.netflix.evcache.pool.EVCacheClientPoolManager;
import com.netflix.evcache.pool.EVCacheClientUtil;
import com.netflix.evcache.pool.EVCacheValue;
import com.netflix.evcache.pool.ServerGroup;
import com.netflix.spectator.api.BasicTag;
import com.netflix.spectator.api.Counter;
import com.netflix.spectator.api.DistributionSummary;
import com.netflix.spectator.api.Tag;
import com.netflix.spectator.api.Timer;
import net.spy.memcached.CachedData;
import net.spy.memcached.transcoders.Transcoder;
import rx.Observable;
import rx.Scheduler;
import rx.Single;
/**
* An implementation of a ephemeral volatile cache.
*
* @author smadappa
* @version 2.0
*/
@SuppressWarnings("unchecked")
@edu.umd.cs.findbugs.annotations.SuppressFBWarnings({ "PRMC_POSSIBLY_REDUNDANT_METHOD_CALLS", "WMI_WRONG_MAP_ITERATOR",
"DB_DUPLICATE_BRANCHES", "REC_CATCH_EXCEPTION","RCN_REDUNDANT_NULLCHECK_OF_NONNULL_VALUE" })
public class EVCacheImpl implements EVCache, EVCacheImplMBean {
private static final Logger log = LoggerFactory.getLogger(EVCacheImpl.class);
// Immutable identity and per-call defaults, set once in the constructor.
private final String _appName;
private final String _cacheName;        // optional key prefix; null when unused
private final String _metricPrefix;
protected final Transcoder<?> _transcoder;  // default transcoder; may be null
private final boolean _zoneFallback;    // constructor-supplied zone fallback flag
private final boolean _throwException;  // constructor-supplied exception propagation flag
private final int _timeToLive; // defaults to 15 minutes
// Mutable: re-resolved whenever the app alias property changes (see constructor).
protected EVCacheClientPool _pool;
// Fast properties that can override the constructor flags at runtime.
private final Property<Boolean> _throwExceptionFP, _zoneFallbackFP, _useInMemoryCache;
private final Property<Boolean> _bulkZoneFallbackFP;
private final Property<Boolean> _bulkPartialZoneFallbackFP;
// Common metric tags (app name, optional prefix) applied to all meters.
private final List<Tag> tags;
private EVCacheInMemoryCache<?> cache;
private EVCacheClientUtil clientUtil = null;
private final Property<Boolean> ignoreTouch;
// Key-hashing configuration (algorithm, encoding, digest/length limits).
private final Property<Boolean> hashKey;
private final Property<String> hashingAlgo;
private final Property<Boolean> shouldEncodeHashKey;
private final Property<Integer> maxDigestBytes;
private final Property<Integer> maxHashLength;
private final EVCacheTranscoder evcacheValueTranscoder;
// Thresholds (ms, presumably — TODO confirm) above which read/write durations are flagged in metrics.
private final Property<Integer> maxReadDuration, maxWriteDuration;
protected final EVCacheClientPoolManager _poolManager;
// Lazily populated, per-metric-id caches of Spectator meters.
private final Map<String, Timer> timerMap = new ConcurrentHashMap<String, Timer>();
private final Map<String, DistributionSummary> distributionSummaryMap = new ConcurrentHashMap<String, DistributionSummary>();
private final Map<String, Counter> counterMap = new ConcurrentHashMap<String, Counter>();
private final Property<Boolean> _eventsUsingLatchFP, autoHashKeys;
private DistributionSummary bulkKeysSize = null;
// Max canonical key length (default 200, below memcached's 250 — see constructor comment).
private final Property<Integer> maxKeyLength;
// App alias property; changing it re-points _pool at the aliased app.
private final Property<String> alias;
private final Property<String> encoderBase;
/**
 * Creates an EVCacheImpl for the given app and cache prefix: validates the
 * prefix, resolves all fast-property configuration, wires the client pool
 * (re-resolving it when the app alias property changes), pings the servers
 * and registers this instance as a JMX MBean.
 *
 * @param appName            name of the EVCache App cluster
 * @param cacheName          optional key prefix; must not contain whitespace; may be null
 * @param timeToLive         default TTL in seconds
 * @param transcoder         default transcoder for (de)serialization; may be null
 * @param enableZoneFallback whether retries on another server group are allowed
 * @param throwException     whether exceptions are propagated to callers
 * @param poolManager        supplies client pools and property configuration
 * @throws IllegalArgumentException if cacheName contains whitespace
 */
EVCacheImpl(String appName, String cacheName, int timeToLive, Transcoder<?> transcoder, boolean enableZoneFallback,
        boolean throwException, EVCacheClientPoolManager poolManager) {
    this._appName = appName;
    this._cacheName = cacheName;
    // Reject whitespace in the prefix up front — it would produce invalid memcached keys.
    if (_cacheName != null && _cacheName.length() > 0) {
        for (int i = 0; i < cacheName.length(); i++) {
            if (Character.isWhitespace(cacheName.charAt(i))) {
                throw new IllegalArgumentException("Cache Prefix ``" + cacheName + "`` contains invalid character at position " + i );
            }
        }
    }
    this._timeToLive = timeToLive;
    this._transcoder = transcoder;
    this._zoneFallback = enableZoneFallback;
    this._throwException = throwException;
    // Common metric tags: app name plus (when present) the cache prefix.
    tags = new ArrayList<Tag>(3);
    EVCacheMetricsFactory.getInstance().addAppNameTags(tags, _appName);
    if (_cacheName != null && _cacheName.length() > 0) tags.add(new BasicTag(EVCacheMetricsFactory.PREFIX, _cacheName));
    final String _metricName = (_cacheName == null) ? _appName : _appName + "." + _cacheName;
    _metricPrefix = _appName + "-";
    this._poolManager = poolManager;
    this._pool = poolManager.getEVCacheClientPool(_appName);
    // Property lookups fall back from the most specific name (app.prefix) to
    // the app-wide and then global default.
    final PropertyRepository propertyRepository = poolManager.getEVCacheConfig().getPropertyRepository();
    _throwExceptionFP = propertyRepository.get(_metricName + ".throw.exception", Boolean.class).orElseGet(_appName + ".throw.exception").orElse(false);
    _zoneFallbackFP = propertyRepository.get(_metricName + ".fallback.zone", Boolean.class).orElseGet(_appName + ".fallback.zone").orElse(true);
    _bulkZoneFallbackFP = propertyRepository.get(_appName + ".bulk.fallback.zone", Boolean.class).orElse(true);
    _bulkPartialZoneFallbackFP = propertyRepository.get(_appName+ ".bulk.partial.fallback.zone", Boolean.class).orElse(true);
    if (_cacheName == null) {
        _useInMemoryCache = propertyRepository.get(_appName + ".use.inmemory.cache", Boolean.class).orElseGet("evcache.use.inmemory.cache").orElse(false);
    } else {
        _useInMemoryCache = propertyRepository.get(_appName + "." + _cacheName + ".use.inmemory.cache", Boolean.class).orElseGet(_appName + ".use.inmemory.cache").orElseGet("evcache.use.inmemory.cache").orElse(false);
    }
    _eventsUsingLatchFP = propertyRepository.get(_appName + ".events.using.latch", Boolean.class).orElseGet("evcache.events.using.latch").orElse(false);
    // NOTE(review): the read-duration property falls back to the *write* duration
    // global key ("evcache.max.write.duration.metric") — looks like a copy/paste
    // in the original; kept as-is since callers may rely on it. TODO confirm upstream.
    maxReadDuration = propertyRepository.get(_appName + ".max.read.duration.metric", Integer.class).orElseGet("evcache.max.write.duration.metric").orElse(20);
    maxWriteDuration = propertyRepository.get(_appName + ".max.write.duration.metric", Integer.class).orElseGet("evcache.max.write.duration.metric").orElse(50);
    ignoreTouch = propertyRepository.get(appName + ".ignore.touch", Boolean.class).orElse(false);
    // Key hashing configuration; see getEVCacheKey for how these combine.
    this.hashKey = propertyRepository.get(appName + ".hash.key", Boolean.class).orElse(false);
    this.hashingAlgo = propertyRepository.get(appName + ".hash.algo", String.class).orElse("siphash24");
    this.shouldEncodeHashKey = propertyRepository.get(appName + ".hash.encode", Boolean.class).orElse(true);
    this.maxDigestBytes = propertyRepository.get(appName + ".max.digest.bytes", Integer.class).orElse(-1);
    this.maxHashLength = propertyRepository.get(appName + ".max.hash.length", Integer.class).orElse(-1);
    this.encoderBase = propertyRepository.get(appName + ".hash.encoder", String.class).orElse("base64");
    this.autoHashKeys = propertyRepository.get(_appName + ".auto.hash.keys", Boolean.class).orElseGet("evcache.auto.hash.keys").orElse(false);
    // Compression disabled for the value transcoder (threshold set unreachable).
    this.evcacheValueTranscoder = new EVCacheTranscoder();
    evcacheValueTranscoder.setCompressionThreshold(Integer.MAX_VALUE);
    // default max key length is 200, instead of using what is defined in MemcachedClientIF.MAX_KEY_LENGTH (250). This is to accommodate
    // auto key prepend with appname for duet feature.
    this.maxKeyLength = propertyRepository.get(_appName + ".max.key.length", Integer.class).orElseGet("evcache.max.key.length").orElse(200);
    // if alias changes, refresh my pool to point to the correct alias app
    this.alias = propertyRepository.get("EVCacheClientPoolManager." + appName + ".alias", String.class);
    this.alias.subscribe(i -> {
        this._pool = poolManager.getEVCacheClientPool(_appName);
    });
    _pool.pingServers();
    setupMonitoring();
}
/**
 * Registers this instance as a JMX MBean under
 * "com.netflix.evcache:Group=&lt;appName&gt;,SubGroup=Impl". An already
 * registered MBean with the same name is unregistered first, so the latest
 * instance always wins. Any failure is logged at debug level and swallowed —
 * monitoring must never prevent construction.
 */
private void setupMonitoring() {
    try {
        final ObjectName beanName =
                ObjectName.getInstance("com.netflix.evcache:Group=" + _appName + ",SubGroup=Impl");
        final MBeanServer server = ManagementFactory.getPlatformMBeanServer();
        if (server.isRegistered(beanName)) {
            if (log.isDebugEnabled()) log.debug("MBEAN with name " + beanName + " has been registered. Will unregister the previous instance and register a new one.");
            server.unregisterMBean(beanName);
        }
        server.registerMBean(this, beanName);
    } catch (Exception e) {
        if (log.isDebugEnabled()) log.debug("Exception", e);
    }
}
/**
 * Builds the {@link EVCacheKey} for a client-supplied key: validates it (non-empty,
 * no whitespace), forms the canonical key by prepending the cache prefix (if any),
 * enforces the configured max key length, and decides whether app-level hashing
 * applies (explicitly enabled, or auto-enabled for over-length keys).
 *
 * @param key the client key
 * @return the wrapped key
 * @throws NullPointerException     if the key is null or empty
 * @throws IllegalArgumentException if the key contains whitespace, or is too
 *                                  long while hashing is disabled
 */
EVCacheKey getEVCacheKey(final String key) {
    if (key == null || key.length() == 0) throw new NullPointerException("Key cannot be null or empty");
    int pos = 0;
    for (final char ch : key.toCharArray()) {
        if (Character.isWhitespace(ch)) {
            throw new IllegalArgumentException("key ``" + key + "`` contains invalid character at position " + pos );
        }
        pos++;
    }

    // Canonical key = "<prefix>:<key>" when a prefix is configured, else the key itself.
    final String canonicalKey = (this._cacheName == null)
            ? key
            : new StringBuilder(_cacheName.length() + 1 + key.length())
                    .append(_cacheName).append(':').append(key).toString();

    final boolean overLength = canonicalKey.length() > this.maxKeyLength.get();
    if (overLength && !hashKey.get() && !autoHashKeys.get()) {
        throw new IllegalArgumentException("Key is too long (maxlen = " + this.maxKeyLength.get() + ')');
    }

    // Hash at the app level when explicitly enabled, or when the key is too long
    // and auto-hashing is on.
    final boolean hashAtAppLevel = hashKey.get() || (overLength && autoHashKeys.get());
    final EVCacheKey evcKey = new EVCacheKey(_appName, key, canonicalKey,
            hashAtAppLevel ? KeyHasher.getHashingAlgorithmFromString(hashingAlgo.get()) : null,
            this.shouldEncodeHashKey, this.maxDigestBytes, this.maxHashLength, this.encoderBase.get());
    if (log.isDebugEnabled() && shouldLog()) log.debug("Key : " + key + "; EVCacheKey : " + evcKey);
    return evcKey;
}
private boolean hasZoneFallbackForBulk() {
if (!_pool.supportsFallback()) return false;
if (!_bulkZoneFallbackFP.get()) return false;
return _zoneFallback;
}
private boolean hasZoneFallback() {
if (!_pool.supportsFallback()) return false;
if (!_zoneFallbackFP.get().booleanValue()) return false;
return _zoneFallback;
}
    /** Whether verbose per-operation logging is enabled for this app (delegates to the pool manager). */
    private boolean shouldLog() {
        return _poolManager.shouldLog(_appName);
    }
    /** Whether failures should surface as exceptions: either the constructor flag
     *  or the runtime fast property enables it. */
    private boolean doThrowException() {
        return (_throwException || _throwExceptionFP.get().booleanValue());
    }
    /** Returns the globally registered EVCache event listeners (delegates to the pool manager). */
    private List<EVCacheEventListener> getEVCacheEventListeners() {
        return _poolManager.getEVCacheEventListeners();
    }
private EVCacheEvent createEVCacheEvent(Collection<EVCacheClient> clients, Call call) {
final List<EVCacheEventListener> evcacheEventListenerList = getEVCacheEventListeners();
if (evcacheEventListenerList == null || evcacheEventListenerList.size() == 0) return null;
final EVCacheEvent event = new EVCacheEvent(call, _appName, _cacheName, _pool);
event.setClients(clients);
return event;
}
private boolean shouldThrottle(EVCacheEvent event) {
for (EVCacheEventListener evcacheEventListener : getEVCacheEventListeners()) {
try {
if (evcacheEventListener.onThrottle(event)) {
return true;
}
} catch(Exception e) {
incrementEventFailure("throttle", event.getCall(), evcacheEventListener.getClass().getName());
if (log.isDebugEnabled() && shouldLog()) log.debug("Exception executing throttle event on listener " + evcacheEventListener + " for event " + event, e);
}
}
return false;
}
private void startEvent(EVCacheEvent event) {
final List<EVCacheEventListener> evcacheEventListenerList = getEVCacheEventListeners();
for (EVCacheEventListener evcacheEventListener : evcacheEventListenerList) {
try {
evcacheEventListener.onStart(event);
} catch(Exception e) {
incrementEventFailure("start", event.getCall(), evcacheEventListener.getClass().getName());
if (log.isDebugEnabled() && shouldLog()) log.debug("Exception executing start event on listener " + evcacheEventListener + " for event " + event, e);
}
}
}
private void endEvent(EVCacheEvent event) {
event.setEndTime(System.currentTimeMillis());
final List<EVCacheEventListener> evcacheEventListenerList = getEVCacheEventListeners();
for (EVCacheEventListener evcacheEventListener : evcacheEventListenerList) {
try {
evcacheEventListener.onComplete(event);
} catch(Exception e) {
incrementEventFailure("end", event.getCall(), evcacheEventListener.getClass().getName());
if (log.isDebugEnabled() && shouldLog()) log.debug("Exception executing end event on listener " + evcacheEventListener + " for event " + event, e);
}
}
}
private void eventError(EVCacheEvent event, Throwable t) {
event.setEndTime(System.currentTimeMillis());
final List<EVCacheEventListener> evcacheEventListenerList = getEVCacheEventListeners();
for (EVCacheEventListener evcacheEventListener : evcacheEventListenerList) {
try {
evcacheEventListener.onError(event, t);
} catch(Exception e) {
incrementEventFailure("error", event.getCall(), evcacheEventListener.getClass().getName());
if (log.isDebugEnabled() && shouldLog()) log.debug("Exception executing error event on listener " + evcacheEventListener + " for event " + event, e);
}
}
}
    /**
     * Lazily creates and returns the near (in-memory) cache for this client.
     * NOTE(review): the null-check-then-assign is not synchronized — two threads
     * racing here could each create a cache and one instance would be discarded;
     * presumably benign but worth confirming.
     */
    private <T> EVCacheInMemoryCache<T> getInMemoryCache(Transcoder<T> tc) {
        if (cache == null) cache = _poolManager.createInMemoryCache(tc, this);
        return (EVCacheInMemoryCache<T>) cache;
    }
    /** Fetches the value for {@code key} using this client's default transcoder. */
    public <T> T get(String key) throws EVCacheException {
        return this.get(key, (Transcoder<T>) _transcoder);
    }
    /**
     * Increments the fast-fail counter for the given reason and call, tagging the
     * metric with the operation name and whether it was a read or a write.
     * Counters are cached in {@code counterMap} keyed by reason+call so the tag
     * list is built only once per combination.
     */
    private void incrementFastFail(String metric, Call call) {
        final String name = metric + call.name();
        Counter counter = counterMap.get(name);
        if(counter == null) {
            final List<Tag> tagList = new ArrayList<Tag>(tags.size() + 3);
            tagList.addAll(tags);
            if(call != null) {
                final String operation = call.name();
                final String operationType;
                // read-style calls are tagged READ; everything else is WRITE
                switch(call) {
                case GET:
                case GET_AND_TOUCH:
                case GETL:
                case BULK:
                case COMPLETABLE_FUTURE_GET:
                case COMPLETABLE_FUTURE_GET_BULK:
                case ASYNC_GET:
                    operationType = EVCacheMetricsFactory.READ;
                    break;
                default :
                    operationType = EVCacheMetricsFactory.WRITE;
                }
                if(operation != null) tagList.add(new BasicTag(EVCacheMetricsFactory.CALL_TAG, operation));
                if(operationType != null) tagList.add(new BasicTag(EVCacheMetricsFactory.CALL_TYPE_TAG, operationType));
            }
            tagList.add(new BasicTag(EVCacheMetricsFactory.FAILURE_REASON, metric));
            counter = EVCacheMetricsFactory.getInstance().getCounter(EVCacheMetricsFactory.FAST_FAIL, tagList);
            counterMap.put(name, counter);
        }
        counter.increment();
    }
private void incrementEventFailure(String metric, Call call, String event) {
final String name = metric + call.name() + event;
Counter counter = counterMap.get(name);
if(counter == null) {
final List<Tag> tagList = new ArrayList<Tag>(tags.size() + 3);
tagList.addAll(tags);
if(call != null) {
final String operation = call.name();
final String operationType;
switch(call) {
case GET:
case GET_AND_TOUCH:
case GETL:
case BULK:
case ASYNC_GET:
operationType = EVCacheMetricsFactory.READ;
break;
default :
operationType = EVCacheMetricsFactory.WRITE;
}
if(operation != null) tagList.add(new BasicTag(EVCacheMetricsFactory.CALL_TAG, operation));
if(operationType != null) tagList.add(new BasicTag(EVCacheMetricsFactory.CALL_TYPE_TAG, operationType));
}
tagList.add(new BasicTag(EVCacheMetricsFactory.EVENT_STAGE, metric));
tagList.add(new BasicTag(EVCacheMetricsFactory.EVENT, event));
counter = EVCacheMetricsFactory.getInstance().getCounter(EVCacheMetricsFactory.INTERNAL_EVENT_FAIL, tagList);
counterMap.put(name, counter);
}
counter.increment();
}
private void incrementFailure(String metric, String operation, String operationType) {
final String name = metric + operation;
Counter counter = counterMap.get(name);
if(counter == null) {
final List<Tag> tagList = new ArrayList<Tag>(tags.size() + 3);
tagList.addAll(tags);
if(operation != null) tagList.add(new BasicTag(EVCacheMetricsFactory.CALL_TAG, operation));
if(operationType != null) tagList.add(new BasicTag(EVCacheMetricsFactory.CALL_TYPE_TAG, operationType));
tagList.add(new BasicTag(EVCacheMetricsFactory.FAILURE_REASON, metric));
counter = EVCacheMetricsFactory.getInstance().getCounter(EVCacheMetricsFactory.INTERNAL_FAIL, tagList);
counterMap.put(name, counter);
}
counter.increment();
}
public <T> T get(String key, Transcoder<T> tc) throws EVCacheException {
if (null == key) throw new IllegalArgumentException("Key cannot be null");
final EVCacheKey evcKey = getEVCacheKey(key);
if (_useInMemoryCache.get()) {
T value = null;
try {
final Transcoder<T> transcoder = (tc == null) ? ((_transcoder == null) ? (Transcoder<T>) _pool.getEVCacheClientForRead().getTranscoder() : (Transcoder<T>) _transcoder) : tc;
value = (T) getInMemoryCache(transcoder).get(evcKey);
} catch (ExecutionException e) {
final boolean throwExc = doThrowException();
if(throwExc) {
if(e.getCause() instanceof DataNotFoundException) {
return null;
}
if(e.getCause() instanceof EVCacheException) {
if (log.isDebugEnabled() && shouldLog()) log.debug("ExecutionException while getting data from InMemory Cache", e);
throw (EVCacheException)e.getCause();
}
throw new EVCacheException("ExecutionException", e);
}
}
if (log.isDebugEnabled() && shouldLog()) log.debug("Value retrieved from inmemory cache for APP " + _appName + ", key : " + evcKey + (log.isTraceEnabled() ? "; value : " + value : ""));
if (value != null) {
if (log.isDebugEnabled() && shouldLog()) log.debug("Value retrieved from inmemory cache for APP " + _appName + ", key : " + evcKey + (log.isTraceEnabled() ? "; value : " + value : ""));
return value;
} else {
if (log.isInfoEnabled() && shouldLog()) log.info("Value not_found in inmemory cache for APP " + _appName + ", key : " + evcKey + "; value : " + value );
}
}
return doGet(evcKey, tc);
}
    /**
     * Core synchronous read: tries the primary read client, then (when zone
     * fallback is enabled) every other server group until a value is found.
     * Records the operation timer, fires start/end/error events, and maps
     * timeouts to an {@link EVCacheException} with tuning advice.
     *
     * @return the decoded value, or null on miss / suppressed failure
     */
    <T> T doGet(EVCacheKey evcKey , Transcoder<T> tc) throws EVCacheException {
        final boolean throwExc = doThrowException();
        EVCacheClient client = _pool.getEVCacheClientForRead();
        if (client == null) {
            incrementFastFail(EVCacheMetricsFactory.NULL_CLIENT, Call.GET);
            if (throwExc) throw new EVCacheException("Could not find a client to get the data APP " + _appName);
            return null; // Fast failure
        }
        // event is null when no listeners are registered; throttling only applies with listeners
        final EVCacheEvent event = createEVCacheEvent(Collections.singletonList(client), Call.GET);
        if (event != null) {
            event.setEVCacheKeys(Arrays.asList(evcKey));
            try {
                if (shouldThrottle(event)) {
                    incrementFastFail(EVCacheMetricsFactory.THROTTLED, Call.GET);
                    if (throwExc) throw new EVCacheException("Request Throttled for app " + _appName + " & key " + evcKey);
                    return null;
                }
            } catch(EVCacheException ex) {
                if(throwExc) throw ex;
                incrementFastFail(EVCacheMetricsFactory.THROTTLED, Call.GET);
                return null;
            }
            startEvent(event);
        }
        final long start = EVCacheMetricsFactory.getInstance().getRegistry().clock().wallTime();
        String status = EVCacheMetricsFactory.SUCCESS;
        String cacheOperation = EVCacheMetricsFactory.YES;
        int tries = 1;
        try {
            final boolean hasZF = hasZoneFallback();
            // while fallbacks remain, suppress exceptions so the next zone can be tried
            boolean throwEx = hasZF ? false : throwExc;
            T data = getData(client, evcKey, tc, throwEx, hasZF);
            if (data == null && hasZF) {
                final List<EVCacheClient> fbClients = _pool.getEVCacheClientsForReadExcluding(client.getServerGroup());
                if (fbClients != null && !fbClients.isEmpty()) {
                    for (int i = 0; i < fbClients.size(); i++) {
                        final EVCacheClient fbClient = fbClients.get(i);
                        // last fallback attempt: restore the caller's exception preference
                        if(i >= fbClients.size() - 1) throwEx = throwExc;
                        if (event != null) {
                            try {
                                if (shouldThrottle(event)) {
                                    status = EVCacheMetricsFactory.THROTTLED;
                                    if (throwExc) throw new EVCacheException("Request Throttled for app " + _appName + " & key " + evcKey);
                                    return null;
                                }
                            } catch(EVCacheException ex) {
                                if(throwExc) throw ex;
                                status = EVCacheMetricsFactory.THROTTLED;
                                return null;
                            }
                        }
                        tries++;
                        data = getData(fbClient, evcKey, tc, throwEx, (i < fbClients.size() - 1) ? true : false);
                        if (log.isDebugEnabled() && shouldLog()) log.debug("Retry for APP " + _appName + ", key [" + evcKey + (log.isTraceEnabled() ? "], Value [" + data : "") + "], ServerGroup : " + fbClient.getServerGroup());
                        if (data != null) {
                            // remember which client served the hit so the timer is tagged correctly
                            client = fbClient;
                            break;
                        }
                    }
                }
            }
            if (data != null) {
                if (event != null) event.setAttribute("status", "GHIT");
            } else {
                cacheOperation = EVCacheMetricsFactory.NO;
                if (event != null) event.setAttribute("status", "GMISS");
                if (log.isInfoEnabled() && shouldLog()) log.info("GET : APP " + _appName + " ; cache miss for key : " + evcKey);
            }
            if (log.isDebugEnabled() && shouldLog()) log.debug("GET : APP " + _appName + ", key [" + evcKey + (log.isTraceEnabled() ? "], Value [" + data : "") + "], ServerGroup : " + client.getServerGroup());
            if (event != null) endEvent(event);
            return data;
        } catch (net.spy.memcached.internal.CheckedOperationTimeoutException ex) {
            status = EVCacheMetricsFactory.TIMEOUT;
            if (event != null) {
                event.setStatus(status);
                eventError(event, ex);
            }
            if (!throwExc) return null;
            throw new EVCacheException("CheckedOperationTimeoutException getting data for APP " + _appName + ", key = " + evcKey
                    + ".\nYou can set the following property to increase the timeout " + _appName
                    + ".EVCacheClientPool.readTimeout=<timeout in milli-seconds>", ex);
        } catch (Exception ex) {
            status = EVCacheMetricsFactory.ERROR;
            if (event != null) {
                event.setStatus(status);
                eventError(event, ex);
            }
            if (!throwExc) return null;
            throw new EVCacheException("Exception getting data for APP " + _appName + ", key = " + evcKey, ex);
        } finally {
            // timer is recorded on every path, tagged with hit/miss, status and try count
            final long duration = EVCacheMetricsFactory.getInstance().getRegistry().clock().wallTime()- start;
            getTimer(Call.GET.name(), EVCacheMetricsFactory.READ, cacheOperation, status, tries, maxReadDuration.get().intValue(), client.getServerGroup()).record(duration, TimeUnit.MILLISECONDS);
            if (log.isDebugEnabled() && shouldLog()) log.debug("GET : APP " + _appName + ", Took " + duration + " milliSec.");
        }
    }
public <T> CompletableFuture<T> getAsync(String key, Transcoder<T> tc) {
if (null == key) throw new IllegalArgumentException("Key cannot be null");
final EVCacheKey evcKey = getEVCacheKey(key);
return getAsyncInMemory(evcKey, tc)
.thenCompose(data -> data == null
? doAsyncGet(evcKey, tc)
: CompletableFuture.completedFuture(data));
}
    /** Asynchronously fetches the value for {@code key} using this client's default transcoder. */
    public <T> CompletableFuture<T> getAsync(String key) {
        return this.getAsync(key, (Transcoder<T>) _transcoder);
    }
    /**
     * Synchronous near-cache lookup. Returns the cached value, or null when the
     * in-memory cache is disabled or has no entry. Lookup failures are routed
     * through {@link #handleInMemoryException} (may rethrow or return null
     * depending on the throw-exception setting).
     */
    private <T> T getInMemory(EVCacheKey evcKey, Transcoder<T> tc) throws Exception {
        if (_useInMemoryCache.get()) {
            try {
                // transcoder precedence: explicit arg > app default > read client's transcoder
                final Transcoder<T> transcoder = (tc == null) ? ((_transcoder == null) ? (Transcoder<T>) _pool.getEVCacheClientForRead().getTranscoder() : (Transcoder<T>) _transcoder) : tc;
                T value = getInMemoryCache(transcoder).get(evcKey);
                if (value != null) {
                    if (log.isDebugEnabled() && shouldLog()) log.debug("Value retrieved from in-memory cache for APP " + _appName + ", key : " + evcKey + (log.isTraceEnabled() ? "; value : " + value : ""));
                    return value;
                } else {
                    if (log.isInfoEnabled() && shouldLog()) log.info("Value not_found in in-memory cache for APP " + _appName + ", key : " + evcKey + "; value : " + value );
                }
            } catch (Exception e) {
                return handleInMemoryException(e);
            }
        }
        return null;
    }
private <T> CompletableFuture<T> getAsyncInMemory(EVCacheKey evcKey, Transcoder<T> tc) {
CompletableFuture<T> promise = new CompletableFuture<>();
try {
if(log.isDebugEnabled() && shouldLog()) {
log.debug("Retrieving value from memory {} ", evcKey.getKey());
}
T t = getInMemory(evcKey, tc);
promise.complete(t);
} catch (Exception ex) {
promise.completeExceptionally(ex);
}
return promise;
}
    /**
     * Maps a near-cache lookup failure to the configured error behavior.
     * When exceptions are enabled: a {@code DataNotFoundException} cause is a
     * normal miss (null), an {@code EVCacheException} cause is rethrown as-is,
     * anything else is wrapped. When disabled, always returns null.
     */
    private <T> T handleInMemoryException(Exception e) throws Exception {
        final boolean throwExc = doThrowException();
        if(throwExc) {
            if(e.getCause() instanceof DataNotFoundException) {
                if (log.isDebugEnabled() && shouldLog()) log.debug("DataNotFoundException while getting data from InMemory Cache", e);
                return null;
            }
            if(e.getCause() instanceof EVCacheException) {
                if (log.isDebugEnabled() && shouldLog()) log.debug("EVCacheException while getting data from InMemory Cache", e);
                // rethrows the original wrapper; callers unwrap the cause
                throw e;
            } else {
                throw new EVCacheException("ExecutionException", e);
            }
        } else {
            if (log.isDebugEnabled() && shouldLog()) log.debug("Throws Exception is false and returning null in this case");
            return null;
        }
    }
    /**
     * Core asynchronous read: resolves a read client, fires the start event, then
     * reads from the primary client and chains zone-fallback retries on a miss.
     * Metrics and events are finished in the terminal {@code handle} stage.
     *
     * NOTE(review): {@code throwEx} is {@code !hasZF && throwExc}, so with zone
     * fallback enabled failures are always swallowed (returned as null) even when
     * the caller asked for exceptions — looks intentional but confirm against the
     * sync doGet, which restores throwExc on the last fallback.
     */
    private <T> CompletableFuture<T> doAsyncGet(EVCacheKey evcKey, Transcoder<T> tc) {
        CompletableFuture<T> errorFuture = new CompletableFuture<>();
        final boolean throwExc = doThrowException();
        //Building the client
        EVCacheClient client = buildEvCacheClient(throwExc, Call.COMPLETABLE_FUTURE_GET, errorFuture);
        if (errorFuture.isCompletedExceptionally() || client == null) {
            if (client == null ) {
                if (log.isDebugEnabled() && shouldLog()) log.debug("client is null");
                errorFuture.complete(null);
            }
            return errorFuture;
        }
        if (log.isDebugEnabled() && shouldLog()) log.debug("Completed Building the client");
        //Building the start event
        EVCacheEvent event = buildAndStartEvent(client,
                Collections.singletonList(evcKey),
                throwExc,
                errorFuture,
                Call.COMPLETABLE_FUTURE_GET);
        if (errorFuture.isCompletedExceptionally()) {
            if (log.isDebugEnabled() && shouldLog()) log.debug("Error while building and starting the event");
            return errorFuture;
        }
        // errorFuture is no longer needed on the happy path; cancel it
        errorFuture.cancel(false);
        final long start = EVCacheMetricsFactory.getInstance().getRegistry().clock().wallTime();
        StringBuilder status = new StringBuilder(EVCacheMetricsFactory.SUCCESS);
        StringBuilder cacheOperation = new StringBuilder(EVCacheMetricsFactory.YES);
        final boolean hasZF = hasZoneFallback();
        RetryCount retryCount = new RetryCount();
        boolean throwEx = !hasZF && throwExc;
        return getAsyncData(client, evcKey, tc)
                .thenCompose(data -> handleRetry(data, evcKey, tc, client, hasZF, throwExc, event, retryCount))
                .handle((data, ex) -> {
                    if (ex != null) {
                        // failure path: record miss + timing, surface or swallow the error
                        handleMissData(event, evcKey, client, cacheOperation);
                        handleFinally(data, status, retryCount.get(), client, cacheOperation, start, Call.COMPLETABLE_FUTURE_GET);
                        handleException(ex, event);
                        if (throwEx) {
                            throw new RuntimeException(ex);
                        } else {
                            return null;
                        }
                    } else {
                        // success path (data may still be null => miss handled in handleData/endEvent)
                        handleFinally(data, status, retryCount.get(), client, cacheOperation, start, Call.COMPLETABLE_FUTURE_GET);
                        handleData(data, event, evcKey, client, cacheOperation);
                        return data;
                    }
                });
    }
private <T> EVCacheClient buildEvCacheClient(boolean throwExc, Call callType, CompletableFuture<T> completableFuture) {
EVCacheClient client = _pool.getEVCacheClientForRead();
if (client == null) {
incrementFastFail(EVCacheMetricsFactory.NULL_CLIENT, callType);
if (throwExc) completableFuture.completeExceptionally(new EVCacheException("Could not find a client to get the data APP " + _appName));
return null;
}
return client;
}
private <T> EVCacheEvent buildAndStartEvent(EVCacheClient client,
List<EVCacheKey> evcKeys,
boolean throwExc,
CompletableFuture<T> completableFuture,
Call callType) {
EVCacheEvent event = createEVCacheEvent(Collections.singletonList(client), callType);
if (event != null) {
event.setEVCacheKeys(evcKeys);
if (shouldThrottle(event)) {
incrementFastFail(EVCacheMetricsFactory.THROTTLED, callType);
if (throwExc)
completableFuture.completeExceptionally(new EVCacheException("Request Throttled for app " + _appName + " & keys " + evcKeys));
return null;
}
startEvent(event);
return event;
}
return null;
}
    /**
     * Terminal metrics for the async bulk path: records the request key count in
     * a distribution summary and the call duration in the operation timer.
     * NOTE(review): {@code bulkKeysSize} lazy init is unsynchronized — a race
     * could create two summaries; presumably benign, confirm.
     */
    private <T> void handleBulkFinally(StringBuilder status,
                                       RetryCount tries,
                                       EVCacheClient client,
                                       StringBuilder cacheOperation,
                                       Collection<String> keys,
                                       Long start) {
        final long duration = EVCacheMetricsFactory.getInstance().getRegistry().clock().wallTime()- start;
        if (bulkKeysSize == null) {
            final List<Tag> tagList = new ArrayList<Tag>(4);
            tagList.addAll(tags);
            tagList.add(new BasicTag(EVCacheMetricsFactory.CALL_TAG, EVCacheMetricsFactory.BULK_OPERATION));
            tagList.add(new BasicTag(EVCacheMetricsFactory.CALL_TYPE_TAG, EVCacheMetricsFactory.READ));
            bulkKeysSize = EVCacheMetricsFactory.getInstance().getDistributionSummary(EVCacheMetricsFactory.OVERALL_KEYS_SIZE, tagList);
        }
        bulkKeysSize.record(keys.size());
        getTimer(Call.COMPLETABLE_FUTURE_GET_BULK.name(),
                EVCacheMetricsFactory.READ,
                cacheOperation.toString(),
                status.toString(),
                tries.get(),
                maxReadDuration.get(),
                client.getServerGroup())
                .record(duration, TimeUnit.MILLISECONDS);
        if (log.isDebugEnabled() && shouldLog()) log.debug("ASYNC GET BULK : APP "
                + _appName + " Took " + duration + " milliSec to get the value for key " + keys);
    }
private <T> T handleFinally(T data,
StringBuilder status,
Integer tries,
EVCacheClient client,
StringBuilder cacheOperation,
Long start,
Call call) {
final long duration = EVCacheMetricsFactory.getInstance().getRegistry().clock().wallTime() - start;
getTimer(call.name(),
EVCacheMetricsFactory.READ,
cacheOperation.toString(),
status.toString(),
tries,
maxReadDuration.get(),
client.getServerGroup())
.record(duration, TimeUnit.MILLISECONDS);
if (log.isDebugEnabled() && shouldLog())
log.debug("GET ASYNC : APP " + _appName + ", Took " + duration + " milliSec.");
return data;
}
    /**
     * Classifies an async-path failure for event reporting. Unwraps up to three
     * levels of cause (RuntimeException -> ExecutionException -> spymemcached
     * CheckedOperationTimeoutException): a timeout marks the event TIMEOUT, and
     * any other ExecutionException chain ends without marking ERROR (early
     * return); everything else marks the event ERROR.
     */
    private void handleException(Throwable ex, EVCacheEvent event) {
        if (ex.getCause() instanceof RuntimeException) {
            if (log.isDebugEnabled() && shouldLog()) {
                log.debug("Handling exception with cause ", ex.getCause());
            }
            Throwable runTimeCause = ex.getCause();
            if (runTimeCause.getCause() instanceof ExecutionException) {
                if (log.isDebugEnabled() && shouldLog()) {
                    log.debug("Handling ExecutionException with cause ",runTimeCause.getCause());
                }
                Throwable executionExceptionCause = runTimeCause.getCause();
                if (executionExceptionCause.getCause() instanceof net.spy.memcached.internal.CheckedOperationTimeoutException) {
                    if (event != null) {
                        if (log.isDebugEnabled() && shouldLog()) {
                            log.debug("Setting Status as Timeout");
                        }
                        event.setStatus(EVCacheMetricsFactory.TIMEOUT);
                        eventError(event, ex);
                    }
                }
                // non-timeout ExecutionException: deliberately not marked ERROR
                return;
            }
        }
        if (event != null) {
            if (log.isDebugEnabled() && shouldLog()) {
                log.debug("Setting event as Error");
            }
            event.setStatus(EVCacheMetricsFactory.ERROR);
            eventError(event, ex);
        }
    }
private <T> void handleMissData(EVCacheEvent event, EVCacheKey evcKey, EVCacheClient client, StringBuilder cacheOperation) {
cacheOperation.replace(0, cacheOperation.length(), EVCacheMetricsFactory.NO);
if (event != null) event.setAttribute("status", "GMISS");
if (log.isInfoEnabled() && shouldLog())
log.info("GET ASYNC : APP " + _appName + " ; cache miss for key : " + evcKey);
endEvent(null, evcKey, client, event);
}
private <T> void handleData(T data, EVCacheEvent event, EVCacheKey evcKey, EVCacheClient client, StringBuilder cacheOperation) {
if (event != null) event.setAttribute("status", "GHIT");
endEvent(data, evcKey, client, event);
}
private <T> void endEvent(T data, EVCacheKey evcKey, EVCacheClient client, EVCacheEvent event) {
if (log.isDebugEnabled() && shouldLog())
log.debug("COMPLETABLE FUTURE GET : APP " + _appName + ", key [" + evcKey + (log.isTraceEnabled() ? "], Value [" + data : "") + "], ServerGroup : " + client.getServerGroup());
if (event != null) endEvent(event);
}
    /**
     * Async zone-fallback loop, expressed as recursion over the fallback client
     * list: tries {@code fbClients[fbClientIndex]}; on a non-null result the
     * chain completes, on a null result or an exception it recurses to the next
     * client. Completes with null when the list is exhausted. Each attempt
     * increments {@code retryCount}.
     */
    private <T> CompletableFuture<T> handleRetries(List<EVCacheClient> fbClients,
                                                   int fbClientIndex,
                                                   EVCacheEvent event,
                                                   EVCacheKey evcKey,
                                                   Transcoder<T> tc,
                                                   RetryCount retryCount) {
        // base case: no more fallback zones to try
        if (fbClientIndex >= fbClients.size()) {
            return CompletableFuture.completedFuture(null);
        }
        if (log.isDebugEnabled() && shouldLog()) {
            log.debug("searching key in the server {}", fbClients.get(fbClientIndex).getServerGroup().getName());
        }
        CompletableFuture<T> future = getAsyncData(
                fbClients.get(fbClientIndex),
                event,
                evcKey,
                tc);
        int nextIndex = fbClientIndex + 1;
        retryCount.incr();
        // thenApply produces a CompletableFuture<CompletableFuture<T>>; identity-compose flattens it
        return future.thenApply(s -> s != null ?
                handleSuccessCompletion(s, evcKey, fbClients, fbClientIndex, retryCount) :
                handleRetries(fbClients, nextIndex, event, evcKey, tc, retryCount))
                .exceptionally(t -> handleRetries(fbClients, nextIndex, event, evcKey, tc, retryCount))
                .thenCompose(Function.identity());
    }
public <T> CompletableFuture<T> handleSuccessCompletion(T s, EVCacheKey key, List<EVCacheClient> fbClients, int index, RetryCount retryCount) {
if (log.isDebugEnabled() && shouldLog()) {
log.debug("fetched the key {} from server {} and retry count {}", key.getKey(), fbClients.get(index).getServerGroup().getName(), retryCount.get());
}
return CompletableFuture.completedFuture(s);
}
private <T> CompletableFuture<T> handleRetry(T data,
EVCacheKey evcKey,
Transcoder<T> tc,
EVCacheClient client,
boolean hasZF,
boolean throwExc,
EVCacheEvent event,
RetryCount retryCount) {
if (data == null && hasZF) {
final List<EVCacheClient> fbClients = _pool.getEVCacheClientsForReadExcluding(client.getServerGroup());
return handleRetries(fbClients, 0, event, evcKey, tc, retryCount);
}
return CompletableFuture.completedFuture(data);
}
    /** Fetches item metadata (meta-debug) for {@code key}; the key is not pre-hashed by the caller. */
    public EVCacheItemMetaData metaDebug(String key) throws EVCacheException {
        return this.metaDebugInternal(key, false);
    }
    /**
     * meta-debug read: fetches item metadata from the primary read client, with
     * zone fallback across server groups on a miss. Mirrors {@link #doGet}'s
     * throttle/event/timer structure; event statuses are MDHIT/MDMISS.
     *
     * @param isOriginalKeyHashed true when the caller already hashed the key
     * @return metadata, or null on miss / suppressed failure
     */
    protected EVCacheItemMetaData metaDebugInternal(String key, boolean isOriginalKeyHashed) throws EVCacheException {
        if (null == key) throw new IllegalArgumentException("Key cannot be null");
        final EVCacheKey evcKey = getEVCacheKey(key);
        final boolean throwExc = doThrowException();
        EVCacheClient client = _pool.getEVCacheClientForRead();
        if (client == null) {
            incrementFastFail(EVCacheMetricsFactory.NULL_CLIENT, Call.META_DEBUG);
            if (throwExc) throw new EVCacheException("Could not find a client to get the metadata for APP " + _appName);
            return null; // Fast failure
        }
        final EVCacheEvent event = createEVCacheEvent(Collections.singletonList(client), Call.META_DEBUG);
        if (event != null) {
            event.setEVCacheKeys(Arrays.asList(evcKey));
            try {
                if (shouldThrottle(event)) {
                    incrementFastFail(EVCacheMetricsFactory.THROTTLED, Call.META_DEBUG);
                    if (throwExc) throw new EVCacheException("Request Throttled for app " + _appName + " & key " + evcKey);
                    return null;
                }
            } catch(EVCacheException ex) {
                if(throwExc) throw ex;
                incrementFastFail(EVCacheMetricsFactory.THROTTLED, Call.META_DEBUG);
                return null;
            }
            startEvent(event);
        }
        final long start = EVCacheMetricsFactory.getInstance().getRegistry().clock().wallTime();
        String status = EVCacheMetricsFactory.SUCCESS;
        String cacheOperation = EVCacheMetricsFactory.YES;
        int tries = 1;
        try {
            final boolean hasZF = hasZoneFallback();
            // suppress exceptions while fallbacks remain
            boolean throwEx = hasZF ? false : throwExc;
            EVCacheItemMetaData data = getEVCacheItemMetaData(client, evcKey, throwEx, hasZF, isOriginalKeyHashed);
            if (data == null && hasZF) {
                final List<EVCacheClient> fbClients = _pool.getEVCacheClientsForReadExcluding(client.getServerGroup());
                if (fbClients != null && !fbClients.isEmpty()) {
                    for (int i = 0; i < fbClients.size(); i++) {
                        final EVCacheClient fbClient = fbClients.get(i);
                        // last fallback attempt: restore the caller's exception preference
                        if(i >= fbClients.size() - 1) throwEx = throwExc;
                        if (event != null) {
                            try {
                                if (shouldThrottle(event)) {
                                    status = EVCacheMetricsFactory.THROTTLED;
                                    if (throwExc) throw new EVCacheException("Request Throttled for app " + _appName + " & key " + evcKey);
                                    return null;
                                }
                            } catch(EVCacheException ex) {
                                if(throwExc) throw ex;
                                status = EVCacheMetricsFactory.THROTTLED;
                                return null;
                            }
                        }
                        tries++;
                        data = getEVCacheItemMetaData(fbClient, evcKey, throwEx, (i < fbClients.size() - 1) ? true : false, isOriginalKeyHashed);
                        if (log.isDebugEnabled() && shouldLog()) log.debug("Retry for APP " + _appName + ", key [" + evcKey + (log.isTraceEnabled() ? "], Value [" + data : "") + "], ServerGroup : " + fbClient.getServerGroup());
                        if (data != null) {
                            // remember the serving client so the timer is tagged correctly
                            client = fbClient;
                            break;
                        }
                    }
                }
            }
            if (data != null) {
                if (event != null) event.setAttribute("status", "MDHIT");
            } else {
                cacheOperation = EVCacheMetricsFactory.NO;
                if (event != null) event.setAttribute("status", "MDMISS");
                if (log.isInfoEnabled() && shouldLog()) log.info("META_DEBUG : APP " + _appName + " ; cache miss for key : " + evcKey);
            }
            if (log.isDebugEnabled() && shouldLog()) log.debug("META_DEBUG : APP " + _appName + ", key [" + evcKey + (log.isTraceEnabled() ? "], Value [" + data : "") + "], ServerGroup : " + client.getServerGroup());
            if (event != null) endEvent(event);
            return data;
        } catch (net.spy.memcached.internal.CheckedOperationTimeoutException ex) {
            status = EVCacheMetricsFactory.TIMEOUT;
            if (event != null) {
                event.setStatus(status);
                eventError(event, ex);
            }
            if (!throwExc) return null;
            throw new EVCacheException("CheckedOperationTimeoutException getting with meta data for APP " + _appName + ", key = " + evcKey
                    + ".\nYou can set the following property to increase the timeout " + _appName
                    + ".EVCacheClientPool.readTimeout=<timeout in milli-seconds>", ex);
        } catch (Exception ex) {
            status = EVCacheMetricsFactory.ERROR;
            if (event != null) {
                event.setStatus(status);
                eventError(event, ex);
            }
            if (!throwExc) return null;
            throw new EVCacheException("Exception getting with metadata for APP " + _appName + ", key = " + evcKey, ex);
        } finally {
            // timer recorded on every path with hit/miss, status and try count
            final long duration = EVCacheMetricsFactory.getInstance().getRegistry().clock().wallTime()- start;
            getTimer(Call.META_DEBUG.name(), EVCacheMetricsFactory.READ, cacheOperation, status, tries, maxReadDuration.get().intValue(), client.getServerGroup()).record(duration, TimeUnit.MILLISECONDS);
            if (log.isDebugEnabled() && shouldLog()) log.debug("META_DEBUG : APP " + _appName + ", Took " + duration + " milliSec.");
        }
    }
    /** Fetches the value plus metadata (meta-get) for {@code key}; the key is not pre-hashed by the caller. */
    public <T> EVCacheItem<T> metaGet(String key, Transcoder<T> tc) throws EVCacheException {
        return this.metaGetInternal(key, tc, false);
    }
    /**
     * meta-get read: fetches the value together with its metadata from the
     * primary read client, with zone fallback across server groups on a miss.
     * Mirrors {@link #doGet}'s throttle/event/timer structure; event statuses
     * are MGHIT/MGMISS.
     *
     * @param isOriginalKeyHashed true when the caller already hashed the key
     * @return item + metadata, or null on miss / suppressed failure
     */
    protected <T> EVCacheItem<T> metaGetInternal(String key, Transcoder<T> tc, boolean isOriginalKeyHashed) throws EVCacheException {
        if (null == key) throw new IllegalArgumentException("Key cannot be null");
        final EVCacheKey evcKey = getEVCacheKey(key);
        final boolean throwExc = doThrowException();
        EVCacheClient client = _pool.getEVCacheClientForRead();
        if (client == null) {
            incrementFastFail(EVCacheMetricsFactory.NULL_CLIENT, Call.META_GET);
            if (throwExc) throw new EVCacheException("Could not find a client to get the data APP " + _appName);
            return null; // Fast failure
        }
        final EVCacheEvent event = createEVCacheEvent(Collections.singletonList(client), Call.META_GET);
        if (event != null) {
            event.setEVCacheKeys(Arrays.asList(evcKey));
            try {
                if (shouldThrottle(event)) {
                    incrementFastFail(EVCacheMetricsFactory.THROTTLED, Call.META_GET);
                    if (throwExc) throw new EVCacheException("Request Throttled for app " + _appName + " & key " + evcKey);
                    return null;
                }
            } catch(EVCacheException ex) {
                if(throwExc) throw ex;
                incrementFastFail(EVCacheMetricsFactory.THROTTLED, Call.META_GET);
                return null;
            }
            startEvent(event);
        }
        final long start = EVCacheMetricsFactory.getInstance().getRegistry().clock().wallTime();
        String status = EVCacheMetricsFactory.SUCCESS;
        String cacheOperation = EVCacheMetricsFactory.YES;
        int tries = 1;
        try {
            final boolean hasZF = hasZoneFallback();
            // suppress exceptions while fallbacks remain
            boolean throwEx = hasZF ? false : throwExc;
            EVCacheItem<T> data = getEVCacheItem(client, evcKey, tc, throwEx, hasZF, isOriginalKeyHashed, true);
            if (data == null && hasZF) {
                final List<EVCacheClient> fbClients = _pool.getEVCacheClientsForReadExcluding(client.getServerGroup());
                if (fbClients != null && !fbClients.isEmpty()) {
                    for (int i = 0; i < fbClients.size(); i++) {
                        final EVCacheClient fbClient = fbClients.get(i);
                        // last fallback attempt: restore the caller's exception preference
                        if(i >= fbClients.size() - 1) throwEx = throwExc;
                        if (event != null) {
                            try {
                                if (shouldThrottle(event)) {
                                    status = EVCacheMetricsFactory.THROTTLED;
                                    if (throwExc) throw new EVCacheException("Request Throttled for app " + _appName + " & key " + evcKey);
                                    return null;
                                }
                            } catch(EVCacheException ex) {
                                if(throwExc) throw ex;
                                status = EVCacheMetricsFactory.THROTTLED;
                                return null;
                            }
                        }
                        tries++;
                        data = getEVCacheItem(fbClient, evcKey, tc, throwEx, (i < fbClients.size() - 1) ? true : false, isOriginalKeyHashed, true);
                        if (log.isDebugEnabled() && shouldLog()) log.debug("Retry for APP " + _appName + ", key [" + evcKey + (log.isTraceEnabled() ? "], Value [" + data : "") + "], ServerGroup : " + fbClient.getServerGroup());
                        if (data != null) {
                            // remember the serving client so the timer is tagged correctly
                            client = fbClient;
                            break;
                        }
                    }
                }
            }
            if (data != null) {
                if (event != null) event.setAttribute("status", "MGHIT");
            } else {
                cacheOperation = EVCacheMetricsFactory.NO;
                if (event != null) event.setAttribute("status", "MGMISS");
                if (log.isInfoEnabled() && shouldLog()) log.info("META_GET : APP " + _appName + " ; cache miss for key : " + evcKey);
            }
            if (log.isDebugEnabled() && shouldLog()) log.debug("META_GET : APP " + _appName + ", key [" + evcKey + (log.isTraceEnabled() ? "], Value [" + data : "") + "], ServerGroup : " + client.getServerGroup());
            if (event != null) endEvent(event);
            return data;
        } catch (net.spy.memcached.internal.CheckedOperationTimeoutException ex) {
            status = EVCacheMetricsFactory.TIMEOUT;
            if (event != null) {
                event.setStatus(status);
                eventError(event, ex);
            }
            if (!throwExc) return null;
            throw new EVCacheException("CheckedOperationTimeoutException getting with meta data for APP " + _appName + ", key = " + evcKey
                    + ".\nYou can set the following property to increase the timeout " + _appName
                    + ".EVCacheClientPool.readTimeout=<timeout in milli-seconds>", ex);
        } catch (Exception ex) {
            status = EVCacheMetricsFactory.ERROR;
            if (event != null) {
                event.setStatus(status);
                eventError(event, ex);
            }
            if (!throwExc) return null;
            throw new EVCacheException("Exception getting with meta data for APP " + _appName + ", key = " + evcKey, ex);
        } finally {
            // timer recorded on every path with hit/miss, status and try count
            final long duration = EVCacheMetricsFactory.getInstance().getRegistry().clock().wallTime()- start;
            getTimer(Call.META_GET.name(), EVCacheMetricsFactory.READ, cacheOperation, status, tries, maxReadDuration.get().intValue(), client.getServerGroup()).record(duration, TimeUnit.MILLISECONDS);
            if (log.isDebugEnabled() && shouldLog()) log.debug("META_GET : APP " + _appName + ", Took " + duration + " milliSec.");
        }
    }
private int policyToCount(Policy policy, int count) {
if (policy == null) return 0;
switch (policy) {
case NONE:
return 0;
case ONE:
return 1;
case QUORUM:
if (count == 0)
return 0;
else if (count <= 2)
return count;
else
return (count / 2) + 1;
case ALL_MINUS_1:
if (count == 0)
return 0;
else if (count <= 2)
return 1;
else
return count - 1;
default:
return count;
}
}
    /**
     * Consistent read: fetches the key from every write-enabled copy and only returns a
     * value that was observed on at least {@code policyToCount(policy, clients.length)}
     * copies. Values that did not reach the required agreement count are deleted from
     * the copies that held them (dropped rather than repaired).
     * Degrades to a plain {@code get(key, tc)} when the policy requires one copy or fewer.
     *
     * @param key    the key to read; must not be null
     * @param tc     transcoder to decode the value (may be null to use defaults)
     * @param policy consistency policy determining how many copies must agree
     * @return the agreed-upon value, or null on miss / insufficient agreement
     * @throws EVCacheException wrapping any underlying failure when exceptions are enabled
     */
    public <T> T get(String key, Transcoder<T> tc, Policy policy) throws EVCacheException {
        if (null == key) throw new IllegalArgumentException();
        final boolean throwExc = doThrowException();
        final EVCacheClient[] clients = _pool.getEVCacheClientForWrite();
        if (clients.length == 0) {
            incrementFastFail(EVCacheMetricsFactory.NULL_CLIENT, Call.GET);
            if (throwExc) throw new EVCacheException("Could not find a client to asynchronously get the data");
            return null; // Fast failure
        }
        final int expectedSuccessCount = policyToCount(policy, clients.length);
        if(expectedSuccessCount <= 1) return get(key, tc);
        final long startTime = EVCacheMetricsFactory.getInstance().getRegistry().clock().wallTime();
        String status = EVCacheMetricsFactory.SUCCESS;
        String cacheOperation = EVCacheMetricsFactory.YES;
        int tries = 1;
        try {
            // issue the read asynchronously against every copy before blocking on any of them
            final List<Future<T>> futureList = new ArrayList<Future<T>>(clients.length);
            final long endTime = startTime + _pool.getReadTimeout().get().intValue();
            for (EVCacheClient client : clients) {
                final Future<T> future = getGetFuture(client, key, tc, throwExc);
                futureList.add(future);
                if (log.isDebugEnabled() && shouldLog()) log.debug("GET : CONSISTENT : APP " + _appName + ", Future " + future + " for key : " + key + " with policy : " + policy + " for client : " + client);
            }
            // group the clients by the value each one returned
            final Map<T, List<EVCacheClient>> evcacheClientMap = new HashMap<T, List<EVCacheClient>>();
            //final Map<T, Integer> tMap = new HashMap<T,Integer>();
            if (log.isDebugEnabled() && shouldLog()) log.debug("GET : CONSISTENT : Total Requests " + clients.length + "; Expected Success Count : " + expectedSuccessCount);
            for(Future<T> future : futureList) {
                try {
                    if(future instanceof EVCacheOperationFuture) {
                        EVCacheOperationFuture<T> evcacheOperationFuture = (EVCacheOperationFuture<T>)future;
                        long duration = endTime - System.currentTimeMillis();
                        // block for at least 20 ms even when the shared deadline has nearly elapsed
                        if(duration < 20) duration = 20;
                        if (log.isDebugEnabled() && shouldLog()) log.debug("GET : CONSISTENT : block duration : " + duration);
                        final T t = evcacheOperationFuture.get(duration, TimeUnit.MILLISECONDS, throwExc, false);
                        if (log.isTraceEnabled() && shouldLog()) log.trace("GET : CONSISTENT : value : " + t);
                        if(t != null) {
                            final List<EVCacheClient> cList = evcacheClientMap.computeIfAbsent(t, k -> new ArrayList<EVCacheClient>(clients.length));
                            cList.add(evcacheOperationFuture.getEVCacheClient());
                            if (log.isDebugEnabled() && shouldLog()) log.debug("GET : CONSISTENT : Added Client to ArrayList " + cList);
                        }
                    }
                } catch (Exception e) {
                    log.error("Exception",e);
                }
            }
            T retVal = null;
            /* TODO : use metaget to get TTL and set it. For now we will delete the inconsistent value */
            for(Entry<T, List<EVCacheClient>> entry : evcacheClientMap.entrySet()) {
                if (log.isDebugEnabled() && shouldLog()) log.debug("GET : CONSISTENT : Existing Count for Value : " + entry.getValue().size() + "; expectedSuccessCount : " + expectedSuccessCount);
                if(entry.getValue().size() >= expectedSuccessCount) {
                    retVal = entry.getKey();
                } else {
                    // this value did not reach the required agreement count: delete it from those copies
                    for(EVCacheClient client : entry.getValue()) {
                        if (log.isDebugEnabled() && shouldLog()) log.debug("GET : CONSISTENT : Delete in-consistent vale from : " + client);
                        client.delete(key);
                    }
                }
            }
            if(retVal != null) {
                if (log.isDebugEnabled() && shouldLog()) log.debug("GET : CONSISTENT : policy : " + policy + " was met. Will return the value. Total Duration : " + (System.currentTimeMillis() - startTime) + " milli Seconds.");
                return retVal;
            }
            if (log.isDebugEnabled() && shouldLog()) log.debug("GET : CONSISTENT : policy : " + policy + " was NOT met. Will return NULL. Total Duration : " + (System.currentTimeMillis() - startTime) + " milli Seconds.");
            return null;
        } catch (Exception ex) {
            status = EVCacheMetricsFactory.ERROR;
            if (!throwExc) return null;
            throw new EVCacheException("Exception getting data for APP " + _appName + ", key = " + key, ex);
        } finally {
            final long duration = EVCacheMetricsFactory.getInstance().getRegistry().clock().wallTime()- startTime;
            // NOTE(review): timer is tagged Call.GET_ALL rather than Call.GET; also cacheOperation is never
            // set to NO on a miss here -- confirm both are intentional
            getTimer(Call.GET_ALL.name(), EVCacheMetricsFactory.READ, cacheOperation, status, tries, maxReadDuration.get().intValue(), null).record(duration, TimeUnit.MILLISECONDS);
            if (log.isDebugEnabled() && shouldLog()) log.debug("GET : CONSISTENT : APP " + _appName + ", Took " + duration + " milliSec.");
        }
    }
public <T> Single<T> get(String key, Scheduler scheduler) {
return this.get(key, (Transcoder<T>) _transcoder, scheduler);
}
public <T> Single<T> get(String key, Transcoder<T> tc, Scheduler scheduler) {
if (null == key) return Single.error(new IllegalArgumentException("Key cannot be null"));
final boolean throwExc = doThrowException();
final EVCacheClient client = _pool.getEVCacheClientForRead();
if (client == null) {
incrementFastFail(EVCacheMetricsFactory.NULL_CLIENT, Call.GET);
return Single.error(new EVCacheException("Could not find a client to get the data APP " + _appName));
}
final EVCacheKey evcKey = getEVCacheKey(key);
final EVCacheEvent event = createEVCacheEvent(Collections.singletonList(client), Call.GET);
if (event != null) {
event.setEVCacheKeys(Arrays.asList(evcKey));
if (shouldThrottle(event)) {
incrementFastFail(EVCacheMetricsFactory.THROTTLED, Call.GET);
return Single.error(new EVCacheException("Request Throttled for app " + _appName + " & key " + key));
}
startEvent(event);
}
final long start = EVCacheMetricsFactory.getInstance().getRegistry().clock().wallTime();
final boolean hasZF = hasZoneFallback();
final boolean throwEx = hasZF ? false : throwExc;
return getData(client, evcKey, tc, throwEx, hasZF, scheduler).flatMap(data -> {
if (data == null && hasZF) {
final List<EVCacheClient> fbClients = _pool.getEVCacheClientsForReadExcluding(client.getServerGroup());
if (fbClients != null && !fbClients.isEmpty()) {
return Observable.concat(Observable.from(fbClients).map(
fbClient -> getData(fbClients.indexOf(fbClient), fbClients.size(), fbClient, evcKey, tc, throwEx, throwExc, false, scheduler) //TODO : for the last one make sure to pass throwExc
//.doOnSuccess(fbData -> increment("RETRY_" + ((fbData == null) ? "MISS" : "HIT")))
.toObservable()))
.firstOrDefault(null, fbData -> (fbData != null)).toSingle();
}
}
return Single.just(data);
}).map(data -> {
//increment("GetCall");
if (data != null) {
//increment("GetHit");
if (event != null) event.setAttribute("status", "GHIT");
} else {
//increment("GetMiss");
if (event != null) event.setAttribute("status", "GMISS");
if (log.isInfoEnabled() && shouldLog()) log.info("GET : APP " + _appName + " ; cache miss for key : " + evcKey);
}
if (log.isDebugEnabled() && shouldLog()) log.debug("GET : APP " + _appName + ", key [" + evcKey + (log.isTraceEnabled() ? "], Value [" + data : "") + "], ServerGroup : " + client .getServerGroup());
if (event != null) endEvent(event);
return data;
}).onErrorReturn(ex -> {
if (ex instanceof net.spy.memcached.internal.CheckedOperationTimeoutException) {
if (event != null) {
event.setStatus(EVCacheMetricsFactory.TIMEOUT);
eventError(event, ex);
}
if (!throwExc) return null;
throw sneakyThrow(new EVCacheException("CheckedOperationTimeoutException getting data for APP " + _appName + ", key = "
+ evcKey
+ ".\nYou can set the following property to increase the timeout " + _appName
+ ".EVCacheClientPool.readTimeout=<timeout in milli-seconds>", ex));
} else {
if (event != null) {
event.setStatus(EVCacheMetricsFactory.ERROR);
eventError(event, ex);
}
if (!throwExc) return null;
throw sneakyThrow(new EVCacheException("Exception getting data for APP " + _appName + ", key = " + evcKey, ex));
}
}).doAfterTerminate(() -> {
final long duration = EVCacheMetricsFactory.getInstance().getRegistry().clock().wallTime()- start;
getTimer(Call.GET_AND_TOUCH.name(), EVCacheMetricsFactory.READ, null, EVCacheMetricsFactory.SUCCESS, 1, maxReadDuration.get().intValue(), client.getServerGroup()).record(duration, TimeUnit.MILLISECONDS);
if (log.isDebugEnabled() && shouldLog()) log.debug("GET : APP " + _appName + ", Took " + duration + " milliSec.");
});
}
    /**
     * Reads the value for the key from a single client, transparently handling hashed keys:
     * when the key is stored hashed, the stored EVCacheValue wrapper is fetched, its embedded
     * canonical key is verified (to detect hash collisions) and its payload decoded with the
     * chosen transcoder.
     *
     * Transcoder precedence: explicit {@code tc}, then the app-level {@code _transcoder},
     * then the client's own transcoder.
     *
     * @return the decoded value, or null on miss / collision / suppressed failure
     * @throws Exception only when {@code throwException} is set and zone fallback ({@code hasZF}) is unavailable
     */
    private <T> T getData(EVCacheClient client, EVCacheKey evcKey, Transcoder<T> tc, boolean throwException, boolean hasZF) throws Exception {
        if (client == null) return null;
        final Transcoder<T> transcoder = (tc == null) ? ((_transcoder == null) ? (Transcoder<T>) client.getTranscoder() : (Transcoder<T>) _transcoder) : tc;
        try {
            String hashKey = evcKey.getHashKey(client.isDuetClient(), client.getHashingAlgorithm(), client.shouldEncodeHashKey(), client.getMaxDigestBytes(), client.getMaxHashLength(), client.getBaseEncoder());
            String canonicalKey = evcKey.getCanonicalKey(client.isDuetClient());
            if(hashKey != null) {
                final Object obj = client.get(hashKey, evcacheValueTranscoder, throwException, hasZF);
                if(obj != null && obj instanceof EVCacheValue) {
                    final EVCacheValue val = (EVCacheValue)obj;
                    // a different canonical key inside the stored value means two keys hashed to the same digest
                    if(!val.getKey().equals(canonicalKey)) {
                        incrementFailure(EVCacheMetricsFactory.KEY_HASH_COLLISION, Call.GET.name(), EVCacheMetricsFactory.READ);
                        return null;
                    }
                    final CachedData cd = new CachedData(val.getFlags(), val.getValue(), CachedData.MAX_SIZE);
                    return transcoder.decode(cd);
                } else {
                    return null;
                }
            } else {
                return client.get(canonicalKey, transcoder, throwException, hasZF);
            }
        } catch (EVCacheConnectException ex) {
            if (log.isDebugEnabled() && shouldLog()) log.debug("EVCacheConnectException while getting data for APP " + _appName + ", key : " + evcKey + "; hasZF : " + hasZF, ex);
            if (!throwException || hasZF) return null;
            throw ex;
        } catch (EVCacheReadQueueException ex) {
            if (log.isDebugEnabled() && shouldLog()) log.debug("EVCacheReadQueueException while getting data for APP " + _appName + ", key : " + evcKey + "; hasZF : " + hasZF, ex);
            if (!throwException || hasZF) return null;
            throw ex;
        } catch (EVCacheException ex) {
            if (log.isDebugEnabled() && shouldLog()) log.debug("EVCacheException while getting data for APP " + _appName + ", key : " + evcKey + "; hasZF : " + hasZF, ex);
            if (!throwException || hasZF) return null;
            throw ex;
        } catch (Exception ex) {
            if (log.isDebugEnabled() && shouldLog()) log.debug("Exception while getting data for APP " + _appName + ", key : " + evcKey, ex);
            if (!throwException || hasZF) return null;
            throw ex;
        }
    }
private <T> CompletableFuture<T> getAsyncData(EVCacheClient client,
EVCacheEvent event,
EVCacheKey key,
Transcoder<T> tc) {
if (event != null) {
if (shouldThrottle(event)) {
CompletableFuture<T> completableFuture = new CompletableFuture<>();
completableFuture.completeExceptionally(new EVCacheException("Request Throttled for app " + _appName + " & key " + key));
return completableFuture;
}
}
return getAsyncData(client, key, tc);
}
private <T> CompletableFuture<T> getAsyncData(EVCacheClient client,
EVCacheKey evcKey,
Transcoder<T> tc) {
final Transcoder<T> transcoder = (tc == null) ? ((_transcoder == null) ? (Transcoder<T>) client.getTranscoder() : (Transcoder<T>) _transcoder) : tc;
String hashKey = evcKey.getHashKey(client.isDuetClient(), client.getHashingAlgorithm(), client.shouldEncodeHashKey(), client.getMaxDigestBytes(), client.getMaxHashLength(), client.getBaseEncoder());
String canonicalKey = evcKey.getCanonicalKey(client.isDuetClient());
if (hashKey != null) {
if (log.isDebugEnabled() && shouldLog()) {
log.debug("Fetching data with hashKey {} ", hashKey);
}
return client.getAsync(hashKey, evcacheValueTranscoder)
.thenApply(val -> getData(transcoder, canonicalKey, val))
.exceptionally(ex -> handleClientException(hashKey, ex));
} else {
if (log.isDebugEnabled() && shouldLog()) {
log.debug("Fetching data with canonicalKey {} ", canonicalKey);
}
return client.getAsync(canonicalKey, transcoder)
.exceptionally(ex -> handleClientException(canonicalKey, ex));
}
}
private <T> T handleClientException(String evcKey, Throwable ex) {
if (log.isDebugEnabled() && shouldLog())
log.debug("Exception while getting data for APP " + _appName + ", key : " + evcKey + ":" + ex);
throw sneakyThrow(ex);
}
private <T> T getData(Transcoder<T> transcoder, String canonicalKey, Object obj) {
if (obj instanceof EVCacheValue) {
final EVCacheValue val = (EVCacheValue) obj;
if (!val.getKey().equals(canonicalKey)) {
incrementFailure(EVCacheMetricsFactory.KEY_HASH_COLLISION, Call.GET.name(), EVCacheMetricsFactory.READ);
return null;
}
final CachedData cd = new CachedData(val.getFlags(), val.getValue(), CachedData.MAX_SIZE);
return transcoder.decode(cd);
} else {
return null;
}
}
    /**
     * Fetches item metadata (meta debug) for the key from the given client.
     * When {@code isOriginalKeyHashed} is true the key is used as-is; otherwise the
     * client-specific derived key (possibly hashed) is computed first.
     *
     * @return the metadata, or null on suppressed failure
     * @throws Exception only when {@code throwException} is set and zone fallback ({@code hasZF}) is unavailable
     */
    protected EVCacheItemMetaData getEVCacheItemMetaData(EVCacheClient client, EVCacheKey evcKey, boolean throwException, boolean hasZF, boolean isOriginalKeyHashed) throws Exception {
        if (client == null) return null;
        try {
            return client.metaDebug(isOriginalKeyHashed ? evcKey.getKey() : evcKey.getDerivedKey(client.isDuetClient(), client.getHashingAlgorithm(), client.shouldEncodeHashKey(), client.getMaxDigestBytes(), client.getMaxHashLength(), client.getBaseEncoder()));
        } catch (EVCacheConnectException ex) {
            if (log.isDebugEnabled() && shouldLog()) log.debug("EVCacheConnectException while getting with metadata for APP " + _appName + ", key : " + evcKey + "; hasZF : " + hasZF, ex);
            if (!throwException || hasZF) return null;
            throw ex;
        } catch (EVCacheReadQueueException ex) {
            if (log.isDebugEnabled() && shouldLog()) log.debug("EVCacheReadQueueException while getting with metadata for APP " + _appName + ", key : " + evcKey + "; hasZF : " + hasZF, ex);
            if (!throwException || hasZF) return null;
            throw ex;
        } catch (EVCacheException ex) {
            if (log.isDebugEnabled() && shouldLog()) log.debug("EVCacheException while getting with metadata for APP " + _appName + ", key : " + evcKey + "; hasZF : " + hasZF, ex);
            if (!throwException || hasZF) return null;
            throw ex;
        } catch (Exception ex) {
            if (log.isDebugEnabled() && shouldLog()) log.debug("Exception while getting with metadata for APP " + _appName + ", key : " + evcKey, ex);
            if (!throwException || hasZF) return null;
            throw ex;
        }
    }
    /**
     * meta-get: fetches the value together with its item metadata from the given client,
     * handling hashed keys. When {@code desearilizeEVCacheValue} (sic) is true, the stored
     * EVCacheValue wrapper is unwrapped, its embedded key verified against the canonical key
     * (hash-collision guard, skipped when the caller passed an already-hashed key) and the
     * payload decoded with the chosen transcoder; otherwise the raw CachedData is returned.
     *
     * @return the item, or null on miss / collision / suppressed failure
     * @throws Exception only when {@code throwException} is set and zone fallback ({@code hasZF}) is unavailable
     */
    protected <T> EVCacheItem<T> getEVCacheItem(EVCacheClient client, EVCacheKey evcKey, Transcoder<T> tc, boolean throwException, boolean hasZF, boolean isOriginalKeyHashed, boolean desearilizeEVCacheValue) throws Exception {
        if (client == null) return null;
        final Transcoder<T> transcoder = (tc == null) ? ((_transcoder == null) ? (Transcoder<T>) client.getTranscoder() : (Transcoder<T>) _transcoder) : tc;
        try {
            String hashKey = isOriginalKeyHashed ? evcKey.getKey() : evcKey.getHashKey(client.isDuetClient(), client.getHashingAlgorithm(), client.shouldEncodeHashKey(), client.getMaxDigestBytes(), client.getMaxHashLength(), client.getBaseEncoder());
            String canonicalKey = evcKey.getCanonicalKey(client.isDuetClient());
            if (hashKey != null) {
                if(desearilizeEVCacheValue) {
                    final EVCacheItem<Object> obj = client.metaGet(hashKey, evcacheValueTranscoder, throwException, hasZF);
                    if (null == obj) return null;
                    if (obj.getData() instanceof EVCacheValue) {
                        final EVCacheValue val = (EVCacheValue) obj.getData();
                        // NOTE(review): this null check is unreachable -- the instanceof above guarantees non-null
                        if (null == val) {
                            return null;
                        }
                        // compare the key embedded in the value to the original key only if the original key is not passed hashed
                        if (!isOriginalKeyHashed && !(val.getKey().equals(canonicalKey))) {
                            incrementFailure(EVCacheMetricsFactory.KEY_HASH_COLLISION, Call.META_GET.name(), EVCacheMetricsFactory.META_GET_OPERATION);
                            return null;
                        }
                        final CachedData cd = new CachedData(val.getFlags(), val.getValue(), CachedData.MAX_SIZE);
                        T t = transcoder.decode(cd);
                        obj.setData(t);
                        obj.setFlag(val.getFlags());
                        return (EVCacheItem<T>) obj;
                    } else {
                        return null;
                    }
                } else {
                    // caller wants the raw stored bytes, untouched
                    final EVCacheItem<CachedData> obj = client.metaGet(hashKey, new ChunkTranscoder(), throwException, hasZF);
                    if (null == obj) return null;
                    return (EVCacheItem<T>) obj;
                }
            } else {
                return client.metaGet(canonicalKey, transcoder, throwException, hasZF);
            }
        } catch (EVCacheConnectException ex) {
            if (log.isDebugEnabled() && shouldLog()) log.debug("EVCacheConnectException while getting with meta data for APP " + _appName + ", key : " + evcKey + "; hasZF : " + hasZF, ex);
            if (!throwException || hasZF) return null;
            throw ex;
        } catch (EVCacheReadQueueException ex) {
            if (log.isDebugEnabled() && shouldLog()) log.debug("EVCacheReadQueueException while getting with meta data for APP " + _appName + ", key : " + evcKey + "; hasZF : " + hasZF, ex);
            if (!throwException || hasZF) return null;
            throw ex;
        } catch (EVCacheException ex) {
            if (log.isDebugEnabled() && shouldLog()) log.debug("EVCacheException while getting with meta data for APP " + _appName + ", key : " + evcKey + "; hasZF : " + hasZF, ex);
            if (!throwException || hasZF) return null;
            throw ex;
        } catch (Exception ex) {
            if (log.isDebugEnabled() && shouldLog()) log.debug("Exception while getting with meta data for APP " + _appName + ", key : " + evcKey, ex);
            if (!throwException || hasZF) return null;
            throw ex;
        }
    }
private <T> Single<T> getData(int index, int size, EVCacheClient client, EVCacheKey canonicalKey, Transcoder<T> tc, boolean throwEx, boolean throwExc, boolean hasZF, Scheduler scheduler) {
if(index >= size -1) throwEx = throwExc;
return getData(client, canonicalKey, tc, throwEx, hasZF, scheduler);
}
    /**
     * Reactive single-client read. Hashed keys are not supported on the rx path.
     * Transcoder precedence: explicit {@code tc}, then the app-level {@code _transcoder},
     * then the client's own transcoder. Failures are suppressed (null emitted) when
     * {@code throwException} is false or zone fallback ({@code hasZF}) is available;
     * otherwise the original exception is rethrown unchecked via sneakyThrow.
     */
    private <T> Single<T> getData(EVCacheClient client, EVCacheKey evcKey, Transcoder<T> tc, boolean throwException, boolean hasZF, Scheduler scheduler) {
        if (client == null) return Single.error(new IllegalArgumentException("Client cannot be null"));
        if(evcKey.getHashKey(client.isDuetClient(), client.getHashingAlgorithm(), client.shouldEncodeHashKey(), client.getMaxDigestBytes(), client.getMaxHashLength(), client.getBaseEncoder()) != null) {
            return Single.error(new IllegalArgumentException("Not supported"));
        } else {
            final Transcoder<T> transcoder = (tc == null) ? ((_transcoder == null) ? (Transcoder<T>) client.getTranscoder() : (Transcoder<T>) _transcoder) : tc;
            return client.get(evcKey.getCanonicalKey(client.isDuetClient()), transcoder, throwException, hasZF, scheduler).onErrorReturn(ex -> {
                if (ex instanceof EVCacheReadQueueException) {
                    if (log.isDebugEnabled() && shouldLog()) log.debug("EVCacheReadQueueException while getting data for APP " + _appName + ", key : " + evcKey + "; hasZF : " + hasZF, ex);
                    if (!throwException || hasZF) return null;
                    throw sneakyThrow(ex);
                } else if (ex instanceof EVCacheException) {
                    if (log.isDebugEnabled() && shouldLog()) log.debug("EVCacheException while getting data for APP " + _appName + ", key : " + evcKey + "; hasZF : " + hasZF, ex);
                    if (!throwException || hasZF) return null;
                    throw sneakyThrow(ex);
                } else {
                    if (log.isDebugEnabled() && shouldLog()) log.debug("Exception while getting data for APP " + _appName + ", key : " + evcKey, ex);
                    if (!throwException || hasZF) return null;
                    throw sneakyThrow(ex);
                }
            });
        }
    }
    // 30 days expressed in seconds (2592000 = 30 * 24 * 3600); per the check below, larger
    // TTL values are expected to be absolute epoch-seconds timestamps
    private final int MAX_IN_SEC = 2592000;
    /**
     * Validates a TTL before a write/touch: must be non-negative, must be in seconds
     * (a value larger than the current epoch millis was almost certainly given in
     * milliseconds), and when given as an absolute epoch-seconds timestamp it must be
     * in the future. Invalid TTLs increment the INVALID_TTL counter for the call.
     *
     * @throws IllegalArgumentException when the TTL fails any of the checks
     */
    private void checkTTL(int timeToLive, Call call) throws IllegalArgumentException {
        try {
            if(timeToLive < 0) throw new IllegalArgumentException ("Time to Live ( " + timeToLive + ") must be great than or equal to 0.");
            final long currentTimeInMillis = System.currentTimeMillis();
            if(timeToLive > currentTimeInMillis) throw new IllegalArgumentException ("Time to Live ( " + timeToLive + ") must be in seconds.");
            if(timeToLive > MAX_IN_SEC && timeToLive < currentTimeInMillis/1000) throw new IllegalArgumentException ("If providing Time to Live ( " + timeToLive + ") in seconds as epoc value, it should be greater than current time " + currentTimeInMillis/1000);
        } catch (IllegalArgumentException iae) {
            incrementFastFail(EVCacheMetricsFactory.INVALID_TTL, call);
            throw iae;
        }
    }
public <T> T getAndTouch(String key, int timeToLive) throws EVCacheException {
return this.getAndTouch(key, timeToLive, (Transcoder<T>) _transcoder);
}
public <T> Single<T> getAndTouch(String key, int timeToLive, Scheduler scheduler) {
return this.getAndTouch(key, timeToLive, (Transcoder<T>) _transcoder, scheduler);
}
public <T> Single<T> getAndTouch(String key, int timeToLive, Transcoder<T> tc, Scheduler scheduler) {
if (null == key) return Single.error(new IllegalArgumentException("Key cannot be null"));
checkTTL(timeToLive, Call.GET_AND_TOUCH);
if(hashKey.get()) {
return Single.error(new IllegalArgumentException("Not supported"));
}
final boolean throwExc = doThrowException();
final EVCacheClient client = _pool.getEVCacheClientForRead();
if (client == null) {
incrementFastFail(EVCacheMetricsFactory.NULL_CLIENT, Call.GET_AND_TOUCH);
return Single.error(new EVCacheException("Could not find a client to get and touch the data for APP " + _appName));
}
final EVCacheKey evcKey = getEVCacheKey(key);
final EVCacheEvent event = createEVCacheEvent(Collections.singletonList(client), Call.GET_AND_TOUCH);
if (event != null) {
event.setEVCacheKeys(Arrays.asList(evcKey));
if (shouldThrottle(event)) {
incrementFastFail(EVCacheMetricsFactory.THROTTLED, Call.GET_AND_TOUCH);
return Single.error(new EVCacheException("Request Throttled for app " + _appName + " & key " + key));
}
event.setTTL(timeToLive);
startEvent(event);
}
final long start = EVCacheMetricsFactory.getInstance().getRegistry().clock().wallTime();
final boolean hasZF = hasZoneFallback();
final boolean throwEx = hasZF ? false : throwExc;
//anyway we have to touch all copies so let's just reuse getData instead of getAndTouch
return getData(client, evcKey, tc, throwEx, hasZF, scheduler).flatMap(data -> {
if (data == null && hasZF) {
final List<EVCacheClient> fbClients = _pool.getEVCacheClientsForReadExcluding(client.getServerGroup());
if (fbClients != null && !fbClients.isEmpty()) {
return Observable.concat(Observable.from(fbClients).map(
//TODO : for the last one make sure to pass throwExc
fbClient -> getData(fbClients.indexOf(fbClient), fbClients.size(), fbClient, evcKey, tc, throwEx, throwExc, false, scheduler)
.doOnSuccess(fbData -> {
//increment("RETRY_" + ((fbData == null) ? "MISS" : "HIT"));
})
.toObservable()))
.firstOrDefault(null, fbData -> (fbData != null)).toSingle();
}
}
return Single.just(data);
}).map(data -> {
//increment("GetCall");
if (data != null) {
//increment("GetHit");
if (event != null) event.setAttribute("status", "THIT");
// touch all copies
try {
touchData(evcKey, timeToLive);
} catch (Exception e) {
throw sneakyThrow(new EVCacheException("Exception performing touch for APP " + _appName + ", key = " + evcKey, e));
}
if (log.isDebugEnabled() && shouldLog()) log.debug("GET_AND_TOUCH : APP " + _appName + ", key [" + evcKey + (log.isTraceEnabled() ? "], Value [" + data : "") + "], ServerGroup : " + client .getServerGroup());
} else {
//increment("GetMiss");
if (event != null) event.setAttribute("status", "TMISS");
if (log.isInfoEnabled() && shouldLog()) log.info("GET_AND_TOUCH : APP " + _appName + " ; cache miss for key : " + evcKey);
}
if (event != null) endEvent(event);
return data;
}).onErrorReturn(ex -> {
if (ex instanceof net.spy.memcached.internal.CheckedOperationTimeoutException) {
if (event != null) {
event.setStatus(EVCacheMetricsFactory.TIMEOUT);
eventError(event, ex);
}
if (!throwExc) return null;
throw sneakyThrow(new EVCacheException("CheckedOperationTimeoutException executing getAndTouch APP " + _appName + ", key = " + evcKey
+ ".\nYou can set the following property to increase the timeout " + _appName + ".EVCacheClientPool.readTimeout=<timeout in milli-seconds>", ex));
} else {
if (event != null) {
event.setStatus(EVCacheMetricsFactory.ERROR);
eventError(event, ex);
}
if (event != null) eventError(event, ex);
if (!throwExc) return null;
throw sneakyThrow(new EVCacheException("Exception executing getAndTouch APP " + _appName + ", key = " + evcKey, ex));
}
}).doAfterTerminate(() -> {
final long duration = EVCacheMetricsFactory.getInstance().getRegistry().clock().wallTime()- start;
getTimer(Call.GET_AND_TOUCH.name(), EVCacheMetricsFactory.READ, null, EVCacheMetricsFactory.SUCCESS, 1, maxReadDuration.get().intValue(),client.getServerGroup()).record(duration, TimeUnit.MILLISECONDS);
if (log.isDebugEnabled() && shouldLog()) log.debug("GET_AND_TOUCH : APP " + _appName + ", Took " + duration+ " milliSec.");
});
}
    /**
     * Gets the value for the key and resets its TTL on all copies. Checks the optional
     * in-memory (near) cache first; on an in-memory hit the distributed copies are still
     * touched. When {@code ignoreTouch} is set, degrades to a plain get.
     *
     * @throws EVCacheException when exceptions are enabled and the read or touch fails
     */
    @Override
    public <T> T getAndTouch(String key, int timeToLive, Transcoder<T> tc) throws EVCacheException {
        if (null == key) throw new IllegalArgumentException("Key cannot be null");
        checkTTL(timeToLive, Call.GET_AND_TOUCH);
        final EVCacheKey evcKey = getEVCacheKey(key);
        if (_useInMemoryCache.get()) {
            final boolean throwExc = doThrowException();
            T value = null;
            try {
                final Transcoder<T> transcoder = (tc == null) ? ((_transcoder == null) ? (Transcoder<T>) _pool.getEVCacheClientForRead().getTranscoder() : (Transcoder<T>) _transcoder) : tc;
                value = (T) getInMemoryCache(transcoder).get(evcKey);
            } catch (ExecutionException e) {
                if(throwExc) {
                    // DataNotFoundException from the loader is an ordinary miss, not an error
                    if(e.getCause() instanceof DataNotFoundException) {
                        return null;
                    }
                    if(e.getCause() instanceof EVCacheException) {
                        if (log.isDebugEnabled() && shouldLog()) log.debug("ExecutionException while getting data from InMemory Cache", e);
                        throw (EVCacheException)e.getCause();
                    }
                    throw new EVCacheException("ExecutionException", e);
                }
            }
            if (value != null) {
                // in-memory hit: still reset the TTL on the distributed copies
                try {
                    touchData(evcKey, timeToLive);
                } catch (Exception e) {
                    if (throwExc) throw new EVCacheException("Exception executing getAndTouch APP " + _appName + ", key = " + evcKey, e);
                }
                return value;
            }
        }
        if(ignoreTouch.get()) {
            return doGet(evcKey, tc);
        } else {
            return doGetAndTouch(evcKey, timeToLive, tc);
        }
    }
    /**
     * Core getAndTouch: reads the key from the primary read client, retries the other
     * server groups on a miss (zone fallback), and on a hit touches every copy so all
     * replicas carry the new TTL. Records a timer tagged with hit/miss, status and the
     * number of tries.
     *
     * @return the value, or null on miss / throttle / suppressed failure
     * @throws EVCacheException when exceptions are enabled and the read or touch fails
     */
    <T> T doGetAndTouch(EVCacheKey evcKey, int timeToLive, Transcoder<T> tc) throws EVCacheException {
        final boolean throwExc = doThrowException();
        EVCacheClient client = _pool.getEVCacheClientForRead();
        if (client == null) {
            incrementFastFail(EVCacheMetricsFactory.NULL_CLIENT, Call.GET_AND_TOUCH);
            if (throwExc) throw new EVCacheException("Could not find a client to get and touch the data for App " + _appName);
            return null; // Fast failure
        }
        final EVCacheEvent event = createEVCacheEvent(Collections.singletonList(client), Call.GET_AND_TOUCH);
        if (event != null) {
            event.setEVCacheKeys(Arrays.asList(evcKey));
            try {
                if (shouldThrottle(event)) {
                    incrementFastFail(EVCacheMetricsFactory.THROTTLED, Call.GET_AND_TOUCH);
                    if (throwExc) throw new EVCacheException("Request Throttled for app " + _appName + " & key " + evcKey);
                    return null;
                }
            } catch(EVCacheException ex) {
                if(throwExc) throw ex;
                incrementFastFail(EVCacheMetricsFactory.THROTTLED, Call.GET_AND_TOUCH);
                return null;
            }
            event.setTTL(timeToLive);
            startEvent(event);
        }
        final long start = EVCacheMetricsFactory.getInstance().getRegistry().clock().wallTime();
        String cacheOperation = EVCacheMetricsFactory.YES;
        int tries = 1;
        String status = EVCacheMetricsFactory.SUCCESS;
        try {
            final boolean hasZF = hasZoneFallback();
            // suppress exceptions on all but the last attempt so fallback copies get a chance
            boolean throwEx = hasZF ? false : throwExc;
            T data = getData(client, evcKey, tc, throwEx, hasZF);
            if (data == null && hasZF) {
                final List<EVCacheClient> fbClients = _pool.getEVCacheClientsForReadExcluding(client.getServerGroup());
                for (int i = 0; i < fbClients.size(); i++) {
                    final EVCacheClient fbClient = fbClients.get(i);
                    if(i >= fbClients.size() - 1) throwEx = throwExc;
                    // re-check throttling before each fallback attempt
                    if (event != null) {
                        try {
                            if (shouldThrottle(event)) {
                                status = EVCacheMetricsFactory.THROTTLED;
                                if (throwExc) throw new EVCacheException("Request Throttled for app " + _appName + " & key " + evcKey);
                                return null;
                            }
                        } catch(EVCacheException ex) {
                            if(throwExc) throw ex;
                            status = EVCacheMetricsFactory.THROTTLED;
                            return null;
                        }
                    }
                    tries++;
                    data = getData(fbClient, evcKey, tc, throwEx, (i < fbClients.size() - 1) ? true : false);
                    if (log.isDebugEnabled() && shouldLog()) log.debug("GetAndTouch Retry for APP " + _appName + ", key [" + evcKey + (log.isTraceEnabled() ? "], Value [" + data : "") + "], ServerGroup : " + fbClient.getServerGroup());
                    if (data != null) {
                        client = fbClient;
                        break;
                    }
                }
            }
            if (data != null) {
                if (event != null) event.setAttribute("status", "THIT");
                // touch all copies
                touchData(evcKey, timeToLive);
                if (log.isDebugEnabled() && shouldLog()) log.debug("GET_AND_TOUCH : APP " + _appName + ", key [" + evcKey + (log.isTraceEnabled() ? "], Value [" + data : "") + "], ServerGroup : " + client.getServerGroup());
            } else {
                cacheOperation = EVCacheMetricsFactory.NO;
                if (log.isInfoEnabled() && shouldLog()) log.info("GET_AND_TOUCH : APP " + _appName + " ; cache miss for key : " + evcKey);
                if (event != null) event.setAttribute("status", "TMISS");
            }
            if (event != null) endEvent(event);
            return data;
        } catch (net.spy.memcached.internal.CheckedOperationTimeoutException ex) {
            status = EVCacheMetricsFactory.TIMEOUT;
            if (event != null) {
                event.setStatus(status);
                eventError(event, ex);
            }
            if (log.isDebugEnabled() && shouldLog()) log.debug("CheckedOperationTimeoutException executing getAndTouch APP " + _appName + ", key : " + evcKey, ex);
            if (!throwExc) return null;
            throw new EVCacheException("CheckedOperationTimeoutException executing getAndTouch APP " + _appName + ", key = " + evcKey
                    + ".\nYou can set the following property to increase the timeout " + _appName+ ".EVCacheClientPool.readTimeout=<timeout in milli-seconds>", ex);
        } catch (Exception ex) {
            status = EVCacheMetricsFactory.ERROR;
            if (log.isDebugEnabled() && shouldLog()) log.debug("Exception executing getAndTouch APP " + _appName + ", key = " + evcKey, ex);
            if (event != null) {
                event.setStatus(status);
                eventError(event, ex);
            }
            if (!throwExc) return null;
            throw new EVCacheException("Exception executing getAndTouch APP " + _appName + ", key = " + evcKey, ex);
        } finally {
            final long duration = EVCacheMetricsFactory.getInstance().getRegistry().clock().wallTime()- start;
            getTimer(Call.GET_AND_TOUCH.name(), EVCacheMetricsFactory.READ, cacheOperation, status, tries, maxReadDuration.get().intValue(), client.getServerGroup()).record(duration, TimeUnit.MILLISECONDS);
            if (log.isDebugEnabled() && shouldLog()) log.debug("Took " + duration + " milliSec to get&Touch the value for APP " + _appName + ", key " + evcKey);
        }
    }
@Override
public Future<Boolean>[] touch(String key, int timeToLive) throws EVCacheException {
checkTTL(timeToLive, Call.TOUCH);
final EVCacheLatch latch = this.touch(key, timeToLive, null);
if (latch == null) return new EVCacheFuture[0];
final List<Future<Boolean>> futures = latch.getAllFutures();
if (futures == null || futures.isEmpty()) return new EVCacheFuture[0];
final EVCacheFuture[] eFutures = new EVCacheFuture[futures.size()];
for (int i = 0; i < futures.size(); i++) {
final Future<Boolean> future = futures.get(i);
if (future instanceof EVCacheFuture) {
eFutures[i] = (EVCacheFuture) future;
} else if (future instanceof EVCacheOperationFuture) {
final EVCacheOperationFuture<Boolean> evfuture = (EVCacheOperationFuture<Boolean>)future;
eFutures[i] = new EVCacheFuture(future, key, _appName, evfuture.getServerGroup(), evfuture.getEVCacheClient());
} else {
eFutures[i] = new EVCacheFuture(future, key, _appName, null);
}
}
return eFutures;
}
    /**
     * Touches (resets the TTL of) the key on every write-enabled copy and returns a latch
     * that completes when the number of copies implied by the policy have acknowledged.
     * A null policy defaults to ALL_MINUS_1. Records a TTL distribution summary and a
     * write timer.
     *
     * @return the latch tracking the per-copy operations (possibly an empty fast-fail latch)
     * @throws EVCacheException when exceptions are enabled and the touch fails or is throttled
     */
    public <T> EVCacheLatch touch(String key, int timeToLive, Policy policy) throws EVCacheException {
        if (null == key) throw new IllegalArgumentException();
        checkTTL(timeToLive, Call.TOUCH);
        final boolean throwExc = doThrowException();
        final EVCacheClient[] clients = _pool.getEVCacheClientForWrite();
        if (clients.length == 0) {
            incrementFastFail(EVCacheMetricsFactory.NULL_CLIENT, Call.TOUCH);
            if (throwExc) throw new EVCacheException("Could not find a client to set the data");
            return new EVCacheLatchImpl(policy, 0, _appName); // Fast failure
        }
        final EVCacheKey evcKey = getEVCacheKey(key);
        final EVCacheEvent event = createEVCacheEvent(Arrays.asList(clients), Call.TOUCH);
        if (event != null) {
            event.setEVCacheKeys(Arrays.asList(evcKey));
            try {
                if (shouldThrottle(event)) {
                    incrementFastFail(EVCacheMetricsFactory.THROTTLED, Call.TOUCH);
                    if (throwExc) throw new EVCacheException("Request Throttled for app " + _appName + " & key " + key);
                    return new EVCacheLatchImpl(policy, 0, _appName); // Fast failure
                }
            } catch(EVCacheException ex) {
                if(throwExc) throw ex;
                incrementFastFail(EVCacheMetricsFactory.THROTTLED, Call.TOUCH);
                return null;
            }
            startEvent(event);
        }
        String status = EVCacheMetricsFactory.SUCCESS;
        final long start = EVCacheMetricsFactory.getInstance().getRegistry().clock().wallTime();
        try {
            // latch expects acknowledgements only from read-write copies, hence the write-only subtraction
            final EVCacheLatchImpl latch = new EVCacheLatchImpl(policy == null ? Policy.ALL_MINUS_1 : policy, clients.length - _pool.getWriteOnlyEVCacheClients().length, _appName);
            touchData(evcKey, timeToLive, clients, latch);
            if (event != null) {
                event.setTTL(timeToLive);
                if(_eventsUsingLatchFP.get()) {
                    latch.setEVCacheEvent(event);
                    latch.scheduledFutureValidation();
                } else {
                    endEvent(event);
                }
            }
            return latch;
        } catch (Exception ex) {
            status = EVCacheMetricsFactory.ERROR;
            if (log.isDebugEnabled() && shouldLog()) log.debug("Exception touching the data for APP " + _appName + ", key : " + evcKey, ex);
            if (event != null) {
                event.setStatus(status);
                eventError(event, ex);
            }
            if (!throwExc) return new EVCacheLatchImpl(policy, 0, _appName);
            throw new EVCacheException("Exception setting data for APP " + _appName + ", key : " + evcKey, ex);
        } finally {
            final long duration = EVCacheMetricsFactory.getInstance().getRegistry().clock().wallTime()- start;
            getTTLDistributionSummary(Call.TOUCH.name(), EVCacheMetricsFactory.WRITE, EVCacheMetricsFactory.TTL).record(timeToLive);
            getTimer(Call.TOUCH.name(), EVCacheMetricsFactory.WRITE, null, status, 1, maxWriteDuration.get().intValue(), null).record(duration, TimeUnit.MILLISECONDS);
            if (log.isDebugEnabled() && shouldLog()) log.debug("TOUCH : APP " + _appName + " for key : " + evcKey + " with timeToLive : " + timeToLive);
        }
    }
private void touchData(EVCacheKey evcKey, int timeToLive) throws Exception {
final EVCacheClient[] clients = _pool.getEVCacheClientForWrite();
touchData(evcKey, timeToLive, clients);
}
private void touchData(EVCacheKey evcKey, int timeToLive, EVCacheClient[] clients) throws Exception {
touchData(evcKey, timeToLive, clients, null);
}
private void touchData(EVCacheKey evcKey, int timeToLive, EVCacheClient[] clients, EVCacheLatch latch ) throws Exception {
checkTTL(timeToLive, Call.TOUCH);
for (EVCacheClient client : clients) {
client.touch(evcKey.getDerivedKey(client.isDuetClient(), client.getHashingAlgorithm(), client.shouldEncodeHashKey(), client.getMaxDigestBytes(), client.getMaxHashLength(), client.getBaseEncoder()), timeToLive, latch);
}
}
public <T> Future<T> getAsynchronous(String key) throws EVCacheException {
return this.getAsynchronous(key, (Transcoder<T>) _transcoder);
};
@Override
public <T> Future<T> getAsynchronous(final String key, final Transcoder<T> tc) throws EVCacheException {
if (null == key) throw new IllegalArgumentException("Key is null.");
final boolean throwExc = doThrowException();
final EVCacheClient client = _pool.getEVCacheClientForRead();
if (client == null) {
incrementFastFail(EVCacheMetricsFactory.NULL_CLIENT, Call.ASYNC_GET);
if (throwExc) throw new EVCacheException("Could not find a client to asynchronously get the data");
return null; // Fast failure
}
return getGetFuture(client, key, tc, throwExc);
}
    /**
     * Builds a {@link Future} for a single-key asynchronous read against the given client.
     * Applies event throttling first, then issues the read. When the key is hashed, the stored
     * bytes are an {@link EVCacheValue} wrapper; the returned Future unwraps it lazily, checks
     * for hash collisions against the canonical key, and decodes with the effective transcoder.
     *
     * @param client   the client to issue the read on
     * @param key      the caller-supplied key
     * @param tc       transcoder used to decode the value; when null, falls back to the app
     *                 transcoder and then the client's default transcoder
     * @param throwExc whether failures should surface as {@link EVCacheException}
     * @return a future yielding the decoded value, or null on throttle/failure when exceptions are suppressed
     * @throws EVCacheException on throttle or read failure when {@code throwExc} is true
     */
    private <T> Future<T> getGetFuture(final EVCacheClient client, final String key, final Transcoder<T> tc, final boolean throwExc) throws EVCacheException {
        final EVCacheKey evcKey = getEVCacheKey(key);
        final EVCacheEvent event = createEVCacheEvent(Collections.singletonList(client), Call.ASYNC_GET);
        if (event != null) {
            event.setEVCacheKeys(Arrays.asList(evcKey));
            // Throttle check: either return fast-fail or rethrow depending on throwExc.
            try {
                if (shouldThrottle(event)) {
                    incrementFastFail(EVCacheMetricsFactory.THROTTLED, Call.ASYNC_GET);
                    if (throwExc) throw new EVCacheException("Request Throttled for app " + _appName + " & key " + key);
                    return null;
                }
            } catch(EVCacheException ex) {
                if(throwExc) throw ex;
                incrementFastFail(EVCacheMetricsFactory.THROTTLED, Call.ASYNC_GET);
                return null;
            }
            startEvent(event);
        }
        String status = EVCacheMetricsFactory.SUCCESS;
        final Future<T> r;
        final long start = EVCacheMetricsFactory.getInstance().getRegistry().clock().wallTime();
        try {
            String hashKey = evcKey.getHashKey(client.isDuetClient(), client.getHashingAlgorithm(), client.shouldEncodeHashKey(), client.getMaxDigestBytes(), client.getMaxHashLength(), client.getBaseEncoder());
            String canonicalKey = evcKey.getCanonicalKey(client.isDuetClient());
            if(hashKey != null) {
                // Hashed key: fetch the EVCacheValue wrapper and adapt the Future so that
                // unwrapping/decoding happens only when the caller retrieves the result.
                final Future<Object> objFuture = client.asyncGet(hashKey, evcacheValueTranscoder, throwExc, false);
                r = new Future<T> () {
                    @Override
                    public boolean cancel(boolean mayInterruptIfRunning) {
                        return objFuture.cancel(mayInterruptIfRunning);
                    }
                    @Override
                    public boolean isCancelled() {
                        return objFuture.isCancelled();
                    }
                    @Override
                    public boolean isDone() {
                        return objFuture.isDone();
                    }
                    @Override
                    public T get() throws InterruptedException, ExecutionException {
                        return getFromObj(objFuture.get());
                    }
                    // Unwraps the EVCacheValue, guards against hash collisions (stored canonical
                    // key must match the requested one), and decodes the payload.
                    private T getFromObj(Object obj) {
                        if(obj != null && obj instanceof EVCacheValue) {
                            final EVCacheValue val = (EVCacheValue)obj;
                            if(!val.getKey().equals(canonicalKey)) {
                                // Two different canonical keys hashed to the same wire key; report and miss.
                                incrementFailure(EVCacheMetricsFactory.KEY_HASH_COLLISION, Call.ASYNC_GET.name(), EVCacheMetricsFactory.READ);
                                return null;
                            }
                            final CachedData cd = new CachedData(val.getFlags(), val.getValue(), CachedData.MAX_SIZE);
                            // Transcoder precedence: explicit tc > app transcoder > client default.
                            final Transcoder<T> transcoder = (tc == null) ? ((_transcoder == null) ? (Transcoder<T>) client.getTranscoder() : (Transcoder<T>) _transcoder) : tc;
                            return transcoder.decode(cd);
                        } else {
                            return null;
                        }
                    }
                    @Override
                    public T get(long timeout, TimeUnit unit) throws InterruptedException, ExecutionException, TimeoutException {
                        return getFromObj(objFuture.get(timeout, unit));
                    }
                };
            } else {
                // Plain key: decode directly with the effective transcoder.
                final Transcoder<T> transcoder = (tc == null) ? ((_transcoder == null) ? (Transcoder<T>) client.getTranscoder() : (Transcoder<T>) _transcoder) : tc;
                r = client.asyncGet(canonicalKey, transcoder, throwExc, false);
            }
            if (event != null) endEvent(event);
        } catch (Exception ex) {
            status = EVCacheMetricsFactory.ERROR;
            if (log.isDebugEnabled() && shouldLog()) log.debug( "Exception while getting data for keys Asynchronously APP " + _appName + ", key : " + key, ex);
            if (event != null) {
                event.setStatus(status);
                eventError(event, ex);
            }
            if (!throwExc) return null;
            throw new EVCacheException("Exception getting data for APP " + _appName + ", key : " + key, ex);
        } finally {
            // Latency is recorded regardless of outcome; note this measures issue time, not completion.
            final long duration = EVCacheMetricsFactory.getInstance().getRegistry().clock().wallTime()- start;
            getTimer(Call.ASYNC_GET.name(), EVCacheMetricsFactory.READ, null, status, 1, maxReadDuration.get().intValue(), client.getServerGroup()).record(duration, TimeUnit.MILLISECONDS);
            if (log.isDebugEnabled() && shouldLog()) log.debug("Took " + duration + " milliSec to execute AsyncGet the value for APP " + _appName + ", key " + key);
        }
        return r;
    }
private <T> CompletableFuture<Map<EVCacheKey, T>> getAsyncBulkData(EVCacheClient client,
EVCacheEvent event,
List<EVCacheKey> keys,
Transcoder<T> tc) {
if (event != null) {
if (shouldThrottle(event)) {
throw sneakyThrow(new EVCacheException("Request Throttled for app " + _appName + " & key " + keys));
}
}
return getAsyncBulkData(client, keys, tc);
}
private <T> CompletableFuture<Map<EVCacheKey, T>> getAsyncBulkData(EVCacheClient client,
List<EVCacheKey> evcacheKeys,
Transcoder<T> tc) {
KeyMapDto keyMapDto = buildKeyMap(client, evcacheKeys);
final Map<String, EVCacheKey> keyMap = keyMapDto.getKeyMap();
boolean hasHashedKey = keyMapDto.isKeyHashed();
if (hasHashedKey) {
if (log.isDebugEnabled() && shouldLog()) {
log.debug("fetching bulk data with hashedKey {} ",evcacheKeys);
}
return client.getAsyncBulk(keyMap.keySet(), evcacheValueTranscoder)
.thenApply(data -> buildHashedKeyValueResult(data, tc, client, keyMap))
.exceptionally(t -> handleBulkException(t, evcacheKeys));
} else {
final Transcoder<T> tcCopy;
if (tc == null && _transcoder != null) {
tcCopy = (Transcoder<T>) _transcoder;
} else {
tcCopy = tc;
}
if (log.isDebugEnabled() && shouldLog()) {
log.debug("fetching bulk data with non hashedKey {} ",keyMap.keySet());
}
return client.getAsyncBulk(keyMap.keySet(), tcCopy )
.thenApply(data -> buildNonHashedKeyValueResult(data, keyMap))
.exceptionally(t -> handleBulkException(t, evcacheKeys));
}
}
private <T> Map<EVCacheKey, T> handleBulkException(Throwable t, Collection<EVCacheKey> evCacheKeys) {
if (log.isDebugEnabled() && shouldLog())
log.debug("Exception while getBulk data for APP " + _appName + ", key : " + evCacheKeys, t);
throw Sneaky.sneakyThrow(t);
}
private KeyMapDto buildKeyMap(EVCacheClient client, Collection<EVCacheKey> evcacheKeys) {
boolean hasHashedKey = false;
final Map<String, EVCacheKey> keyMap = new HashMap<String, EVCacheKey>(evcacheKeys.size() * 2);
for (EVCacheKey evcKey : evcacheKeys) {
String key = evcKey.getCanonicalKey(client.isDuetClient());
String hashKey = evcKey.getHashKey(client.isDuetClient(), client.getHashingAlgorithm(), client.shouldEncodeHashKey(), client.getMaxDigestBytes(), client.getMaxHashLength(), client.getBaseEncoder());
if (hashKey != null) {
if (log.isDebugEnabled() && shouldLog())
log.debug("APP " + _appName + ", key [" + key + "], has been hashed [" + hashKey + "]");
key = hashKey;
hasHashedKey = true;
}
keyMap.put(key, evcKey);
}
return new KeyMapDto(keyMap, hasHashedKey);
}
private <T> Map<EVCacheKey, T> buildNonHashedKeyValueResult(Map<String, T> objMap,
Map<String, EVCacheKey> keyMap) {
final Map<EVCacheKey, T> retMap = new HashMap<>((int) (objMap.size() / 0.75) + 1);
for (Map.Entry<String, T> i : objMap.entrySet()) {
final EVCacheKey evcKey = keyMap.get(i.getKey());
if (log.isDebugEnabled() && shouldLog())
log.debug("APP " + _appName + ", key [" + i.getKey() + "] EVCacheKey " + evcKey);
retMap.put(evcKey, i.getValue());
}
return retMap;
}
private <T> Map<EVCacheKey, T> buildHashedKeyValueResult(Map<String, Object> objMap,
Transcoder<T> tc,
EVCacheClient client,
Map<String, EVCacheKey> keyMap) {
final Map<EVCacheKey, T> retMap = new HashMap<>((int) (objMap.size() / 0.75) + 1);
for (Map.Entry<String, Object> i : objMap.entrySet()) {
final Object obj = i.getValue();
if (obj instanceof EVCacheValue) {
if (log.isDebugEnabled() && shouldLog())
log.debug("APP " + _appName + ", The value for key [" + i.getKey() + "] is EVCache Value");
final EVCacheValue val = (EVCacheValue) obj;
final CachedData cd = new CachedData(val.getFlags(), val.getValue(), CachedData.MAX_SIZE);
final T tVal;
if (tc == null) {
tVal = (T) client.getTranscoder().decode(cd);
} else {
tVal = tc.decode(cd);
}
final EVCacheKey evcKey = keyMap.get(i.getKey());
if (evcKey.getCanonicalKey(client.isDuetClient()).equals(val.getKey())) {
if (log.isDebugEnabled() && shouldLog())
log.debug("APP " + _appName + ", key [" + i.getKey() + "] EVCacheKey " + evcKey);
retMap.put(evcKey, tVal);
} else {
if (log.isDebugEnabled() && shouldLog())
log.debug("CACHE COLLISION : APP " + _appName + ", key [" + i.getKey() + "] EVCacheKey " + evcKey);
incrementFailure(EVCacheMetricsFactory.KEY_HASH_COLLISION, Call.COMPLETABLE_FUTURE_GET_BULK.name(), EVCacheMetricsFactory.READ);
}
} else {
final EVCacheKey evcKey = keyMap.get(i.getKey());
if (log.isDebugEnabled() && shouldLog())
log.debug("APP " + _appName + ", key [" + i.getKey() + "] EVCacheKey " + evcKey);
retMap.put(evcKey, (T) obj);
}
}
return retMap;
}
private <T> Map<EVCacheKey, T> getBulkData(EVCacheClient client, Collection<EVCacheKey> evcacheKeys, Transcoder<T> tc, boolean throwException, boolean hasZF) throws Exception {
try {
boolean hasHashedKey = false;
final Map<String, EVCacheKey> keyMap = new HashMap<>(evcacheKeys.size() * 2);
for(EVCacheKey evcKey : evcacheKeys) {
String key = evcKey.getCanonicalKey(client.isDuetClient());
String hashKey = evcKey.getHashKey(client.isDuetClient(), client.getHashingAlgorithm(), client.shouldEncodeHashKey(), client.getMaxDigestBytes(), client.getMaxHashLength(), client.getBaseEncoder());
if(hashKey != null) {
if (log.isDebugEnabled() && shouldLog()) log.debug("APP " + _appName + ", key [" + key + "], has been hashed [" + hashKey + "]");
key = hashKey;
hasHashedKey = true;
}
keyMap.put(key, evcKey);
}
if(hasHashedKey) {
final Map<String, Object> objMap = client.getBulk(keyMap.keySet(), evcacheValueTranscoder, throwException, hasZF);
final Map<EVCacheKey, T> retMap = new HashMap<>((int) (objMap.size() / 0.75) + 1);
for (Map.Entry<String, Object> i : objMap.entrySet()) {
final Object obj = i.getValue();
if(obj instanceof EVCacheValue) {
if (log.isDebugEnabled() && shouldLog()) log.debug("APP " + _appName + ", The value for key [" + i.getKey() + "] is EVCache Value");
final EVCacheValue val = (EVCacheValue)obj;
final CachedData cd = new CachedData(val.getFlags(), val.getValue(), CachedData.MAX_SIZE);
final T tVal;
if(tc == null) {
tVal = (T)client.getTranscoder().decode(cd);
} else {
tVal = tc.decode(cd);
}
final EVCacheKey evcKey = keyMap.get(i.getKey());
if(evcKey.getCanonicalKey(client.isDuetClient()).equals(val.getKey())) {
if (log.isDebugEnabled() && shouldLog()) log.debug("APP " + _appName + ", key [" + i.getKey() + "] EVCacheKey " + evcKey);
retMap.put(evcKey, tVal);
} else {
if (log.isDebugEnabled() && shouldLog()) log.debug("CACHE COLLISION : APP " + _appName + ", key [" + i.getKey() + "] EVCacheKey " + evcKey);
incrementFailure(EVCacheMetricsFactory.KEY_HASH_COLLISION, Call.BULK.name(), EVCacheMetricsFactory.READ);
}
} else {
final EVCacheKey evcKey = keyMap.get(i.getKey());
if (log.isDebugEnabled() && shouldLog()) log.debug("APP " + _appName + ", key [" + i.getKey() + "] EVCacheKey " + evcKey);
retMap.put(evcKey, (T)obj);
}
}
return retMap;
} else {
if(tc == null && _transcoder != null) tc = (Transcoder<T>)_transcoder;
final Map<String, T> objMap = client.getBulk(keyMap.keySet(), tc, throwException, hasZF);
final Map<EVCacheKey, T> retMap = new HashMap<EVCacheKey, T>((int)(objMap.size()/0.75) + 1);
for (Map.Entry<String, T> i : objMap.entrySet()) {
final EVCacheKey evcKey = keyMap.get(i.getKey());
if (log.isDebugEnabled() && shouldLog()) log.debug("APP " + _appName + ", key [" + i.getKey() + "] EVCacheKey " + evcKey);
retMap.put(evcKey, i.getValue());
}
return retMap;
}
} catch (Exception ex) {
if (log.isDebugEnabled() && shouldLog()) log.debug("Exception while getBulk data for APP " + _appName + ", key : " + evcacheKeys, ex);
if (!throwException || hasZF) return null;
throw ex;
}
}
public <T> Map<String, T> getBulk(Collection<String> keys, Transcoder<T> tc) throws EVCacheException {
return getBulk(keys, tc, false, 0);
}
public <T> Map<String, T> getBulkAndTouch(Collection<String> keys, Transcoder<T> tc, int timeToLive)
throws EVCacheException {
return getBulk(keys, tc, true, timeToLive);
}
    /**
     * Core synchronous bulk read. Flow: optional in-memory-cache lookup per key, throttle
     * check, primary-client bulk fetch, then zone fallback (full retry when everything
     * missed; per-key partial retry when only some keys missed and the partial-fallback
     * property is on). Results are re-keyed onto the caller's original keys; misses are
     * represented as explicit null entries. Metrics (key count, latency, hit/partial/miss
     * tag) are recorded in the finally block.
     *
     * @param keys       the keys to fetch; must not be null (empty returns an empty map)
     * @param tc         transcoder for decoding; may be null to use app/client defaults
     * @param touch      when true, refresh the TTL of every key that was found
     * @param timeToLive TTL used for the touch; validated against BULK limits
     * @return original-key-to-value map (misses mapped to null), an empty map on fast
     *         failure, or null on throttle/error when exceptions are suppressed
     * @throws EVCacheException on throttle, timeout or read failure when exceptions are enabled
     */
    private <T> Map<String, T> getBulk(final Collection<String> keys, Transcoder<T> tc, boolean touch, int timeToLive) throws EVCacheException {
        if (null == keys) throw new IllegalArgumentException();
        if (keys.isEmpty()) return Collections.<String, T> emptyMap();
        checkTTL(timeToLive, Call.BULK);
        final boolean throwExc = doThrowException();
        final EVCacheClient client = _pool.getEVCacheClientForRead();
        if (client == null) {
            incrementFastFail(EVCacheMetricsFactory.NULL_CLIENT, Call.BULK);
            if (throwExc) throw new EVCacheException("Could not find a client to get the data in bulk");
            return Collections.<String, T> emptyMap();// Fast failure
        }
        final Map<String, T> decanonicalR = new HashMap<String, T>((keys.size() * 4) / 3 + 1);
        final Collection<EVCacheKey> evcKeys = new ArrayList<EVCacheKey>();
        /* Canonicalize keys and perform fast failure checking */
        // In-memory hits go straight into decanonicalR; only misses go to the servers.
        for (String k : keys) {
            final EVCacheKey evcKey = getEVCacheKey(k);
            T value = null;
            if (_useInMemoryCache.get()) {
                try {
                    final Transcoder<T> transcoder = (tc == null) ? ((_transcoder == null) ? (Transcoder<T>) _pool.getEVCacheClientForRead().getTranscoder() : (Transcoder<T>) _transcoder) : tc;
                    value = (T) getInMemoryCache(transcoder).get(evcKey);
                    if(value == null) if (log.isInfoEnabled() && shouldLog()) log.info("Value not_found in inmemory cache for APP " + _appName + ", key : " + evcKey + "; value : " + value );
                } catch (ExecutionException e) {
                    if (log.isDebugEnabled() && shouldLog()) log.debug("ExecutionException while getting data from InMemory Cache", e);
                    throw new EVCacheException("ExecutionException", e);
                }
            }
            if(value == null) {
                evcKeys.add(evcKey);
            } else {
                decanonicalR.put(evcKey.getKey(), value);
                if (log.isDebugEnabled() && shouldLog()) log.debug("Value retrieved from inmemory cache for APP " + _appName + ", key : " + evcKey + (log.isTraceEnabled() ? "; value : " + value : ""));
            }
        }
        // Everything answered from memory — skip the network entirely.
        if(evcKeys.size() == 0 && decanonicalR.size() == keys.size()) {
            if (log.isDebugEnabled() && shouldLog()) log.debug("All Values retrieved from inmemory cache for APP " + _appName + ", keys : " + keys + (log.isTraceEnabled() ? "; value : " + decanonicalR : ""));
            return decanonicalR;
        }
        final EVCacheEvent event = createEVCacheEvent(Collections.singletonList(client), Call.BULK);
        if (event != null) {
            event.setEVCacheKeys(evcKeys);
            try {
                if (shouldThrottle(event)) {
                    incrementFastFail(EVCacheMetricsFactory.THROTTLED, Call.BULK);
                    if (throwExc) throw new EVCacheException("Request Throttled for app " + _appName + " & keys " + keys);
                    return Collections.<String, T> emptyMap();
                }
            } catch(EVCacheException ex) {
                if(throwExc) throw ex;
                incrementFastFail(EVCacheMetricsFactory.THROTTLED, Call.BULK);
                return null;
            }
            event.setTTL(timeToLive);
            startEvent(event);
        }
        final long start = EVCacheMetricsFactory.getInstance().getRegistry().clock().wallTime();
        String cacheOperation = EVCacheMetricsFactory.YES;
        int tries = 1;
        String status = EVCacheMetricsFactory.SUCCESS;
        try {
            final boolean hasZF = hasZoneFallbackForBulk();
            // While a fallback exists, suppress exceptions on the primary read so we can retry.
            boolean throwEx = hasZF ? false : throwExc;
            Map<EVCacheKey, T> retMap = getBulkData(client, evcKeys, tc, throwEx, hasZF);
            List<EVCacheClient> fbClients = null;
            if (hasZF) {
                if (retMap == null || retMap.isEmpty()) {
                    // Full miss: retry every key on each other server group until one answers.
                    fbClients = _pool.getEVCacheClientsForReadExcluding(client.getServerGroup());
                    if (fbClients != null && !fbClients.isEmpty()) {
                        for (int i = 0; i < fbClients.size(); i++) {
                            final EVCacheClient fbClient = fbClients.get(i);
                            // Last fallback client: restore the caller's exception preference.
                            if(i >= fbClients.size() - 1) throwEx = throwExc;
                            if (event != null) {
                                try {
                                    if (shouldThrottle(event)) {
                                        status = EVCacheMetricsFactory.THROTTLED;
                                        if (throwExc) throw new EVCacheException("Request Throttled for app " + _appName + " & key " + evcKeys);
                                        return null;
                                    }
                                } catch(EVCacheException ex) {
                                    if(throwExc) throw ex;
                                    status = EVCacheMetricsFactory.THROTTLED;
                                    return null;
                                }
                            }
                            tries++;
                            retMap = getBulkData(fbClient, evcKeys, tc, throwEx, (i < fbClients.size() - 1) ? true : false);
                            if (log.isDebugEnabled() && shouldLog()) log.debug("Fallback for APP " + _appName + ", key [" + evcKeys + (log.isTraceEnabled() ? "], Value [" + retMap : "") + "], zone : " + fbClient.getZone());
                            if (retMap != null && !retMap.isEmpty()) break;
                        }
                        //increment("BULK-FULL_RETRY-" + ((retMap == null || retMap.isEmpty()) ? "MISS" : "HIT"));
                    }
                } else if (retMap != null && keys.size() > retMap.size() && _bulkPartialZoneFallbackFP.get()) {
                    // Partial miss: retry only the keys that are still absent from retMap.
                    final int initRetrySize = keys.size() - retMap.size();
                    List<EVCacheKey> retryEVCacheKeys = new ArrayList<EVCacheKey>(initRetrySize);
                    for (Iterator<EVCacheKey> keysItr = evcKeys.iterator(); keysItr.hasNext();) {
                        final EVCacheKey key = keysItr.next();
                        if (!retMap.containsKey(key)) {
                            retryEVCacheKeys.add(key);
                        }
                    }
                    fbClients = _pool.getEVCacheClientsForReadExcluding(client.getServerGroup());
                    if (fbClients != null && !fbClients.isEmpty()) {
                        for (int ind = 0; ind < fbClients.size(); ind++) {
                            final EVCacheClient fbClient = fbClients.get(ind);
                            if (event != null) {
                                try {
                                    if (shouldThrottle(event)) {
                                        status = EVCacheMetricsFactory.THROTTLED;
                                        if (throwExc) throw new EVCacheException("Request Throttled for app " + _appName + " & keys " + retryEVCacheKeys);
                                        return null;
                                    }
                                } catch(EVCacheException ex) {
                                    status = EVCacheMetricsFactory.THROTTLED;
                                    if(throwExc) throw ex;
                                    return null;
                                }
                            }
                            tries++;
                            final Map<EVCacheKey, T> fbRetMap = getBulkData(fbClient, retryEVCacheKeys, tc, false, hasZF);
                            if (log.isDebugEnabled() && shouldLog()) log.debug("Fallback for APP " + _appName + ", key [" + retryEVCacheKeys + "], Fallback Server Group : " + fbClient .getServerGroup().getName());
                            for (Map.Entry<EVCacheKey, T> i : fbRetMap.entrySet()) {
                                retMap.put(i.getKey(), i.getValue());
                                if (log.isDebugEnabled() && shouldLog()) log.debug("Fallback for APP " + _appName + ", key [" + i.getKey() + (log.isTraceEnabled() ? "], Value [" + i.getValue(): "]"));
                            }
                            if (retryEVCacheKeys.size() == fbRetMap.size()) break;
                            // Recompute the still-missing set before trying the next server group.
                            if (ind < fbClients.size()) {
                                retryEVCacheKeys = new ArrayList<EVCacheKey>(keys.size() - retMap.size());
                                for (Iterator<EVCacheKey> keysItr = evcKeys.iterator(); keysItr.hasNext();) {
                                    final EVCacheKey key = keysItr.next();
                                    if (!retMap.containsKey(key)) {
                                        retryEVCacheKeys.add(key);
                                    }
                                }
                            }
                        }
                    }
                    if (log.isDebugEnabled() && shouldLog() && retMap.size() == keys.size()) log.debug("Fallback SUCCESS for APP " + _appName + ", retMap [" + retMap + "]");
                }
            }
            // Full miss (and no in-memory hits): return explicit nulls per key when the servers
            // answered (empty map), or an empty result when the read itself failed (null map).
            if(decanonicalR.isEmpty()) {
                if (retMap == null || retMap.isEmpty()) {
                    if (log.isInfoEnabled() && shouldLog()) log.info("BULK : APP " + _appName + " ; Full cache miss for keys : " + keys);
                    if (event != null) event.setAttribute("status", "BMISS_ALL");
                    final Map<String, T> returnMap = new HashMap<String, T>();
                    if (retMap != null && retMap.isEmpty()) {
                        for (String k : keys) {
                            returnMap.put(k, null);
                        }
                    }
                    //increment("BulkMissFull");
                    cacheOperation = EVCacheMetricsFactory.NO;
                    /* If both Retry and first request fail Exit Immediately. */
                    if (event != null) endEvent(event);
                    return returnMap;
                }
            }
            /* Decanonicalize the keys */
            boolean partialHit = false;
            final List<String> decanonicalHitKeys = new ArrayList<String>(retMap.size());
            for (Iterator<EVCacheKey> itr = evcKeys.iterator(); itr.hasNext();) {
                final EVCacheKey key = itr.next();
                final String deCanKey = key.getKey();
                final T value = retMap.get(key);
                if (value != null) {
                    decanonicalR.put(deCanKey, value);
                    if (touch) touchData(key, timeToLive);
                    decanonicalHitKeys.add(deCanKey);
                } else {
                    partialHit = true;
                    // this ensures the fallback was tried
                    decanonicalR.put(deCanKey, null);
                }
            }
            if (!decanonicalR.isEmpty()) {
                if (!partialHit) {
                    if (event != null) event.setAttribute("status", "BHIT");
                } else {
                    if (event != null) {
                        event.setAttribute("status", "BHIT_PARTIAL");
                        event.setAttribute("BHIT_PARTIAL_KEYS", decanonicalHitKeys);
                    }
                    //increment("BulkHitPartial");
                    cacheOperation = EVCacheMetricsFactory.PARTIAL;
                    if (log.isInfoEnabled() && shouldLog()) log.info("BULK_HIT_PARTIAL for APP " + _appName + ", keys in cache [" + decanonicalR + "], all keys [" + keys + "]");
                }
            }
            if (log.isDebugEnabled() && shouldLog()) log.debug("BulkGet; APP " + _appName + ", keys : " + keys + (log.isTraceEnabled() ? "; value : " + decanonicalR : ""));
            if (event != null) endEvent(event);
            return decanonicalR;
        } catch (net.spy.memcached.internal.CheckedOperationTimeoutException ex) {
            status = EVCacheMetricsFactory.TIMEOUT;
            if (log.isDebugEnabled() && shouldLog()) log.debug("CheckedOperationTimeoutException getting bulk data for APP " + _appName + ", keys : " + evcKeys, ex);
            if (event != null) {
                event.setStatus(status);
                eventError(event, ex);
            }
            if (!throwExc) return null;
            throw new EVCacheException("CheckedOperationTimeoutException getting bulk data for APP " + _appName + ", keys = " + evcKeys
                    + ".\nYou can set the following property to increase the timeout " + _appName + ".EVCacheClientPool.bulkReadTimeout=<timeout in milli-seconds>", ex);
        } catch (Exception ex) {
            status = EVCacheMetricsFactory.ERROR;
            if (log.isDebugEnabled() && shouldLog()) log.debug("Exception getting bulk data for APP " + _appName + ", keys = " + evcKeys, ex);
            if (event != null) {
                event.setStatus(status);
                eventError(event, ex);
            }
            if (!throwExc) return null;
            throw new EVCacheException("Exception getting bulk data for APP " + _appName + ", keys = " + evcKeys, ex);
        } finally {
            // Metrics are recorded on every path: key-count distribution plus the call timer.
            final long duration = EVCacheMetricsFactory.getInstance().getRegistry().clock().wallTime()- start;
            if(bulkKeysSize == null) {
                final List<Tag> tagList = new ArrayList<Tag>(4);
                tagList.addAll(tags);
                tagList.add(new BasicTag(EVCacheMetricsFactory.CALL_TAG, EVCacheMetricsFactory.BULK_OPERATION));
                tagList.add(new BasicTag(EVCacheMetricsFactory.CALL_TYPE_TAG, EVCacheMetricsFactory.READ));
//                if(status != null) tagList.add(new BasicTag(EVCacheMetricsFactory.STATUS, status));
//                if(tries >= 0) tagList.add(new BasicTag(EVCacheMetricsFactory.ATTEMPT, String.valueOf(tries)));
                bulkKeysSize = EVCacheMetricsFactory.getInstance().getDistributionSummary(EVCacheMetricsFactory.OVERALL_KEYS_SIZE, tagList);
            }
            bulkKeysSize.record(keys.size());
            getTimer(Call.BULK.name(), EVCacheMetricsFactory.READ, cacheOperation, status, tries, maxReadDuration.get().intValue(), client.getServerGroup()).record(duration, TimeUnit.MILLISECONDS);
            if (log.isDebugEnabled() && shouldLog()) log.debug("BULK : APP " + _appName + " Took " + duration + " milliSec to get the value for key " + evcKeys);
        }
    }
private <T> CompletableFuture<EVCacheBulkDataDto<T>> handleBulkInMemory(Collection<String> keys, Transcoder<T> tc) {
if (log.isDebugEnabled() && shouldLog()) log.debug("handleBulkInMemory with keys {} " + keys);
final Map<String, T> decanonicalR = new HashMap<>((keys.size() * 4) / 3 + 1);
final List<EVCacheKey> evcKeys = new ArrayList<>();
CompletableFuture<EVCacheBulkDataDto<T>> promise = new CompletableFuture<>();
try {
EVCacheBulkDataDto<T> data = handleBulkInMemory(keys, tc, decanonicalR, evcKeys);
promise.complete(data);
} catch (Exception e) {
promise.completeExceptionally(e);
}
return promise;
}
private <T> EVCacheBulkDataDto<T> handleBulkInMemory(Collection<String> keys,
Transcoder<T> tc,
Map<String, T> decanonicalR,
List<EVCacheKey> evcKeys) throws Exception {
for (String k : keys) {
final EVCacheKey evcKey = getEVCacheKey(k);
T value = getInMemory(evcKey, tc);
if (value != null) {
decanonicalR.put(evcKey.getKey(), value);
if (log.isDebugEnabled() && shouldLog())
log.debug("Value retrieved from inmemory cache for APP " + _appName + ", key : "
+ evcKey + (log.isTraceEnabled() ? "; value : " + value : ""));
} else {
if (log.isDebugEnabled() && shouldLog()) log.debug("Key not present in in memory {} " + k);
evcKeys.add(evcKey);
}
}
return new EVCacheBulkDataDto<>(decanonicalR, evcKeys);
}
public <T> CompletableFuture<Map<String, T>> getAsyncBulk(String... keys) {
return this.getAsyncBulk(Arrays.asList(keys), (Transcoder<T>) _transcoder);
}
public <T> CompletableFuture<Map<String, T>> getAsyncBulk(final Collection<String> keys, Transcoder<T> tc) {
if (null == keys) throw new IllegalArgumentException();
if (keys.isEmpty()) return CompletableFuture.completedFuture(Collections.emptyMap());
return handleBulkInMemory(keys, tc)
.thenCompose(dto -> doAsyncGetBulk(keys, tc, dto));
}
    /**
     * Drives the server-side portion of an async bulk get after the in-memory pass.
     * Short-circuits when every key was an in-memory hit; otherwise acquires a client and
     * starts an event (both via an {@code errorFuture} used as an error channel — if either
     * step completes it exceptionally, that future is returned directly), then fetches,
     * applies the zone-fallback retry, merges results and records metrics.
     *
     * @param keys the caller-supplied keys (used for result shaping and logging)
     * @param tc   transcoder for decoding; may be null
     * @param dto  in-memory hits plus the keys still to be fetched
     * @return a future completing with the original-key-to-value map, or null on suppressed failure
     */
    private <T> CompletableFuture<Map<String, T>> doAsyncGetBulk(Collection<String> keys,
                                                                 Transcoder<T> tc,
                                                                 EVCacheBulkDataDto<T> dto) {
        // all keys handled by in memory
        if(dto.getEvcKeys().size() == 0 && dto.getDecanonicalR().size() == keys.size()) {
            if (log.isDebugEnabled() && shouldLog()) log.debug("All Values retrieved from in-memory cache for APP " + _appName + ", keys : " + keys);
            return CompletableFuture.completedFuture(dto.getDecanonicalR());
        }
        final boolean throwExc = doThrowException();
        // errorFuture doubles as an error channel for client/event construction.
        CompletableFuture<Map<String, T>> errorFuture = new CompletableFuture<>();
        EVCacheClient client = buildEvCacheClient(throwExc, Call.COMPLETABLE_FUTURE_GET_BULK, errorFuture);
        if (errorFuture.isCompletedExceptionally() || client == null) {
            if (client == null ) {
                if (log.isDebugEnabled() && shouldLog()) log.debug("doAsyncGetBulk is null");
                // No client and exceptions suppressed: resolve with null (fast failure).
                errorFuture.complete(null);
            }
            return errorFuture;
        }
        if (log.isDebugEnabled() && shouldLog()) log.debug("Completed Building the client for doAsyncGetBulk");
        //Building the start event
        EVCacheEvent event = buildAndStartEvent(client, dto.getEvcKeys(), throwExc, errorFuture, Call.COMPLETABLE_FUTURE_GET_BULK);
        if (errorFuture.isCompletedExceptionally()) {
            if (log.isDebugEnabled() && shouldLog()) log.debug("Error while building and starting the event for doAsyncGetBulk");
            return errorFuture;
        }
        if (log.isDebugEnabled() && shouldLog()) log.debug("Cancelling the error future");
        // Setup succeeded; the error channel is no longer needed.
        errorFuture.cancel(false);
        final long start = EVCacheMetricsFactory.getInstance().getRegistry().clock().wallTime();
        // StringBuilders so the async stages can mutate status/operation tags in place.
        StringBuilder status = new StringBuilder(EVCacheMetricsFactory.SUCCESS);
        StringBuilder cacheOperation = new StringBuilder(EVCacheMetricsFactory.YES);
        final boolean hasZF = hasZoneFallbackForBulk();
        RetryCount retryCount = new RetryCount();
        boolean throwEx = !hasZF && throwExc;
        return getAsyncBulkData(client, dto.getEvcKeys(), tc)
                .thenCompose(data -> handleBulkRetry(data, dto.getEvcKeys(), tc, client, event, hasZF, retryCount))
                .handle((data, ex) -> {
                    if (ex != null) {
                        // Failure path: record the miss, report the error, and either rethrow
                        // (wrapped unchecked) or swallow to null per throwEx.
                        handleFullCacheMiss(data, event, keys, cacheOperation);
                        handleException(ex, event);
                        if (throwEx) {
                            throw new RuntimeException(ex);
                        } else {
                            return null;
                        }
                    } else {
                        // Success path: merge server results with the in-memory hits, then
                        // record metrics/latency.
                        Map<String, T> result = handleBulkData(dto.getDecanonicalR(),
                                data,
                                event,
                                keys,
                                dto.getEvcKeys(),
                                cacheOperation);
                        handleBulkFinally(status, retryCount, client, cacheOperation, keys, start);
                        return result;
                    }
                });
    }
private <T> Map<String, T> handleBulkData(Map<String, T> decanonicalR,
Map<EVCacheKey, T> retMap,
EVCacheEvent event,
Collection<String> keys,
List<EVCacheKey> evcKeys,
StringBuilder cacheOperation) {
if(retMap == null || retMap.isEmpty()) {
return handleFullCacheMiss(retMap, event, keys, cacheOperation);
}
boolean partialHit = false;
final List<String> decanonicalHitKeys = new ArrayList<>(retMap.size());
for (final EVCacheKey key : evcKeys) {
final String deCanKey = key.getKey();
final T value = retMap.get(key);
if (value != null) {
decanonicalR.put(deCanKey, value);
decanonicalHitKeys.add(deCanKey);
} else {
partialHit = true;
// this ensures the fallback was tried
decanonicalR.put(deCanKey, null);
}
}
if (!decanonicalR.isEmpty()) {
updateBulkGetEvent(decanonicalR, event, keys, partialHit, decanonicalHitKeys, cacheOperation);
}
if (log.isDebugEnabled() && shouldLog()) log.debug("Async BulkGet; APP " + _appName + ", keys : " + keys + (log.isTraceEnabled() ? "; value : " + decanonicalR : ""));
if (event != null) endEvent(event);
return decanonicalR;
}
private <T> void updateBulkGetEvent(Map<String, T> decanonicalR,
EVCacheEvent event,
Collection<String> keys,
boolean partialHit,
List<String> decanonicalHitKeys,
StringBuilder cacheOperation) {
if (!partialHit) {
if (event != null) event.setAttribute("status", "ASYNC_BHIT");
} else {
if (event != null) {
event.setAttribute("status", "ASYNC_BHIT_PARTIAL");
event.setAttribute("ASYNC_BHIT_PARTIAL_KEYS", decanonicalHitKeys);
}
cacheOperation.replace(0, cacheOperation.length(), EVCacheMetricsFactory.PARTIAL);
if (log.isInfoEnabled() && shouldLog())
log.info("ASYNC_BULK_HIT_PARTIAL for APP " + _appName + ", keys in cache [" + decanonicalR + "], all keys [" + keys + "]");
}
}
private <T> Map<String, T> handleFullCacheMiss(Map<EVCacheKey, T> retMap,
EVCacheEvent event,
Collection<String> keys,
StringBuilder cacheOperation) {
if (log.isInfoEnabled() && shouldLog())
log.info("ASYNC BULK : APP " + _appName + " ; Full cache miss for keys : " + keys);
if (event != null) event.setAttribute("status", "ASYNC_BMISS_ALL");
final Map<String, T> returnMap = new HashMap<>();
if (retMap != null && retMap.isEmpty()) {
for (String k : keys) {
returnMap.put(k, null);
}
}
cacheOperation.replace(0, cacheOperation.length(), EVCacheMetricsFactory.NO);
if (event != null) endEvent(event);
return returnMap;
}
private <T> CompletableFuture<Map<EVCacheKey, T>> handleFullRetry(EVCacheClient client,
EVCacheEvent event,
List<EVCacheKey> evcKeys,
Transcoder<T> tc,
RetryCount retryCount) {
final List<EVCacheClient> fbClients = _pool.getEVCacheClientsForReadExcluding(client.getServerGroup());
if (log.isInfoEnabled() && shouldLog()) {
log.info("Fetching the clients for retry {}", fbClients);
}
return handleFullBulkRetries(fbClients, 0, event, evcKeys, tc, retryCount);
}
    /**
     * Recursively walks the fallback client list: tries one client, and on a null result or
     * an exception moves to the next, completing with null once every client is exhausted.
     * The thenApply/exceptionally pair each yields a nested future, flattened by the final
     * thenCompose(Function.identity()).
     *
     * @param fbClients     the fallback clients to try, in order
     * @param fbClientIndex index of the client to try on this step
     * @param event         the in-flight event used for throttle checks; may be null
     * @param evcKeys       the keys to refetch
     * @param tc            transcoder for decoding; may be null
     * @param retryCount    mutable counter incremented once per attempted client
     * @return a future completing with the first non-null fallback result, or null if none
     */
    private <T> CompletableFuture<Map<EVCacheKey, T>> handleFullBulkRetries(List<EVCacheClient> fbClients,
                                                                            int fbClientIndex,
                                                                            EVCacheEvent event,
                                                                            List<EVCacheKey> evcKeys,
                                                                            Transcoder<T> tc,
                                                                            RetryCount retryCount) {
        // Base case: no more clients to try.
        if (fbClientIndex >= fbClients.size()) {
            if (log.isInfoEnabled() && shouldLog()) {
                log.debug("Clients exhausted so returning the future with null result for keys {}", evcKeys);
            }
            return CompletableFuture.completedFuture(null);
        }
        if (log.isInfoEnabled() && shouldLog()) {
            EVCacheClient evCacheClient = fbClients.get(fbClientIndex);
            log.debug("Trying to fetching the data from server group {} client {} and keys {}", evCacheClient.getServerGroupName(), evCacheClient.getId(), evcKeys);
        }
        CompletableFuture<Map<EVCacheKey, T>> future = getAsyncBulkData(fbClients.get(fbClientIndex), event, evcKeys, tc);
        int nextIndex = fbClientIndex + 1;
        retryCount.incr();
        // Success with data -> wrap and finish; null result or failure -> recurse to the next client.
        return future
                .thenApply(s -> s != null ?
                        CompletableFuture.completedFuture(s) :
                        handleFullBulkRetries(fbClients, nextIndex, event, evcKeys, tc, retryCount))
                .exceptionally(t -> handleFullBulkRetries(fbClients, nextIndex, event, evcKeys, tc, retryCount))
                .thenCompose(Function.identity());
    }
private <T> CompletableFuture<Map<EVCacheKey, T>> handleBulkRetry(Map<EVCacheKey, T> retMap,
List<EVCacheKey> evcKeys,
Transcoder<T> tc,
EVCacheClient client,
EVCacheEvent event,
boolean hasZF,
RetryCount retryCount) {
if (log.isInfoEnabled() && shouldLog()) {
log.debug("handling Bulk retry with keys {}", evcKeys);
}
if (hasZF && (retMap == null || retMap.isEmpty())) {
if (log.isInfoEnabled() && shouldLog()) {
log.debug("Return map is null or empty for going for a full retry {} ", evcKeys);
}
return handleFullRetry(client, event, evcKeys, tc, retryCount);
}
if (log.isInfoEnabled() && shouldLog()) {
log.debug("Async does not yet support partial retry for bulk. So completing the future or keys {}", evcKeys);
}
return CompletableFuture.completedFuture(retMap);
}
public <T> Map<String, T> getBulk(Collection<String> keys) throws EVCacheException {
return (this.getBulk(keys, (Transcoder<T>) _transcoder));
}
public <T> Map<String, T> getBulk(String... keys) throws EVCacheException {
return (this.getBulk(Arrays.asList(keys), (Transcoder<T>) _transcoder));
}
    /**
     * Varargs convenience overload: bulk-reads the given keys decoding with the supplied
     * transcoder.
     *
     * @param tc   transcoder used to decode the values
     * @param keys keys to fetch
     * @return map of key to decoded value
     * @throws EVCacheException on fetch failure when exceptions are enabled
     */
    public <T> Map<String, T> getBulk(Transcoder<T> tc, String... keys) throws EVCacheException {
        return (this.getBulk(Arrays.asList(keys), tc));
    }
@Override
public <T> EVCacheFuture[] set(String key, T value, Transcoder<T> tc, int timeToLive) throws EVCacheException {
final EVCacheLatch latch = this.set(key, value, tc, timeToLive, null);
if (latch == null) return new EVCacheFuture[0];
final List<Future<Boolean>> futures = latch.getAllFutures();
if (futures == null || futures.isEmpty()) return new EVCacheFuture[0];
final EVCacheFuture[] eFutures = new EVCacheFuture[futures.size()];
for (int i = 0; i < futures.size(); i++) {
final Future<Boolean> future = futures.get(i);
if (future instanceof EVCacheFuture) {
eFutures[i] = (EVCacheFuture) future;
} else if (future instanceof EVCacheOperationFuture) {
eFutures[i] = new EVCacheFuture(futures.get(i), key, _appName, ((EVCacheOperationFuture<T>) futures.get(i)).getServerGroup());
} else {
eFutures[i] = new EVCacheFuture(future, key, _appName, null);
}
}
return eFutures;
}
public <T> EVCacheLatch set(String key, T value, Policy policy) throws EVCacheException {
return set(key, value, (Transcoder<T>)_transcoder, _timeToLive, policy);
}
public <T> EVCacheLatch set(String key, T value, int timeToLive, Policy policy) throws EVCacheException {
return set(key, value, (Transcoder<T>)_transcoder, timeToLive, policy);
}
    /**
     * Sets the value under the key using the supplied transcoder and the app-level default TTL.
     *
     * @param key    key to store under
     * @param value  value to store
     * @param tc     transcoder used to encode the value
     * @param policy latch completion policy
     * @return latch tracking the writes
     * @throws EVCacheException on write failure when exceptions are enabled
     */
    public <T> EVCacheLatch set(String key, T value, Transcoder<T> tc, EVCacheLatch.Policy policy) throws EVCacheException {
        return set(key, value, tc, _timeToLive, policy);
    }
public <T> EVCacheLatch set(String key, T value, Transcoder<T> tc, int timeToLive, Policy policy) throws EVCacheException {
EVCacheClient[] clients = _pool.getEVCacheClientForWrite();
return this.set(key, value, tc, timeToLive, policy, clients, clients.length - _pool.getWriteOnlyEVCacheClients().length);
}
    /**
     * Core write path: encodes the value once and asynchronously sets it on every supplied
     * client (one per server group), tracking completion through the returned latch.
     *
     * <p>When key hashing is enabled for a client, the value is wrapped in an
     * {@code EVCacheValue} carrying the canonical key, so the original key survives hashing.
     *
     * @param key        key to store under (must be non-null)
     * @param value      value to store (must be non-null)
     * @param tc         transcoder to encode with; falls back to the app transcoder, then the
     *                   client's default transcoder
     * @param timeToLive TTL for the entry
     * @param policy     latch completion policy; {@code Policy.ALL_MINUS_1} when null
     * @param clients    clients to write to
     * @param latchCount number of writes the latch waits on
     * @return latch tracking the writes; a zero-count latch (or null after a throttling
     *         exception) on fast failure when exceptions are disabled
     * @throws IllegalArgumentException when key or value is null
     * @throws EVCacheException         on throttling or write failure when exceptions are enabled
     */
    protected <T> EVCacheLatch set(String key, T value, Transcoder<T> tc, int timeToLive, Policy policy, EVCacheClient[] clients, int latchCount) throws EVCacheException {
        if ((null == key) || (null == value)) throw new IllegalArgumentException();
        checkTTL(timeToLive, Call.SET);
        final boolean throwExc = doThrowException();
        if (clients.length == 0) {
            incrementFastFail(EVCacheMetricsFactory.NULL_CLIENT, Call.SET);
            if (throwExc) throw new EVCacheException("Could not find a client to set the data");
            return new EVCacheLatchImpl(policy, 0, _appName); // Fast failure
        }
        final EVCacheKey evcKey = getEVCacheKey(key);
        final EVCacheEvent event = createEVCacheEvent(Arrays.asList(clients), Call.SET);
        if (event != null) {
            event.setEVCacheKeys(Arrays.asList(evcKey));
            try {
                if (shouldThrottle(event)) {
                    incrementFastFail(EVCacheMetricsFactory.THROTTLED, Call.SET);
                    if (throwExc) throw new EVCacheException("Request Throttled for app " + _appName + " & key " + key);
                    return new EVCacheLatchImpl(policy, 0, _appName);
                }
            } catch(EVCacheException ex) {
                if(throwExc) throw ex;
                incrementFastFail(EVCacheMetricsFactory.THROTTLED, Call.SET);
                return null;
            }
            startEvent(event);
        }
        final long start = EVCacheMetricsFactory.getInstance().getRegistry().clock().wallTime();
        String status = EVCacheMetricsFactory.SUCCESS;
        final EVCacheLatchImpl latch = new EVCacheLatchImpl(policy == null ? Policy.ALL_MINUS_1 : policy, latchCount, _appName);
        try {
            // cd / cdHashed are encoded lazily on the first client and reused for the rest.
            CachedData cd = null;
            CachedData cdHashed = null;
            for (EVCacheClient client : clients) {
                final String canonicalKey = evcKey.getCanonicalKey(client.isDuetClient());
                final String hashKey = evcKey.getHashKey(client.isDuetClient(), client.getHashingAlgorithm(), client.shouldEncodeHashKey(), client.getMaxDigestBytes(), client.getMaxHashLength(), client.getBaseEncoder());
                if(cd == null) {
                    if (tc != null) {
                        cd = tc.encode(value);
                    } else if (_transcoder != null) {
                        cd = ((Transcoder<Object>) _transcoder).encode(value);
                    } else {
                        cd = client.getTranscoder().encode(value);
                    }
                }
                if (hashKey != null) {
                    // Hashed key: store an EVCacheValue wrapper so the canonical key is recoverable.
                    if(cdHashed == null) {
                        final EVCacheValue val = new EVCacheValue(canonicalKey, cd.getData(), cd.getFlags(), timeToLive, System.currentTimeMillis());
                        cdHashed = evcacheValueTranscoder.encode(val);
                    }
                    final Future<Boolean> future = client.set(hashKey, cdHashed, timeToLive, latch);
                    if (log.isDebugEnabled() && shouldLog()) log.debug("SET : APP " + _appName + ", Future " + future + " for hashed key : " + evcKey);
                } else {
                    final Future<Boolean> future = client.set(canonicalKey, cd, timeToLive, latch);
                    if (log.isDebugEnabled() && shouldLog()) log.debug("SET : APP " + _appName + ", Future " + future + " for key : " + evcKey);
                }
            }
            if (event != null) {
                event.setTTL(timeToLive);
                event.setCachedData(cd);
                // When the feature flag is on, event completion is deferred to latch validation.
                if(_eventsUsingLatchFP.get()) {
                    latch.setEVCacheEvent(event);
                    latch.scheduledFutureValidation();
                } else {
                    endEvent(event);
                }
            }
            return latch;
        } catch (Exception ex) {
            if (log.isDebugEnabled() && shouldLog()) log.debug("Exception setting the data for APP " + _appName + ", key : " + evcKey, ex);
            if (event != null) endEvent(event);
            status = EVCacheMetricsFactory.ERROR;
            if (!throwExc) return new EVCacheLatchImpl(policy, 0, _appName);
            throw new EVCacheException("Exception setting data for APP " + _appName + ", key : " + evcKey, ex);
        } finally {
            // Record TTL distribution and call latency regardless of outcome.
            final long duration = EVCacheMetricsFactory.getInstance().getRegistry().clock().wallTime()- start;
            getTTLDistributionSummary(Call.SET.name(), EVCacheMetricsFactory.WRITE, EVCacheMetricsFactory.TTL).record(timeToLive);
            getTimer(Call.SET.name(), EVCacheMetricsFactory.WRITE, null, status, 1, maxWriteDuration.get().intValue(), null).record(duration, TimeUnit.MILLISECONDS);
            if (log.isDebugEnabled() && shouldLog()) log.debug("SET : APP " + _appName + ", Took " + duration + " milliSec for key : " + evcKey);
        }
    }
    /**
     * Appends the value to the data already stored under the key on every server group,
     * using each client's default transcoder (no explicit transcoder supplied).
     *
     * @param key        key whose value is appended to
     * @param value      data to append
     * @param timeToLive TTL applied via the follow-up touch
     * @return one future per write, or an empty array on fast failure
     * @throws EVCacheException on failure when exceptions are enabled
     */
    public <T> EVCacheFuture[] append(String key, T value, int timeToLive) throws EVCacheException {
        return this.append(key, value, null, timeToLive);
    }
    /**
     * Appends the encoded value to the existing entry on every writable client, then touches
     * the key on all clients to apply the given TTL. Not supported when key hashing is
     * enabled (append would corrupt the EVCacheValue wrapper used for hashed keys).
     *
     * @param key        key whose value is appended to (must be non-null)
     * @param value      data to append (must be non-null)
     * @param tc         transcoder to encode with; falls back to the app transcoder, then the
     *                   client's default transcoder
     * @param timeToLive TTL applied via the follow-up touch
     * @return one future per client write; empty array on fast failure; null after a
     *         swallowed throttling exception
     * @throws IllegalArgumentException when key/value is null or key hashing is enabled
     * @throws EVCacheException         on throttling or write failure when exceptions are enabled
     */
    public <T> EVCacheFuture[] append(String key, T value, Transcoder<T> tc, int timeToLive) throws EVCacheException {
        if ((null == key) || (null == value)) throw new IllegalArgumentException();
        checkTTL(timeToLive, Call.APPEND);
        final boolean throwExc = doThrowException();
        final EVCacheClient[] clients = _pool.getEVCacheClientForWrite();
        if (clients.length == 0) {
            incrementFastFail(EVCacheMetricsFactory.NULL_CLIENT, Call.APPEND);
            if (throwExc) throw new EVCacheException("Could not find a client to set the data");
            return new EVCacheFuture[0]; // Fast failure
        }
        final EVCacheEvent event = createEVCacheEvent(Arrays.asList(clients), Call.APPEND);
        final EVCacheKey evcKey = getEVCacheKey(key);
        if (event != null) {
            event.setEVCacheKeys(Arrays.asList(evcKey));
            try {
                if (shouldThrottle(event)) {
                    incrementFastFail(EVCacheMetricsFactory.THROTTLED, Call.APPEND);
                    if (throwExc) throw new EVCacheException("Request Throttled for app " + _appName + " & key " + key);
                    return new EVCacheFuture[0];
                }
            } catch(EVCacheException ex) {
                if(throwExc) throw ex;
                incrementFastFail(EVCacheMetricsFactory.THROTTLED, Call.APPEND);
                return null;
            }
            startEvent(event);
        }
        final long start = EVCacheMetricsFactory.getInstance().getRegistry().clock().wallTime();
        String status = EVCacheMetricsFactory.SUCCESS;
        try {
            final EVCacheFuture[] futures = new EVCacheFuture[clients.length];
            // Value is encoded once on the first client and reused for the rest.
            CachedData cd = null;
            int index = 0;
            for (EVCacheClient client : clients) {
                // ensure key hashing is not enabled
                if (evcKey.getHashKey(client.isDuetClient(), client.getHashingAlgorithm(), client.shouldEncodeHashKey(), client.getMaxDigestBytes(), client.getMaxHashLength(), client.getBaseEncoder()) != null) {
                    throw new IllegalArgumentException("append is not supported when key hashing is enabled.");
                }
                if (cd == null) {
                    if (tc != null) {
                        cd = tc.encode(value);
                    } else if ( _transcoder != null) {
                        cd = ((Transcoder<Object>)_transcoder).encode(value);
                    } else {
                        cd = client.getTranscoder().encode(value);
                    }
                    //if (cd != null) EVCacheMetricsFactory.getInstance().getDistributionSummary(_appName + "-AppendData-Size", tags).record(cd.getData().length);
                }
                final Future<Boolean> future = client.append(evcKey.getDerivedKey(client.isDuetClient(), client.getHashingAlgorithm(), client.shouldEncodeHashKey(), client.getMaxDigestBytes(), client.getMaxHashLength(), client.getBaseEncoder()), cd);
                futures[index++] = new EVCacheFuture(future, key, _appName, client.getServerGroup());
            }
            if (event != null) {
                event.setCachedData(cd);
                event.setTTL(timeToLive);
                endEvent(event);
            }
            // Touch all copies so the TTL is (re)applied after the append.
            touchData(evcKey, timeToLive, clients);
            return futures;
        } catch (Exception ex) {
            status = EVCacheMetricsFactory.ERROR;
            if (log.isDebugEnabled() && shouldLog()) log.debug("Exception setting the data for APP " + _appName + ", key : " + evcKey, ex);
            if (event != null) {
                event.setStatus(status);
                eventError(event, ex);
            }
            if (!throwExc) return new EVCacheFuture[0];
            throw new EVCacheException("Exception setting data for APP " + _appName + ", key : " + evcKey, ex);
        } finally {
            final long duration = EVCacheMetricsFactory.getInstance().getRegistry().clock().wallTime()- start;
            //timer.record(duration, TimeUnit.MILLISECONDS);
            getTimer(Call.APPEND.name(), EVCacheMetricsFactory.WRITE, null, status, 1, maxWriteDuration.get().intValue(), null).record(duration, TimeUnit.MILLISECONDS);
            if (log.isDebugEnabled() && shouldLog()) log.debug("APPEND : APP " + _appName + ", Took " + duration + " milliSec for key : " + evcKey);
        }
    }
    /**
     * Sets the value under the key with the supplied transcoder and the app-level default TTL,
     * returning per-write futures.
     *
     * @param key   key to store under
     * @param value value to store
     * @param tc    transcoder used to encode the value
     * @return one future per write
     * @throws EVCacheException on write failure when exceptions are enabled
     */
    public <T> EVCacheFuture[] set(String key, T value, Transcoder<T> tc) throws EVCacheException {
        return this.set(key, value, tc, _timeToLive);
    }
public <T> EVCacheFuture[] set(String key, T value, int timeToLive) throws EVCacheException {
return this.set(key, value, (Transcoder<T>) _transcoder, timeToLive);
}
public <T> EVCacheFuture[] set(String key, T value) throws EVCacheException {
return this.set(key, value, (Transcoder<T>) _transcoder, _timeToLive);
}
    /**
     * Deletes the key from all server groups, returning per-delete futures. The key is
     * treated as an original (unhashed) key.
     *
     * @param key key to delete
     * @return one future per delete
     * @throws EVCacheException on failure when exceptions are enabled
     */
    public EVCacheFuture[] delete(String key) throws EVCacheException {
        return this.deleteInternal(key, false);
    }
protected EVCacheFuture[] deleteInternal(String key, boolean isOriginalKeyHashed) throws EVCacheException {
final EVCacheLatch latch = this.deleteInternal(key, null, isOriginalKeyHashed);
if (latch == null) return new EVCacheFuture[0];
final List<Future<Boolean>> futures = latch.getAllFutures();
if (futures == null || futures.isEmpty()) return new EVCacheFuture[0];
final EVCacheFuture[] eFutures = new EVCacheFuture[futures.size()];
for (int i = 0; i < futures.size(); i++) {
final Future<Boolean> future = futures.get(i);
if (future instanceof EVCacheFuture) {
eFutures[i] = (EVCacheFuture) future;
} else if (future instanceof EVCacheOperationFuture) {
final EVCacheOperationFuture<Boolean> evfuture = (EVCacheOperationFuture<Boolean>)future;
eFutures[i] = new EVCacheFuture(future, key, _appName, evfuture.getServerGroup(), evfuture.getEVCacheClient());
} else {
eFutures[i] = new EVCacheFuture(future, key, _appName, null);
}
}
return eFutures;
}
    /**
     * Deletes the key from all server groups with the given latch policy. The key is treated
     * as an original (unhashed) key.
     *
     * @param key    key to delete
     * @param policy latch completion policy
     * @return latch tracking the deletes
     * @throws EVCacheException on failure when exceptions are enabled
     */
    @Override
    public <T> EVCacheLatch delete(String key, Policy policy) throws EVCacheException {
        return this.deleteInternal(key, policy, false);
    }
    /**
     * Core delete path: issues the delete on every writable client and tracks completion via
     * the returned latch (which counts only read/write clients).
     *
     * @param key                 key to delete (must be non-null)
     * @param policy              latch completion policy; {@code Policy.ALL_MINUS_1} when null
     * @param isOriginalKeyHashed when true, the supplied key is used verbatim instead of the
     *                            derived (possibly hashed) key
     * @return latch tracking the deletes; a zero-count latch (or null after a swallowed
     *         throttling exception) on fast failure
     * @throws IllegalArgumentException when key is null
     * @throws EVCacheException         on throttling or delete failure when exceptions are enabled
     */
    protected <T> EVCacheLatch deleteInternal(String key, Policy policy, boolean isOriginalKeyHashed) throws EVCacheException {
        if (key == null) throw new IllegalArgumentException("Key cannot be null");
        final boolean throwExc = doThrowException();
        final EVCacheClient[] clients = _pool.getEVCacheClientForWrite();
        if (clients.length == 0) {
            incrementFastFail(EVCacheMetricsFactory.NULL_CLIENT, Call.DELETE);
            // NOTE(review): message reads "keyAPP" — missing separator between "key" and "APP".
            if (throwExc) throw new EVCacheException("Could not find a client to delete the keyAPP " + _appName
                    + ", Key " + key);
            return new EVCacheLatchImpl(policy, 0, _appName); // Fast failure
        }
        final EVCacheKey evcKey = getEVCacheKey(key);
        final EVCacheEvent event = createEVCacheEvent(Arrays.asList(clients), Call.DELETE);
        if (event != null) {
            event.setEVCacheKeys(Arrays.asList(evcKey));
            try {
                if (shouldThrottle(event)) {
                    incrementFastFail(EVCacheMetricsFactory.THROTTLED, Call.DELETE);
                    if (throwExc) throw new EVCacheException("Request Throttled for app " + _appName + " & key " + key);
                    return new EVCacheLatchImpl(policy, 0, _appName); // Fast failure
                }
            } catch(EVCacheException ex) {
                if(throwExc) throw ex;
                incrementFastFail(EVCacheMetricsFactory.THROTTLED, Call.DELETE);
                return null;
            }
            startEvent(event);
        }
        String status = EVCacheMetricsFactory.SUCCESS;
        final long start = EVCacheMetricsFactory.getInstance().getRegistry().clock().wallTime();
        final EVCacheLatchImpl latch = new EVCacheLatchImpl(policy == null ? Policy.ALL_MINUS_1 : policy, clients.length - _pool.getWriteOnlyEVCacheClients().length, _appName);
        try {
            for (int i = 0; i < clients.length; i++) {
                Future<Boolean> future = clients[i].delete(isOriginalKeyHashed ? evcKey.getKey() : evcKey.getDerivedKey(clients[i].isDuetClient(), clients[i].getHashingAlgorithm(), clients[i].shouldEncodeHashKey(), clients[i].getMaxDigestBytes(), clients[i].getMaxHashLength(), clients[i].getBaseEncoder()), latch);
                if (log.isDebugEnabled() && shouldLog()) log.debug("DELETE : APP " + _appName + ", Future " + future + " for key : " + evcKey);
            }
            if (event != null) {
                // When the feature flag is on, event completion is deferred to latch validation.
                if(_eventsUsingLatchFP.get()) {
                    latch.setEVCacheEvent(event);
                    latch.scheduledFutureValidation();
                } else {
                    endEvent(event);
                }
            }
            return latch;
        } catch (Exception ex) {
            if (log.isDebugEnabled() && shouldLog()) log.debug("Exception while deleting the data for APP " + _appName + ", key : " + key, ex);
            status = EVCacheMetricsFactory.ERROR;
            if (event != null) {
                event.setStatus(status);
                eventError(event, ex);
            }
            if (!throwExc) return new EVCacheLatchImpl(policy, 0, _appName);
            throw new EVCacheException("Exception while deleting the data for APP " + _appName + ", key : " + key, ex);
        } finally {
            final long duration = EVCacheMetricsFactory.getInstance().getRegistry().clock().wallTime()- start;
            getTimer(Call.DELETE.name(), EVCacheMetricsFactory.WRITE, null, status, 1, maxWriteDuration.get().intValue(), null).record(duration, TimeUnit.MILLISECONDS);
            //timer.record(duration, TimeUnit.MILLISECONDS);
            if (log.isDebugEnabled() && shouldLog()) log.debug("DELETE : APP " + _appName + " Took " + duration + " milliSec for key : " + key);
        }
    }
    /** Returns the app-level default time-to-live applied when callers do not supply one. */
    public int getDefaultTTL() {
        return _timeToLive;
    }
    /**
     * Increments the counter stored under the key on every writable client and then reconciles
     * the replicas: the largest value seen wins, and any zone that returned -1 or a smaller
     * value is brought up to the winning value.
     *
     * @param key        counter key (must be non-null)
     * @param by         increment amount (must be &gt;= 0)
     * @param defaultVal value seeded when the counter does not exist (must be &gt;= 0)
     * @param timeToLive TTL for the counter (must be &gt;= 0)
     * @return the reconciled counter value, or -1 on failure/fast failure
     * @throws IllegalArgumentException on null key or negative arguments
     * @throws EVCacheException         on throttling or failure when exceptions are enabled
     */
    public long incr(String key, long by, long defaultVal, int timeToLive) throws EVCacheException {
        if ((null == key) || by < 0 || defaultVal < 0 || timeToLive < 0) throw new IllegalArgumentException();
        checkTTL(timeToLive, Call.INCR);
        final boolean throwExc = doThrowException();
        final EVCacheClient[] clients = _pool.getEVCacheClientForWrite();
        if (clients.length == 0) {
            incrementFastFail(EVCacheMetricsFactory.NULL_CLIENT, Call.INCR);
            if (log.isDebugEnabled() && shouldLog()) log.debug("INCR : " + _metricPrefix + ":NULL_CLIENT");
            if (throwExc) throw new EVCacheException("Could not find a client to incr the data");
            return -1;
        }
        final EVCacheKey evcKey = getEVCacheKey(key);
        final EVCacheEvent event = createEVCacheEvent(Arrays.asList(clients), Call.INCR);
        if (event != null) {
            event.setTTL(timeToLive);
            event.setEVCacheKeys(Arrays.asList(evcKey));
            try {
                if (shouldThrottle(event)) {
                    incrementFastFail(EVCacheMetricsFactory.THROTTLED, Call.INCR);
                    if (throwExc) throw new EVCacheException("Request Throttled for app " + _appName + " & key " + key);
                    return -1;
                }
            } catch(EVCacheException ex) {
                if(throwExc) throw ex;
                incrementFastFail(EVCacheMetricsFactory.THROTTLED, Call.INCR);
                return -1;
            }
            startEvent(event);
        }
        String status = EVCacheMetricsFactory.SUCCESS;
        final long start = EVCacheMetricsFactory.getInstance().getRegistry().clock().wallTime();
        long currentValue = -1;
        try {
            // First pass: increment on every client, remembering the largest value seen.
            final long[] vals = new long[clients.length];
            int index = 0;
            for (EVCacheClient client : clients) {
                vals[index] = client.incr(evcKey.getDerivedKey(client.isDuetClient(), client.getHashingAlgorithm(), client.shouldEncodeHashKey(), client.getMaxDigestBytes(), client.getMaxHashLength(), client.getBaseEncoder()), by, defaultVal, timeToLive);
                if (vals[index] != -1 && currentValue < vals[index]) {
                    currentValue = vals[index];
                    if (log.isDebugEnabled()) log.debug("INCR : APP " + _appName + " current value = " + currentValue + " for key : " + key + " from client : " + client);
                }
                index++;
            }
            // Second pass: repair any zone that disagrees with the winning value.
            if (currentValue != -1) {
                CachedData cd = null;
                if (log.isDebugEnabled()) log.debug("INCR : APP " + _appName + " current value = " + currentValue + " for key : " + key);
                for (int i = 0; i < vals.length; i++) {
                    if (vals[i] == -1 && currentValue > -1) {
                        if (log.isDebugEnabled()) log.debug("INCR : APP " + _appName + "; Zone " + clients[i].getZone()
                                + " had a value = -1 so setting it to current value = " + currentValue + " for key : " + key);
                        clients[i].incr(evcKey.getDerivedKey(clients[i].isDuetClient(), clients[i].getHashingAlgorithm(), clients[i].shouldEncodeHashKey(), clients[i].getMaxDigestBytes(), clients[i].getMaxHashLength(), clients[i].getBaseEncoder()), 0, currentValue, timeToLive);
                    } else if (vals[i] != currentValue) {
                        if(cd == null) cd = clients[i].getTranscoder().encode(String.valueOf(currentValue));
                        if (log.isDebugEnabled()) log.debug("INCR : APP " + _appName + "; Zone " + clients[i].getZone()
                                + " had a value of " + vals[i] + " so setting it to current value = " + currentValue + " for key : " + key);
                        clients[i].set(evcKey.getDerivedKey(clients[i].isDuetClient(), clients[i].getHashingAlgorithm(), clients[i].shouldEncodeHashKey(), clients[i].getMaxDigestBytes(), clients[i].getMaxHashLength(), clients[i].getBaseEncoder()), cd, timeToLive);
                    }
                }
            }
            if (event != null) endEvent(event);
            if (log.isDebugEnabled()) log.debug("INCR : APP " + _appName + " returning value = " + currentValue + " for key : " + key);
            return currentValue;
        } catch (Exception ex) {
            status = EVCacheMetricsFactory.ERROR;
            if (log.isDebugEnabled() && shouldLog()) log.debug("Exception incrementing the value for APP " + _appName + ", key : " + key, ex);
            if (event != null) {
                event.setStatus(status);
                eventError(event, ex);
            }
            if (!throwExc) return -1;
            throw new EVCacheException("Exception incrementing value for APP " + _appName + ", key : " + key, ex);
        } finally {
            final long duration = EVCacheMetricsFactory.getInstance().getRegistry().clock().wallTime()- start;
            getTimer(Call.INCR.name(), EVCacheMetricsFactory.WRITE, null, status, 1, maxWriteDuration.get().intValue(), null).record(duration, TimeUnit.MILLISECONDS);
            if (log.isDebugEnabled() && shouldLog()) log.debug("INCR : APP " + _appName + ", Took " + duration
                    + " milliSec for key : " + key + " with value as " + currentValue);
        }
    }
    /**
     * Decrements the counter stored under the key on every writable client and then reconciles
     * the replicas: the largest value seen wins, and any zone that returned -1 or a different
     * value is brought to the winning value (mirror of {@code incr}).
     *
     * @param key        counter key (must be non-null)
     * @param by         decrement amount (must be &gt;= 0)
     * @param defaultVal value seeded when the counter does not exist (must be &gt;= 0)
     * @param timeToLive TTL for the counter (must be &gt;= 0)
     * @return the reconciled counter value, or -1 on failure/fast failure
     * @throws IllegalArgumentException on null key or negative arguments
     * @throws EVCacheException         on throttling or failure when exceptions are enabled
     */
    public long decr(String key, long by, long defaultVal, int timeToLive) throws EVCacheException {
        if ((null == key) || by < 0 || defaultVal < 0 || timeToLive < 0) throw new IllegalArgumentException();
        checkTTL(timeToLive, Call.DECR);
        final boolean throwExc = doThrowException();
        final EVCacheClient[] clients = _pool.getEVCacheClientForWrite();
        if (clients.length == 0) {
            incrementFastFail(EVCacheMetricsFactory.NULL_CLIENT, Call.DECR);
            if (log.isDebugEnabled() && shouldLog()) log.debug("DECR : " + _metricPrefix + ":NULL_CLIENT");
            if (throwExc) throw new EVCacheException("Could not find a client to decr the data");
            return -1;
        }
        final EVCacheKey evcKey = getEVCacheKey(key);
        final EVCacheEvent event = createEVCacheEvent(Arrays.asList(clients), Call.DECR);
        if (event != null) {
            event.setTTL(timeToLive);
            event.setEVCacheKeys(Arrays.asList(evcKey));
            try {
                if (shouldThrottle(event)) {
                    incrementFastFail(EVCacheMetricsFactory.THROTTLED, Call.DECR);
                    if (throwExc) throw new EVCacheException("Request Throttled for app " + _appName + " & key " + key);
                    return -1;
                }
            } catch(EVCacheException ex) {
                if(throwExc) throw ex;
                incrementFastFail(EVCacheMetricsFactory.THROTTLED, Call.DECR);
                return -1;
            }
            startEvent(event);
        }
        final long start = EVCacheMetricsFactory.getInstance().getRegistry().clock().wallTime();
        String status = EVCacheMetricsFactory.SUCCESS;
        long currentValue = -1;
        try {
            // First pass: decrement on every client, remembering the largest value seen.
            final long[] vals = new long[clients.length];
            int index = 0;
            for (EVCacheClient client : clients) {
                vals[index] = client.decr(evcKey.getDerivedKey(client.isDuetClient(), client.getHashingAlgorithm(), client.shouldEncodeHashKey(), client.getMaxDigestBytes(), client.getMaxHashLength(), client.getBaseEncoder()), by, defaultVal, timeToLive);
                if (vals[index] != -1 && currentValue < vals[index]) {
                    currentValue = vals[index];
                    if (log.isDebugEnabled()) log.debug("DECR : APP " + _appName + " current value = " + currentValue + " for key : " + key + " from client : " + client);
                }
                index++;
            }
            // Second pass: repair any zone that disagrees with the winning value.
            if (currentValue != -1) {
                CachedData cd = null;
                if (log.isDebugEnabled()) log.debug("DECR : APP " + _appName + " current value = " + currentValue
                        + " for key : " + key);
                for (int i = 0; i < vals.length; i++) {
                    if (vals[i] == -1 && currentValue > -1) {
                        if (log.isDebugEnabled()) log.debug("DECR : APP " + _appName + "; Zone " + clients[i].getZone()
                                + " had a value = -1 so setting it to current value = "
                                + currentValue + " for key : " + key);
                        clients[i].decr(evcKey.getDerivedKey(clients[i].isDuetClient(), clients[i].getHashingAlgorithm(), clients[i].shouldEncodeHashKey(), clients[i].getMaxDigestBytes(), clients[i].getMaxHashLength(), clients[i].getBaseEncoder()), 0, currentValue, timeToLive);
                    } else if (vals[i] != currentValue) {
                        // NOTE(review): incr() encodes String.valueOf(currentValue); here the raw long is encoded.
                        if(cd == null) cd = clients[i].getTranscoder().encode(currentValue);
                        if (log.isDebugEnabled()) log.debug("DECR : APP " + _appName + "; Zone " + clients[i].getZone()
                                + " had a value of " + vals[i]
                                + " so setting it to current value = " + currentValue + " for key : " + key);
                        clients[i].set(evcKey.getDerivedKey(clients[i].isDuetClient(), clients[i].getHashingAlgorithm(), clients[i].shouldEncodeHashKey(), clients[i].getMaxDigestBytes(), clients[i].getMaxHashLength(), clients[i].getBaseEncoder()), cd, timeToLive);
                    }
                }
            }
            if (event != null) endEvent(event);
            if (log.isDebugEnabled()) log.debug("DECR : APP " + _appName + " returning value = " + currentValue + " for key : " + key);
            return currentValue;
        } catch (Exception ex) {
            status = EVCacheMetricsFactory.ERROR;
            if (log.isDebugEnabled() && shouldLog()) log.debug("Exception decrementing the value for APP " + _appName + ", key : " + key, ex);
            if (event != null) {
                event.setStatus(status);
                eventError(event, ex);
            }
            if (!throwExc) return -1;
            throw new EVCacheException("Exception decrementing value for APP " + _appName + ", key : " + key, ex);
        } finally {
            final long duration = EVCacheMetricsFactory.getInstance().getRegistry().clock().wallTime()- start;
            getTimer(Call.DECR.name(), EVCacheMetricsFactory.WRITE, null, status, 1, maxWriteDuration.get().intValue(), null).record(duration, TimeUnit.MILLISECONDS);
            if (log.isDebugEnabled() && shouldLog()) log.debug("DECR : APP " + _appName + ", Took " + duration + " milliSec for key : " + key + " with value as " + currentValue);
        }
    }
@Override
public <T> EVCacheLatch replace(String key, T value, Policy policy) throws EVCacheException {
return replace(key, value, (Transcoder<T>) _transcoder, policy);
}
@Override
public <T> EVCacheLatch replace(String key, T value, Transcoder<T> tc, Policy policy) throws EVCacheException {
return replace(key, value, (Transcoder<T>) _transcoder, _timeToLive, policy);
}
public <T> EVCacheLatch replace(String key, T value, int timeToLive, Policy policy) throws EVCacheException {
return replace(key, value, (Transcoder<T>)_transcoder, timeToLive, policy);
}
    /**
     * Core replace path: encodes the value and issues a replace on every writable client,
     * tracking completion via the returned latch. When key hashing is enabled, the payload is
     * wrapped in an {@code EVCacheValue} carrying the canonical key.
     *
     * @param key        key whose value is replaced (must be non-null)
     * @param value      new value (must be non-null)
     * @param tc         transcoder to encode with; falls back to the app transcoder, then the
     *                   client's default transcoder
     * @param timeToLive TTL for the entry
     * @param policy     latch completion policy; {@code Policy.ALL_MINUS_1} when null
     * @return latch tracking the replaces; zero-count latch (or null after a swallowed
     *         throttling exception) on fast failure
     * @throws IllegalArgumentException when key or value is null
     * @throws EVCacheException         on throttling or failure when exceptions are enabled
     */
    @Override
    public <T> EVCacheLatch replace(String key, T value, Transcoder<T> tc, int timeToLive, Policy policy)
            throws EVCacheException {
        if ((null == key) || (null == value)) throw new IllegalArgumentException();
        checkTTL(timeToLive, Call.REPLACE);
        final boolean throwExc = doThrowException();
        final EVCacheClient[] clients = _pool.getEVCacheClientForWrite();
        if (clients.length == 0) {
            incrementFastFail(EVCacheMetricsFactory.NULL_CLIENT, Call.REPLACE);
            if (throwExc) throw new EVCacheException("Could not find a client to set the data");
            return new EVCacheLatchImpl(policy, 0, _appName); // Fast failure
        }
        final EVCacheKey evcKey = getEVCacheKey(key);
        final EVCacheEvent event = createEVCacheEvent(Arrays.asList(clients), Call.REPLACE);
        if (event != null) {
            event.setEVCacheKeys(Arrays.asList(evcKey));
            try {
                if (shouldThrottle(event)) {
                    incrementFastFail(EVCacheMetricsFactory.THROTTLED, Call.REPLACE);
                    if (throwExc) throw new EVCacheException("Request Throttled for app " + _appName + " & key " + key);
                    return new EVCacheLatchImpl(policy, 0, _appName);
                }
            } catch(EVCacheException ex) {
                if(throwExc) throw ex;
                incrementFastFail(EVCacheMetricsFactory.THROTTLED, Call.REPLACE);
                return null;
            }
            startEvent(event);
        }
        final long start = EVCacheMetricsFactory.getInstance().getRegistry().clock().wallTime();
        String status = EVCacheMetricsFactory.SUCCESS;
        final EVCacheLatchImpl latch = new EVCacheLatchImpl(policy == null ? Policy.ALL_MINUS_1 : policy, clients.length - _pool.getWriteOnlyEVCacheClients().length, _appName);
        try {
            // NOTE(review): futures[] is filled but never read; also, unlike set(), the value is
            // re-encoded for every client rather than cached after the first encode.
            final EVCacheFuture[] futures = new EVCacheFuture[clients.length];
            CachedData cd = null;
            int index = 0;
            for (EVCacheClient client : clients) {
                if (tc != null) {
                    cd = tc.encode(value);
                } else if (_transcoder != null) {
                    cd = ((Transcoder<Object>) _transcoder).encode(value);
                } else {
                    cd = client.getTranscoder().encode(value);
                }
                if (evcKey.getHashKey(client.isDuetClient(), client.getHashingAlgorithm(), client.shouldEncodeHashKey(), client.getMaxDigestBytes(), client.getMaxHashLength(), client.getBaseEncoder()) != null) {
                    final EVCacheValue val = new EVCacheValue(evcKey.getCanonicalKey(client.isDuetClient()), cd.getData(), cd.getFlags(), timeToLive, System.currentTimeMillis());
                    cd = evcacheValueTranscoder.encode(val);
                }
                final Future<Boolean> future = client.replace(evcKey.getDerivedKey(client.isDuetClient(), client.getHashingAlgorithm(), client.shouldEncodeHashKey(), client.getMaxDigestBytes(), client.getMaxHashLength(), client.getBaseEncoder()), cd, timeToLive, latch);
                futures[index++] = new EVCacheFuture(future, key, _appName, client.getServerGroup());
            }
            if (event != null) {
                event.setTTL(timeToLive);
                event.setCachedData(cd);
                // When the feature flag is on, event completion is deferred to latch validation.
                if(_eventsUsingLatchFP.get()) {
                    latch.setEVCacheEvent(event);
                    latch.scheduledFutureValidation();
                } else {
                    endEvent(event);
                }
            }
            return latch;
        } catch (Exception ex) {
            status = EVCacheMetricsFactory.ERROR;
            if (log.isDebugEnabled() && shouldLog()) log.debug("Exception setting the data for APP " + _appName + ", key : " + evcKey, ex);
            if (event != null) {
                event.setStatus(status);
                eventError(event, ex);
            }
            if (!throwExc) return new EVCacheLatchImpl(policy, 0, _appName);
            throw new EVCacheException("Exception setting data for APP " + _appName + ", key : " + evcKey, ex);
        } finally {
            final long duration = EVCacheMetricsFactory.getInstance().getRegistry().clock().wallTime()- start;
            getTimer(Call.REPLACE.name(), EVCacheMetricsFactory.WRITE, null, status, 1, maxWriteDuration.get().intValue(), null).record(duration, TimeUnit.MILLISECONDS);
            if (log.isDebugEnabled() && shouldLog()) log.debug("REPLACE : APP " + _appName + ", Took " + duration + " milliSec for key : " + evcKey);
        }
    }
    /** Returns the cache-name prefix configured for this instance (same value as {@link #getCacheName()}). */
    @Override
    public String getCachePrefix() {
        return _cacheName;
    }
    /** Returns the EVCache application name this instance operates on. */
    public String getAppName() {
        return _appName;
    }
    /** Returns the cache name (key prefix) configured for this instance. */
    public String getCacheName() {
        return _cacheName;
    }
    /**
     * Appends the value to the existing entry, or adds it when the entry does not exist, on
     * every writable client, tracking completion via the returned latch. Not supported when
     * key hashing is enabled.
     *
     * @param key        key to append to / add under (must be non-null)
     * @param value      data to append or add (must be non-null)
     * @param tc         transcoder to encode with; falls back to the app transcoder, then the
     *                   client's default transcoder
     * @param timeToLive TTL for the entry
     * @param policy     latch completion policy; {@code Policy.ALL_MINUS_1} when null
     * @return latch tracking the writes; zero-count latch (or null after a swallowed
     *         throttling exception) on fast failure
     * @throws IllegalArgumentException when key/value is null or key hashing is enabled
     * @throws EVCacheException         on throttling or failure when exceptions are enabled
     */
    public <T> EVCacheLatch appendOrAdd(String key, T value, Transcoder<T> tc, int timeToLive, Policy policy) throws EVCacheException {
        if ((null == key) || (null == value)) throw new IllegalArgumentException();
        checkTTL(timeToLive, Call.APPEND_OR_ADD);
        final boolean throwExc = doThrowException();
        final EVCacheClient[] clients = _pool.getEVCacheClientForWrite();
        if (clients.length == 0) {
            incrementFastFail(EVCacheMetricsFactory.NULL_CLIENT, Call.APPEND_OR_ADD);
            if (throwExc) throw new EVCacheException("Could not find a client to appendOrAdd the data");
            return new EVCacheLatchImpl(policy, 0, _appName); // Fast failure
        }
        final EVCacheKey evcKey = getEVCacheKey(key);
        final EVCacheEvent event = createEVCacheEvent(Arrays.asList(clients), Call.APPEND_OR_ADD);
        if (event != null) {
            event.setEVCacheKeys(Arrays.asList(evcKey));
            try {
                if (shouldThrottle(event)) {
                    incrementFastFail(EVCacheMetricsFactory.THROTTLED, Call.APPEND_OR_ADD);
                    if (throwExc) throw new EVCacheException("Request Throttled for app " + _appName + " & key " + key);
                    return new EVCacheLatchImpl(policy, 0, _appName); // Fast failure
                }
            } catch(EVCacheException ex) {
                if(throwExc) throw ex;
                incrementFastFail(EVCacheMetricsFactory.THROTTLED, Call.APPEND_OR_ADD);
                return null;
            }
            startEvent(event);
        }
        final long start = EVCacheMetricsFactory.getInstance().getRegistry().clock().wallTime();
        final EVCacheLatchImpl latch = new EVCacheLatchImpl(policy == null ? Policy.ALL_MINUS_1 : policy, clients.length - _pool.getWriteOnlyEVCacheClients().length, _appName);
        String status = EVCacheMetricsFactory.SUCCESS;
        try {
            // Value is encoded once on the first client and reused for the rest.
            CachedData cd = null;
            for (EVCacheClient client : clients) {
                // ensure key hashing is not enabled
                if (evcKey.getHashKey(client.isDuetClient(), client.getHashingAlgorithm(), client.shouldEncodeHashKey(), client.getMaxDigestBytes(), client.getMaxHashLength(), client.getBaseEncoder()) != null) {
                    throw new IllegalArgumentException("appendOrAdd is not supported when key hashing is enabled.");
                }
                if (cd == null) {
                    if (tc != null) {
                        cd = tc.encode(value);
                    } else if ( _transcoder != null) {
                        cd = ((Transcoder<Object>)_transcoder).encode(value);
                    } else {
                        cd = client.getTranscoder().encode(value);
                    }
                }
                final Future<Boolean> future = client.appendOrAdd(evcKey.getDerivedKey(client.isDuetClient(), client.getHashingAlgorithm(), client.shouldEncodeHashKey(), client.getMaxDigestBytes(), client.getMaxHashLength(), client.getBaseEncoder()), cd, timeToLive, latch);
                if (log.isDebugEnabled() && shouldLog()) log.debug("APPEND_OR_ADD : APP " + _appName + ", Future " + future + " for key : " + evcKey);
            }
            if (event != null) {
                event.setTTL(timeToLive);
                event.setCachedData(cd);
                // When the feature flag is on, event completion is deferred to latch validation.
                if(_eventsUsingLatchFP.get()) {
                    latch.setEVCacheEvent(event);
                    latch.scheduledFutureValidation();
                } else {
                    endEvent(event);
                }
            }
            return latch;
        } catch (Exception ex) {
            status = EVCacheMetricsFactory.ERROR;
            if (log.isDebugEnabled() && shouldLog()) log.debug("Exception while appendOrAdd the data for APP " + _appName + ", key : " + evcKey, ex);
            if (event != null) {
                event.setStatus(status);
                eventError(event, ex);
            }
            if (!throwExc) return new EVCacheLatchImpl(policy, 0, _appName);
            throw new EVCacheException("Exception while appendOrAdd data for APP " + _appName + ", key : " + evcKey, ex);
        } finally {
            final long duration = EVCacheMetricsFactory.getInstance().getRegistry().clock().wallTime()- start;
            getTimer(Call.APPEND_OR_ADD.name(), EVCacheMetricsFactory.WRITE, null, status, 1, maxWriteDuration.get().intValue(), null).record(duration, TimeUnit.MILLISECONDS);
            if (log.isDebugEnabled() && shouldLog()) log.debug("APPEND_OR_ADD : APP " + _appName + ", Took " + duration + " milliSec for key : " + evcKey);
        }
    }
public <T> Future<Boolean>[] appendOrAdd(String key, T value, Transcoder<T> tc, int timeToLive) throws EVCacheException {
final EVCacheLatch latch = this.appendOrAdd(key, value, tc, timeToLive, Policy.ALL_MINUS_1);
if(latch != null) return latch.getAllFutures().toArray(new Future[latch.getAllFutures().size()]);
return new EVCacheFuture[0];
}
public <T> boolean add(String key, T value, Transcoder<T> tc, int timeToLive) throws EVCacheException {
final EVCacheLatch latch = add(key, value, tc, timeToLive, Policy.NONE);
try {
latch.await(_pool.getOperationTimeout().get(), TimeUnit.MILLISECONDS);
final List<Future<Boolean>> allFutures = latch.getAllFutures();
for(Future<Boolean> future : allFutures) {
if(!future.get()) return false;
}
return true;
} catch (InterruptedException e) {
if (log.isDebugEnabled() && shouldLog()) log.debug("Exception adding the data for APP " + _appName + ", key : " + key, e);
final boolean throwExc = doThrowException();
if(throwExc) throw new EVCacheException("Exception add data for APP " + _appName + ", key : " + key, e);
return false;
} catch (ExecutionException e) {
if (log.isDebugEnabled() && shouldLog()) log.debug("Exception adding the data for APP " + _appName + ", key : " + key, e);
final boolean throwExc = doThrowException();
if(throwExc) throw new EVCacheException("Exception add data for APP " + _appName + ", key : " + key, e);
return false;
}
}
@Override
public <T> EVCacheLatch add(String key, T value, Transcoder<T> tc, int timeToLive, Policy policy) throws EVCacheException {
EVCacheClient[] clients = _pool.getEVCacheClientForWrite();
return this.add(key, value, tc, timeToLive, policy, clients, clients.length - _pool.getWriteOnlyEVCacheClients().length);
}
    /**
     * Convenience overload of the full {@code add} that defaults {@code fixup} to {@code true}.
     */
    protected <T> EVCacheLatch add(String key, T value, Transcoder<T> tc, int timeToLive, Policy policy, EVCacheClient[] clients, int latchCount) throws EVCacheException {
        return add(key, value, tc, timeToLive, policy, clients, latchCount, true);
    }
    /**
     * Core add implementation: stores the value against the key on the given clients only if it
     * does not already exist, returning an {@link EVCacheLatch} the caller can await.
     *
     * Flow: validate args/TTL, fast-fail when no clients or when throttled, encode the value via
     * the supplied/app/default transcoder, then hand the write to {@code clientUtil.add}. Event
     * start/end hooks and an overall-call timer bracket the operation.
     *
     * @param latchCount number of completions the latch should count toward the policy.
     * @param fixup whether mismatched copies should be repaired after the add
     *              (NOTE(review): exact fixup semantics live in EVCacheClientUtil — confirm there).
     * @throws IllegalArgumentException if key or value is null.
     * @throws EVCacheException on throttling or write failure when exception throwing is enabled.
     */
    protected <T> EVCacheLatch add(String key, T value, Transcoder<T> tc, int timeToLive, Policy policy, EVCacheClient[] clients, int latchCount, boolean fixup) throws EVCacheException {
        if ((null == key) || (null == value)) throw new IllegalArgumentException();
        checkTTL(timeToLive, Call.ADD);
        final boolean throwExc = doThrowException();
        if (clients.length == 0) {
            incrementFastFail(EVCacheMetricsFactory.NULL_CLIENT, Call.ADD);
            if (throwExc) throw new EVCacheException("Could not find a client to Add the data");
            return new EVCacheLatchImpl(policy, 0, _appName); // Fast failure
        }
        final EVCacheKey evcKey = getEVCacheKey(key);
        final EVCacheEvent event = createEVCacheEvent(Arrays.asList(clients), Call.ADD);
        if (event != null) {
            event.setEVCacheKeys(Arrays.asList(evcKey));
            try {
                if (shouldThrottle(event)) {
                    incrementFastFail(EVCacheMetricsFactory.THROTTLED, Call.ADD);
                    if (throwExc) throw new EVCacheException("Request Throttled for app " + _appName + " & key " + key);
                    return new EVCacheLatchImpl(policy, 0, _appName); // Fast failure
                }
            } catch(EVCacheException ex) {
                if(throwExc) throw ex;
                incrementFastFail(EVCacheMetricsFactory.THROTTLED, Call.ADD);
                return new EVCacheLatchImpl(policy, 0, _appName); // Fast failure
            }
            startEvent(event);
        }
        final long start = EVCacheMetricsFactory.getInstance().getRegistry().clock().wallTime();
        String status = EVCacheMetricsFactory.SUCCESS;
        EVCacheLatch latch = null;
        try {
            // Encode preference order: explicit transcoder arg, then the app-level transcoder,
            // then the default transcoder of a read client from the pool.
            CachedData cd = null;
            if (tc != null) {
                cd = tc.encode(value);
            } else if (_transcoder != null) {
                cd = ((Transcoder<Object>) _transcoder).encode(value);
            } else {
                cd = _pool.getEVCacheClientForRead().getTranscoder().encode(value);
            }
            // Lazily created helper that performs the actual multi-copy add.
            if (clientUtil == null) clientUtil = new EVCacheClientUtil(_appName, _pool.getOperationTimeout().get());
            latch = clientUtil.add(evcKey, cd, evcacheValueTranscoder, timeToLive, policy, clients, latchCount, fixup);
            if (event != null) {
                event.setTTL(timeToLive);
                event.setCachedData(cd);
                // When events ride the latch, completion is reported asynchronously after the
                // scheduled validation; otherwise the event ends right away.
                if (_eventsUsingLatchFP.get()) {
                    latch.setEVCacheEvent(event);
                    if (latch instanceof EVCacheLatchImpl)
                        ((EVCacheLatchImpl) latch).scheduledFutureValidation();
                } else {
                    endEvent(event);
                }
            }
            return latch;
        } catch (Exception ex) {
            status = EVCacheMetricsFactory.ERROR;
            if (log.isDebugEnabled() && shouldLog()) log.debug("Exception adding the data for APP " + _appName + ", key : " + evcKey, ex);
            if (event != null) {
                event.setStatus(status);
                eventError(event, ex);
            }
            if (!throwExc) return new EVCacheLatchImpl(policy, 0, _appName);
            throw new EVCacheException("Exception adding data for APP " + _appName + ", key : " + evcKey, ex);
        } finally {
            // Always record latency, tagged with the final success/error status.
            final long duration = EVCacheMetricsFactory.getInstance().getRegistry().clock().wallTime()- start;
            getTimer(Call.ADD.name(), EVCacheMetricsFactory.WRITE, null, status, 1, maxWriteDuration.get().intValue(), null).record(duration, TimeUnit.MILLISECONDS);
            if (log.isDebugEnabled() && shouldLog()) log.debug("ADD : APP " + _appName + ", Took " + duration + " milliSec for key : " + evcKey);
        }
    }
private DistributionSummary getTTLDistributionSummary(String operation, String type, String metric) {
DistributionSummary distributionSummary = distributionSummaryMap.get(operation);
if(distributionSummary != null) return distributionSummary;
final List<Tag> tagList = new ArrayList<Tag>(6);
tagList.addAll(tags);
tagList.add(new BasicTag(EVCacheMetricsFactory.CALL_TAG, operation));
tagList.add(new BasicTag(EVCacheMetricsFactory.CALL_TYPE_TAG, type));
distributionSummary = EVCacheMetricsFactory.getInstance().getDistributionSummary(metric, tagList);
distributionSummaryMap.put(operation, distributionSummary);
return distributionSummary;
}
    /**
     * Returns (creating and caching on first use) the percentile timer used to record the latency
     * of an EVCache call with the given dimensions.
     *
     * NOTE(review): the cache key concatenates operation, hit, status, tries and server-group name
     * but NOT operationType, so two calls differing only in operationType would share one timer —
     * confirm callers never pass differing operationTypes for the same other dimensions.
     */
    private Timer getTimer(String operation, String operationType, String hit, String status, int tries, long duration, ServerGroup serverGroup) {
        // Composite lookup key built from the tag values so repeated calls reuse the timer.
        String name = ((hit != null) ? operation + hit : operation);
        if(status != null) name += status;
        if(tries >= 0) name += tries;
        if(serverGroup != null) name += serverGroup.getName();
        //if(_cacheName != null) name += _cacheName;
        Timer timer = timerMap.get(name);
        if(timer != null) return timer;
        final List<Tag> tagList = new ArrayList<Tag>(7);
        tagList.addAll(tags);
        if(operation != null) tagList.add(new BasicTag(EVCacheMetricsFactory.CALL_TAG, operation));
        if(operationType != null) tagList.add(new BasicTag(EVCacheMetricsFactory.CALL_TYPE_TAG, operationType));
        if(status != null) tagList.add(new BasicTag(EVCacheMetricsFactory.IPC_RESULT, status));
        if(hit != null) tagList.add(new BasicTag(EVCacheMetricsFactory.CACHE_HIT, hit));
        // Collapse the retry count into three attempt buckets: initial (0/1), second, third-and-up.
        switch(tries) {
            case 0 :
            case 1 :
                tagList.add(new BasicTag(EVCacheMetricsFactory.ATTEMPT, EVCacheMetricsFactory.INITIAL));
                break;
            case 2 :
                tagList.add(new BasicTag(EVCacheMetricsFactory.ATTEMPT, EVCacheMetricsFactory.SECOND));
                break;
            default:
                tagList.add(new BasicTag(EVCacheMetricsFactory.ATTEMPT, EVCacheMetricsFactory.THIRD_UP));
                break;
        }
        // if(tries == 0) tagList.add(new BasicTag(EVCacheMetricsFactory.ATTEMPT, String.valueOf(tries)));
        if(serverGroup != null) {
            tagList.add(new BasicTag(EVCacheMetricsFactory.SERVERGROUP, serverGroup.getName()));
            tagList.add(new BasicTag(EVCacheMetricsFactory.ZONE, serverGroup.getZone()));
        }
        timer = EVCacheMetricsFactory.getInstance().getPercentileTimer(EVCacheMetricsFactory.OVERALL_CALL, tagList, Duration.ofMillis(duration));
        timerMap.put(name, timer);
        return timer;
    }
    /**
     * Returns the base Spectator tag list shared by all metrics emitted by this instance.
     * Callers must not mutate the returned list.
     */
    protected List<Tag> getTags() {
        return tags;
    }
}
| 189,277
| 54.686378
| 315
|
java
|
EVCache
|
EVCache-master/evcache-core/src/main/java/com/netflix/evcache/EVCacheSerializingTranscoder.java
|
/**
* Copyright (C) 2006-2009 Dustin Sallings
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALING
* IN THE SOFTWARE.
*/
package com.netflix.evcache;
import com.netflix.evcache.metrics.EVCacheMetricsFactory;
import com.netflix.evcache.pool.ServerGroup;
import com.netflix.spectator.api.BasicTag;
import com.netflix.spectator.api.Tag;
import com.netflix.spectator.api.Timer;
import net.spy.memcached.CachedData;
import net.spy.memcached.transcoders.BaseSerializingTranscoder;
import net.spy.memcached.transcoders.Transcoder;
import net.spy.memcached.transcoders.TranscoderUtils;
import net.spy.memcached.util.StringUtils;
import java.time.Duration;
import java.util.ArrayList;
import java.util.Date;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.TimeUnit;
/**
* Transcoder that serializes and compresses objects.
*/
/**
 * Transcoder that serializes and compresses objects.
 *
 * Values are encoded by runtime type: Strings and a handful of primitive wrapper types get
 * compact, flag-tagged encodings; everything else falls back to Java serialization. Payloads
 * larger than {@code compressionThreshold} are compressed when that actually shrinks them, and
 * the achieved compression ratio (percentage) is recorded to a Spectator timer.
 */
public class EVCacheSerializingTranscoder extends BaseSerializingTranscoder implements
    Transcoder<Object> {

    // General flags
    static final int SERIALIZED = 1;
    static final int COMPRESSED = 2;

    // Special flags for specially handled types.
    private static final int SPECIAL_MASK = 0xff00;
    static final int SPECIAL_BOOLEAN = (1 << 8);
    static final int SPECIAL_INT = (2 << 8);
    static final int SPECIAL_LONG = (3 << 8);
    static final int SPECIAL_DATE = (4 << 8);
    static final int SPECIAL_BYTE = (5 << 8);
    static final int SPECIAL_FLOAT = (6 << 8);
    static final int SPECIAL_DOUBLE = (7 << 8);
    static final int SPECIAL_BYTEARRAY = (8 << 8);
    static final String COMPRESSION = "COMPRESSION_METRIC";

    private final TranscoderUtils tu = new TranscoderUtils(true);
    // Lazily created on first compression. NOTE(review): assumed the Spectator registry
    // deduplicates by id so a benign init race cannot lose data — confirm.
    private Timer timer;

    /**
     * Get a serializing transcoder with the default max data size.
     */
    public EVCacheSerializingTranscoder() {
        this(CachedData.MAX_SIZE);
    }

    /**
     * Get a serializing transcoder that specifies the max data size.
     */
    public EVCacheSerializingTranscoder(int max) {
        super(max);
    }

    @Override
    public boolean asyncDecode(CachedData d) {
        // Compressed or serialized payloads are expensive to decode; push them off-thread.
        if ((d.getFlags() & COMPRESSED) != 0 || (d.getFlags() & SERIALIZED) != 0) {
            return true;
        }
        return super.asyncDecode(d);
    }

    /*
     * (non-Javadoc)
     *
     * @see net.spy.memcached.Transcoder#decode(net.spy.memcached.CachedData)
     */
    public Object decode(CachedData d) {
        byte[] data = d.getData();
        Object rv = null;
        if ((d.getFlags() & COMPRESSED) != 0) {
            data = decompress(d.getData());
        }
        int flags = d.getFlags() & SPECIAL_MASK;
        if ((d.getFlags() & SERIALIZED) != 0 && data != null) {
            rv = deserialize(data);
        } else if (flags != 0 && data != null) {
            switch (flags) {
            case SPECIAL_BOOLEAN:
                rv = Boolean.valueOf(tu.decodeBoolean(data));
                break;
            case SPECIAL_INT:
                rv = Integer.valueOf(tu.decodeInt(data));
                break;
            case SPECIAL_LONG:
                rv = Long.valueOf(tu.decodeLong(data));
                break;
            case SPECIAL_DATE:
                rv = new Date(tu.decodeLong(data));
                break;
            case SPECIAL_BYTE:
                rv = Byte.valueOf(tu.decodeByte(data));
                break;
            case SPECIAL_FLOAT:
                // valueOf instead of deprecated boxing constructors.
                rv = Float.valueOf(Float.intBitsToFloat(tu.decodeInt(data)));
                break;
            case SPECIAL_DOUBLE:
                rv = Double.valueOf(Double.longBitsToDouble(tu.decodeLong(data)));
                break;
            case SPECIAL_BYTEARRAY:
                rv = data;
                break;
            default:
                getLogger().warn("Undecodeable with flags %x", flags);
            }
        } else {
            rv = decodeString(data);
        }
        return rv;
    }

    /*
     * (non-Javadoc)
     *
     * @see net.spy.memcached.Transcoder#encode(java.lang.Object)
     */
    public CachedData encode(Object o) {
        byte[] b = null;
        int flags = 0;
        if (o instanceof String) {
            b = encodeString((String) o);
            // JSON strings are stored as-is (flags 0), skipping the compression pass below.
            if (StringUtils.isJsonObject((String) o)) {
                return new CachedData(flags, b, getMaxSize());
            }
        } else if (o instanceof Long) {
            b = tu.encodeLong((Long) o);
            flags |= SPECIAL_LONG;
        } else if (o instanceof Integer) {
            b = tu.encodeInt((Integer) o);
            flags |= SPECIAL_INT;
        } else if (o instanceof Boolean) {
            b = tu.encodeBoolean((Boolean) o);
            flags |= SPECIAL_BOOLEAN;
        } else if (o instanceof Date) {
            b = tu.encodeLong(((Date) o).getTime());
            flags |= SPECIAL_DATE;
        } else if (o instanceof Byte) {
            b = tu.encodeByte((Byte) o);
            flags |= SPECIAL_BYTE;
        } else if (o instanceof Float) {
            b = tu.encodeInt(Float.floatToRawIntBits((Float) o));
            flags |= SPECIAL_FLOAT;
        } else if (o instanceof Double) {
            b = tu.encodeLong(Double.doubleToRawLongBits((Double) o));
            flags |= SPECIAL_DOUBLE;
        } else if (o instanceof byte[]) {
            b = (byte[]) o;
            flags |= SPECIAL_BYTEARRAY;
        } else {
            b = serialize(o);
            flags |= SERIALIZED;
        }
        assert b != null;
        if (b.length > compressionThreshold) {
            // Capture the pre-compression length before b may be swapped below.
            final int uncompressedLength = b.length;
            byte[] compressed = compress(b);
            if (compressed.length < b.length) {
                getLogger().debug("Compressed %s from %d to %d",
                        o.getClass().getName(), b.length, compressed.length);
                b = compressed;
                flags |= COMPRESSED;
            } else {
                getLogger().info("Compression increased the size of %s from %d to %d",
                        o.getClass().getName(), b.length, compressed.length);
            }
            // BUG FIX: the ratio must be computed against the original (uncompressed) length.
            // The previous code divided by b.length AFTER b had been reassigned to the
            // compressed payload, which always reported 100% whenever compression succeeded.
            long compression_ratio = Math.round((double) compressed.length / uncompressedLength * 100);
            updateTimerWithCompressionRatio(compression_ratio);
        }
        return new CachedData(flags, b, getMaxSize());
    }

    /**
     * Records the compression ratio (as a percentage) to a lazily-created percentile timer.
     * The timer's time unit is abused to carry a percentage value, preserving prior behavior.
     */
    private void updateTimerWithCompressionRatio(long ratio_percentage) {
        if (timer == null) {
            final List<Tag> tagList = new ArrayList<Tag>(1);
            tagList.add(new BasicTag(EVCacheMetricsFactory.COMPRESSION_TYPE, "gzip"));
            timer = EVCacheMetricsFactory.getInstance().getPercentileTimer(EVCacheMetricsFactory.COMPRESSION_RATIO, tagList, Duration.ofMillis(100));
        }
        timer.record(ratio_percentage, TimeUnit.MILLISECONDS);
    }
}
| 7,865
| 35.929577
| 149
|
java
|
EVCache
|
EVCache-master/evcache-core/src/main/java/com/netflix/evcache/EVCacheGetOperationListener.java
|
package com.netflix.evcache;
import com.netflix.evcache.operation.EVCacheOperationFuture;
import net.spy.memcached.internal.GenericCompletionListener;
/**
 * Marker specialization of {@link GenericCompletionListener} for callbacks fired when an
 * asynchronous EVCache get operation's {@link EVCacheOperationFuture} completes.
 *
 * @param <T> the type of the value produced by the get operation.
 */
public interface EVCacheGetOperationListener<T> extends GenericCompletionListener<EVCacheOperationFuture<T>> {
}
| 268
| 25.9
| 110
|
java
|
EVCache
|
EVCache-master/evcache-core/src/main/java/com/netflix/evcache/EVCacheException.java
|
package com.netflix.evcache;
/**
 * Checked exception raised when an EVCache operation fails (encoding errors, write failures,
 * throttling, etc.) and exception throwing is enabled for the application.
 */
public class EVCacheException extends Exception {

    private static final long serialVersionUID = -3885811159646046383L;

    /**
     * Creates an exception with the given detail message.
     */
    public EVCacheException(String message) {
        super(message);
    }

    /**
     * Creates an exception with the given detail message and underlying cause.
     */
    public EVCacheException(String message, Throwable cause) {
        super(message, cause);
    }
}
| 332
| 22.785714
| 71
|
java
|
EVCache
|
EVCache-master/evcache-core/src/main/java/com/netflix/evcache/EVCacheLatch.java
|
package com.netflix.evcache;
import java.util.List;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import com.netflix.evcache.event.EVCacheEvent;
import net.spy.memcached.internal.OperationCompletionListener;
/**
* EVCacheLatch is a blocking mechanism that allows one or more threads to wait until
* a set of operations, as specified by {@link Policy} and performed by evcache threads, is complete.
*
* <p>The Latch is initialized with a <em>count</em> as determined by the Policy.
* The {@link #await await} methods block until the current count reaches
* zero due to completion of the operation, after which
* all waiting threads are released and any subsequent invocations of
* {@link #await await} return immediately.
*
* The latch is also released if the specified timeout is reached even though the count is greater than zero.
* In this case the {@link #await await} method returns false
*
* The various methods in latch can be queried any time and they return the state of the operations across the Futures.
*/
public interface EVCacheLatch extends OperationCompletionListener {
    /**
     * The Policy which can be used to control the latch behavior. The latch is released when the number of operations specified by the Policy is completed.
     * For example: If your evcache app has 3 copies (3 server groups) in a region then each write done on that app will perform 3 operations (one for each copy/server group).
     * If you are doing a set operation and the selected Policy is ALL_MINUS_1 then 2 operations (set on 2 copies/server groups) need to be finished before we release the latch.
     *
     * Note that an Operation completed means that the operation was accepted by evcache or rejected by evcache.
     * If it is still in flight then that operation is in a pending state.
     *
     * Case ALL : All the operations have to be completed.
     * Case All_MINUS_1 : All but one needs to be completed. For ex: If there are 3 copies for a cache then 2 need to be completed.
     * Case QUORUM: Quorum number of operations have to be completed before we release the latch: for a cluster with 3 this means 2 operations need to be completed.
     * Case ONE: At least one operation needs to be completed before we release the latch.
     * Case NONE: The latch is released immediately.
     *
     * @author smadappa
     *
     */
    public static enum Policy {
        NONE, ONE, QUORUM, ALL_MINUS_1, ALL
    }
    /**
     * Causes the current thread to wait until the latch has counted down to
     * zero, unless the thread is interrupted, or the specified waiting time
     * elapses.
     *
     * @param timeout
     *            - the maximum time to wait
     * @param unit
     *            - the time unit of the timeout argument
     *
     * @return - {@code true} if the count reached zero and false if the waiting
     *         time elapsed before the count reached zero
     * @throws InterruptedException
     *             if the current thread is interrupted while waiting
     */
    boolean await(long timeout, TimeUnit unit) throws InterruptedException;
    /**
     * Returns {@code true} if all the tasks assigned to this Latch
     * completed.
     *
     * Completion may be due to normal termination, an exception, or
     * cancellation -- in all of these cases, this method will return
     * {@code true}.
     *
     * @return {@code true} if all the tasks completed
     */
    boolean isDone();
    /**
     * Returns the Futures backing the Pending tasks.
     *
     * @return the current outstanding tasks
     */
    List<Future<Boolean>> getPendingFutures();
    /**
     * Returns all the Tasks.
     *
     * @return the tasks submitted as part of this Latch
     */
    List<Future<Boolean>> getAllFutures();
    /**
     * Returns all the completed Tasks.
     *
     * @return the current completed tasks
     */
    List<Future<Boolean>> getCompletedFutures();
    /**
     * Returns the number of Futures that are still Pending.
     *
     * @return the current outstanding Future task count
     */
    int getPendingFutureCount();
    /**
     * Returns the number of Future Tasks that are completed.
     *
     * @return the current completed future task count
     */
    int getCompletedFutureCount();
    /**
     * Returns the number of Tasks that are still Pending.
     *
     * @return the current outstanding task count
     */
    int getPendingCount();
    /**
     * Returns the number of Tasks that are completed. A task is completed if
     * the task finished with either success or failure. The task is considered a
     * failure if it times out or there was an exception.
     *
     * @return the completed task count
     */
    int getCompletedCount();
    /**
     * Returns the number of Tasks that failed to complete. There was either an
     * exception or the task was cancelled.
     *
     * @return the failed task count
     */
    int getFailureCount();
    /**
     * Returns the number of Tasks that need to be successfully completed based
     * on the specified Policy before the latch can be released.
     *
     * @return the expected success count
     * @deprecated replaced by {@link #getExpectedCompleteCount()}
     */
    int getExpectedSuccessCount();
    /**
     * Returns the number of Tasks that need to be successfully completed based
     * on the specified Policy before the latch can be released.
     *
     * @return the expected success count
     */
    int getExpectedCompleteCount();
    /**
     * Returns the current number of Tasks that are successful.
     *
     * @return the current Successful Task count.
     */
    int getSuccessCount();
    /**
     * The {@code Policy} for this Latch.
     *
     * @return the Policy this latch was created with.
     */
    Policy getPolicy();
    /**
     * Returns {@code true} if the operation is a Fast failure i.e. the operation was not even performed.
     *
     * @return {@code true} upon fast failure else false.
     */
    boolean isFastFailure();
    /**
     * Associates the given event with this Latch.
     *
     * @param event the EVCacheEvent to associate with this latch.
     */
    void setEVCacheEvent(EVCacheEvent event);
}
| 6,305
| 32.903226
| 197
|
java
|
EVCache
|
EVCache-master/evcache-core/src/main/java/com/netflix/evcache/EVCacheInternalImpl.java
|
package com.netflix.evcache;
import com.netflix.archaius.api.PropertyRepository;
import com.netflix.evcache.operation.EVCacheItem;
import com.netflix.evcache.operation.EVCacheItemMetaData;
import com.netflix.evcache.pool.EVCacheClient;
import com.netflix.evcache.pool.EVCacheClientPoolManager;
import com.netflix.evcache.pool.EVCacheValue;
import com.netflix.evcache.pool.ServerGroup;
import net.spy.memcached.CachedData;
import net.spy.memcached.MemcachedNode;
import net.spy.memcached.transcoders.Transcoder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.net.InetSocketAddress;
import java.util.*;
import java.util.concurrent.Future;
import java.util.stream.Collectors;
/**
* This class is for internal-use only by EVCache components, and is not recommended to be used for any other purpose. EVCache and EVCacheImpl are recommended instead.
*/
class EVCacheInternalImpl extends EVCacheImpl implements EVCacheInternal {
    private static final Logger log = LoggerFactory.getLogger(EVCacheInternalImpl.class);
    /** Runs a meta-get for the key, returning the raw cached data plus its item metadata. */
    public EVCacheItem<CachedData> metaGet(String key, Transcoder<CachedData> tc, boolean isOriginalKeyHashed) throws EVCacheException {
        return this.metaGetInternal(key, tc, isOriginalKeyHashed);
    }
    /**
     * Runs a meta-get against every client in every server group and maps each client's primary
     * node for the key to the values found there (null where nothing was found).
     *
     * If some groups missed while others hit, the hit values are decoded to recover the original
     * (pre-hash) key, and the missing groups are retried with that key. If every group missed,
     * the map is populated with nulls for each primary node.
     */
    public Map<MemcachedNode, CachedValues> metaGetPerClient(String key, Transcoder<CachedData> tc, boolean isOriginalKeyHashed) throws EVCacheException {
        Map<MemcachedNode, CachedValues> map = new HashMap<>();
        final Map<ServerGroup, List<EVCacheClient>> instancesByZone = _pool.getAllInstancesByZone();
        // Tracks, per server group, a client where the key was not found (for the retry pass).
        final Map<ServerGroup, EVCacheClient> instancesWithNull = new HashMap<ServerGroup, EVCacheClient>();
        final EVCacheKey evcKey = getEVCacheKey(key);
        for (ServerGroup sGroup : instancesByZone.keySet()) {
            try {
                for (EVCacheClient client : instancesByZone.get(sGroup)) {
                    EVCacheItem<CachedData> item = getEVCacheItem(client, evcKey, tc, true, false, isOriginalKeyHashed, false);
                    if (log.isDebugEnabled()) log.debug("client : " + client + "; item : " + item);
                    if(item == null) {
                        instancesWithNull.put(sGroup, client);
                    } else {
                        map.put(client.getNodeLocator().getPrimary(key), null == item ? null : new CachedValues(key, item.getData(), item.getItemMetaData()));
                    }
                }
            } catch (Exception e) {
                log.error("Error getting meta data", e);
            }
        }
        if (log.isDebugEnabled()) log.debug("map : " + map);
        if (log.isDebugEnabled()) log.debug("instancesWithNull : " + instancesWithNull);
        if(instancesWithNull.size() > 0 && map.size() > 0) {
            // Partial misses: decode a hit to recover the original (pre-hash) key, then retry
            // the missed groups with that original key.
            final EVCacheTranscoder transcoder = new EVCacheTranscoder();
            String originalKey = null;
            for(CachedValues vals : map.values()) {
                if (log.isDebugEnabled()) log.debug("vals : " + vals);
                try {
                    Object obj = transcoder.decode(vals.getData());
                    if (log.isDebugEnabled()) log.debug("Obj : " + obj);
                    if(obj instanceof EVCacheValue) {
                        originalKey = ((EVCacheValue)obj).getKey();
                        if (log.isDebugEnabled()) log.debug("original key: " + originalKey);
                        break;
                    }
                } catch(Exception e) {
                    log.error("Exception decoding", e);
                }
            }
            if(originalKey != null) {
                for(ServerGroup sGroup : instancesWithNull.keySet()) {
                    if (log.isDebugEnabled()) log.debug("sGroup : " + sGroup);
                    final EVCacheClient client = instancesWithNull.get(sGroup);
                    if (log.isDebugEnabled()) log.debug("Client : " + client);
                    EVCacheItem<CachedData> item;
                    try {
                        item = getEVCacheItem(client, getEVCacheKey(originalKey), tc, true, false, false, false);
                        if (log.isDebugEnabled()) log.debug("item : " + item);
                        map.put(client.getNodeLocator().getPrimary(originalKey), null == item ? null : new CachedValues(key, item.getData(), item.getItemMetaData()));
                    } catch (Exception e) {
                        log.error("Exception getting meta data using original key - " + originalKey, e);
                    }
                }
            }
        } else if(map.size() == 0) {
            // Total miss: still report every primary node, mapped to null.
            for (ServerGroup sGroup : instancesByZone.keySet()) {
                try {
                    for (EVCacheClient client : instancesByZone.get(sGroup)) {
                        map.put(client.getNodeLocator().getPrimary(key), null);
                    }
                } catch (Exception e) {
                    log.error("Error getting meta data", e);
                }
            }
        }
        if (log.isDebugEnabled()) log.debug("return map : " + map);
        return map;
    }
    /** Runs a meta-debug for the key and returns the item metadata. */
    public EVCacheItemMetaData metaDebug(String key, boolean isOriginalKeyHashed) throws EVCacheException {
        return this.metaDebugInternal(key, isOriginalKeyHashed);
    }
    /**
     * Runs a meta-debug against every client in every server group, mapping each client's primary
     * node for the key to the metadata found there.
     */
    public Map<MemcachedNode, EVCacheItemMetaData> metaDebugPerClient(String key, boolean isOriginalKeyHashed) throws EVCacheException {
        Map<MemcachedNode, EVCacheItemMetaData> map = new HashMap<>();
        final Map<ServerGroup, List<EVCacheClient>> instancesByZone = _pool.getAllInstancesByZone();
        final EVCacheKey evcKey = getEVCacheKey(key);
        for (ServerGroup sGroup : instancesByZone.keySet()) {
            try {
                for (EVCacheClient client : instancesByZone.get(sGroup)) {
                    EVCacheItemMetaData itemMetaData = getEVCacheItemMetaData(client, evcKey, true, false, isOriginalKeyHashed);
                    map.put(client.getNodeLocator().getPrimary(key), itemMetaData);
                }
            } catch (Exception e) {
                log.error("Error getting meta data", e);
            }
        }
        return map;
    }
    /** Deletes the key (optionally already hashed) from all copies. */
    public Future<Boolean>[] delete(String key, boolean isOriginalKeyHashed) throws EVCacheException {
        return this.deleteInternal(key, isOriginalKeyHashed);
    }
    public EVCacheInternalImpl(String appName, String cacheName, int timeToLive, Transcoder<?> transcoder, boolean enableZoneFallback,
                               boolean throwException, EVCacheClientPoolManager poolManager) {
        super(appName, cacheName, timeToLive, transcoder, enableZoneFallback, throwException, poolManager);
    }
    /**
     * Writes the value only to write-only clients, using set (replaceItem=true) or add
     * (replaceItem=false, with fixup disabled so add cannot morph into a replace).
     */
    public EVCacheLatch addOrSetToWriteOnly(boolean replaceItem, String key, CachedData value, int timeToLive, EVCacheLatch.Policy policy) throws EVCacheException {
        EVCacheClient[] clients = _pool.getWriteOnlyEVCacheClients();
        if (replaceItem)
            return set(key, value, null, timeToLive, policy, clients, 0);
        else
            return add(key, value, null, timeToLive, policy, clients, 0, false);
    }
    /** Writes the value to the named server groups (all destination IPs). */
    public EVCacheLatch addOrSet(boolean replaceItem, String key, CachedData value, int timeToLive, EVCacheLatch.Policy policy, List<String> serverGroups) throws EVCacheException {
        return addOrSet(replaceItem, key, value, timeToLive, policy, serverGroups, null);
    }
    /** Writes the value to a single named server group (all destination IPs). */
    public EVCacheLatch addOrSet(boolean replaceItem, String key, CachedData value, int timeToLive, EVCacheLatch.Policy policy, String serverGroupName) throws EVCacheException {
        return addOrSet(replaceItem, key, value, timeToLive, policy, serverGroupName, null);
    }
    /** Writes the value to a single named server group, restricted to the given destination IPs. */
    public EVCacheLatch addOrSet(boolean replaceItem, String key, CachedData value, int timeToLive, EVCacheLatch.Policy policy, String serverGroupName, List<String> destinationIps) throws EVCacheException {
        List<String> serverGroups = new ArrayList<>();
        serverGroups.add(serverGroupName);
        return addOrSet(replaceItem, key, value, timeToLive, policy, serverGroups, destinationIps);
    }
    /**
     * Core targeted-write implementation: selects clients belonging to the requested server
     * groups (optionally narrowed to those whose primary node for the derived key matches one of
     * the destination IPs), then performs set or add across them.
     */
    private EVCacheLatch addOrSet(boolean replaceItem, String key, CachedData value, int timeToLive, EVCacheLatch.Policy policy, List<String> serverGroups, List<String> destinationIps) throws EVCacheException {
        Map<ServerGroup, List<EVCacheClient>> clientsByServerGroup = _pool.getAllInstancesByZone();
        List<EVCacheClient> evCacheClients = clientsByServerGroup.entrySet().stream()
                .filter(entry -> serverGroups.contains(entry.getKey().getName()))
                .map(Map.Entry::getValue)
                .flatMap(List::stream)
                .collect(Collectors.toList());
        if (null != destinationIps && !destinationIps.isEmpty()) {
            // identify that evcache client whose primary node is the destination ip for the key being processed
            evCacheClients = evCacheClients.stream().filter(client ->
                    destinationIps.contains(((InetSocketAddress) client.getNodeLocator()
                            .getPrimary(getEVCacheKey(key).getDerivedKey(client.isDuetClient(), client.getHashingAlgorithm(), client.shouldEncodeHashKey(), client.getMaxDigestBytes(), client.getMaxHashLength(), client.getBaseEncoder()))
                            .getSocketAddress()).getAddress().getHostAddress())
            ).collect(Collectors.toList());
        }
        EVCacheClient[] evCacheClientsArray = new EVCacheClient[evCacheClients.size()];
        evCacheClients.toArray(evCacheClientsArray);
        if (replaceItem) {
            return this.set(key, value, null, timeToLive, policy, evCacheClientsArray, evCacheClientsArray.length);
        }
        else {
            // given that we do not want to replace items, we should explicitly set fixup to false, otherwise "add" can
            // result in "set" during fixup which can result in replacing items
            return this.add(key, value, null, timeToLive, policy, evCacheClientsArray, evCacheClientsArray.length, false);
        }
    }
    /**
     * Reports whether keys for the app/server group are hashed: YES when hashing is explicitly
     * enabled at the server-group or app level, MAYBE when auto-hashing is enabled, NO otherwise.
     */
    public KeyHashedState isKeyHashed(String appName, String serverGroup) {
        PropertyRepository propertyRepository = _poolManager.getEVCacheConfig().getPropertyRepository();
        boolean isKeyHashedAtAppOrAsg = propertyRepository.get(serverGroup + ".hash.key", Boolean.class).orElseGet(appName + ".hash.key").orElse(false).get();
        if (isKeyHashedAtAppOrAsg) {
            return KeyHashedState.YES;
        }
        if (propertyRepository.get(appName + ".auto.hash.keys", Boolean.class).orElseGet("evcache.auto.hash.keys").orElse(false).get()) {
            return KeyHashedState.MAYBE;
        }
        return KeyHashedState.NO;
    }
}
| 10,703
| 51.990099
| 232
|
java
|
EVCache
|
EVCache-master/evcache-core/src/main/java/com/netflix/evcache/EVCacheReadQueueException.java
|
package com.netflix.evcache;
/**
 * Raised when an EVCache read operation could not be queued for execution, distinguishing
 * queue-admission failures from other {@link EVCacheException} causes.
 */
public class EVCacheReadQueueException extends EVCacheException {

    private static final long serialVersionUID = -7660503904923117538L;

    /**
     * Creates an exception with the given detail message.
     */
    public EVCacheReadQueueException(String message) {
        super(message);
    }

    /**
     * Creates an exception with the given detail message and underlying cause.
     */
    public EVCacheReadQueueException(String message, Throwable cause) {
        super(message, cause);
    }
}
| 366
| 25.214286
| 71
|
java
|
EVCache
|
EVCache-master/evcache-core/src/main/java/com/netflix/evcache/EVCacheInMemoryCache.java
|
package com.netflix.evcache;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.concurrent.Callable;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock;
import com.netflix.archaius.api.Property;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Optional;
import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.CacheStats;
import com.google.common.cache.LoadingCache;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListenableFutureTask;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
import com.netflix.evcache.metrics.EVCacheMetricsFactory;
import com.netflix.evcache.util.EVCacheConfig;
import com.netflix.spectator.api.BasicTag;
import com.netflix.spectator.api.Counter;
import com.netflix.spectator.api.Gauge;
import com.netflix.spectator.api.Id;
import com.netflix.spectator.api.Tag;
import net.spy.memcached.transcoders.Transcoder;
/**
* An In Memory cache that can be used to hold data for short duration. This is
* helpful when the same key is repeatedly requested from EVCache within a short
* duration. This can be turned on dynamically and can relive pressure on
* EVCache Server instances.
*/
public class EVCacheInMemoryCache<T> {
private static final Logger log = LoggerFactory.getLogger(EVCacheInMemoryCache.class);
private final Property<Integer> _cacheDuration; // The key will be cached for this long
private final Property<Integer> _refreshDuration, _exireAfterAccessDuration;
private final Property<Integer> _cacheSize; // This many items will be cached
private final Property<Integer> _poolSize; // This many threads will be initialized to fetch data from evcache async
private final String appName;
private final Map<String, Counter> counterMap = new ConcurrentHashMap<String, Counter>();
private final Map<String, Gauge> gaugeMap = new ConcurrentHashMap<String, Gauge>();
private LoadingCache<EVCacheKey, Optional<T>> cache;
private ExecutorService pool = null;
private final Transcoder<T> tc;
private final EVCacheImpl impl;
private final Id sizeId;
public EVCacheInMemoryCache(String appName, Transcoder<T> tc, EVCacheImpl impl) {
this.appName = appName;
this.tc = tc;
this.impl = impl;
this._cacheDuration = EVCacheConfig.getInstance().getPropertyRepository().get(appName + ".inmemory.expire.after.write.duration.ms", Integer.class).orElseGet(appName + ".inmemory.cache.duration.ms").orElse(0);
this._cacheDuration.subscribe((i) -> setupCache());
this._exireAfterAccessDuration = EVCacheConfig.getInstance().getPropertyRepository().get(appName + ".inmemory.expire.after.access.duration.ms", Integer.class).orElse(0);
this._exireAfterAccessDuration.subscribe((i) -> setupCache());;
this._refreshDuration = EVCacheConfig.getInstance().getPropertyRepository().get(appName + ".inmemory.refresh.after.write.duration.ms", Integer.class).orElse(0);
this._refreshDuration.subscribe((i) -> setupCache());
this._cacheSize = EVCacheConfig.getInstance().getPropertyRepository().get(appName + ".inmemory.cache.size", Integer.class).orElse(100);
this._cacheSize.subscribe((i) -> setupCache());
this._poolSize = EVCacheConfig.getInstance().getPropertyRepository().get(appName + ".thread.pool.size", Integer.class).orElse(5);
this._poolSize.subscribe((i) -> initRefreshPool());
final List<Tag> tags = new ArrayList<Tag>(3);
tags.addAll(impl.getTags());
tags.add(new BasicTag(EVCacheMetricsFactory.METRIC, "size"));
this.sizeId = EVCacheMetricsFactory.getInstance().getId(EVCacheMetricsFactory.IN_MEMORY, tags);
setupCache();
setupMonitoring(appName);
}
    // Guards pool replacement so concurrent pool-size property changes cannot interleave.
    private WriteLock writeLock = new ReentrantReadWriteLock().writeLock();
    /**
     * (Re)creates the fixed daemon-thread pool used to refresh entries
     * asynchronously (see CacheLoader.reload in setupCache). Any previously
     * installed pool is shut down after the new one is in place, so in-flight
     * refresh tasks are allowed to finish.
     */
    private void initRefreshPool() {
        final ExecutorService oldPool = pool;
        writeLock.lock();
        try {
            final ThreadFactory factory = new ThreadFactoryBuilder().setDaemon(true).setNameFormat(
                    "EVCacheInMemoryCache-%d").build();
            pool = Executors.newFixedThreadPool(_poolSize.get(), factory);
            if(oldPool != null) oldPool.shutdown();
        } finally {
            writeLock.unlock();
        }
    }
    /**
     * Builds (or rebuilds) the guava LoadingCache from the current property
     * values. Called at construction and whenever a sizing/expiry property
     * changes. Existing entries are copied into the new cache before the old
     * one is invalidated, so a rebuild does not empty the cache.
     */
    private void setupCache() {
        try {
            CacheBuilder<Object, Object> builder = CacheBuilder.newBuilder().recordStats();
            if(_cacheSize.get() > 0) {
                builder = builder.maximumSize(_cacheSize.get());
            }
            // expire-after-access takes precedence over expire-after-write when both are set
            if(_exireAfterAccessDuration.get() > 0) {
                builder = builder.expireAfterAccess(_exireAfterAccessDuration.get(), TimeUnit.MILLISECONDS);
            } else if(_cacheDuration.get().intValue() > 0) {
                builder = builder.expireAfterWrite(_cacheDuration.get(), TimeUnit.MILLISECONDS);
            }
            if(_refreshDuration.get() > 0) {
                builder = builder.refreshAfterWrite(_refreshDuration.get(), TimeUnit.MILLISECONDS);
            }
            initRefreshPool();
            final LoadingCache<EVCacheKey, Optional<T>> newCache = builder.build(
                    new CacheLoader<EVCacheKey, Optional<T>>() {
                        // Synchronous miss path: fetch from EVCache via impl.doGet.
                        public Optional<T> load(EVCacheKey key) throws EVCacheException, DataNotFoundException {
                            try {
                                return Optional.fromNullable(impl.doGet(key, tc));
                            } catch (EVCacheException e) {
                                log.error("EVCacheException while loading key -> "+ key, e);
                                throw e;
                            } catch (Exception e) {
                                log.error("EVCacheException while loading key -> "+ key, e);
                                throw new EVCacheException("key : " + key + " could not be loaded", e);
                            }
                        }
                        // Asynchronous refresh path (refreshAfterWrite): runs load()
                        // on the refresh pool; on failure the stale value is kept.
                        @Override
                        public ListenableFuture<Optional<T>> reload(EVCacheKey key, Optional<T> oldValue) {
                            ListenableFutureTask<Optional<T>> task = ListenableFutureTask.create(new Callable<Optional<T>>() {
                                public Optional<T> call() {
                                    try {
                                        final Optional<T> t = load(key);
                                        if(t == null) {
                                            EVCacheMetricsFactory.getInstance().increment("EVCacheInMemoryCache" + "-" + appName + "-Reload-NotFound");
                                            return oldValue;
                                        } else {
                                            EVCacheMetricsFactory.getInstance().increment("EVCacheInMemoryCache" + "-" + appName + "-Reload-Success");
                                        }
                                        return t;
                                    } catch (EVCacheException e) {
                                        log.error("EVCacheException while reloading key -> "+ key, e);
                                        EVCacheMetricsFactory.getInstance().increment("EVCacheInMemoryCache" + "-" + appName + "-Reload-Fail");
                                        return oldValue;
                                    }
                                }
                            });
                            pool.execute(task);
                            return task;
                        }
                    });
            // Preserve existing entries across the rebuild, then swap and drain the old cache.
            if(cache != null) newCache.putAll(cache.asMap());
            final Cache<EVCacheKey, Optional<T>> currentCache = this.cache;
            this.cache = newCache;
            if(currentCache != null) {
                currentCache.invalidateAll();
                currentCache.cleanUp();
            }
        } catch (Exception e) {
            log.error(e.getMessage(), e);
        }
    }
    // Snapshot of guava's cumulative stats from the previous getSize() call;
    // used to turn cumulative stats into per-interval counter increments.
    private CacheStats previousStats = null;
    /**
     * Returns the current entry count and, as a side effect, publishes the
     * delta of guava cache statistics (hits, misses, evictions, load times)
     * accumulated since the previous invocation. Called by the size gauge
     * registered in setupMonitoring() on each polling cycle.
     */
    private long getSize() {
        final long size = cache.size();
        final CacheStats stats = cache.stats();
        if(previousStats != null) {
            try {
                getCounter("hits").increment(stats.hitCount() - previousStats.hitCount());
                getCounter("miss").increment(stats.missCount() - previousStats.missCount());
                getCounter("evictions").increment(stats.evictionCount() - previousStats.evictionCount());
                getCounter("requests").increment(stats.requestCount() - previousStats.requestCount());
                getCounter("loadExceptionCount").increment(stats.loadExceptionCount() - previousStats.loadExceptionCount());
                getCounter("loadCount").increment(stats.loadCount() - previousStats.loadCount());
                getCounter("loadSuccessCount").increment(stats.loadSuccessCount() - previousStats.loadSuccessCount());
                getCounter("totalLoadTime-ms").increment(( stats.totalLoadTime() - previousStats.totalLoadTime())/1000000); // nanos -> millis
                getGauge("hitrate").set(stats.hitRate());
                getGauge("loadExceptionRate").set(stats.loadExceptionRate());
                getGauge("averageLoadTime-ms").set(stats.averageLoadPenalty()/1000000); // nanos -> millis
            } catch(Exception e) {
                log.error("Error while reporting stats", e);
            }
        }
        previousStats = stats;
        return size;
    }
@SuppressWarnings("deprecation")
private void setupMonitoring(final String appName) {
EVCacheMetricsFactory.getInstance().getRegistry().gauge(sizeId, this, EVCacheInMemoryCache::getSize);
}
private Counter getCounter(String name) {
Counter counter = counterMap.get(name);
if(counter != null) return counter;
final List<Tag> tags = new ArrayList<Tag>(3);
tags.addAll(impl.getTags());
tags.add(new BasicTag(EVCacheMetricsFactory.METRIC, name));
counter = EVCacheMetricsFactory.getInstance().getCounter(EVCacheMetricsFactory.IN_MEMORY, tags);
counterMap.put(name, counter);
return counter;
}
private Gauge getGauge(String name) {
Gauge gauge = gaugeMap.get(name);
if(gauge != null) return gauge;
final List<Tag> tags = new ArrayList<Tag>(3);
tags.addAll(impl.getTags());
tags.add(new BasicTag(EVCacheMetricsFactory.METRIC, name));
final Id id = EVCacheMetricsFactory.getInstance().getId(EVCacheMetricsFactory.IN_MEMORY, tags);
gauge = EVCacheMetricsFactory.getInstance().getRegistry().gauge(id);
gaugeMap.put(name, gauge);
return gauge;
}
public T get(EVCacheKey key) throws ExecutionException {
if (cache == null) return null;
final Optional<T> val = cache.get(key);
if(!val.isPresent()) return null;
if (log.isDebugEnabled()) log.debug("GET : appName : " + appName + "; Key : " + key + "; val : " + val);
return val.get();
}
public void put(EVCacheKey key, T value) {
if (cache == null) return;
cache.put(key, Optional.fromNullable(value));
if (log.isDebugEnabled()) log.debug("PUT : appName : " + appName + "; Key : " + key + "; val : " + value);
}
    /**
     * Removes the entry for the given key from the in-memory cache.
     *
     * <p>NOTE(review): the cache is keyed by {@code EVCacheKey}, but this
     * method receives a plain {@code String}. A String will never be equal to
     * an EVCacheKey, so this invalidation looks like a no-op — confirm the
     * intended key type with callers.
     *
     * @param key the key to invalidate
     */
    public void delete(String key) {
        if (cache == null) return;
        cache.invalidate(key);
        if (log.isDebugEnabled()) log.debug("DEL : appName : " + appName + "; Key : " + key);
    }
public Map<EVCacheKey, Optional<T>> getAll() {
if (cache == null) return Collections.<EVCacheKey, Optional<T>>emptyMap();
return cache.asMap();
}
    /**
     * Thrown by the cache loader when no data exists for a key.
     */
    public static final class DataNotFoundException extends EVCacheException {
        private static final long serialVersionUID = 1800185311509130263L;
        public DataNotFoundException(String message) {
            super(message);
        }
    }
}
| 12,483
| 46.287879
| 216
|
java
|
EVCache
|
EVCache-master/evcache-core/src/main/java/com/netflix/evcache/EVCacheConnectException.java
|
package com.netflix.evcache;
import java.io.IOException;
/**
 * Signals that a connection to an EVCache server could not be established.
 */
public class EVCacheConnectException extends IOException {

    private static final long serialVersionUID = 8065483548278456469L;

    /**
     * Creates the exception with a detail message.
     *
     * @param message description of the connect failure
     */
    public EVCacheConnectException(String message) {
        super(message);
    }

    /**
     * Creates the exception with a detail message and its underlying cause.
     *
     * @param message description of the connect failure
     * @param cause   the root cause of the failure
     */
    public EVCacheConnectException(String message, Throwable cause) {
        super(message, cause);
    }
}
| 383
| 23
| 70
|
java
|
EVCache
|
EVCache-master/evcache-core/src/main/java/com/netflix/evcache/EVCacheMissException.java
|
package com.netflix.evcache;
/**
 * Signals that a requested key was not found in EVCache.
 */
public class EVCacheMissException extends EVCacheException {

    private static final long serialVersionUID = 222337840463312890L;

    /**
     * Creates the exception with a detail message.
     *
     * @param message description of the miss
     */
    public EVCacheMissException(String message) {
        super(message);
    }

    /**
     * Creates the exception with a detail message and its underlying cause.
     *
     * @param message description of the miss
     * @param cause   the root cause
     */
    public EVCacheMissException(String message, Throwable cause) {
        super(message, cause);
    }
}
| 351
| 21
| 69
|
java
|
EVCache
|
EVCache-master/evcache-core/src/main/java/com/netflix/evcache/config/EVCachePersistedProperties.java
|
package com.netflix.evcache.config;
import java.net.URL;
import java.net.URLEncoder;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.netflix.archaius.config.PollingDynamicConfig;
import com.netflix.archaius.config.polling.FixedPollingStrategy;
import com.netflix.archaius.persisted2.DefaultPersisted2ClientConfig;
import com.netflix.archaius.persisted2.JsonPersistedV2Reader;
import com.netflix.archaius.persisted2.Persisted2ClientConfig;
import com.netflix.archaius.persisted2.ScopePredicates;
import com.netflix.archaius.persisted2.loader.HTTPStreamLoader;
/**
 * Loads EVCache property overrides from the Netflix persisted-properties
 * (platformservice) REST endpoint and exposes them as an Archaius
 * {@link PollingDynamicConfig} layer.
 */
public class EVCachePersistedProperties {
    private static Logger log = LoggerFactory.getLogger(EVCachePersistedProperties.class);
    // Scope dimension names understood by the persisted-properties service.
    private static final String SCOPE_CLUSTER = "cluster";
    private static final String SCOPE_AMI = "ami";
    private static final String SCOPE_ZONE = "zone";
    private static final String SCOPE_ASG = "asg";
    private static final String SCOPE_SERVER_ID = "serverId";
    private static final String SCOPE_REGION = "region";
    private static final String SCOPE_STACK = "stack";
    private static final String SCOPE_ENV = "env";
    private static final String SCOPE_APP_ID = "appId";
    private PollingDynamicConfig config; // last config built by getPollingDynamicConfig()
    public EVCachePersistedProperties() {
    }
    /**
     * Builds the client config describing where to fetch properties from and
     * which scopes (app, env, stack, region, instance, ...) apply to this
     * process. Values come from system properties with environment-variable
     * fallbacks.
     */
    private Persisted2ClientConfig getConfig() {
        final String region = System.getProperty("netflix.region", getSystemEnvValue("NETFLIX_REGION", "us-east-1"));
        final String env = System.getProperty("netflix.environment", getSystemEnvValue("NETFLIX_ENVIRONMENT", "test"));
        String url = System.getProperty("platformserviceurl", "http://platformservice."+region+".dyn" + env +".netflix.net:7001/platformservice/REST/v2/properties/jsonFilterprops");
        return new DefaultPersisted2ClientConfig()
                .setEnabled(true)
                .withServiceUrl(url)
                .withQueryScope(SCOPE_APP_ID, System.getProperty("netflix.appId", getSystemEnvValue("NETFLIX_APP", "")), "")
                .withQueryScope(SCOPE_ENV, env, "")
                .withQueryScope(SCOPE_STACK, System.getProperty("netflix.stack", getSystemEnvValue("NETFLIX_STACK", "")), "")
                .withQueryScope(SCOPE_REGION, region, "")
                .withScope(SCOPE_APP_ID, System.getProperty("netflix.appId", getSystemEnvValue("NETFLIX_APP", "")))
                .withScope(SCOPE_ENV, env)
                .withScope(SCOPE_STACK, System.getProperty("netflix.stack", getSystemEnvValue("NETFLIX_STACK", "")))
                .withScope(SCOPE_REGION, region)
                .withScope(SCOPE_SERVER_ID, System.getProperty("netflix.serverId", getSystemEnvValue("NETFLIX_INSTANCE_ID", "")))
                .withScope(SCOPE_ASG, System.getProperty("netflix.appinfo.asgName", getSystemEnvValue("NETFLIX_AUTO_SCALE_GROUP", "")))
                .withScope(SCOPE_ZONE, getSystemEnvValue("EC2_AVAILABILITY_ZONE", ""))
                .withScope(SCOPE_AMI, getSystemEnvValue("EC2_AMI_ID", ""))
                .withScope(SCOPE_CLUSTER, getSystemEnvValue("NETFLIX_CLUSTER", ""))
                // Most specific scope first: instance beats ASG beats app-wide, etc.
                .withPrioritizedScopes(SCOPE_SERVER_ID, SCOPE_ASG, SCOPE_AMI, SCOPE_CLUSTER, SCOPE_APP_ID, SCOPE_ENV, SCOPE_STACK, SCOPE_ZONE, SCOPE_REGION)
                ;
    }
    // Returns the environment variable value, or def when it is unset.
    private String getSystemEnvValue(String key, String def) {
        final String val = System.getenv(key);
        return val == null ? def : val;
    }
    /**
     * Renders the query scopes as the SQL-like filter string the service
     * expects, e.g. {@code (env='test' or env='') and (region='us-east-1')}.
     * Scopes with no values are skipped.
     */
    private String getFilterString(Map<String, Set<String>> scopes) {
        StringBuilder sb = new StringBuilder();
        for (Entry<String, Set<String>> scope : scopes.entrySet()) {
            if (scope.getValue().isEmpty())
                continue;
            if (sb.length() > 0) {
                sb.append(" and ");
            }
            sb.append("(");
            boolean first = true;
            for (String value : scope.getValue()) {
                if (!first) {
                    sb.append(" or ");
                }
                else {
                    first = false;
                }
                sb.append(scope.getKey());
                if (null == value) {
                    sb.append(" is null");
                }
                else if (value.isEmpty()) {
                    sb.append("=''");
                }
                else {
                    sb.append("='").append(value).append("'");
                }
            }
            sb.append(")");
        }
        return sb.toString();
    }
    /**
     * Creates a {@link PollingDynamicConfig} that periodically fetches the
     * scoped properties from the service, or {@code null} when the client
     * config is disabled.
     *
     * @throws RuntimeException wrapping any failure to build the reader or URL
     */
    public PollingDynamicConfig getPollingDynamicConfig() {
        try {
            Persisted2ClientConfig clientConfig = getConfig();
            log.info("Remote config : " + clientConfig);
            String url = new StringBuilder()
                    .append(clientConfig.getServiceUrl())
                    .append("?skipPropsWithExtraScopes=").append(clientConfig.getSkipPropsWithExtraScopes())
                    .append("&filter=").append(URLEncoder.encode(getFilterString(clientConfig.getQueryScopes()), "UTF-8"))
                    .toString();
            if (clientConfig.isEnabled()) {
                JsonPersistedV2Reader reader = JsonPersistedV2Reader.builder(new HTTPStreamLoader(new URL(url)))
                        .withPath("propertiesList")
                        .withScopes(clientConfig.getPrioritizedScopes())
                        .withPredicate(ScopePredicates.fromMap(clientConfig.getScopes()))
                        .build();
                config = new PollingDynamicConfig(reader, new FixedPollingStrategy(clientConfig.getRefreshRate(), TimeUnit.SECONDS));
                return config;
            }
        } catch (Exception e1) {
            throw new RuntimeException(e1);
        }
        return null;
    }
}
| 6,075
| 44.343284
| 181
|
java
|
EVCache
|
EVCache-master/evcache-core/src/main/java/com/netflix/evcache/util/ServerGroupCircularIterator.java
|
package com.netflix.evcache.util;
import java.util.Iterator;
import java.util.Set;
import com.netflix.evcache.pool.ServerGroup;
/**
 * A circular iterator over server groups. This ensures that all server groups
 * receive an equal number of requests.
 *
 * <p>Not thread-safe: {@link #next()} mutates the cursor.
 *
 * @author smadappa
 */
public class ServerGroupCircularIterator {
    private Entry<ServerGroup> entry; // current cursor position in the circular list
    private int size = 0;

    /**
     * Creates an instance of ServerGroupCircularIterator across all server groups.
     *
     * @param allReplicaSets
     *            Set of all available server groups.
     */
    public ServerGroupCircularIterator(Set<ServerGroup> allReplicaSets) {
        if (allReplicaSets == null || allReplicaSets.isEmpty()) return;
        Entry<ServerGroup> pEntry = null;
        for (Iterator<ServerGroup> itr = allReplicaSets.iterator(); itr.hasNext();) {
            size++;
            final ServerGroup rSet = itr.next();
            final Entry<ServerGroup> newEntry = new Entry<ServerGroup>(rSet, pEntry);
            if (entry == null) entry = newEntry;
            pEntry = newEntry;
        }
        /*
         * Connect the first and the last entry to form a circular list
         */
        if (pEntry != null) {
            entry.next = pEntry;
        }
    }

    /**
     * Returns the next server group which should get the request.
     *
     * @return - the next server group in the iterator. If there are none then
     *         null is returned.
     */
    public ServerGroup next() {
        if (entry == null) return null;
        entry = entry.next;
        return entry.element;
    }

    /**
     * Returns the next server group, excluding the given one, which should get
     * the request.
     *
     * @return - the next server group in the iterator. If there are none then
     *         null is returned.
     */
    public ServerGroup next(ServerGroup ignoreReplicaSet) {
        if (entry == null) return null;
        entry = entry.next;
        if (entry.element.equals(ignoreReplicaSet)) {
            return entry.next.element;
        } else {
            return entry.element;
        }
    }

    public int getSize() {
        return size;
    }

    /**
     * The Entry keeps track of the current element and next element in the
     * list.
     *
     * @author smadappa
     *
     * @param <E>
     */
    static class Entry<E> {
        private E element;
        private Entry<E> next;

        /**
         * Creates an instance of Entry.
         */
        Entry(E element, Entry<E> next) {
            this.element = element;
            this.next = next;
        }
    }

    /**
     * Renders all elements in ring order.
     *
     * <p>Fix: walks the ring with a local cursor instead of advancing the
     * shared {@code entry} field — the original implementation mutated the
     * iterator's position as a side effect of {@code toString()}, perturbing
     * subsequent {@link #next()} calls.
     */
    public String toString() {
        final StringBuilder current = new StringBuilder();
        if (entry != null) {
            final Entry<ServerGroup> startEntry = entry;
            Entry<ServerGroup> cursor = startEntry;
            current.append(cursor.element);
            while (!cursor.next.equals(startEntry)) {
                current.append(",").append(cursor.next.element);
                cursor = cursor.next;
            }
        }
        return "Server Group Iterator : { size=" + getSize() + "; Server Group=" + current.toString() + "}";
    }
}
| 3,144
| 27.333333
| 108
|
java
|
EVCache
|
EVCache-master/evcache-core/src/main/java/com/netflix/evcache/util/KeyHasher.java
|
package com.netflix.evcache.util;
import java.util.Arrays;
import java.util.Base64;
import java.util.Base64.Encoder;
import org.apache.log4j.BasicConfigurator;
import org.apache.log4j.ConsoleAppender;
import org.apache.log4j.Level;
import org.apache.log4j.PatternLayout;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.github.fzakaria.ascii85.Ascii85;
import com.google.common.base.Charsets;
import com.google.common.hash.HashCode;
import com.google.common.hash.HashFunction;
import com.google.common.hash.Hashing;
import com.netflix.archaius.api.Property;
public class KeyHasher {
    /**
     * Utility for hashing cache keys (to bound key length) and encoding the
     * digest as text. Memcached per-item metadata overhead, for reference when
     * sizing keys:
     * 40 + key + 'item_hdr' size
       i.e.
       40 + keysize + 12
       And if client flags are present:
       40 + keysize + 4 bytes(for flags) + 12
       And if CAS and client flags are present:
       40 + keysize + 4 bytes(for flags) + 8(for CAS) + 12
     */
    public enum HashingAlgorithm {
        murmur3,
        adler32,
        crc32,
        sha1,
        sha256,
        siphash24,
        md5,
        NO_HASHING // useful for disabling hashing at client level, while Hashing is enabled at App level
    }
    /**
     * Parses an algorithm name (case-insensitive). Returns null for a
     * null/empty input; an unrecognized name deliberately falls back to md5
     * rather than failing.
     */
    public static HashingAlgorithm getHashingAlgorithmFromString(String algorithmStr) {
        try {
            if (null == algorithmStr || algorithmStr.isEmpty()) {
                return null;
            }
            return HashingAlgorithm.valueOf(algorithmStr.toLowerCase());
        } catch (IllegalArgumentException ex) {
            // default to md5 incase of unsupported algorithm
            return HashingAlgorithm.md5;
        }
    }
    private static final Logger log = LoggerFactory.getLogger(KeyHasher.class);
    // Base64 without padding keeps encoded keys memcached-safe and short.
    private static final Encoder encoder= Base64.getEncoder().withoutPadding();
    /**
     * Hashes the key and encodes the digest using unpadded Base64.
     * See {@link #getHashedKeyEncoded(String, HashingAlgorithm, Integer, Integer, String)}.
     */
    public static String getHashedKeyEncoded(String key, HashingAlgorithm hashingAlgorithm, Integer maxDigestBytes, Integer maxHashLength) {
        return getHashedKeyEncoded(key, hashingAlgorithm, maxDigestBytes, maxHashLength, null);
    }
    /**
     * Hashes the key and encodes the digest as text.
     *
     * @param key              the raw cache key
     * @param hashingAlgorithm digest algorithm to apply
     * @param maxDigestBytes   optional cap on digest length, applied before encoding
     * @param maxHashLength    optional cap on the encoded string (Base64 path only)
     * @param baseEncoder      "ascii85" for Ascii85 encoding; anything else uses
     *                         unpadded Base64. Note maxHashLength is not applied
     *                         on the ascii85 path.
     * @return the encoded hash of the key
     */
    public static String getHashedKeyEncoded(String key, HashingAlgorithm hashingAlgorithm, Integer maxDigestBytes, Integer maxHashLength, String baseEncoder) {
        final long start = System.nanoTime();
        byte[] digest = getHashedKey(key, hashingAlgorithm, maxDigestBytes);
        if(log.isDebugEnabled()) {
            // Hex-dump the digest for debugging only.
            final char[] HEX_ARRAY = "0123456789ABCDEF".toCharArray();
            char[] hexChars = new char[digest.length * 2];
            for (int j = 0; j < digest.length; j++) {
                int v = digest[j] & 0xFF;
                hexChars[j * 2] = HEX_ARRAY[v >>> 4];
                hexChars[j * 2 + 1] = HEX_ARRAY[v & 0x0F];
            }
            log.debug("Key : " + key +"; hex : " + new String(hexChars));
        }
        if(log.isDebugEnabled()) log.debug("Key : " + key +"; digest length : " + digest.length + "; byte Array contents : " + Arrays.toString(digest) );
        String hKey = null;
        if(baseEncoder != null && baseEncoder.equals("ascii85")) {
            hKey = Ascii85.encode(digest);
            if(log.isDebugEnabled()) log.debug("Key : " + key +"; Hashed & Ascii85 encoded key : " + hKey + "; Took " + (System.nanoTime() - start) + " nanos");
        } else {
            hKey = encoder.encodeToString(digest);
            // Truncate the encoded form when a max length is configured.
            if (null != hKey && maxHashLength != null && maxHashLength > 0 && maxHashLength < hKey.length()) {
                hKey = hKey.substring(0, maxHashLength);
            }
            if(log.isDebugEnabled()) log.debug("Key : " + key +"; Hashed & encoded key : " + hKey + "; Took " + (System.nanoTime() - start) + " nanos");
        }
        return hKey;
    }
    /**
     * Hashes the key and returns the raw (optionally truncated) digest bytes.
     */
    public static byte[] getHashedKeyInBytes(String key, HashingAlgorithm hashingAlgorithm, Integer maxDigestBytes) {
        final long start = System.nanoTime();
        byte[] digest = getHashedKey(key, hashingAlgorithm, maxDigestBytes);
        if(log.isDebugEnabled()) log.debug("Key : " + key +"; digest length : " + digest.length + "; byte Array contents : " + Arrays.toString(digest) + "; Took " + (System.nanoTime() - start) + " nanos");
        return digest;
    }
    // Computes the digest of the UTF-8 bytes of the key with the chosen
    // algorithm; unknown/md5 falls through to MD5. Truncates to maxDigestBytes
    // when configured.
    private static byte[] getHashedKey(String key, HashingAlgorithm hashingAlgorithm, Integer maxDigestBytes) {
        HashFunction hf = null;
        switch (hashingAlgorithm) {
            case murmur3:
                hf = Hashing.murmur3_128();
                break;
            case adler32:
                hf = Hashing.adler32();
                break;
            case crc32:
                hf = Hashing.crc32();
                break;
            case sha1:
                hf = Hashing.sha1();
                break;
            case sha256:
                hf = Hashing.sha256();
                break;
            case siphash24:
                hf = Hashing.sipHash24();
                break;
            case md5:
            default:
                hf = Hashing.md5();
                break;
        }
        final HashCode hc = hf.newHasher().putString(key, Charsets.UTF_8).hash();
        final byte[] digest = hc.asBytes();
        if (maxDigestBytes != null && maxDigestBytes > 0 && maxDigestBytes < digest.length) {
            return Arrays.copyOfRange(digest, 0, maxDigestBytes);
        }
        return digest;
    }
    // Ad-hoc manual test entry point; configures debug logging and prints one hash.
    public static void main(String args[]) {
        BasicConfigurator.resetConfiguration();
        BasicConfigurator.configure(new ConsoleAppender(new PatternLayout("%d{HH:mm:ss,SSS} [%t] %p %c %x - %m%n")));
        org.apache.log4j.Logger.getRootLogger().setLevel(Level.DEBUG);
        String key = "MAP_LT:721af5a5-3452-4b62-86fb-5f31ccde8d99_187978153X28X2787347X1601330156682";
        System.out.println(getHashedKeyEncoded(key, HashingAlgorithm.murmur3, null, null));
    }
}
| 5,770
| 36.718954
| 205
|
java
|
EVCache
|
EVCache-master/evcache-core/src/main/java/com/netflix/evcache/util/Pair.java
|
package com.netflix.evcache.util;
/**
 * A simple mutable holder for two related values.
 *
 * @param <E1> type of the first element
 * @param <E2> type of the second element
 */
public class Pair<E1, E2> {
    private E1 first;
    private E2 second;

    /**
     * Creates a pair from the two given elements.
     *
     * @param first  the first element
     * @param second the second element
     */
    public Pair(E1 first, E2 second) {
        this.first = first;
        this.second = second;
    }

    /** @return the first element */
    public E1 first() {
        return first;
    }

    /** Replaces the first element. */
    public void setFirst(E1 first) {
        this.first = first;
    }

    /** @return the second element */
    public E2 second() {
        return second;
    }

    /** Replaces the second element. */
    public void setSecond(E2 second) {
        this.second = second;
    }
}
| 469
| 16.407407
| 38
|
java
|
EVCache
|
EVCache-master/evcache-core/src/main/java/com/netflix/evcache/util/CircularIterator.java
|
package com.netflix.evcache.util;
import java.lang.reflect.Array;
import java.util.Collection;
import java.util.Iterator;
/**
 * A circular iterator over a collection of elements. This ensures that all
 * elements receive an equal number of requests.
 *
 * <p>Not thread-safe: {@link #next()} mutates the cursor.
 *
 * @author smadappa
 */
public class CircularIterator<T> {
    private Entry<T> entry; // current cursor position in the circular list
    private int size = 0;

    /**
     * Creates a circular iterator across all given elements.
     *
     * @param allReplicaSets
     *            Collection of all available elements.
     */
    public CircularIterator(Collection<T> allReplicaSets) {
        if (allReplicaSets == null || allReplicaSets.isEmpty()) return;
        Entry<T> pEntry = null;
        for (Iterator<T> itr = allReplicaSets.iterator(); itr.hasNext();) {
            size++;
            final T rSet = itr.next();
            final Entry<T> newEntry = new Entry<T>(rSet, pEntry);
            if (entry == null) entry = newEntry;
            pEntry = newEntry;
        }
        /*
         * Connect the first and the last entry to form a circular list
         */
        if (pEntry != null) {
            entry.next = pEntry;
        }
    }

    /**
     * Returns the next element which should get the request.
     *
     * @return - the next element in the iterator. If there are none then null
     *         is returned.
     */
    public T next() {
        if (entry == null) return null;
        entry = entry.next;
        return entry.element;
    }

    /**
     * Returns the next element, excluding the given one, which should get the
     * request.
     *
     * @return - the next element in the iterator. If there are none then null
     *         is returned.
     */
    public T next(T ignoreReplicaSet) {
        if (entry == null) return null;
        entry = entry.next;
        if (entry.element.equals(ignoreReplicaSet)) {
            return entry.next.element;
        } else {
            return entry.element;
        }
    }

    public int getSize() {
        return size;
    }

    /**
     * The Entry keeps track of the current element and next element in the
     * list.
     *
     * @author smadappa
     *
     * @param <E>
     */
    static class Entry<E> {
        private E element;
        private Entry<E> next;

        /**
         * Creates an instance of Entry.
         */
        Entry(E element, Entry<E> next) {
            this.element = element;
            this.next = next;
        }
    }

    /**
     * Renders all elements in ring order; array elements are expanded
     * index-by-index.
     *
     * <p>Fix: walks the ring with a local cursor instead of advancing the
     * shared {@code entry} field — the original implementation mutated the
     * iterator's position as a side effect of {@code toString()}, causing the
     * following {@link #next()} call to repeat an element.
     */
    public String toString() {
        final StringBuilder current = new StringBuilder();
        if (entry != null) {
            final Entry<T> startEntry = entry;
            Entry<T> cursor = startEntry;
            if (cursor.element.getClass().isArray()) {
                for (int i = 0; i < Array.getLength(cursor.element); i++) {
                    if (i > 0) current.append(",");
                    current.append("[").append(i).append(", ").append(Array.get(cursor.element, i).toString()).append("]");
                }
            } else {
                current.append(cursor.element);
            }
            while (!cursor.next.equals(startEntry)) {
                if (cursor.next.element.getClass().isArray()) {
                    for (int i = 0; i < Array.getLength(cursor.next.element); i++) {
                        if (i > 0) current.append(",");
                        current.append("[").append(i).append(", ").append(Array.get(cursor.next.element, i).toString()).append("]");
                    }
                } else {
                    current.append(",[").append(cursor.next.element).append("]");
                }
                cursor = cursor.next;
            }
        }
        return "Server Group Iterator : { size=" + getSize() + "; Server Group=" + current.toString() + "}";
    }
}
| 3,802
| 29.669355
| 131
|
java
|
EVCache
|
EVCache-master/evcache-core/src/main/java/com/netflix/evcache/util/RetryCount.java
|
package com.netflix.evcache.util;
/**
 * A simple mutable attempt counter. The count starts at {@code 1}, i.e. the
 * initial attempt counts as the first one.
 */
public class RetryCount {
    private int retryCount;

    /** Creates a counter whose initial value is {@code 1}. */
    public RetryCount() {
        retryCount = 1;
    }

    /** Records one additional attempt. */
    public void incr() {
        retryCount++;
    }

    /** @return the number of attempts so far, including the first */
    public int get() {
        return retryCount;
    }
}
| 255
| 16.066667
| 33
|
java
|
EVCache
|
EVCache-master/evcache-core/src/main/java/com/netflix/evcache/util/EVCacheBulkDataDto.java
|
package com.netflix.evcache.util;
import com.netflix.evcache.EVCacheKey;
import java.util.List;
import java.util.Map;
/**
 * Mutable holder pairing the decanonicalized results of a bulk operation with
 * the EVCache keys that produced them.
 *
 * @param <T> value type of the bulk results
 */
public class EVCacheBulkDataDto<T> {
    private Map<String, T> decanonicalR;
    private List<EVCacheKey> evcKeys;

    /**
     * Creates the DTO.
     *
     * @param decanonicalR results keyed by their decanonicalized key
     * @param evcKeys      the EVCache keys involved in the bulk call
     */
    public EVCacheBulkDataDto(Map<String, T> decanonicalR, List<EVCacheKey> evcKeys) {
        this.decanonicalR = decanonicalR;
        this.evcKeys = evcKeys;
    }

    /** @return results keyed by their decanonicalized key */
    public Map<String, T> getDecanonicalR() {
        return decanonicalR;
    }

    /** @return the EVCache keys involved in the bulk call */
    public List<EVCacheKey> getEvcKeys() {
        return evcKeys;
    }

    /** Replaces the result map. */
    public void setDecanonicalR(Map<String, T> decanonicalR) {
        this.decanonicalR = decanonicalR;
    }

    /** Replaces the key list. */
    public void setEvcKeys(List<EVCacheKey> evcKeys) {
        this.evcKeys = evcKeys;
    }
}
| 769
| 22.333333
| 86
|
java
|
EVCache
|
EVCache-master/evcache-core/src/main/java/com/netflix/evcache/util/ZoneFallbackIterator.java
|
/**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.evcache.util;
import java.util.Iterator;
import java.util.Set;
/**
 * A zone-based fallback circular iterator. During a fallback scenario this
 * spreads requests evenly across all zones.
 *
 * <p>Not thread-safe: {@link #next()} mutates the cursor.
 *
 * @author smadappa
 */
public class ZoneFallbackIterator {
    private Entry<String> entry; // current cursor position in the circular list
    private int size = 0;

    /**
     * Creates an instance of ZoneFallbackIterator given all the zones.
     *
     * @param allZones
     *            Set of all available zones.
     */
    public ZoneFallbackIterator(Set<String> allZones) {
        if (allZones == null || allZones.size() == 0) {
            return;
        }
        Entry<String> previous = null;
        for (String zone : allZones) {
            size++;
            final Entry<String> created = new Entry<String>(zone, previous);
            if (entry == null) {
                entry = created;
            }
            previous = created;
        }
        // Link the first entry back to the last one to close the ring.
        if (previous != null) {
            entry.next = previous;
        }
    }

    /**
     * Returns the next zone from the set which should get the request.
     *
     * @return - the next zone in the iterator. If there are none then null is
     *         returned.
     */
    public String next() {
        if (entry == null) {
            return null;
        }
        entry = entry.next;
        return entry.element;
    }

    /**
     * Returns the next zone from the set, excluding the given zone, which
     * should get the request.
     *
     * @return - the next zone in the iterator. If there are none then null is
     *         returned.
     */
    public String next(String ignoreZone) {
        if (entry == null) {
            return null;
        }
        entry = entry.next;
        return entry.element.equals(ignoreZone) ? entry.next.element : entry.element;
    }

    public int getSize() {
        return size;
    }

    /**
     * The Entry keeps track of the current element and next element in the
     * list.
     *
     * @author smadappa
     *
     * @param <E>
     */
    static class Entry<E> {
        private E element;
        private Entry<E> next;

        /**
         * Creates an instance of Entry.
         */
        Entry(E element, Entry<E> next) {
            this.element = element;
            this.next = next;
        }
    }
}
| 3,080
| 26.756757
| 79
|
java
|
EVCache
|
EVCache-master/evcache-core/src/main/java/com/netflix/evcache/util/EVCacheConfig.java
|
package com.netflix.evcache.util;
import java.lang.reflect.Type;
import java.util.function.Consumer;
import java.util.function.Function;
import javax.inject.Inject;
import com.netflix.archaius.DefaultPropertyFactory;
import com.netflix.archaius.api.Property;
import com.netflix.archaius.api.PropertyListener;
import com.netflix.archaius.api.PropertyRepository;
import com.netflix.archaius.api.config.CompositeConfig;
import com.netflix.archaius.config.DefaultCompositeConfig;
import com.netflix.archaius.config.DefaultSettableConfig;
import com.netflix.archaius.config.EnvironmentConfig;
import com.netflix.archaius.config.SystemConfig;
import com.netflix.evcache.config.EVCachePersistedProperties;
/**
 * Singleton access point for EVCache's Archaius {@link PropertyRepository}.
 * When no repository is injected, builds a layered config (runtime, remote
 * persisted properties, system, environment) on its own.
 */
public class EVCacheConfig {

    private static EVCacheConfig INSTANCE;
    /**
     * This is an hack, should find a better way to do this
     **/
    // NOTE(review): static mutable repository shared across instances — the
    // author's comment above acknowledges this; confirm before relying on
    // multiple EVCacheConfig instances.
    private static PropertyRepository propertyRepository;

    /**
     * Creates the config, wrapping the given repository; when {@code null},
     * assembles a default layered configuration including remote persisted
     * properties. Also installs itself as the singleton instance.
     *
     * @param repository repository to wrap, or null to build a default one
     */
    @Inject
    public EVCacheConfig(PropertyRepository repository) {
        PropertyRepository _propertyRepository = null;
        if(repository == null) {
            try {
                // Layer order: RUNTIME overrides REMOTE overrides SYSTEM overrides ENVIRONMENT.
                final CompositeConfig applicationConfig = new DefaultCompositeConfig(true);
                CompositeConfig remoteLayer = new DefaultCompositeConfig(true);
                applicationConfig.addConfig("RUNTIME", new DefaultSettableConfig());
                applicationConfig.addConfig("REMOTE", remoteLayer);
                applicationConfig.addConfig("SYSTEM", SystemConfig.INSTANCE);
                applicationConfig.addConfig("ENVIRONMENT", EnvironmentConfig.INSTANCE);
                final EVCachePersistedProperties remote = new EVCachePersistedProperties();
                remoteLayer.addConfig("remote-1", remote.getPollingDynamicConfig());
                _propertyRepository = new DefaultPropertyFactory(applicationConfig);
            } catch (Exception e) {
                // Fall back to an empty config so the client can still start.
                e.printStackTrace();
                _propertyRepository = new DefaultPropertyFactory(new DefaultCompositeConfig());
            }
        } else {
            _propertyRepository = repository;
        }
        propertyRepository = new EVCachePropertyRepository(_propertyRepository);
        //propertyRepository = _propertyRepository;
        INSTANCE = this;
    }

    // No-arg construction builds the default layered configuration.
    private EVCacheConfig() {
        this(null);
    }

    /** Returns the singleton, lazily constructing it with defaults if needed. */
    public static EVCacheConfig getInstance() {
        if(INSTANCE == null) new EVCacheConfig();
        return INSTANCE;
    }

    /** Returns the (wrapped) property repository in use. */
    public PropertyRepository getPropertyRepository() {
        return propertyRepository;
    }

    /** Replaces the repository globally (primarily for tests/bootstrap). */
    public static void setPropertyRepository(PropertyRepository repository) {
        propertyRepository = repository;
    }

    /**
     * Repository wrapper that returns {@link EVCacheProperty} instances so
     * property values render usefully in logs (see toString below).
     */
    class EVCachePropertyRepository implements PropertyRepository {
        private final PropertyRepository delegate;

        EVCachePropertyRepository(PropertyRepository delegate) {
            this.delegate = delegate;
        }

        @Override
        public <T> Property<T> get(String key, Class<T> type) {
            return new EVCacheProperty<T>(delegate.get(key, type));
        }

        @Override
        public <T> Property<T> get(String key, Type type) {
            return new EVCacheProperty<T>(delegate.get(key, type));
        }
    }

    /**
     * Thin Property decorator; every operation delegates to the wrapped
     * property, and toString exposes both key and current value.
     */
    class EVCacheProperty<T> implements Property<T> {
        private final Property<T> property;

        EVCacheProperty(Property<T> prop) {
            property = prop;
        }

        @Override
        public T get() {
            return property.get();
        }

        @Override
        public String getKey() {
            return property.getKey();
        }

        @Override
        public void addListener(PropertyListener<T> listener) {
            // Delegates to the wrapped property.
            property.addListener(listener);
        }

        @Override
        public void removeListener(PropertyListener<T> listener) {
            // Delegates to the wrapped property.
            property.removeListener(listener);
        }

        @Override
        public Subscription onChange(Consumer<T> consumer) {
            // Delegates to the wrapped property.
            return property.onChange(consumer);
        }

        @Override
        public Subscription subscribe(Consumer<T> consumer) {
            // Delegates to the wrapped property.
            return property.subscribe(consumer);
        }

        @Override
        public Property<T> orElse(T defaultValue) {
            // Re-wrap so the decorated toString is preserved on the chained property.
            return new EVCacheProperty<T>(property.orElse(defaultValue));
        }

        @Override
        public Property<T> orElseGet(String key) {
            // Re-wrap so the decorated toString is preserved on the chained property.
            return new EVCacheProperty<T>(property.orElseGet(key));
        }

        @Override
        public <S> Property<S> map(Function<T, S> mapper) {
            // Delegates to the wrapped property (mapped result is not re-wrapped).
            return property.map(mapper);
        }

        @Override
        public String toString() {
            return "EVCacheProperty [Key=" + getKey() + ",value="+get() + "]";
        }
    }
}
| 5,051
| 30.575
| 92
|
java
|
EVCache
|
EVCache-master/evcache-core/src/main/java/com/netflix/evcache/util/SupplierUtils.java
|
package com.netflix.evcache.util;
import java.util.concurrent.Callable;
import java.util.function.Supplier;
/**
 * Helpers for adapting {@link Callable} instances to {@link Supplier}.
 */
public final class SupplierUtils {

    private SupplierUtils() {
        // static utility class; not instantiable
    }

    /**
     * Wraps a {@link Callable} as a {@link Supplier}. Checked exceptions
     * thrown by {@code call()} are rethrown wrapped in a
     * {@link RuntimeException}; runtime exceptions propagate unchanged.
     *
     * @param callable the task to adapt
     * @param <T>      the result type
     * @return a supplier that invokes {@code callable.call()}
     */
    public static <T> Supplier<T> wrap(Callable<T> callable) {
        return () -> {
            try {
                return callable.call();
            } catch (RuntimeException runtime) {
                throw runtime;
            } catch (Exception checked) {
                throw new RuntimeException(checked);
            }
        };
    }
}
| 534
| 21.291667
| 62
|
java
|
EVCache
|
EVCache-master/evcache-core/src/main/java/com/netflix/evcache/util/Sneaky.java
|
package com.netflix.evcache.util;
/**
 * Sneaky can be used to sneakily throw checked exceptions without declaring
 * them in your method's throws clause, via the generic-erasure trick. This
 * somewhat contentious ability should be used carefully, of course.
 */
public class Sneaky {

    /**
     * Rethrows {@code t} as-is, even when it is a checked exception. Declared
     * to return {@link RuntimeException} so callers can write
     * {@code throw Sneaky.sneakyThrow(e);} and satisfy the compiler's flow
     * analysis; it never actually returns normally.
     *
     * @param t the throwable to rethrow; must not be {@code null}
     * @return never returns normally
     * @throws NullPointerException if {@code t} is {@code null}
     */
    public static RuntimeException sneakyThrow(Throwable t) {
        if (t == null) {
            throw new NullPointerException("t");
        }
        Sneaky.<RuntimeException>sneakyThrow0(t);
        return null;
    }

    // Erasure makes the cast to T a runtime no-op, so the original throwable
    // escapes unchanged even when it is checked.
    @SuppressWarnings("unchecked")
    private static <T extends Throwable> void sneakyThrow0(Throwable t) throws T {
        throw (T) t;
    }
}
| 610
| 31.157895
| 122
|
java
|
EVCache
|
EVCache-master/evcache-core/src/main/java/com/netflix/evcache/metrics/EVCacheMetricsFactory.java
|
package com.netflix.evcache.metrics;
import java.time.Duration;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.StringTokenizer;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import com.netflix.evcache.util.EVCacheConfig;
import com.netflix.spectator.api.BasicTag;
import com.netflix.spectator.api.Counter;
import com.netflix.spectator.api.DistributionSummary;
import com.netflix.spectator.api.Id;
import com.netflix.spectator.api.Registry;
import com.netflix.spectator.api.Spectator;
import com.netflix.spectator.api.Tag;
import com.netflix.spectator.api.Timer;
import com.netflix.spectator.api.histogram.PercentileTimer;
import com.netflix.spectator.ipc.IpcStatus;
import net.spy.memcached.ops.StatusCode;
@SuppressWarnings("deprecation")
@edu.umd.cs.findbugs.annotations.SuppressFBWarnings(value = { "NF_LOCAL_FAST_PROPERTY",
        "PMB_POSSIBLE_MEMORY_BLOAT" }, justification = "Creates only when needed")
/**
 * Process-wide singleton that creates and caches all Spectator meters (gauges,
 * counters, timers, distribution summaries) published by the EVCache client.
 * Meters are cached by name + tag string so repeated lookups are cheap; the
 * write lock guards the create-if-absent paths against duplicate registration.
 */
public final class EVCacheMetricsFactory {

    private final Map<String, Number> monitorMap = new ConcurrentHashMap<String, Number>();
    private final Map<String, Counter> counterMap = new ConcurrentHashMap<String, Counter>();
    private final Map<String, DistributionSummary> distributionSummaryMap = new ConcurrentHashMap<String, DistributionSummary>();
    private final Lock writeLock = (new ReentrantReadWriteLock()).writeLock();
    private final Map<String, Timer> timerMap = new HashMap<String, Timer>();

    private static final EVCacheMetricsFactory INSTANCE = new EVCacheMetricsFactory();

    private EVCacheMetricsFactory() {
    }

    /** @return the process-wide singleton instance. */
    public static EVCacheMetricsFactory getInstance() {
        return INSTANCE;
    }

    /** @return live view of all cached counters, keyed by name + tags. */
    public Map<String, Counter> getAllCounters() {
        return counterMap;
    }

    /** @return live view of all cached timers, keyed by name + tags. */
    public Map<String, Timer> getAllTimers() {
        return timerMap;
    }

    /** @return live view of all cached gauges, keyed by name + tags. */
    public Map<String, Number> getAllMonitor() {
        return monitorMap;
    }

    /** @return live view of all cached distribution summaries, keyed by name + tags. */
    public Map<String, DistributionSummary> getAllDistributionSummaryMap() {
        return distributionSummaryMap;
    }

    /** @return the global Spectator registry all meters are registered with. */
    public Registry getRegistry() {
        return Spectator.globalRegistry();
    }

    public AtomicLong getLongGauge(String name) {
        return getLongGauge(name, null);
    }

    /**
     * Returns (creating on first use) an {@link AtomicLong} registered as a gauge.
     *
     * @param cName the metric name
     * @param tags  optional tags; may be null
     */
    public AtomicLong getLongGauge(String cName, Collection<Tag> tags) {
        final String name = tags != null ? cName + tags.toString() : cName;
        AtomicLong gauge = (AtomicLong) monitorMap.get(name);
        if (gauge == null) {
            writeLock.lock();
            try {
                if (monitorMap.containsKey(name)) {
                    gauge = (AtomicLong) monitorMap.get(name);
                } else {
                    // getId tolerates null tags, so the previously duplicated
                    // if/else branches collapse to a single call.
                    final Id id = getId(cName, tags);
                    gauge = getRegistry().gauge(id, new AtomicLong());
                    monitorMap.put(name, gauge);
                }
            } finally {
                writeLock.unlock();
            }
        }
        return gauge;
    }

    /**
     * Appends the common tags (owner plus any configured via the
     * "evcache.additional.tags" fast property, resolved from system properties
     * or the environment) to the given list.
     */
    private void addCommonTags(List<Tag> tagList) {
        tagList.add(new BasicTag(OWNER, "evcache"));
        final String additionalTags = EVCacheConfig.getInstance().getPropertyRepository().get("evcache.additional.tags", String.class).orElse(null).get();
        if (additionalTags != null && additionalTags.length() > 0) {
            final StringTokenizer st = new StringTokenizer(additionalTags, ",");
            while (st.hasMoreTokens()) {
                final String token = st.nextToken().trim();
                String val = System.getProperty(token);
                if (val == null) val = System.getenv(token);
                if (val != null) tagList.add(new BasicTag(token, val));
            }
        }
    }

    /** Appends the cache-name tags for the given EVCache app. */
    public void addAppNameTags(List<Tag> tagList, String appName) {
        tagList.add(new BasicTag(EVCacheMetricsFactory.CACHE, appName));
        tagList.add(new BasicTag(EVCacheMetricsFactory.ID, appName));
    }

    /**
     * Builds a Spectator {@link Id} from the given name plus the supplied tags
     * (may be null) and the common tags.
     */
    public Id getId(String name, Collection<Tag> tags) {
        final List<Tag> tagList = new ArrayList<Tag>();
        if (tags != null) tagList.addAll(tags);
        addCommonTags(tagList);
        return getRegistry().createId(name, tagList);
    }

    /**
     * Returns (creating on first use) a counter for the given name and optional tags.
     *
     * @param cName the metric name
     * @param tags  optional tags; may be null
     */
    public Counter getCounter(String cName, Collection<Tag> tags) {
        final String name = tags != null ? cName + tags.toString() : cName;
        Counter counter = counterMap.get(name);
        if (counter == null) {
            writeLock.lock();
            try {
                if (counterMap.containsKey(name)) {
                    counter = counterMap.get(name);
                } else {
                    // FIX: the previous implementation unconditionally called
                    // tags.size() here, throwing NullPointerException the first
                    // time getCounter(name) was used with null tags.
                    final List<Tag> tagList = (tags != null) ? new ArrayList<Tag>(tags) : null;
                    final Id id = getId(cName, tagList);
                    counter = getRegistry().counter(id);
                    counterMap.put(name, counter);
                }
            } finally {
                writeLock.unlock();
            }
        }
        return counter;
    }

    public Counter getCounter(String name) {
        return getCounter(name, null);
    }

    /** Increments the (untagged) counter with the given name. */
    public void increment(String name) {
        final Counter counter = getCounter(name);
        counter.increment();
    }

    /** Increments the counter with the given name and tags. */
    public void increment(String cName, Collection<Tag> tags) {
        final Counter counter = getCounter(cName, tags);
        counter.increment();
    }

    /** @deprecated use {@link #getPercentileTimer(String, Collection, Duration)} with an explicit max. */
    @Deprecated
    public Timer getPercentileTimer(String metric, Collection<Tag> tags) {
        return getPercentileTimer(metric, tags, Duration.ofMillis(100));
    }

    /**
     * Returns (creating on first use) a percentile timer recording values in
     * [100µs, max].
     *
     * @param metric the metric name
     * @param tags   optional tags; may be null
     * @param max    upper bound of the recorded range
     */
    public Timer getPercentileTimer(String metric, Collection<Tag> tags, Duration max) {
        final String name = tags != null ? metric + tags.toString() : metric;
        final Timer duration = timerMap.get(name);
        if (duration != null) return duration;
        writeLock.lock();
        try {
            if (timerMap.containsKey(name))
                return timerMap.get(name);
            else {
                Id id = getId(metric, tags);
                final Timer _duration = PercentileTimer.builder(getRegistry()).withId(id).withRange(Duration.ofNanos(100000), max).build();
                timerMap.put(name, _duration);
                return _duration;
            }
        } finally {
            writeLock.unlock();
        }
    }

    /**
     * Returns (creating on first use) a distribution summary for the given name
     * and optional tags, or null if no global registry is available.
     * NOTE: this path is intentionally not guarded by the write lock; concurrent
     * first-time callers may both create a summary (last put wins), which the
     * registry tolerates since both resolve to the same Id.
     */
    public DistributionSummary getDistributionSummary(String name, Collection<Tag> tags) {
        final String metricName = (tags != null) ? name + tags.toString() : name;
        final DistributionSummary _ds = distributionSummaryMap.get(metricName);
        if (_ds != null) return _ds;
        final Registry registry = Spectator.globalRegistry();
        if (registry != null) {
            Id id = getId(name, tags);
            final DistributionSummary ds = registry.distributionSummary(id);
            distributionSummaryMap.put(metricName, ds);
            return ds;
        }
        return null;
    }

    /** Maps a spy-memcached {@link StatusCode} to the tag value used in IPC metrics. */
    public String getStatusCode(StatusCode sc) {
        switch (sc) {
            case CANCELLED:
                return IpcStatus.cancelled.name();
            case TIMEDOUT:
                return IpcStatus.timeout.name();
            case INTERRUPTED:
                return EVCacheMetricsFactory.INTERRUPTED;
            case SUCCESS:
                return IpcStatus.success.name();
            case ERR_NOT_FOUND:
                return "not_found";
            case ERR_EXISTS:
                return "exists";
            case ERR_2BIG:
                return "too_big";
            case ERR_INVAL:
                return "invalid";
            case ERR_NOT_STORED:
                return "not_stored";
            case ERR_DELTA_BADVAL:
                return "bad_value";
            case ERR_NOT_MY_VBUCKET:
                return "not_my_vbucket";
            case ERR_UNKNOWN_COMMAND:
                return "unknown_command";
            case ERR_NO_MEM:
                return "no_mem";
            case ERR_NOT_SUPPORTED:
                return "not_supported";
            case ERR_INTERNAL:
                return "error_internal";
            case ERR_BUSY:
                return "error_busy";
            case ERR_TEMP_FAIL:
                return "temp_failure";
            case ERR_CLIENT:
                return "error_client";
            default:
                return sc.name().toLowerCase();
        }
    }

    /**
     * External Metric Names
     */
    public static final String OVERALL_CALL = "evcache.client.call";
    public static final String OVERALL_KEYS_SIZE = "evcache.client.call.keys.size";
    public static final String COMPRESSION_RATIO = "evcache.client.compression.ratio";

    /**
     * External IPC Metric Names
     */
    public static final String IPC_CALL = "ipc.client.call";
    public static final String IPC_SIZE_INBOUND = "ipc.client.call.size.inbound";
    public static final String IPC_SIZE_OUTBOUND = "ipc.client.call.size.outbound";
    public static final String OWNER = "owner";
    public static final String ID = "id";

    /**
     * Internal Metric Names
     */
    public static final String CONFIG = "internal.evc.client.config";
    public static final String DATA_SIZE = "internal.evc.client.datasize";
    public static final String IN_MEMORY = "internal.evc.client.inmemorycache";
    public static final String FAST_FAIL = "internal.evc.client.fastfail";
    public static final String INTERNAL_OPERATION = "internal.evc.client.operation";
    public static final String INTERNAL_PAUSE = "internal.evc.client.pause";
    public static final String INTERNAL_LATCH = "internal.evc.client.latch";
    public static final String INTERNAL_LATCH_VERIFY = "internal.evc.client.latch.verify";
    public static final String INTERNAL_FAIL = "internal.evc.client.fail";
    public static final String INTERNAL_EVENT_FAIL = "internal.evc.client.event.fail";
    public static final String INTERNAL_RECONNECT = "internal.evc.client.reconnect";
    public static final String INTERNAL_EXECUTOR = "internal.evc.client.executor";
    public static final String INTERNAL_EXECUTOR_SCHEDULED = "internal.evc.client.scheduledExecutor";
    public static final String INTERNAL_POOL_INIT_ERROR = "internal.evc.client.init.error";
    public static final String INTERNAL_NUM_CHUNK_SIZE = "internal.evc.client.chunking.numOfChunks";
    public static final String INTERNAL_CHUNK_DATA_SIZE = "internal.evc.client.chunking.dataSize";
    public static final String INTERNAL_ADD_CALL_FIXUP = "internal.evc.client.addCall.fixUp";
    public static final String INTERNAL_POOL_SG_CONFIG = "internal.evc.client.pool.asg.config";
    public static final String INTERNAL_POOL_CONFIG = "internal.evc.client.pool.config";
    public static final String INTERNAL_POOL_REFRESH = "internal.evc.client.pool.refresh";
    public static final String INTERNAL_BOOTSTRAP_EUREKA = "internal.evc.client.pool.bootstrap.eureka";
    public static final String INTERNAL_STATS = "internal.evc.client.stats";
    public static final String INTERNAL_TTL = "internal.evc.item.ttl";

    /*
     * Internal pool config values
     */
    public static final String POOL_READ_INSTANCES = "readInstances";
    public static final String POOL_WRITE_INSTANCES = "writeInstances";
    public static final String POOL_RECONCILE = "reconcile";
    public static final String POOL_CHANGED = "asgChanged";
    public static final String POOL_SERVERGROUP_STATUS = "asgStatus";
    public static final String POOL_READ_Q_SIZE = "readQueue";
    public static final String POOL_WRITE_Q_SIZE = "writeQueue";
    public static final String POOL_REFRESH_QUEUE_FULL = "refreshOnQueueFull";
    public static final String POOL_REFRESH_ASYNC = "refreshAsync";
    public static final String POOL_OPERATIONS = "operations";

    /**
     * Metric Tags Names
     */
    public static final String CACHE = "ipc.server.app";
    public static final String SERVERGROUP = "ipc.server.asg";
    public static final String ZONE = "ipc.server.zone";
    public static final String ATTEMPT = "ipc.attempt";
    public static final String IPC_RESULT = "ipc.result";
    public static final String IPC_STATUS = "ipc.status";
    //public static final String FAIL_REASON = "ipc.error.group";

    /*
     * Metric Tags moved to IPC format
     */
    public static final String CALL_TAG = "evc.call";
    public static final String CALL_TYPE_TAG = "evc.call.type";
    public static final String CACHE_HIT = "evc.cache.hit";
    public static final String CONNECTION_ID = "evc.connection.id";
    public static final String TTL = "evc.ttl";
    public static final String PAUSE_REASON = "evc.pause.reason";
    public static final String LATCH = "evc.latch";
    public static final String FAIL_COUNT = "evc.fail.count";
    public static final String COMPLETE_COUNT = "evc.complete.count";
    public static final String RECONNECT_COUNT = "evc.reconnect.count";
    public static final String FETCH_AFTER_PAUSE = "evc.fetch.after.pause";
    public static final String FAILED_SERVERGROUP = "evc.failed.asg";
    public static final String CONFIG_NAME = "evc.config";
    public static final String STAT_NAME = "evc.stat.name";
    public static final String FAILED_HOST = "evc.failed.host";
    public static final String OPERATION = "evc.operation";
    public static final String OPERATION_STATUS = "evc.operation.status";
    public static final String NUMBER_OF_ATTEMPTS = "evc.attempts";
    public static final String NUMBER_OF_KEYS = "evc.keys.count";
    public static final String METRIC = "evc.metric";
    public static final String FAILURE_REASON = "evc.fail.reason";
    public static final String PREFIX = "evc.prefix";
    public static final String EVENT = "evc.event";
    public static final String EVENT_STAGE = "evc.event.stage";
    public static final String CONNECTION = "evc.connection.type";
    public static final String TLS = "evc.connection.tls";
    public static final String COMPRESSION_TYPE = "evc.compression.type";

    /**
     * Metric Tags Values
     */
    public static final String SIZE = "size";
    public static final String PORT = "port";
    public static final String CONNECT = "connect";
    public static final String DISCONNECT = "disconnect";
    public static final String SUCCESS = "success";
    public static final String FAIL = "failure";
    public static final String TIMEOUT = "timeout";
    public static final String CHECKED_OP_TIMEOUT = "CheckedOperationTimeout";
    public static final String CANCELLED = "cancelled";
    public static final String THROTTLED = "throttled";
    public static final String ERROR = "error";
    public static final String READ = "read";
    public static final String WRITE = "write";
    public static final String YES = "yes";
    public static final String NO = "no";
    public static final String PARTIAL = "partial";
    public static final String UNKNOWN = "unknown";
    public static final String INTERRUPTED = "interrupted";
    public static final String SCHEDULE = "Scheduling";
    public static final String GC = "gc";
    public static final String NULL_CLIENT = "nullClient";
    public static final String INVALID_TTL = "invalidTTL";
    public static final String NULL_ZONE = "nullZone";
    public static final String NULL_SERVERGROUP = "nullASG";
    public static final String RECONNECT = "reconnect";
    public static final String CALLBACK = "callback";
    public static final String VERIFY = "verify";
    public static final String READ_QUEUE_FULL = "readQueueFull";
    public static final String INACTIVE_NODE = "inactiveNode";
    public static final String IGNORE_INACTIVE_NODES = "ignoreInactiveNode";
    public static final String INCORRECT_CHUNKS = "incorrectNumOfChunks";
    public static final String INVALID_CHUNK_SIZE = "invalidChunkSize";
    public static final String CHECK_SUM_ERROR = "checkSumError";
    public static final String KEY_HASH_COLLISION = "KeyHashCollision";
    public static final String NUM_CHUNK_SIZE = "numOfChunks";
    public static final String CHUNK_DATA_SIZE = "dataSize";
    public static final String NOT_AVAILABLE = "notAvailable";
    public static final String NOT_ACTIVE = "notActive";
    public static final String WRONG_KEY_RETURNED = "wrongKeyReturned";
    public static final String INITIAL = "initial";
    public static final String SECOND = "second";
    public static final String THIRD_UP = "third_up";

    /**
     * Metric Tag Value for Operations
     */
    public static final String BULK_OPERATION = "BULK";
    public static final String GET_OPERATION = "GET";
    public static final String GET_AND_TOUCH_OPERATION = "GET_AND_TOUCH";
    public static final String DELETE_OPERATION = "DELETE";
    public static final String TOUCH_OPERATION = "TOUCH";
    public static final String AOA_OPERATION = "APPEND_OR_ADD";
    public static final String AOA_OPERATION_APPEND = "APPEND_OR_ADD-APPEND";
    public static final String AOA_OPERATION_ADD = "APPEND_OR_ADD-ADD";
    public static final String AOA_OPERATION_REAPPEND = "APPEND_OR_ADD-RETRY-APPEND";
    public static final String SET_OPERATION = "SET";
    public static final String ADD_OPERATION = "ADD";
    public static final String REPLACE_OPERATION = "REPLACE";
    public static final String META_GET_OPERATION = "M_GET";
    public static final String META_SET_OPERATION = "M_SET";
    public static final String META_DEBUG_OPERATION = "M_DEBUG";
}
| 20,630
| 47.888626
| 154
|
java
|
EVCache
|
EVCache-master/evcache-core/src/main/java/com/netflix/evcache/pool/EVCacheNodeLocator.java
|
package com.netflix.evcache.pool;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;
import com.netflix.archaius.api.Property;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.netflix.evcache.util.EVCacheConfig;
import net.spy.memcached.DefaultHashAlgorithm;
import net.spy.memcached.EVCacheMemcachedNodeROImpl;
import net.spy.memcached.HashAlgorithm;
import net.spy.memcached.MemcachedNode;
import net.spy.memcached.NodeLocator;
import net.spy.memcached.util.KetamaNodeLocatorConfiguration;
/**
 * Ketama consistent-hash node locator for EVCache. Builds a TreeMap-based hash
 * ring from the configured nodes and resolves keys to nodes via ceiling lookup
 * with wrap-around. Optionally hashes only the key prefix before a configurable
 * delimiter ("hash.on.partial.key" / "hash.delimiter" fast properties), so
 * related keys co-locate on the same node.
 */
public class EVCacheNodeLocator implements NodeLocator {
private static final Logger log = LoggerFactory.getLogger(EVCacheNodeLocator.class);
// The ketama ring: hash point -> node. Replaced wholesale on updateLocator.
private TreeMap<Long, MemcachedNode> ketamaNodes;
protected final EVCacheClient client;
// When true, only the key text before hashDelimiter is hashed (see getPrimary).
private final Property<Boolean> partialStringHash;
private final Property<String> hashDelimiter;
private final Collection<MemcachedNode> allNodes;
private final HashAlgorithm hashingAlgorithm;
private final KetamaNodeLocatorConfiguration config;
/**
 * Create a new KetamaNodeLocator using specified nodes and the specifed
 * hash algorithm and configuration.
 *
 * @param client
 * The EVCacheClient whose app/server-group scoped fast properties
 * drive partial-key hashing
 * @param nodes
 * The List of nodes to use in the Ketama consistent hash
 * continuum
 * @param alg
 * The hash algorithm to use when choosing a node in the Ketama
 * consistent hash continuum
 * @param conf
 * locator configuration supplying repetitions and per-node ring keys
 */
public EVCacheNodeLocator(EVCacheClient client, List<MemcachedNode> nodes, HashAlgorithm alg, KetamaNodeLocatorConfiguration conf) {
super();
this.allNodes = nodes;
this.hashingAlgorithm = alg;
this.config = conf;
this.client = client;
this.partialStringHash = EVCacheConfig.getInstance().getPropertyRepository().get(client.getAppName() + "." + client.getServerGroupName() + ".hash.on.partial.key", Boolean.class)
.orElseGet(client.getAppName()+ ".hash.on.partial.key").orElse(false);
this.hashDelimiter = EVCacheConfig.getInstance().getPropertyRepository().get(client.getAppName() + "." + client.getServerGroupName() + ".hash.delimiter", String.class)
.orElseGet(client.getAppName() + ".hash.delimiter").orElse(":");
setKetamaNodes(nodes);
}
// Private copy constructor used only by getReadonlyCopy(): reuses a prebuilt
// ring instead of recomputing it.
private EVCacheNodeLocator(EVCacheClient client, TreeMap<Long, MemcachedNode> smn, Collection<MemcachedNode> an, HashAlgorithm alg, KetamaNodeLocatorConfiguration conf) {
super();
this.ketamaNodes = smn;
this.allNodes = an;
this.hashingAlgorithm = alg;
this.config = conf;
this.client = client;
this.partialStringHash = EVCacheConfig.getInstance().getPropertyRepository().get(client.getAppName() + "." + client.getServerGroupName() + ".hash.on.partial.key", Boolean.class)
.orElseGet(client.getAppName()+ ".hash.on.partial.key").orElse(false);
this.hashDelimiter = EVCacheConfig.getInstance().getPropertyRepository().get(client.getAppName() + "." + client.getServerGroupName() + ".hash.delimiter", String.class)
.orElseGet(client.getAppName() + ".hash.delimiter").orElse(":");
}
/*
 * @see net.spy.memcached.NodeLocator#getAll
 */
public Collection<MemcachedNode> getAll() {
return allNodes;
}
/*
 * @see net.spy.memcached.NodeLocator#getPrimary
 *
 * Optionally truncates the key at the first delimiter occurrence (only when
 * the delimiter appears past index 0), hashes it, and picks the first ring
 * point at or after the hash, wrapping to the first entry past the ring end.
 */
public MemcachedNode getPrimary(String k) {
if (partialStringHash.get()) {
final int index = k.indexOf(hashDelimiter.get());
if (index > 0) {
k = k.substring(0, index);
}
}
final long hash = hashingAlgorithm.hash(k);
Map.Entry<Long, MemcachedNode> entry = ketamaNodes.ceilingEntry(hash);
if (entry == null) {
entry = ketamaNodes.firstEntry();
}
return entry.getValue();
}
/*
 * @return Returns the max key in the hashing distribution
 */
public long getMaxKey() {
return getKetamaNodes().lastKey().longValue();
}
// Resolves a precomputed hash directly against the ring (ceiling lookup with
// wrap-around), bypassing key hashing. Debug logging times the lookup in µs.
public MemcachedNode getNodeForKey(long _hash) {
long start = (log.isDebugEnabled()) ? System.nanoTime() : 0;
try {
Long hash = Long.valueOf(_hash);
hash = ketamaNodes.ceilingKey(hash);
if (hash == null) {
hash = ketamaNodes.firstKey();
}
return ketamaNodes.get(hash);
} finally {
if (log.isDebugEnabled()) {
final long end = System.nanoTime();
log.debug("getNodeForKey : \t" + (end - start) / 1000);
}
}
}
// NOTE(review): returns the ring's nodes in random order, ignoring the key —
// this differs from spy-memcached's ketama sequence; presumably intentional
// for fallback distribution. The key parameter is unused.
public Iterator<MemcachedNode> getSequence(String k) {
final List<MemcachedNode> allKetamaNodes = new ArrayList<MemcachedNode>(getKetamaNodes().values());
Collections.shuffle(allKetamaNodes);
return allKetamaNodes.iterator();
}
// Builds a read-only snapshot: every node is wrapped in a read-only facade so
// callers (e.g. admin views) cannot mutate connection state.
public NodeLocator getReadonlyCopy() {
final TreeMap<Long, MemcachedNode> ketamaNaodes = new TreeMap<Long, MemcachedNode>(getKetamaNodes());
final Collection<MemcachedNode> aNodes = new ArrayList<MemcachedNode>(allNodes.size());
// Rewrite the values a copy of the map.
for (Map.Entry<Long, MemcachedNode> me : ketamaNaodes.entrySet()) {
me.setValue(new EVCacheMemcachedNodeROImpl(me.getValue()));
}
// Copy the allNodes collection.
for (MemcachedNode n : allNodes) {
aNodes.add(new EVCacheMemcachedNodeROImpl(n));
}
return new EVCacheNodeLocator(client, ketamaNaodes, aNodes, hashingAlgorithm, config);
}
/**
 * @return the ketamaNodes
 */
protected TreeMap<Long, MemcachedNode> getKetamaNodes() {
return ketamaNodes;
}
/**
 * @return the readonly view of ketamaNodes. This is mailnly for admin
 * purposes
 */
public Map<Long, MemcachedNode> getKetamaNodeMap() {
return Collections.<Long, MemcachedNode> unmodifiableMap(ketamaNodes);
}
/**
 * Setup the KetamaNodeLocator with the list of nodes it should use.
 *
 * For KETAMA_HASH each node contributes getNodeRepetitions() ring points:
 * numReps/4 MD5 digests are computed and each 16-byte digest is split into
 * four little-endian 4-byte hash points. For any other algorithm, numReps
 * points are produced by hashing the per-repetition node key directly.
 *
 * @param nodes
 * a List of MemcachedNodes for this KetamaNodeLocator to use in
 * its continuum
 */
protected final void setKetamaNodes(List<MemcachedNode> nodes) {
TreeMap<Long, MemcachedNode> newNodeMap = new TreeMap<Long, MemcachedNode>();
final int numReps = config.getNodeRepetitions();
for (MemcachedNode node : nodes) {
// Ketama does some special work with md5 where it reuses chunks.
if (hashingAlgorithm == DefaultHashAlgorithm.KETAMA_HASH) {
for (int i = 0; i < numReps / 4; i++) {
final String hashString = config.getKeyForNode(node, i);
byte[] digest = DefaultHashAlgorithm.computeMd5(hashString);
if (log.isDebugEnabled()) log.debug("digest : " + digest);
for (int h = 0; h < 4; h++) {
// Assemble an unsigned 32-bit little-endian value from bytes [4h, 4h+3].
long k = ((long) (digest[3 + h * 4] & 0xFF) << 24)
| ((long) (digest[2 + h * 4] & 0xFF) << 16)
| ((long) (digest[1 + h * 4] & 0xFF) << 8)
| (digest[h * 4] & 0xFF);
newNodeMap.put(Long.valueOf(k), node);
if (log.isDebugEnabled()) log.debug("Key : " + hashString + " ; hash : " + k + "; node " + node );
}
}
} else {
for (int i = 0; i < numReps; i++) {
final Long hashL = Long.valueOf(hashingAlgorithm.hash(config.getKeyForNode(node, i)));
newNodeMap.put(hashL, node);
}
}
}
if (log.isDebugEnabled()) log.debug("NewNodeMapSize : " + newNodeMap.size() + "; MapSize : " + (numReps * nodes.size()));
if (log.isTraceEnabled()) {
for(Long key : newNodeMap.keySet()) {
log.trace("Hash : " + key + "; Node : " + newNodeMap.get(key));
}
}
ketamaNodes = newNodeMap;
}
// Rebuilds the ring for a new node list (pool refresh). The field swap is a
// single reference assignment, so in-flight lookups see either the old or new ring.
@Override
public void updateLocator(List<MemcachedNode> nodes) {
setKetamaNodes(nodes);
}
@Override
public String toString() {
return "EVCacheNodeLocator [ketamaNodes=" + ketamaNodes + ", EVCacheClient=" + client + ", partialStringHash=" + partialStringHash
+ ", hashDelimiter=" + hashDelimiter + ", allNodes=" + allNodes + ", hashingAlgorithm=" + hashingAlgorithm + ", config=" + config + "]";
}
}
| 8,737
| 38.36036
| 185
|
java
|
EVCache
|
EVCache-master/evcache-core/src/main/java/com/netflix/evcache/pool/EVCacheNodeList.java
|
package com.netflix.evcache.pool;
import java.io.IOException;
import java.net.UnknownServiceException;
import java.util.Map;
/** Strategy for discovering the memcached instances backing an EVCache app. */
public interface EVCacheNodeList {
/**
 * Discover memcached instances suitable for our use from the Discovery
 * Service.
 *
 * @param appName The EVCache app for which we need instances
 * @return mapping of each discovered server group to its configuration
 *         (server group plus its instance socket addresses)
 * @throws UnknownServiceException
 * if no suitable instances can be found
 * @throws IOException
 * if discovery fails for any other I/O reason
 * @throws IllegalStateException
 * if an error occurred in the Discovery service
 *
 * TODO : Add a fallback to get the list say from PersistedProperties
 */
public abstract Map<ServerGroup, EVCacheServerGroupConfig> discoverInstances(String appName) throws IOException;
}
| 752
| 30.375
| 116
|
java
|
EVCache
|
EVCache-master/evcache-core/src/main/java/com/netflix/evcache/pool/EVCacheScheduledExecutorMBean.java
|
package com.netflix.evcache.pool;
/**
 * JMX management view of the EVCache scheduled executor, mirroring the standard
 * {@link java.util.concurrent.ThreadPoolExecutor} lifecycle and sizing stats.
 */
public interface EVCacheScheduledExecutorMBean {
/** @return true once shutdown has been initiated. */
boolean isShutdown();
/** @return true while shutting down but not yet terminated. */
boolean isTerminating();
/** @return true once all tasks have completed after shutdown. */
boolean isTerminated();
/** @return the configured core pool size. */
int getCorePoolSize();
/** @return the configured maximum pool size. */
int getMaximumPoolSize();
/** @return the number of tasks currently queued. */
int getQueueSize();
/** @return the current number of threads in the pool. */
int getPoolSize();
/** @return the number of threads actively executing tasks. */
int getActiveCount();
/** @return the largest pool size ever reached. */
int getLargestPoolSize();
/** @return the approximate total number of tasks ever scheduled. */
long getTaskCount();
/** @return the approximate number of tasks that have completed. */
long getCompletedTaskCount();
}
| 399
| 13.814815
| 48
|
java
|
EVCache
|
EVCache-master/evcache-core/src/main/java/com/netflix/evcache/pool/EVCacheClientPoolMBean.java
|
package com.netflix.evcache.pool;
import java.util.Map;
/**
 * JMX management view of an EVCache client pool: per-zone instance/read/write
 * breakdowns, fallback routing state, and an operation to force a pool refresh.
 */
public interface EVCacheClientPoolMBean {
/** @return total number of memcached instances known to the pool. */
int getInstanceCount();
/** @return zone -> description of instances in that zone. */
Map<String, String> getInstancesByZone();
/** @return zone -> number of instances in that zone. */
Map<String, Integer> getInstanceCountByZone();
/** @return zone -> read-path instance description. */
Map<String, String> getReadZones();
/** @return zone -> number of read-path instances. */
Map<String, Integer> getReadInstanceCountByZone();
/** @return zone -> write-path instance description. */
Map<String, String> getWriteZones();
/** @return zone -> number of write-path instances. */
Map<String, Integer> getWriteInstanceCountByZone();
/** @return the server group currently used as cross-zone fallback. */
String getFallbackServerGroup();
/** @return zone -> server group serving reads for that zone. */
Map<String, String> getReadServerGroupByZone();
/** @return description of the local server-group round-robin iterator. */
String getLocalServerGroupCircularIterator();
/** Forces an immediate refresh of the pool's topology. */
void refreshPool();
/** @return a human-readable dump of the pool state. */
String getPoolDetails();
/** @return description of the write-client round-robin iterator. */
String getEVCacheWriteClientsCircularIterator();
}
| 681
| 19.666667
| 55
|
java
|
EVCache
|
EVCache-master/evcache-core/src/main/java/com/netflix/evcache/pool/EVCacheServerGroupConfig.java
|
package com.netflix.evcache.pool;
import java.net.InetSocketAddress;
import java.util.Set;
/**
 * Immutable pairing of a {@link ServerGroup} with the socket addresses of the
 * memcached instances discovered for it.
 */
public class EVCacheServerGroupConfig {

    private final ServerGroup serverGroup;
    private final Set<InetSocketAddress> inetSocketAddress;

    /**
     * @param serverGroup       the server group these addresses belong to
     * @param inetSocketAddress the memcached endpoints discovered for the group
     */
    public EVCacheServerGroupConfig(ServerGroup serverGroup, Set<InetSocketAddress> inetSocketAddress) {
        this.serverGroup = serverGroup;
        this.inetSocketAddress = inetSocketAddress;
    }

    /** @return the server group. */
    public ServerGroup getServerGroup() {
        return serverGroup;
    }

    /** @return the memcached endpoints for this group. */
    public Set<InetSocketAddress> getInetSocketAddress() {
        return inetSocketAddress;
    }

    @Override
    public String toString() {
        return "EVCacheInstanceConfig [InetSocketAddress=" + inetSocketAddress + "]";
    }
}
| 775
| 24.866667
| 104
|
java
|
EVCache
|
EVCache-master/evcache-core/src/main/java/com/netflix/evcache/pool/EVCacheKetamaNodeLocatorConfiguration.java
|
package com.netflix.evcache.pool;
import java.net.InetSocketAddress;
import java.net.SocketAddress;
import java.util.HashMap;
import java.util.Map;
import com.netflix.archaius.api.Property;
import com.netflix.evcache.util.EVCacheConfig;
import net.spy.memcached.MemcachedNode;
import net.spy.memcached.util.DefaultKetamaNodeLocatorConfiguration;
/**
 * Ketama locator configuration that makes the per-node ring-point count
 * ("bucket.size" fast property) tunable at runtime and caches the string form
 * of each node's socket address used to seed the hash ring.
 */
public class EVCacheKetamaNodeLocatorConfiguration extends DefaultKetamaNodeLocatorConfiguration {

protected final EVCacheClient client;
// Ring points per node; falls back to the spy-memcached default when unset.
protected final Property<Integer> bucketSize;
// Cache of node -> ring-key prefix, so the address string is built once per node.
protected final Map<MemcachedNode, String> socketAddresses = new HashMap<MemcachedNode, String>();

public EVCacheKetamaNodeLocatorConfiguration(EVCacheClient client) {
this.client = client;
this.bucketSize = EVCacheConfig.getInstance().getPropertyRepository().get(client.getAppName() + "." + client.getServerGroupName() + ".bucket.size", Integer.class)
.orElseGet(client.getAppName()+ ".bucket.size").orElse(super.getNodeRepetitions());
}
/**
 * Returns the number of discrete hashes that should be defined for each
 * node in the continuum.
 *
 * @return the configured bucket size (defaults to NUM_REPS repetitions).
 */
public int getNodeRepetitions() {
return bucketSize.get().intValue();
}
/**
 * Returns the socket address of a given MemcachedNode.
 *
 * @param node - The MemcachedNode which we're interested in
 * @return The socket address of the given node format is of the following
 * For ec2 classic instances - "publicHostname/privateIp:port" (ex - ec2-174-129-159-31.compute-1.amazonaws.com/10.125.47.114:11211)
 * For ec2 vpc instances - "privateIp/privateIp:port" (ex - 10.125.47.114/10.125.47.114:11211)
 * privateIp is also known as local ip
 */
@Override
public String getKeyForNode(MemcachedNode node, int repetition) {
String result = socketAddresses.get(node);
if(result == null) {
final SocketAddress socketAddress = node.getSocketAddress();
if(socketAddress instanceof InetSocketAddress) {
final InetSocketAddress isa = (InetSocketAddress)socketAddress;
// NOTE(review): the port is hard-coded to 11211 regardless of the node's actual
// port — presumably to keep ring placement stable across port changes; confirm
// before altering, as changing this string remaps the entire hash ring.
result = isa.getHostName() + '/' + isa.getAddress().getHostAddress() + ":11211";
} else {
result=String.valueOf(socketAddress);
if (result.startsWith("/")) {
result = result.substring(1);
}
}
socketAddresses.put(node, result);
}
// Each repetition gets a distinct ring key by suffixing the repetition index.
return result + "-" + repetition;
}
@Override
public String toString() {
return "EVCacheKetamaNodeLocatorConfiguration [EVCacheClient=" + client + ", BucketSize=" + getNodeRepetitions() + "]";
}
}
| 2,757
| 38.971014
| 170
|
java
|
EVCache
|
EVCache-master/evcache-core/src/main/java/com/netflix/evcache/pool/EVCacheExecutorMBean.java
|
package com.netflix.evcache.pool;
/**
 * JMX management view of the EVCache executor's thread pool.
 * NOTE(review): method-for-method identical to EVCacheScheduledExecutorMBean —
 * consider extracting a shared parent interface.
 */
public interface EVCacheExecutorMBean {
/** @return true once shutdown has been initiated. */
boolean isShutdown();
/** @return true while shutting down but not yet terminated. */
boolean isTerminating();
/** @return true once all tasks have completed after shutdown. */
boolean isTerminated();
/** @return the configured core pool size. */
int getCorePoolSize();
/** @return the configured maximum pool size. */
int getMaximumPoolSize();
/** @return the number of tasks currently queued. */
int getQueueSize();
/** @return the current number of threads in the pool. */
int getPoolSize();
/** @return the number of threads actively executing tasks. */
int getActiveCount();
/** @return the largest pool size ever reached. */
int getLargestPoolSize();
/** @return the approximate total number of tasks ever submitted. */
long getTaskCount();
/** @return the approximate number of tasks that have completed. */
long getCompletedTaskCount();
}
| 390
| 13.481481
| 39
|
java
|
EVCache
|
EVCache-master/evcache-core/src/main/java/com/netflix/evcache/pool/EVCacheValue.java
|
package com.netflix.evcache.pool;
import java.io.Serializable;
import java.util.Arrays;
public class EVCacheValue implements Serializable {
/**
*
*/
private static final long serialVersionUID = 3182483105524224448L;
private final String key;
private final byte[] value;
private final int flags;
private final long ttl;
private final long createTime;
public EVCacheValue(String key, byte[] value, int flags, long ttl, long createTime) {
super();
this.key = key;
this.value = value;
this.flags = flags;
this.ttl = ttl;
this.createTime = createTime;
}
public String getKey() {
return key;
}
public byte[] getValue() {
return value;
}
public int getFlags() {
return flags;
}
public long getTTL() {
return ttl;
}
public long getCreateTimeUTC() {
return createTime;
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + (int) (createTime ^ (createTime >>> 32));
result = prime * result + ((key == null) ? 0 : key.hashCode());
result = prime * result + (int) (ttl ^ (ttl >>> 32));
result = prime * result + (int) (flags);
result = prime * result + Arrays.hashCode(value);
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (getClass() != obj.getClass())
return false;
EVCacheValue other = (EVCacheValue) obj;
if (createTime != other.createTime)
return false;
if (key == null) {
if (other.key != null)
return false;
} else if (!key.equals(other.key))
return false;
if (flags != other.flags)
return false;
if (ttl != other.ttl)
return false;
if (!Arrays.equals(value, other.value))
return false;
return true;
}
@Override
public String toString() {
return "EVCacheValue [key=" + key + ", value=" + Arrays.toString(value) + ", flags=" + flags + ", ttl=" + ttl + ", createTime="
+ createTime + "]";
}
}
| 2,336
| 24.966667
| 135
|
java
|
EVCache
|
EVCache-master/evcache-core/src/main/java/com/netflix/evcache/pool/EVCacheClientUtil.java
|
package com.netflix.evcache.pool;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import com.netflix.evcache.EVCacheKey;
import net.spy.memcached.transcoders.Transcoder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.netflix.evcache.EVCacheLatch;
import com.netflix.evcache.EVCacheLatch.Policy;
import com.netflix.evcache.operation.EVCacheLatchImpl;
import net.spy.memcached.CachedData;
/**
 * Write-path helper for the EVCache "add" operation: submits the add to every
 * replica {@link EVCacheClient} and, when requested, repairs replicas that
 * already hold a (possibly divergent) value.
 */
public class EVCacheClientUtil {
    private static final Logger log = LoggerFactory.getLogger(EVCacheClientUtil.class);
    // Pass-through transcoder so the repair read in fixup() never tries to deserialize chunked data.
    private final ChunkTranscoder ct = new ChunkTranscoder();
    private final String _appName;
    // Milliseconds to wait on the repair latch in fixup().
    private final long _operationTimeout;
    public EVCacheClientUtil(String appName, long operationTimeout) {
        this._appName = appName;
        this._operationTimeout = operationTimeout;
    }
    //TODO: Remove this todo. This method has been made hashing agnostic.
    /**
     * TODO : once metaget is available we need to get the remaining ttl from an existing entry and use it
     *
     * Submits an "add" of {@code cd} to every client. For clients that hash their
     * keys the payload is first wrapped in an {@link EVCacheValue} carrying the
     * canonical key (built lazily once and reused). When {@code fixMissing} is
     * true each add is awaited synchronously: a failure on the very first client
     * short-circuits (the key is assumed present everywhere), while a failure
     * after an earlier success triggers {@link #fixup} to copy the existing value
     * from the failing replica to all replicas.
     *
     * @param latchCount number of pending operations the returned latch tracks
     * @return a latch tracking the submitted operations, or {@code null} when {@code cd} is null
     */
    public EVCacheLatch add(EVCacheKey evcKey, final CachedData cd, Transcoder evcacheValueTranscoder, int timeToLive, Policy policy, final EVCacheClient[] clients, int latchCount, boolean fixMissing) throws Exception {
        if (cd == null) return null;
        final EVCacheLatchImpl latch = new EVCacheLatchImpl(policy, latchCount, _appName);
        CachedData cdHashed = null; // lazily-built wrapped payload, shared by all hashing clients
        Boolean firstStatus = null; // outcome of the first awaited add (only tracked when fixMissing)
        for (EVCacheClient client : clients) {
            CachedData cd1;
            if (evcKey.getHashKey(client.isDuetClient(), client.getHashingAlgorithm(), client.shouldEncodeHashKey(), client.getMaxDigestBytes(), client.getMaxHashLength(), client.getBaseEncoder()) != null) {
                // Hashed key: wrap the payload so the canonical key survives hashing.
                if(cdHashed == null) {
                    final EVCacheValue val = new EVCacheValue(evcKey.getCanonicalKey(client.isDuetClient()), cd.getData(), cd.getFlags(), timeToLive, System.currentTimeMillis());
                    cdHashed = evcacheValueTranscoder.encode(val);
                }
                cd1 = cdHashed;
            } else {
                cd1 = cd;
            }
            String key = evcKey.getDerivedKey(client.isDuetClient(), client.getHashingAlgorithm(), client.shouldEncodeHashKey(), client.getMaxDigestBytes(), client.getMaxHashLength(), client.getBaseEncoder());
            final Future<Boolean> f = client.add(key, timeToLive, cd1, latch);
            if (log.isDebugEnabled()) log.debug("ADD : Op Submitted : APP " + _appName + ", key " + key + "; future : " + f + "; client : " + client);
            if(fixMissing) {
                // Blocking wait for this replica's result (no timeout on the Future itself).
                boolean status = f.get().booleanValue();
                if(!status) { // most common case
                    if(firstStatus == null) {
                        // First replica already has the key: release the whole latch and stop.
                        // NOTE(review): counts down once per client; assumes latchCount == clients.length — confirm.
                        for(int i = 0; i < clients.length; i++) {
                            latch.countDown();
                        }
                        return latch;
                    } else {
                        // A later replica disagrees with an earlier success: repair from this replica.
                        return fixup(client, clients, evcKey, timeToLive, policy);
                    }
                }
                if(firstStatus == null) firstStatus = Boolean.valueOf(status);
            }
        }
        return latch;
    }
    /**
     * Repairs divergent replicas: reads the existing value from {@code sourceClient},
     * refreshes its TTL, and re-sets it on every destination client, waiting up to
     * {@code _operationTimeout} ms for the writes to land. Errors are logged and the
     * (possibly not fully counted-down) latch is returned regardless.
     */
    private EVCacheLatch fixup(EVCacheClient sourceClient, EVCacheClient[] destClients, EVCacheKey evcKey, int timeToLive, Policy policy) {
        final EVCacheLatchImpl latch = new EVCacheLatchImpl(policy, destClients.length, _appName);
        try {
            final CachedData readData = sourceClient.get(evcKey.getDerivedKey(sourceClient.isDuetClient(), sourceClient.getHashingAlgorithm(), sourceClient.shouldEncodeHashKey(), sourceClient.getMaxDigestBytes(), sourceClient.getMaxHashLength(), sourceClient.getBaseEncoder()), ct, false, false);
            if(readData != null) {
                // Refresh the TTL on the source before fanning the value out.
                sourceClient.touch(evcKey.getDerivedKey(sourceClient.isDuetClient(), sourceClient.getHashingAlgorithm(), sourceClient.shouldEncodeHashKey(), sourceClient.getMaxDigestBytes(), sourceClient.getMaxHashLength(), sourceClient.getBaseEncoder()), timeToLive);
                for(EVCacheClient destClient : destClients) {
                    destClient.set(evcKey.getDerivedKey(destClient.isDuetClient(), destClient.getHashingAlgorithm(), destClient.shouldEncodeHashKey(), destClient.getMaxDigestBytes(), destClient.getMaxHashLength(), destClient.getBaseEncoder()), readData, timeToLive, latch);
                }
            }
            latch.await(_operationTimeout, TimeUnit.MILLISECONDS);
        } catch (Exception e) {
            log.error("Error reading the data", e);
        }
        return latch;
    }
}
| 4,652
| 51.875
| 296
|
java
|
EVCache
|
EVCache-master/evcache-core/src/main/java/com/netflix/evcache/pool/ChunkTranscoder.java
|
package com.netflix.evcache.pool;
import net.spy.memcached.CachedData;
import net.spy.memcached.transcoders.BaseSerializingTranscoder;
import net.spy.memcached.transcoders.Transcoder;
/**
* A local transcoder used only by EVCache client to ensure we don't try to deserialize chunks
*
* @author smadappa
*
*/
public class ChunkTranscoder extends BaseSerializingTranscoder implements Transcoder<CachedData> {
public ChunkTranscoder() {
super(Integer.MAX_VALUE);
}
public boolean asyncDecode(CachedData d) {
return false;
}
public CachedData decode(CachedData d) {
return d;
}
public CachedData encode(CachedData o) {
return o;
}
public int getMaxSize() {
return Integer.MAX_VALUE;
}
}
| 768
| 20.361111
| 98
|
java
|
EVCache
|
EVCache-master/evcache-core/src/main/java/com/netflix/evcache/pool/ServerGroup.java
|
package com.netflix.evcache.pool;
public class ServerGroup implements Comparable<ServerGroup> {
private final String zone;
private final String name;
public ServerGroup(String zone, String name) {
super();
this.zone = zone;
this.name = name;
}
public String getZone() {
return zone;
}
public String getName() {
return name;
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + ((name == null) ? 0 : name.hashCode());
result = prime * result + ((zone == null) ? 0 : zone.hashCode());
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (!(obj instanceof ServerGroup)) return false;
ServerGroup other = (ServerGroup) obj;
if (name == null) {
if (other.name != null)
return false;
} else if (!name.equals(other.name))
return false;
if (zone == null) {
if (other.zone != null)
return false;
} else if (!zone.equals(other.zone))
return false;
return true;
}
@Override
public String toString() {
return "Server Group [zone=" + zone + (name.equals(zone) ? "" : ", name=" + name) + "]";
}
@Override
public int compareTo(ServerGroup o) {
return toString().compareTo(o.toString());
}
}
| 1,559
| 24.16129
| 96
|
java
|
EVCache
|
EVCache-master/evcache-core/src/main/java/com/netflix/evcache/pool/SimpleNodeListProvider.java
|
package com.netflix.evcache.pool;
import java.io.IOException;
import java.io.InputStreamReader;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.nio.charset.Charset;
import java.time.Duration;
import java.util.*;
import java.util.concurrent.TimeUnit;
import com.netflix.archaius.api.PropertyRepository;
import com.netflix.evcache.metrics.EVCacheMetricsFactory;
import org.apache.http.client.config.RequestConfig;
import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClients;
import org.json.JSONArray;
import org.json.JSONObject;
import org.json.JSONTokener;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.net.InetAddresses;
import com.netflix.archaius.api.Property;
import com.netflix.evcache.util.EVCacheConfig;
import com.netflix.spectator.api.Tag;
import com.netflix.evcache.pool.EVCacheClientPool;
/**
 * {@link EVCacheNodeList} implementation that discovers memcached nodes from one of
 * two sources: an explicit "<APP>-NODES" property (host:port lists, handy for tests
 * and simple deployments), or — when environment and region are resolvable — the
 * read-only Eureka REST endpoint on Netflix infrastructure.
 */
public class SimpleNodeListProvider implements EVCacheNodeList {
    private static final Logger log = LoggerFactory.getLogger(EVCacheClientPool.class);
    private static final String EUREKA_TIMEOUT = "evcache.eureka.timeout";
    // Last raw node list successfully parsed from the system property; surfaced via toString().
    private String currentNodeList = "";
    // Connect/socket timeout (ms) for the Eureka HTTP call.
    private final int timeout;
    private String region = null;
    private String env = null;
    public SimpleNodeListProvider() {
        final String timeoutStr = System.getProperty(EUREKA_TIMEOUT);
        this.timeout = (timeoutStr != null) ? Integer.parseInt(timeoutStr) : 5000;
        // Resolve environment: NETFLIX_ENVIRONMENT env var first, then well-known system properties.
        final String sysEnv = System.getenv("NETFLIX_ENVIRONMENT");
        if(sysEnv != null) {
            env = sysEnv;
        } else {
            String propEnv = null;
            if(propEnv == null) propEnv = System.getProperty("@environment");
            if(propEnv == null) propEnv = System.getProperty("eureka.environment");
            if(propEnv == null) propEnv = System.getProperty("netflix.environment");
            env = propEnv;
        }
        // Resolve region the same way: EC2_REGION env var first, then system properties.
        final String sysRegion = System.getenv("EC2_REGION");
        if(sysRegion != null) {
            region = sysRegion;
        } else {
            String propRegion = null;
            if(propRegion == null) propRegion = System.getProperty("@region");
            if(propRegion == null) propRegion = System.getProperty("eureka.region");
            if(propRegion == null) propRegion = System.getProperty("netflix.region");
            region = propRegion;
        }
    }
    /**
     * Pass a System Property of format
     *
     * <EVCACHE_APP>-NODES=setname0=instance01:port,instance02:port,
     * instance03:port;setname1=instance11:port,instance12:port,instance13:port;
     * setname2=instance21:port,instance22:port,instance23:port
     *
     * The explicit property wins; otherwise Eureka is queried when env/region are known.
     */
    @Override
    public Map<ServerGroup, EVCacheServerGroupConfig> discoverInstances(String appName) throws IOException {
        final String propertyName = appName + "-NODES";
        final String nodeListString = EVCacheConfig.getInstance().getPropertyRepository().get(propertyName, String.class).orElse("").get();
        if (log.isDebugEnabled()) log.debug("List of Nodes = " + nodeListString);
        if(nodeListString != null && nodeListString.length() > 0) return bootstrapFromSystemProperty(nodeListString);
        if(env != null && region != null) return bootstrapFromEureka(appName);
        return Collections.<ServerGroup, EVCacheServerGroupConfig> emptyMap();
    }
    /**
     * Netflix specific impl so we can load from eureka.
     * Fetches the app's instance list from the read-only Eureka endpoint and groups
     * instances by ASG. Discovery errors are swallowed (debug-logged) and yield an
     * empty map; timing is always recorded.
     * @param appName EVCache application to look up
     * @return server groups keyed by ASG, empty on any failure
     * @throws IOException declared for interface symmetry; failures are handled internally
     */
    private Map<ServerGroup, EVCacheServerGroupConfig> bootstrapFromEureka(String appName) throws IOException {
        if(env == null || region == null) return Collections.<ServerGroup, EVCacheServerGroupConfig> emptyMap();
        final String url = "http://discoveryreadonly." + region + ".dyn" + env + ".netflix.net:7001/v2/apps/" + appName;
        final CloseableHttpClient httpclient = HttpClients.createDefault();
        final long start = System.currentTimeMillis();
        PropertyRepository props = EVCacheConfig.getInstance().getPropertyRepository();
        CloseableHttpResponse httpResponse = null;
        try {
            final RequestConfig requestConfig = RequestConfig.custom().setSocketTimeout(timeout).setConnectTimeout(timeout).build();
            HttpGet httpGet = new HttpGet(url);
            httpGet.addHeader("Accept", "application/json");
            httpGet.setConfig(requestConfig);
            httpResponse = httpclient.execute(httpGet);
            final int statusCode = httpResponse.getStatusLine().getStatusCode();
            if (!(statusCode >= 200 && statusCode < 300)) {
                log.error("Status Code : " + statusCode + " for url " + url);
                return Collections.<ServerGroup, EVCacheServerGroupConfig> emptyMap();
            }
            final InputStreamReader in = new InputStreamReader(httpResponse.getEntity().getContent(), Charset.defaultCharset());
            final JSONTokener js = new JSONTokener(in);
            final JSONObject jsonObj = new JSONObject(js);
            final JSONObject application = jsonObj.getJSONObject("application");
            final JSONArray instances = application.getJSONArray("instance");
            final Map<ServerGroup, EVCacheServerGroupConfig> serverGroupMap = new HashMap<ServerGroup, EVCacheServerGroupConfig>();
            final int securePort = Integer.parseInt(props.get("evcache.secure.port", String.class)
                    .orElse(EVCacheClientPool.DEFAULT_SECURE_PORT).get());
            for(int i = 0; i < instances.length(); i++) {
                final JSONObject instanceObj = instances.getJSONObject(i);
                final JSONObject metadataObj = instanceObj.getJSONObject("dataCenterInfo").getJSONObject("metadata");
                final String asgName = instanceObj.getString("asgName");
                final Property<Boolean> asgEnabled = props.get(asgName + ".enabled", Boolean.class).orElse(true);
                // TLS port is used when enabled at ASG, app or global level (first match wins).
                final boolean isSecure = props.get(asgName + ".use.secure", Boolean.class)
                        .orElseGet(appName + ".use.secure")
                        .orElseGet("evcache.use.secure")
                        .orElse(false).get();
                if (!asgEnabled.get()) {
                    if(log.isDebugEnabled()) log.debug("ASG " + asgName + " is disabled so ignoring it");
                    continue;
                }
                final String zone = metadataObj.getString("availability-zone");
                final ServerGroup rSet = new ServerGroup(zone, asgName);
                final String localIp = metadataObj.getString("local-ipv4");
                final JSONObject instanceMetadataObj = instanceObj.getJSONObject("metadata");
                final String evcachePortString = instanceMetadataObj.optString("evcache.port",
                        EVCacheClientPool.DEFAULT_PORT);
                final int evcachePort = Integer.parseInt(evcachePortString);
                final int port = isSecure ? securePort : evcachePort;
                EVCacheServerGroupConfig config = serverGroupMap.get(rSet);
                if(config == null) {
                    config = new EVCacheServerGroupConfig(rSet, new HashSet<InetSocketAddress>());
                    serverGroupMap.put(rSet, config);
//                    final ArrayList<Tag> tags = new ArrayList<Tag>(2);
//                    tags.add(new BasicTag(EVCacheMetricsFactory.CACHE, appName));
//                    tags.add(new BasicTag(EVCacheMetricsFactory.SERVERGROUP, rSet.getName()));
//                    EVCacheMetricsFactory.getInstance().getLongGauge(EVCacheMetricsFactory.CONFIG, tags).set(Long.valueOf(port));
                }
                // Pair the literal IP bytes with the host string so no reverse DNS lookup happens.
                final InetAddress add = InetAddresses.forString(localIp);
                final InetAddress inetAddress = InetAddress.getByAddress(localIp, add.getAddress());
                final InetSocketAddress address = new InetSocketAddress(inetAddress, port);
                config.getInetSocketAddress().add(address);
            }
            if (log.isDebugEnabled()) log.debug("Returning : " + serverGroupMap);
            return serverGroupMap;
        } catch (Exception e) {
            // Best-effort discovery: errors fall through to the empty-map return below.
            if (log.isDebugEnabled()) log.debug("URL : " + url + "; Timeout " + timeout, e);
        } finally {
            if (httpResponse != null) {
                try {
                    httpResponse.close();
                } catch (IOException e) {
                }
            }
            final List<Tag> tagList = new ArrayList<Tag>(2);
            EVCacheMetricsFactory.getInstance().addAppNameTags(tagList, appName);
            if (log.isDebugEnabled()) log.debug("Total Time to execute " + url + " " + (System.currentTimeMillis() - start) + " msec.");
            EVCacheMetricsFactory.getInstance().getPercentileTimer(EVCacheMetricsFactory.INTERNAL_BOOTSTRAP_EUREKA, tagList, Duration.ofMillis(100)).record(System.currentTimeMillis() - start, TimeUnit.MILLISECONDS);
        }
        return Collections.<ServerGroup, EVCacheServerGroupConfig> emptyMap();
    }
    /**
     * Parses the "<APP>-NODES" property format documented on {@link #discoverInstances}.
     * Each "setname=host:port,..." segment becomes one server group; a host of the form
     * "hostname/ip" pins the resolved bytes so no DNS lookup is performed.
     */
    private Map<ServerGroup, EVCacheServerGroupConfig> bootstrapFromSystemProperty(String nodeListString ) throws IOException {
        final Map<ServerGroup, EVCacheServerGroupConfig> instancesSpecific = new HashMap<ServerGroup,EVCacheServerGroupConfig>();
        final StringTokenizer setTokenizer = new StringTokenizer(nodeListString, ";");
        while (setTokenizer.hasMoreTokens()) {
            final String token = setTokenizer.nextToken();
            final StringTokenizer replicaSetTokenizer = new StringTokenizer(token, "=");
            // NOTE(review): consumes tokens pairwise; a segment without "=" would throw — assumes well-formed input.
            while (replicaSetTokenizer.hasMoreTokens()) {
                final String replicaSetToken = replicaSetTokenizer.nextToken();
                final String instanceToken = replicaSetTokenizer.nextToken();
                final StringTokenizer instanceTokenizer = new StringTokenizer(instanceToken, ",");
                final Set<InetSocketAddress> instanceList = new HashSet<InetSocketAddress>();
                final ServerGroup rSet = new ServerGroup(replicaSetToken, replicaSetToken);
                final EVCacheServerGroupConfig config = new EVCacheServerGroupConfig(rSet, instanceList);
                instancesSpecific.put(rSet, config);
                while (instanceTokenizer.hasMoreTokens()) {
                    final String instance = instanceTokenizer.nextToken();
                    int index = instance.indexOf(':');
                    String host = instance.substring(0, index);
                    String port = instance.substring(index + 1);
                    int ind = host.indexOf('/');
                    if (ind == -1) {
                        final InetAddress add = InetAddress.getByName(host);
                        instanceList.add(new InetSocketAddress(add, Integer.parseInt(port)));
                    } else {
                        // "hostname/ip" form: bind the literal IP bytes to the host name.
                        final String hostName = host.substring(0, ind);
                        final String localIp = host.substring(ind + 1);
                        final InetAddress add = InetAddresses.forString(localIp);
                        final InetAddress inetAddress = InetAddress.getByAddress(hostName, add.getAddress());
                        instanceList.add(new InetSocketAddress(inetAddress, Integer.parseInt(port)));
                    }
                }
            }
        }
        currentNodeList = nodeListString;
        if(log.isDebugEnabled()) log.debug("List by Servergroup" + instancesSpecific);
        return instancesSpecific;
    }
    /** JSON-style dump of the node list last read from the system property. */
    @Override
    public String toString() {
        StringBuilder builder = new StringBuilder();
        builder.append("{\"Current Node List\":\"");
        builder.append(currentNodeList);
        // Close the value quote and the object exactly once (previously emitted a stray extra quote).
        builder.append("\"}");
        return builder.toString();
    }
}
| 12,043
| 49.818565
| 215
|
java
|
EVCache
|
EVCache-master/evcache-core/src/main/java/com/netflix/evcache/pool/EVCacheExecutor.java
|
package com.netflix.evcache.pool;
import java.lang.management.ManagementFactory;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.RejectedExecutionHandler;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import javax.management.MBeanServer;
import javax.management.ObjectName;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
import com.netflix.archaius.api.Property;
import com.netflix.evcache.metrics.EVCacheMetricsFactory;
import com.netflix.evcache.util.EVCacheConfig;
import com.netflix.spectator.api.patterns.ThreadPoolMonitor;
/**
 * {@link ThreadPoolExecutor} whose core/max sizes are driven by fast properties
 * ("EVCacheExecutor.&lt;name&gt;.core.size" / ".max.size") and which registers itself
 * both as a JMX MBean and with the spectator thread-pool monitor.
 */
public class EVCacheExecutor extends ThreadPoolExecutor implements EVCacheExecutorMBean {
    private static final Logger log = LoggerFactory.getLogger(EVCacheExecutor.class);
    private final Property<Integer> maxAsyncPoolSize;
    private final Property<Integer> coreAsyncPoolSize;
    private final String name;
    public EVCacheExecutor(int corePoolSize, int maximumPoolSize, long keepAliveTime, TimeUnit unit, RejectedExecutionHandler handler, String name) {
        // Unbounded queue; daemon threads named "EVCacheExecutor-<name>-<n>".
        super(corePoolSize, maximumPoolSize, keepAliveTime, unit,
                new LinkedBlockingQueue<Runnable>(),
                new ThreadFactoryBuilder().setDaemon(true).setNameFormat( "EVCacheExecutor-" + name + "-%d").build());
        this.name = name;
        // Property values override the constructor arguments and track runtime changes.
        maxAsyncPoolSize = EVCacheConfig.getInstance().getPropertyRepository().get("EVCacheExecutor." + name + ".max.size", Integer.class).orElse(maximumPoolSize);
        setMaximumPoolSize(maxAsyncPoolSize.get());
        coreAsyncPoolSize = EVCacheConfig.getInstance().getPropertyRepository().get("EVCacheExecutor." + name + ".core.size", Integer.class).orElse(corePoolSize);
        setCorePoolSize(coreAsyncPoolSize.get());
        setKeepAliveTime(keepAliveTime, unit);
        // NOTE(review): setMaximumPoolSize/setCorePoolSize throw IllegalArgumentException if an
        // update makes core > max at that instant — assumes the two properties change consistently.
        maxAsyncPoolSize.subscribe(this::setMaximumPoolSize);
        coreAsyncPoolSize.subscribe(i -> {
            setCorePoolSize(i);
            prestartAllCoreThreads();
        });
        setupMonitoring(name);
        ThreadPoolMonitor.attach(EVCacheMetricsFactory.getInstance().getRegistry(), this, EVCacheMetricsFactory.INTERNAL_EXECUTOR + "-" + name);
    }
    /** Registers this executor as a JMX MBean, replacing any prior registration with the same name. */
    private void setupMonitoring(String name) {
        try {
            ObjectName mBeanName = ObjectName.getInstance("com.netflix.evcache:Group=ThreadPool,SubGroup="+name);
            MBeanServer mbeanServer = ManagementFactory.getPlatformMBeanServer();
            if (mbeanServer.isRegistered(mBeanName)) {
                if (log.isDebugEnabled()) log.debug("MBEAN with name " + mBeanName + " has been registered. Will unregister the previous instance and register a new one.");
                mbeanServer.unregisterMBean(mBeanName);
            }
            mbeanServer.registerMBean(this, mBeanName);
        } catch (Exception e) {
            if (log.isDebugEnabled()) log.debug("Exception", e);
        }
    }
    /** Unregisters the MBean, then delegates to {@link ThreadPoolExecutor#shutdown()}. */
    @Override
    public void shutdown() {
        try {
            ObjectName mBeanName = ObjectName.getInstance("com.netflix.evcache:Group=ThreadPool,SubGroup="+name);
            MBeanServer mbeanServer = ManagementFactory.getPlatformMBeanServer();
            mbeanServer.unregisterMBean(mBeanName);
        } catch (Exception e) {
            if (log.isDebugEnabled()) log.debug("Exception", e);
        }
        super.shutdown();
    }
    /** Current number of queued (not yet executing) tasks. */
    @Override
    public int getQueueSize() {
        return getQueue().size();
    }
}
| 3,512
| 42.37037
| 172
|
java
|
EVCache
|
EVCache-master/evcache-core/src/main/java/com/netflix/evcache/pool/EVCacheClientPool.java
|
package com.netflix.evcache.pool;
import java.io.IOException;
import java.lang.management.ManagementFactory;
import java.net.InetSocketAddress;
import java.net.SocketAddress;
import java.time.Duration;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentSkipListMap;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.locks.ReentrantLock;
import java.util.function.Consumer;
import java.util.function.Function;
import java.util.stream.Collectors;
import javax.management.MBeanServer;
import javax.management.ObjectName;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.netflix.archaius.api.Property;
import com.netflix.evcache.metrics.EVCacheMetricsFactory;
import com.netflix.evcache.pool.observer.EVCacheConnectionObserver;
import com.netflix.evcache.util.CircularIterator;
import com.netflix.evcache.util.EVCacheConfig;
import com.netflix.evcache.util.ServerGroupCircularIterator;
import com.netflix.spectator.api.BasicTag;
import com.netflix.spectator.api.Gauge;
import com.netflix.spectator.api.Id;
import com.netflix.spectator.api.Tag;
import net.spy.memcached.EVCacheNode;
import net.spy.memcached.MemcachedNode;
@edu.umd.cs.findbugs.annotations.SuppressFBWarnings({ "PRMC_POSSIBLY_REDUNDANT_METHOD_CALLS", "REC_CATCH_EXCEPTION", "MDM_THREAD_YIELD" })
public class EVCacheClientPool implements Runnable, EVCacheClientPoolMBean {
private static final Logger log = LoggerFactory.getLogger(EVCacheClientPool.class);
private final String _appName;
private final String _zone;
private final EVCacheClientPoolManager manager;
private ServerGroupCircularIterator localServerGroupIterator = null;
private final Property<Boolean> _zoneAffinity;
private final Property<Integer> _poolSize; // Number of MemcachedClients to each cluster
private final Property<Integer> _readTimeout; // Timeout for readOperation
private final Property<Integer> _bulkReadTimeout; // Timeout for readOperation
public static final String DEFAULT_PORT = "11211";
public static final String DEFAULT_SECURE_PORT = "11443";
private final Property<Boolean> _retryAcrossAllReplicas;
private long lastReconcileTime = 0;
private final Property<Integer> logOperations;
private final Property<Set<String>> logOperationCalls;
private final Property<Set<String>> cloneWrite;
// name of the duet EVCache application, if applicable.
private final Property<String> duet;
// indicates if duet needs to be primary
private final Property<Boolean> duetPrimary;
// evCacheClientPool of the duet EVCache application, if applicable. Supports daisy chaining.
private EVCacheClientPool duetClientPool;
// indicates if this evCacheClientPool is a duet. This property is used to mark EVCacheClients of this pool
// as duet if applicable. The duet property on the EVCacheClient is then used to know what kind of key of
// EVCacheKey (i.e. normal key vs duet key) should be passed to the client
private boolean isDuet;
private final Property<Integer> _opQueueMaxBlockTime; // Timeout for adding an operation
private final Property<Integer> _operationTimeout;// Timeout for write operation
private final Property<Integer> _maxReadQueueSize;
private final Property<Integer> reconcileInterval;
private final Property<Integer> _maxRetries;
private final Property<Boolean> _pingServers;
private final Property<Boolean> refreshConnectionOnReadQueueFull;
private final Property<Integer> refreshConnectionOnReadQueueFullSize;
private final ThreadPoolExecutor asyncRefreshExecutor;
private final Property<Boolean> _disableAsyncRefresh;
private final List<Tag> tagList;
// private final Id poolSizeId;
//private final Map<String, Counter> counterMap = new ConcurrentHashMap<String, Counter>();
private final Map<String, Gauge> gaugeMap = new ConcurrentHashMap<String, Gauge>();
private final ReentrantLock refreshLock = new ReentrantLock();
    @SuppressWarnings("serial")
    // Memoizing cache of per-server-group "writeOnly" fast properties: get() is overridden
    // to lazily create the property on first lookup (ASG-name key, falling back to zone key).
    private final Map<ServerGroup, Property<Boolean>> writeOnlyFastPropertyMap = new ConcurrentHashMap<ServerGroup, Property<Boolean>>() {
        @Override
        public Property<Boolean> get(Object _serverGroup) {
            final ServerGroup serverGroup = ServerGroup.class.cast(_serverGroup);
            // Fast path: property already cached for this server group.
            Property<Boolean> isServerGroupInWriteOnlyMode = super.get(serverGroup);
            if (isServerGroupInWriteOnlyMode != null) return isServerGroupInWriteOnlyMode;
            // Miss: "<app>.<asg>.EVCacheClientPool.writeOnly", falling back to the zone-keyed name, default false.
            isServerGroupInWriteOnlyMode = EVCacheConfig.getInstance().
                    getPropertyRepository().get(_appName + "." + serverGroup.getName() + ".EVCacheClientPool.writeOnly", Boolean.class)
                    .orElseGet(_appName + "." + serverGroup.getZone() + ".EVCacheClientPool.writeOnly").orElse(false);
            put(serverGroup, isServerGroupInWriteOnlyMode);
            return isServerGroupInWriteOnlyMode;
        };
    };
private final AtomicLong numberOfModOps = new AtomicLong(0);
private boolean _shutdown = false;
private Map<ServerGroup, List<EVCacheClient>> memcachedInstancesByServerGroup = new ConcurrentHashMap<ServerGroup, List<EVCacheClient>>();
private Map<ServerGroup, List<EVCacheClient>> memcachedReadInstancesByServerGroup = new ConcurrentHashMap<ServerGroup, List<EVCacheClient>>();
private Map<ServerGroup, List<EVCacheClient>> memcachedWriteInstancesByServerGroup = new ConcurrentSkipListMap<ServerGroup, List<EVCacheClient>>();
private final Map<InetSocketAddress, Long> evCacheDiscoveryConnectionLostSet = new ConcurrentHashMap<InetSocketAddress, Long>();
private Map<String, ServerGroupCircularIterator> readServerGroupByZone = new ConcurrentHashMap<String, ServerGroupCircularIterator>();
private ServerGroupCircularIterator memcachedFallbackReadInstances = new ServerGroupCircularIterator(Collections.<ServerGroup> emptySet());
private CircularIterator<EVCacheClient[]> allEVCacheWriteClients = new CircularIterator<EVCacheClient[]>(Collections.<EVCacheClient[]> emptyList());
private final EVCacheNodeList provider;
    /**
     * Wires up every fast property that drives this pool (sizes, timeouts, duet and
     * clone configuration), registers monitoring, and triggers the initial refresh.
     * Several properties re-run {@code callback} (clear state + full refresh) on change.
     */
    EVCacheClientPool(final String appName, final EVCacheNodeList provider, final ThreadPoolExecutor asyncRefreshExecutor, final EVCacheClientPoolManager manager, boolean isDuet) {
        this._appName = appName;
        this.provider = provider;
        this.asyncRefreshExecutor = asyncRefreshExecutor;
        this.manager = manager;
        this.isDuet = isDuet;
        // Availability zone: env var first, system property second, "GLOBAL" as last resort.
        String ec2Zone = System.getenv("EC2_AVAILABILITY_ZONE");
        if (ec2Zone == null) ec2Zone = System.getProperty("EC2_AVAILABILITY_ZONE");
        this._zone = (ec2Zone == null) ? "GLOBAL" : ec2Zone;
        final EVCacheConfig config = EVCacheConfig.getInstance();
        // Shared change-listener: wipe all cached client state and rebuild the pool.
        final Consumer<Integer> callback = t -> {
            clearState();
            refreshPool(true, true);
        };
        this._zoneAffinity = config.getPropertyRepository().get(appName + ".EVCacheClientPool.zoneAffinity", Boolean.class).orElse(true);
        this._poolSize = config.getPropertyRepository().get(appName + ".EVCacheClientPool.poolSize", Integer.class).orElse(1);
        this._poolSize.subscribe(callback);
        this._readTimeout = config.getPropertyRepository().get(appName + ".EVCacheClientPool.readTimeout", Integer.class).orElse(manager.getDefaultReadTimeout().get());
        this._readTimeout.subscribe(callback);
        this._bulkReadTimeout = config.getPropertyRepository().get(appName + ".EVCacheClientPool.bulkReadTimeout", Integer.class).orElse(_readTimeout.get());
        this._bulkReadTimeout.subscribe(callback);
        this.refreshConnectionOnReadQueueFull = config.getPropertyRepository().get(appName + ".EVCacheClientPool.refresh.connection.on.readQueueFull", Boolean.class).orElseGet("EVCacheClientPool.refresh.connection.on.readQueueFull").orElse(false);
        this.refreshConnectionOnReadQueueFullSize = config.getPropertyRepository().get(appName + ".EVCacheClientPool.refresh.connection.on.readQueueFull.size", Integer.class).orElseGet("EVCacheClientPool.refresh.connection.on.readQueueFull.size").orElse(100);
        this._opQueueMaxBlockTime = config.getPropertyRepository().get(appName + ".operation.QueueMaxBlockTime", Integer.class).orElse(10);
        this._opQueueMaxBlockTime.subscribe(callback);
        this._operationTimeout = config.getPropertyRepository().get(appName + ".operation.timeout", Integer.class).orElseGet("evcache.operation.timeout").orElse(2500);
        this._operationTimeout.subscribe(callback);
        this._maxReadQueueSize = config.getPropertyRepository().get(appName + ".max.read.queue.length", Integer.class).orElse(50);
        this._retryAcrossAllReplicas = config.getPropertyRepository().get(_appName + ".retry.all.copies", Boolean.class).orElse(false);
        this._disableAsyncRefresh = config.getPropertyRepository().get(_appName + ".disable.async.refresh", Boolean.class).orElse(false);
        this._maxRetries = config.getPropertyRepository().get(_appName + ".max.retry.count", Integer.class).orElse(1);
        // Helper to parse comma-separated property values into sets.
        Function<String, Set<String>> splitSet = t -> Arrays.stream(t.split(",")).collect(Collectors.toSet());
        this.logOperations = config.getPropertyRepository().get(appName + ".log.operation", Integer.class).orElse(0);
        this.logOperationCalls = config.getPropertyRepository().get(appName + ".log.operation.calls", String.class).orElse("SET,DELETE,GMISS,TMISS,BMISS_ALL,TOUCH,REPLACE").map(splitSet);
        this.reconcileInterval = config.getPropertyRepository().get(appName + ".reconcile.interval", Integer.class).orElse(600000);
        // Writes may be cloned to sibling apps; re-init the clone pools whenever the list changes.
        this.cloneWrite = config.getPropertyRepository().get(appName + ".clone.writes.to", String.class).map(splitSet).orElse(Collections.emptySet());
        this.cloneWrite.subscribe(i -> {
            setupClones();
        });
        // Optional duet app (daisy-chainable secondary pool); rebuilt on property change.
        this.duet = config.getPropertyRepository().get(appName + ".duet", String.class).orElseGet("evcache.duet").orElse("");
        this.duet.subscribe(i -> {
            setupDuet();
        });
        this.duetPrimary = config.getPropertyRepository().get(appName + ".duet.primary", Boolean.class).orElseGet("evcache.duet.primary").orElse(false);
        tagList = new ArrayList<Tag>(2);
        EVCacheMetricsFactory.getInstance().addAppNameTags(tagList, _appName);
        this._pingServers = config.getPropertyRepository().get(appName + ".ping.servers", Boolean.class).orElseGet("evcache.ping.servers").orElse(false);
        setupMonitoring();
        //init all callbacks
        refreshPool(false, true);
        setupDuet();
        setupClones();
        if (log.isInfoEnabled()) log.info(toString());
    }
private void setupClones() {
for(String cloneApp : cloneWrite.get()) {
manager.initEVCache(cloneApp);
}
}
private void setupDuet() {
// check if duet is already setup, if yes, remove the current duet.
if (duetClientPool != null && !duetClientPool.getAppName().equalsIgnoreCase(duet.get())) {
duetClientPool = null;
log.info("Removed duet");
}
if (null == duetClientPool && !duet.get().isEmpty()) {
duetClientPool = manager.initEVCache(duet.get(), true);
log.info("Completed setup of a duet with name: " + duet.get());
}
}
private void clearState() {
cleanupMemcachedInstances(true);
memcachedInstancesByServerGroup.clear();
memcachedReadInstancesByServerGroup.clear();
memcachedWriteInstancesByServerGroup.clear();
readServerGroupByZone.clear();
memcachedFallbackReadInstances = new ServerGroupCircularIterator(Collections.<ServerGroup> emptySet());
}
private EVCacheClient getEVCacheClientForReadInternal() {
if (memcachedReadInstancesByServerGroup == null || memcachedReadInstancesByServerGroup.isEmpty()) {
if (log.isDebugEnabled()) log.debug("memcachedReadInstancesByServerGroup : " + memcachedReadInstancesByServerGroup);
if(asyncRefreshExecutor.getQueue().isEmpty()) refreshPool(true, true);
return null;
}
try {
List<EVCacheClient> clients = null;
if (_zoneAffinity.get()) {
if (localServerGroupIterator != null) {
clients = memcachedReadInstancesByServerGroup.get(localServerGroupIterator.next());
}
if (clients == null) {
final ServerGroup fallbackServerGroup = memcachedFallbackReadInstances.next();
if (fallbackServerGroup == null) {
if (log.isDebugEnabled()) log.debug("fallbackServerGroup is null.");
return null;
}
clients = memcachedReadInstancesByServerGroup.get(fallbackServerGroup);
}
} else {
clients = new ArrayList<EVCacheClient>(memcachedReadInstancesByServerGroup.size() - 1);
for (Iterator<ServerGroup> itr = memcachedReadInstancesByServerGroup.keySet().iterator(); itr
.hasNext();) {
final ServerGroup serverGroup = itr.next();
final List<EVCacheClient> clientList = memcachedReadInstancesByServerGroup.get(serverGroup);
final EVCacheClient client = selectClient(clientList);
if (client != null) clients.add(client);
}
}
return selectClient(clients);
} catch (Throwable t) {
log.error("Exception trying to get an readable EVCache Instances for zone {}", t);
return null;
}
}
/**
* Returns EVCacheClient of this pool if available. Otherwise, will return EVCacheClient of the duet.
* @return
*/
public EVCacheClient getEVCacheClientForRead() {
EVCacheClient evCacheClient = getEVCacheClientForReadInternal();
// most common production scenario
if (null == duetClientPool) {
return evCacheClient;
}
// return duet if current client is not available or if duet is primary
if (null == evCacheClient || duetPrimary.get()) {
EVCacheClient duetClient = duetClientPool.getEVCacheClientForRead();
// if duetClient is not present, fallback to evCacheClient
return null == duetClient ? evCacheClient : duetClient;
}
return evCacheClient;
}
private List<EVCacheClient> getAllEVCacheClientForReadInternal() {
if (memcachedReadInstancesByServerGroup == null || memcachedReadInstancesByServerGroup.isEmpty()) {
if (log.isDebugEnabled()) log.debug("memcachedReadInstancesByServerGroup : " + memcachedReadInstancesByServerGroup);
if(asyncRefreshExecutor.getQueue().isEmpty()) refreshPool(true, true);
return Collections.<EVCacheClient> emptyList();
}
try {
List<EVCacheClient> clients = null;
if (localServerGroupIterator != null) {
clients = memcachedReadInstancesByServerGroup.get(localServerGroupIterator.next());
}
if (clients == null) {
final ServerGroup fallbackServerGroup = memcachedFallbackReadInstances.next();
if (fallbackServerGroup == null) {
if (log.isDebugEnabled()) log.debug("fallbackServerGroup is null.");
return Collections.<EVCacheClient> emptyList();
}
clients = memcachedReadInstancesByServerGroup.get(fallbackServerGroup);
}
return clients;
} catch (Throwable t) {
log.error("Exception trying to get readable EVCache Instances for zone ", t);
return Collections.<EVCacheClient> emptyList();
}
}
public List<EVCacheClient> getAllEVCacheClientForRead() {
List<EVCacheClient> evCacheClients = getAllEVCacheClientForReadInternal();
// most common production scenario
if (null == duetClientPool) {
return evCacheClients;
}
List<EVCacheClient> duetEVCacheClients = duetClientPool.getAllEVCacheClientForRead();
if (null == evCacheClients)
return duetEVCacheClients;
if (null == duetEVCacheClients)
return evCacheClients;
if (duetPrimary.get()) {
List<EVCacheClient> clients = new ArrayList<>(duetEVCacheClients);
clients.addAll(evCacheClients);
return clients;
} else {
List<EVCacheClient> clients = new ArrayList<>(evCacheClients);
clients.addAll(duetEVCacheClients);
return clients;
}
}
private EVCacheClient selectClient(List<EVCacheClient> clients) {
if (clients == null || clients.isEmpty()) {
if (log.isDebugEnabled()) log.debug("clients is null returning null and forcing pool refresh!!!");
if(asyncRefreshExecutor.getQueue().isEmpty()) refreshPool(true, true);
return null;
}
if (clients.size() == 1) {
return clients.get(0); // Frequently used scenario
}
final long currentVal = numberOfModOps.incrementAndGet();
// Get absolute value of current val to ensure correctness even at 9 quintillion+ requests
// make sure to truncate after the mod. This allows up to 2^31 clients.
final int index = Math.abs((int) (currentVal % clients.size()));
return clients.get(index);
}
private EVCacheClient getEVCacheClientForReadExcludeInternal(ServerGroup rsetUsed) {
if (memcachedReadInstancesByServerGroup == null || memcachedReadInstancesByServerGroup.isEmpty()) {
if (log.isDebugEnabled()) log.debug("memcachedReadInstancesByServerGroup : " + memcachedReadInstancesByServerGroup);
if(asyncRefreshExecutor.getQueue().isEmpty()) refreshPool(true, true);
return null;
}
try {
ServerGroup fallbackServerGroup = memcachedFallbackReadInstances.next(rsetUsed);
if (fallbackServerGroup == null || fallbackServerGroup.equals(rsetUsed)) {
return null;
}
final List<EVCacheClient> clients = memcachedReadInstancesByServerGroup.get(fallbackServerGroup);
return selectClient(clients);
} catch (Throwable t) {
log.error("Exception trying to get an readable EVCache Instances for zone {}", rsetUsed, t);
return null;
}
}
public EVCacheClient getEVCacheClientForReadExclude(ServerGroup rsetUsed) {
EVCacheClient evCacheClient = getEVCacheClientForReadExcludeInternal(rsetUsed);
// most common production scenario
if (null == duetClientPool) {
return evCacheClient;
}
// return duet if current client is not available or if duet is primary
if (null == evCacheClient || duetPrimary.get()) {
EVCacheClient duetClient = duetClientPool.getEVCacheClientForReadExclude(rsetUsed);
// if duetClient is not present, fallback to evCacheClient
return null == duetClient ? evCacheClient : duetClient;
}
return evCacheClient;
}
private EVCacheClient getEVCacheClientInternal(ServerGroup serverGroup) {
if (memcachedReadInstancesByServerGroup == null || memcachedReadInstancesByServerGroup.isEmpty()) {
if (log.isDebugEnabled()) log.debug("memcachedReadInstancesByServerGroup : " + memcachedReadInstancesByServerGroup);
if(asyncRefreshExecutor.getQueue().isEmpty()) refreshPool(true, true);
return null;
}
try {
List<EVCacheClient> clients = memcachedReadInstancesByServerGroup.get(serverGroup);
if (clients == null) {
final ServerGroup fallbackServerGroup = memcachedFallbackReadInstances.next();
if (fallbackServerGroup == null) {
if (log.isDebugEnabled()) log.debug("fallbackServerGroup is null.");
return null;
}
clients = memcachedReadInstancesByServerGroup.get(fallbackServerGroup);
}
return selectClient(clients);
} catch (Throwable t) {
log.error("Exception trying to get an readable EVCache Instances for ServerGroup {}", serverGroup, t);
return null;
}
}
public EVCacheClient getEVCacheClient(ServerGroup serverGroup) {
EVCacheClient evCacheClient = getEVCacheClientInternal(serverGroup);
// most common production scenario
if (null == duetClientPool) {
return evCacheClient;
}
// return duet if current client is not available or if duet is primary
if (null == evCacheClient || duetPrimary.get()) {
EVCacheClient duetClient = duetClientPool.getEVCacheClient(serverGroup);
// if duetClient is not present, fallback to evCacheClient
return null == duetClient ? evCacheClient : duetClient;
}
return evCacheClient;
}
    /**
     * Builds the list of clients to use for read retries, excluding the server group
     * that already failed.
     *
     * <p>Behavior depends on two properties:
     * <ul>
     *   <li>{@code _retryAcrossAllReplicas} true: one client from every other server group.</li>
     *   <li>otherwise, up to {@code _maxRetries} clients drawn from the fallback iterator
     *       (a single direct lookup when {@code _maxRetries == 1}).</li>
     * </ul>
     *
     * @param serverGroupToExclude the server group that should not be retried
     * @return candidate clients for retries; empty (never null) when none are available
     */
    private List<EVCacheClient> getEVCacheClientsForReadExcludingInternal(ServerGroup serverGroupToExclude) {
        if (memcachedReadInstancesByServerGroup == null || memcachedReadInstancesByServerGroup.isEmpty()) {
            if (log.isDebugEnabled()) log.debug("memcachedReadInstancesByServerGroup : " + memcachedReadInstancesByServerGroup);
            // kick off a background rebuild when nothing is queued already
            if(asyncRefreshExecutor.getQueue().isEmpty()) refreshPool(true, true);
            return Collections.<EVCacheClient> emptyList();
        }
        try {
            if (_retryAcrossAllReplicas.get()) {
                // one client from every readable server group except the excluded one
                List<EVCacheClient> clients = new ArrayList<EVCacheClient>(memcachedReadInstancesByServerGroup.size() - 1);
                for (Iterator<ServerGroup> itr = memcachedReadInstancesByServerGroup.keySet().iterator(); itr
                        .hasNext();) {
                    final ServerGroup serverGroup = itr.next();
                    if (serverGroup.equals(serverGroupToExclude)) continue;
                    final List<EVCacheClient> clientList = memcachedReadInstancesByServerGroup.get(serverGroup);
                    final EVCacheClient client = selectClient(clientList);
                    if (client != null) clients.add(client);
                }
                return clients;
            } else {
                if(_maxRetries.get() == 1) {
                    // single-retry fast path: one lookup, no list building
                    final EVCacheClient client = getEVCacheClientForReadExclude(serverGroupToExclude);
                    if (client != null) return Collections.singletonList(client);
                } else {
                    // cap retries at the number of remaining server groups
                    int maxNumberOfPossibleRetries = memcachedReadInstancesByServerGroup.size() - 1;
                    if(maxNumberOfPossibleRetries > _maxRetries.get()) {
                        maxNumberOfPossibleRetries = _maxRetries.get();
                    }
                    final List<EVCacheClient> clients = new ArrayList<EVCacheClient>(_maxRetries.get());
                    for(int i = 0; i < maxNumberOfPossibleRetries; i++) {
                        // NOTE: the iterator may hand back the same group on successive calls;
                        // duplicates are not filtered here
                        ServerGroup fallbackServerGroup = memcachedFallbackReadInstances.next(serverGroupToExclude);
                        if (fallbackServerGroup == null ) {
                            return clients;
                        }
                        final List<EVCacheClient> clientList = memcachedReadInstancesByServerGroup.get(fallbackServerGroup);
                        final EVCacheClient client = selectClient(clientList);
                        if (client != null) clients.add(client);
                    }
                    return clients;
                }
            }
        } catch (Throwable t) {
            log.error("Exception trying to get an readable EVCache Instances for zone {}", serverGroupToExclude, t);
        }
        // reached on error, or when the single-retry path found no client
        return Collections.<EVCacheClient> emptyList();
    }
public List<EVCacheClient> getEVCacheClientsForReadExcluding(ServerGroup serverGroupToExclude) {
List<EVCacheClient> evCacheClients = getEVCacheClientsForReadExcludingInternal(serverGroupToExclude);
// most common production scenario
if (null == duetClientPool) {
return evCacheClients;
}
List<EVCacheClient> duetEVCacheClients = duetClientPool.getEVCacheClientsForReadExcluding(serverGroupToExclude);
if (null == evCacheClients)
return duetEVCacheClients;
if (null == duetEVCacheClients)
return evCacheClients;
if (duetPrimary.get()) {
List<EVCacheClient> clients = new ArrayList<>(duetEVCacheClients);
clients.addAll(evCacheClients);
return clients;
} else {
List<EVCacheClient> clients = new ArrayList<>(evCacheClients);
clients.addAll(duetEVCacheClients);
return clients;
}
}
public boolean isInWriteOnly(ServerGroup serverGroup) {
if (memcachedReadInstancesByServerGroup.containsKey(serverGroup)) {
return false;
}
if(memcachedWriteInstancesByServerGroup.containsKey(serverGroup)) {
return true;
}
return false;
}
    /**
     * Collects one client per write-only server group (groups present in the write map
     * but absent from the read map).
     *
     * <p>When clone-write apps are configured, the result additionally includes the
     * write-only clients of each clone app's pool.
     *
     * @return clients for write-only groups; empty array (never null) when none exist or on error
     */
    private EVCacheClient[] getWriteOnlyEVCacheClientsInternal() {
        try {
            if((cloneWrite.get().size() == 0)) {
                // no clones: size the array as (write groups - read groups), i.e. the
                // number of write-only groups, and fill it back-to-front
                int size = memcachedWriteInstancesByServerGroup.size() - memcachedReadInstancesByServerGroup.size();
                if (size == 0) return new EVCacheClient[0];
                final EVCacheClient[] clientArr = new EVCacheClient[size];
                for (ServerGroup serverGroup : memcachedWriteInstancesByServerGroup.keySet()) {
                    // "size > 0" guards the decrementing index if the maps change concurrently
                    if (!memcachedReadInstancesByServerGroup.containsKey(serverGroup) && size > 0) {
                        final List<EVCacheClient> clients = memcachedWriteInstancesByServerGroup.get(serverGroup);
                        if (clients.size() == 1) {
                            clientArr[--size] = clients.get(0); // frequently used use case
                        } else {
                            // round-robin among the group's pooled clients
                            final long currentVal = numberOfModOps.incrementAndGet();
                            final int index = (int) (currentVal % clients.size());
                            clientArr[--size] = (index < 0) ? clients.get(0) : clients.get(index);
                        }
                    }
                }
                return clientArr;
            } else {
                // clones configured: gather clone write-only clients first, then our own
                final List<EVCacheClient> evcacheClientList = new ArrayList<EVCacheClient>();
                for(String cloneApp : cloneWrite.get()) {
                    final EVCacheClient[] clients = manager.getEVCacheClientPool(cloneApp).getWriteOnlyEVCacheClients();
                    if(clients == null || clients.length == 0) continue;
                    for(int i = 0; i < clients.length; i++) {
                        evcacheClientList.add(clients[i]);
                    }
                }
                for (ServerGroup serverGroup : memcachedWriteInstancesByServerGroup.keySet()) {
                    if (!memcachedReadInstancesByServerGroup.containsKey(serverGroup)) {
                        final List<EVCacheClient> clients = memcachedWriteInstancesByServerGroup.get(serverGroup);
                        if (clients.size() == 1) {
                            evcacheClientList.add(clients.get(0)); // frequently used use case
                        } else {
                            // round-robin among the group's pooled clients
                            final long currentVal = numberOfModOps.incrementAndGet();
                            final int index = (int) (currentVal % clients.size());
                            evcacheClientList.add((index < 0) ? clients.get(0) : clients.get(index));
                        }
                    }
                }
                return evcacheClientList.toArray(new EVCacheClient[0]);
            }
        } catch (Throwable t) {
            log.error("Exception trying to get an array of writable EVCache Instances", t);
            return new EVCacheClient[0];
        }
    }
public EVCacheClient[] getWriteOnlyEVCacheClients() {
EVCacheClient[] evCacheClients = getWriteOnlyEVCacheClientsInternal();
// most common production scenario
if (null == duetClientPool) {
return evCacheClients;
}
EVCacheClient[] duetEVCacheClients = duetClientPool.getWriteOnlyEVCacheClients();
if (null == evCacheClients || evCacheClients.length == 0) {
return duetEVCacheClients;
}
if (null == duetEVCacheClients || duetEVCacheClients.length == 0) {
return evCacheClients;
}
if (duetPrimary.get()) {
// return write-only of duet app and all writers of original app to which duet is attached
// get all writers of original app
evCacheClients = getEVCacheClientForWriteInternal();
EVCacheClient[] allEVCacheClients = Arrays.copyOf(duetEVCacheClients, duetEVCacheClients.length + evCacheClients.length);
System.arraycopy(evCacheClients, 0, allEVCacheClients, duetEVCacheClients.length, evCacheClients.length);
return allEVCacheClients;
} else {
// return write-only of original app and all writers of duet app
// get all writers of duet app
duetEVCacheClients = duetClientPool.getEVCacheClientForWrite();
EVCacheClient[] allEVCacheClients = Arrays.copyOf(evCacheClients, evCacheClients.length + duetEVCacheClients.length);
System.arraycopy(duetEVCacheClients, 0, allEVCacheClients, evCacheClients.length, duetEVCacheClients.length);
return allEVCacheClients;
}
}
EVCacheClient[] getAllWriteClients() {
try {
if(allEVCacheWriteClients != null) {
EVCacheClient[] clientArray = allEVCacheWriteClients.next();
if(clientArray == null || clientArray.length == 0 ) {
if (log.isInfoEnabled()) log.info("Refreshing the write client array.");
try {
refreshLock.lock();
clientArray = allEVCacheWriteClients.next();
if(clientArray == null || clientArray.length == 0 ) {
refreshPool(false, true);
clientArray = allEVCacheWriteClients.next();
}
}
finally {
refreshLock.unlock();
}
}
if (log.isDebugEnabled()) log.debug("clientArray : " + clientArray);
if(clientArray == null ) return new EVCacheClient[0];
return clientArray;
}
final EVCacheClient[] clientArr = new EVCacheClient[memcachedWriteInstancesByServerGroup.size()];
int i = 0;
for (ServerGroup serverGroup : memcachedWriteInstancesByServerGroup.keySet()) {
final List<EVCacheClient> clients = memcachedWriteInstancesByServerGroup.get(serverGroup);
if (clients.size() == 1) {
clientArr[i++] = clients.get(0); // frequently used usecase
} else {
final long currentVal = numberOfModOps.incrementAndGet();
final int index = (int) (currentVal % clients.size());
clientArr[i++] = (index < 0) ? clients.get(0) : clients.get(index);
}
}
if(clientArr == null ) return new EVCacheClient[0];
return clientArr;
} catch (Throwable t) {
log.error("Exception trying to get an array of writable EVCache Instances", t);
return new EVCacheClient[0];
}
}
private EVCacheClient[] getEVCacheClientForWriteInternal() {
try {
if((cloneWrite.get().size() == 0)) {
return getAllWriteClients();
} else {
final List<EVCacheClient> evcacheClientList = new ArrayList<EVCacheClient>();
final EVCacheClient[] clientArr = getAllWriteClients();
for(EVCacheClient client : clientArr) {
evcacheClientList.add(client);
}
for(String cloneApp : cloneWrite.get()) {
final EVCacheClient[] cloneWriteArray = manager.getEVCacheClientPool(cloneApp).getAllWriteClients();
for(int j = 0; j < cloneWriteArray.length; j++) {
evcacheClientList.add(cloneWriteArray[j]);
}
}
return evcacheClientList.toArray(new EVCacheClient[0]);
}
} catch (Throwable t) {
log.error("Exception trying to get an array of writable EVCache Instances", t);
return new EVCacheClient[0];
}
}
public EVCacheClient[] getEVCacheClientForWrite() {
EVCacheClient[] evCacheClients = getEVCacheClientForWriteInternal();
// most common production scenario
if (null == duetClientPool) {
return evCacheClients;
}
EVCacheClient[] duetEVCacheClients = duetClientPool.getEVCacheClientForWrite();
if (null == evCacheClients || evCacheClients.length == 0) {
return duetEVCacheClients;
}
if (null == duetEVCacheClients || duetEVCacheClients.length == 0) {
return evCacheClients;
}
if (duetPrimary.get()) {
EVCacheClient[] allEVCacheClients = Arrays.copyOf(duetEVCacheClients, duetEVCacheClients.length + evCacheClients.length);
System.arraycopy(evCacheClients, 0, allEVCacheClients, duetEVCacheClients.length, evCacheClients.length);
return allEVCacheClients;
} else {
EVCacheClient[] allEVCacheClients = Arrays.copyOf(evCacheClients, evCacheClients.length + duetEVCacheClients.length);
System.arraycopy(duetEVCacheClients, 0, allEVCacheClients, evCacheClients.length, duetEVCacheClients.length);
return allEVCacheClients;
}
}
    /**
     * Refreshes the pool topology without forcing a rebuild of healthy clients;
     * shorthand for {@code refresh(false)}.
     *
     * @throws IOException propagated from {@code refresh(boolean)}
     */
    private void refresh() throws IOException {
        refresh(false);
    }
protected boolean haveInstancesInServerGroupChanged(ServerGroup serverGroup, Set<InetSocketAddress> discoveredHostsInServerGroup) {
final List<EVCacheClient> clients = memcachedInstancesByServerGroup.get(serverGroup);
// 1. if we have discovered instances in zone but not in our map then
// return immediately
if (clients == null) return true;
// 2. Do a quick check based on count (active, inactive and discovered)
for (int i = 0; i < clients.size(); i++) {
final int size = clients.size();
final EVCacheClient client = clients.get(i);
final EVCacheConnectionObserver connectionObserver = client.getConnectionObserver();
final int activeServerCount = connectionObserver.getActiveServerCount();
final int inActiveServerCount = connectionObserver.getInActiveServerCount();
final int sizeInDiscovery = discoveredHostsInServerGroup.size();
final int sizeInHashing = client.getNodeLocator().getAll().size();
if (i == 0) getConfigGauge("sizeInDiscovery", serverGroup).set(Long.valueOf(sizeInDiscovery));
if (log.isDebugEnabled()) log.debug("\n\tApp : " + _appName + "\n\tServerGroup : " + serverGroup + "\n\tActive Count : " + activeServerCount
+ "\n\tInactive Count : " + inActiveServerCount + "\n\tDiscovery Count : " + sizeInDiscovery + "\n\tsizeInHashing : " + sizeInHashing);
if (log.isDebugEnabled()) log.debug("\n\tApp : " + _appName + "\n\tServerGroup : " + serverGroup
+ "\n\tActive Count : " + activeServerCount + "\n\tInactive Count : "
+ inActiveServerCount + "\n\tDiscovery Count : " + sizeInDiscovery + "\n\tsizeInHashing : " + sizeInHashing);
final long currentTime = System.currentTimeMillis();
boolean reconcile = false;
if (currentTime - lastReconcileTime > reconcileInterval.get()) {
reconcile = true;
lastReconcileTime = currentTime;
getConfigGauge(EVCacheMetricsFactory.POOL_RECONCILE, serverGroup).set(Long.valueOf(1));
} else {
getConfigGauge(EVCacheMetricsFactory.POOL_RECONCILE, serverGroup).set(Long.valueOf(0));
}
final boolean hashingSizeDiff = (sizeInHashing != sizeInDiscovery && sizeInHashing != activeServerCount);
if (reconcile || activeServerCount != sizeInDiscovery || inActiveServerCount > 0 || hashingSizeDiff) {
if (log.isDebugEnabled()) log.debug("\n\t" + _appName + " & " + serverGroup
+ " experienced an issue.\n\tActive Server Count : " + activeServerCount);
if (log.isDebugEnabled()) log.debug("\n\tInActive Server Count : " + inActiveServerCount
+ "\n\tDiscovered Instances : " + sizeInDiscovery);
// 1. If a host is in discovery and we don't have an active or
// inActive connection to it then we will have to refresh our
// list. Typical case is we have replaced an existing node or
// expanded the cluster.
for (InetSocketAddress instance : discoveredHostsInServerGroup) {
if (!connectionObserver.getActiveServers().containsKey(instance) && !connectionObserver.getInActiveServers().containsKey(instance)) {
if (log.isDebugEnabled()) log.debug("AppName :" + _appName + "; ServerGroup : " + serverGroup
+ "; instance : " + instance
+ " not found and will shutdown the client and init it again.");
getConfigGauge(EVCacheMetricsFactory.POOL_CHANGED, serverGroup).set(Long.valueOf(1));
return true;
}
}
// 2. If a host is not in discovery and is
// inActive for more than 15 mins then we will have to refresh our
// list. Typical case is we have replaced an existing node or
// decreasing the cluster. Replacing an instance should not take
// more than 20 mins (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/monitoring-system-instance-status-check.html#types-of-instance-status-checks).
// Even if it does then we will refresh the client twice which
// should be ok.
// NOTE : For a zombie instance this will mean that it will take
// 15 mins after detaching and taking it OOS to be removed
// unless we force a refresh
// 12/5/2015 - Should we even do this anymore
for (Entry<InetSocketAddress, Long> entry : connectionObserver.getInActiveServers().entrySet()) {
if ((currentTime - entry.getValue().longValue()) > 1200000 && !discoveredHostsInServerGroup.contains(entry.getKey())) {
if (log.isDebugEnabled()) log.debug("AppName :" + _appName + "; ServerGroup : " + serverGroup + "; instance : " + entry.getKey()
+ " not found in discovery and will shutdown the client and init it again.");
getConfigGauge(EVCacheMetricsFactory.POOL_CHANGED, serverGroup).set(Long.valueOf(2));
return true;
}
}
// 3. Check to see if there are any inactive connections. If we
// find inactive connections and this node is not in discovery
// then we will refresh the client.
final Collection<MemcachedNode> allNodes = client.getNodeLocator().getAll();
for (MemcachedNode node : allNodes) {
if (node instanceof EVCacheNode) {
final EVCacheNode evcNode = ((EVCacheNode) node);
// If the connection to a node is not active then we
// will reconnect the client.
if (!evcNode.isActive() && !discoveredHostsInServerGroup.contains(evcNode.getSocketAddress())) {
if (log.isDebugEnabled()) log.debug("AppName :" + _appName + "; ServerGroup : " + serverGroup
+ "; Node : " + node + " is not active. Will shutdown the client and init it again.");
getConfigGauge(EVCacheMetricsFactory.POOL_CHANGED, serverGroup).set(Long.valueOf(3));
return true;
}
}
}
// 4. if there is a difference in the number of nodes in the
// KetamaHashingMap then refresh
if (hashingSizeDiff) {
if (log.isDebugEnabled()) log.debug("AppName :" + _appName + "; ServerGroup : " + serverGroup
+ "; PoolSize : " + size + "; ActiveConnections : " + activeServerCount
+ "; InactiveConnections : " + inActiveServerCount + "; InDiscovery : " + sizeInDiscovery
+ "; InHashing : " + sizeInHashing + "; hashingSizeDiff : " + hashingSizeDiff
+ ". Since there is a diff in hashing size will shutdown the client and init it again.");
getConfigGauge(EVCacheMetricsFactory.POOL_CHANGED, serverGroup).set(Long.valueOf(4));
return true;
}
// 5. If a host is in not discovery and we have an active connection to it for more than 20 mins then we will refresh
// Typical case is we have replaced an existing node but it has zombie. We are able to connect to it (hypervisor) but not talk to it
// or prana has shutdown successfully but not memcached. In such scenario we will refresh the cluster
for(InetSocketAddress instance : connectionObserver.getActiveServers().keySet()) {
if(!discoveredHostsInServerGroup.contains(instance)) {
if(!evCacheDiscoveryConnectionLostSet.containsKey(instance)) {
evCacheDiscoveryConnectionLostSet.put(instance, Long.valueOf(currentTime));
if (log.isDebugEnabled()) log.debug("AppName :" + _appName + "; ServerGroup : " + serverGroup
+ "; instance : " + instance + " not found in discovery. We will add to our list and monitor it.");
} else {
long lostDur = (currentTime - evCacheDiscoveryConnectionLostSet.get(instance).longValue());
if (lostDur >= 1200000) {
if (log.isDebugEnabled()) log.debug("AppName :" + _appName + "; ServerGroup : " + serverGroup
+ "; instance : " + instance + " not found in discovery for the past 20 mins and will shutdown the client and init it again.");
getConfigGauge(EVCacheMetricsFactory.POOL_CHANGED, serverGroup).set(Long.valueOf(5));
evCacheDiscoveryConnectionLostSet.remove(instance);
return true;
} else {
if (log.isDebugEnabled()) log.debug("AppName :" + _appName + "; ServerGroup : " + serverGroup
+ "; instance : " + instance + " not found in discovery for " + lostDur + " msec.");
}
}
}
}
// 9. If we have removed all instances or took them OOS in a
// ServerGroup then shutdown the client
if (sizeInDiscovery == 0) {
if (activeServerCount == 0 || inActiveServerCount > activeServerCount) {
if (log.isDebugEnabled()) log.debug("AppName :" + _appName + "; ServerGroup : " + serverGroup
+ "; Will shutdown the client since there are no active servers and no servers for this ServerGroup in disocvery.");
getConfigGauge(EVCacheMetricsFactory.POOL_CHANGED, serverGroup).set(Long.valueOf(9));
return true;
}
}
}
getConfigGauge(EVCacheMetricsFactory.POOL_CHANGED, serverGroup).set(Long.valueOf(0));
}
reportPoolConifg();
return false;
}
private List<InetSocketAddress> getMemcachedSocketAddressList(final Set<InetSocketAddress> discoveredHostsInZone) {
final List<InetSocketAddress> memcachedNodesInZone = new ArrayList<InetSocketAddress>();
for (InetSocketAddress hostAddress : discoveredHostsInZone) {
memcachedNodesInZone.add(hostAddress);
}
return memcachedNodesInZone;
}
private void shutdownClientsInZone(List<EVCacheClient> clients) {
if (clients == null || clients.isEmpty()) return;
// Shutdown the old clients in 60 seconds, this will give ample time to
// cleanup anything pending in its queue
for (EVCacheClient oldClient : clients) {
try {
final boolean obsRemoved = oldClient.removeConnectionObserver();
if (log.isDebugEnabled()) log.debug("Connection observer removed " + obsRemoved);
final boolean status = oldClient.shutdown(60, TimeUnit.SECONDS);
if (log.isDebugEnabled()) log.debug("Shutting down -> Client {" + oldClient.toString() + "}; status : "
+ status);
} catch (Exception ex) {
log.error("Exception while shutting down the old Client", ex);
}
}
}
    /**
     * Installs {@code newClients} as the active clients for {@code serverGroup}, updates
     * the read/write maps (honoring the group's write-only property), rebuilds the
     * precomputed write-client array, and shuts down the clients that were replaced.
     *
     * @param serverGroup the server group being (re)initialized
     * @param newClients  freshly created clients for that group
     */
    private void setupNewClientsByServerGroup(ServerGroup serverGroup, List<EVCacheClient> newClients) {
        final List<EVCacheClient> currentClients = memcachedInstancesByServerGroup.put(serverGroup, newClients);
        // if the zone is in write only mode then remove it from the Map
        final Property<Boolean> isZoneInWriteOnlyMode = writeOnlyFastPropertyMap.get(serverGroup);
        if (isZoneInWriteOnlyMode.get().booleanValue()) {
            memcachedReadInstancesByServerGroup.remove(serverGroup);
        } else {
            memcachedReadInstancesByServerGroup.put(serverGroup, newClients);
        }
        // writes always go to the group regardless of the write-only flag
        memcachedWriteInstancesByServerGroup.put(serverGroup, newClients);
        setupAllEVCacheWriteClientsArray();
        if (currentClients == null || currentClients.isEmpty()) return;
        // Now since we have replace the old instances shutdown all the old
        // clients
        if (log.isDebugEnabled()) log.debug("Replaced an existing Pool for ServerGroup : " + serverGroup + "; and app "
                + _appName + " ;\n\tOldClients : " + currentClients + ";\n\tNewClients : " + newClients);
        for (EVCacheClient client : currentClients) {
            if (!client.isShutdown()) {
                if (log.isDebugEnabled()) log.debug("Shutting down in Fallback -> AppName : " + _appName
                        + "; ServerGroup : " + serverGroup + "; client {" + client + "};");
                try {
                    // detach the observer before shutdown so it stops receiving events
                    if (client.getConnectionObserver() != null) {
                        final boolean obsRemoved = client.removeConnectionObserver();
                        if (log.isDebugEnabled()) log.debug("Connection observer removed " + obsRemoved);
                    }
                    final boolean status = client.shutdown(5, TimeUnit.SECONDS);
                    if (log.isDebugEnabled()) log.debug("Shutting down {" + client + "} ; status : " + status);
                } catch (Exception ex) {
                    log.error("Exception while shutting down the old Client", ex);
                }
            }
        }
        // Paranoid Here. Even though we have shutdown the old clients do it
        // again as we noticed issues while shutting down MemcachedNodes
        shutdownClientsInZone(currentClients);
    }
    // Check whether a server group has been moved to write-only mode; if so, remove it
    // from the read map. Similarly, if a group has been moved back to read+write from
    // write-only, add it back to the read map.
    /**
     * Reconciles the read map with each server group's write-only property and health,
     * then rebuilds the fallback and per-zone read iterators when group membership changed.
     *
     * <p>A group is removed from reads when its write-only property is set, or when its
     * first client reports more inactive than active connections (losing over half the
     * instances effectively puts the group in write-only mode).
     */
    private void updateMemcachedReadInstancesByZone() {
        for (ServerGroup serverGroup : memcachedInstancesByServerGroup.keySet()) {
            final Property<Boolean> isZoneInWriteOnlyMode = writeOnlyFastPropertyMap.get(serverGroup);
            if (isZoneInWriteOnlyMode.get().booleanValue()) {
                if (memcachedReadInstancesByServerGroup.containsKey(serverGroup)) {
                    memcachedReadInstancesByServerGroup.remove(serverGroup);
                }
            } else {
                if (!memcachedReadInstancesByServerGroup.containsKey(serverGroup)) {
                    memcachedReadInstancesByServerGroup.put(serverGroup, memcachedInstancesByServerGroup.get(serverGroup));
                }
            }
            // if we lose over 50% of instances put that zone in writeonly mode.
            final List<EVCacheClient> clients = memcachedReadInstancesByServerGroup.get(serverGroup);
            if (clients != null && !clients.isEmpty()) {
                // health is sampled from the first client's connection observer only
                final EVCacheClient client = clients.get(0);
                if (client != null) {
                    final EVCacheConnectionObserver connectionObserver = client.getConnectionObserver();
                    if (connectionObserver != null) {
                        final int activeServerCount = connectionObserver.getActiveServerCount();
                        final int inActiveServerCount = connectionObserver.getInActiveServerCount();
                        if (inActiveServerCount > activeServerCount) {
                            memcachedReadInstancesByServerGroup.remove(serverGroup);
                            getConfigGauge(EVCacheMetricsFactory.POOL_SERVERGROUP_STATUS, serverGroup).set(Long.valueOf(1));
                        } else {
                            getConfigGauge(EVCacheMetricsFactory.POOL_SERVERGROUP_STATUS, serverGroup).set(Long.valueOf(2));
                        }
                    }
                }
            } else {
                // group serves writes only (present in the instance map, absent from reads)
                final List<EVCacheClient> clientsWrite = memcachedInstancesByServerGroup.get(serverGroup);
                if (clientsWrite != null && !clientsWrite.isEmpty()) {
                    getConfigGauge(EVCacheMetricsFactory.POOL_SERVERGROUP_STATUS, serverGroup).set(Long.valueOf(0));
                }
            }
        }
        // membership changed: rebuild the fallback iterator and the per-zone iterators
        if (memcachedReadInstancesByServerGroup.size() != memcachedFallbackReadInstances.getSize()) {
            memcachedFallbackReadInstances = new ServerGroupCircularIterator(memcachedReadInstancesByServerGroup.keySet());
            Map<String, Set<ServerGroup>> readServerGroupByZoneMap = new ConcurrentHashMap<String, Set<ServerGroup>>();
            for (ServerGroup serverGroup : memcachedReadInstancesByServerGroup.keySet()) {
                Set<ServerGroup> serverGroupList = readServerGroupByZoneMap.get(serverGroup.getZone());
                if (serverGroupList == null) {
                    serverGroupList = new HashSet<ServerGroup>();
                    readServerGroupByZoneMap.put(serverGroup.getZone(), serverGroupList);
                }
                serverGroupList.add(serverGroup);
            }
            Map<String, ServerGroupCircularIterator> _readServerGroupByZone = new ConcurrentHashMap<String, ServerGroupCircularIterator>();
            for (Entry<String, Set<ServerGroup>> readServerGroupByZoneEntry : readServerGroupByZoneMap.entrySet()) {
                _readServerGroupByZone.put(readServerGroupByZoneEntry.getKey(), new ServerGroupCircularIterator(readServerGroupByZoneEntry.getValue()));
            }
            // publish the fully built map before repointing the local iterator
            this.readServerGroupByZone = _readServerGroupByZone;
            localServerGroupIterator = readServerGroupByZone.get(_zone);
        }
    }
private void cleanupMemcachedInstances(boolean force) {
pingServers();
for (Iterator<Entry<ServerGroup, List<EVCacheClient>>> it = memcachedInstancesByServerGroup.entrySet().iterator(); it.hasNext();) {
final Entry<ServerGroup, List<EVCacheClient>> serverGroupEntry = it.next();
final List<EVCacheClient> instancesInAServerGroup = serverGroupEntry.getValue();
boolean removeEntry = false;
for (EVCacheClient client : instancesInAServerGroup) {
final EVCacheConnectionObserver connectionObserver = client.getConnectionObserver();
if (connectionObserver.getActiveServerCount() == 0 && connectionObserver.getInActiveServerCount() > 0) {
removeEntry = true;
}
}
if (force || removeEntry) {
final ServerGroup serverGroup = serverGroupEntry.getKey();
memcachedReadInstancesByServerGroup.remove(serverGroup);
memcachedWriteInstancesByServerGroup.remove(serverGroup);
for (EVCacheClient client : instancesInAServerGroup) {
if (log.isDebugEnabled()) log.debug("\n\tApp : " + _appName + "\n\tServerGroup : " + serverGroup + " has no active servers. Cleaning up this ServerGroup.");
client.shutdown(0, TimeUnit.SECONDS);
client.getConnectionObserver().shutdown();
}
it.remove();
allEVCacheWriteClients = null;
}
}
}
private synchronized void refresh(boolean force) throws IOException {
final long start = System.currentTimeMillis();
if (log.isDebugEnabled()) log.debug("refresh APP : " + _appName + "; force : " + force);
try {
final Map<ServerGroup, EVCacheServerGroupConfig> instances = provider.discoverInstances(_appName);
if (log.isDebugEnabled()) log.debug("instances : " + instances);
// if no instances are found check to see if a clean up is needed
// and bail immediately.
if (instances == null || instances.isEmpty()) {
if (!memcachedInstancesByServerGroup.isEmpty()) cleanupMemcachedInstances(false);
return;
}
for(ServerGroup serverGroup : memcachedInstancesByServerGroup.keySet()) {
if(!instances.containsKey(serverGroup)) {
if (log.isDebugEnabled()) log.debug("\n\tApp : " + _appName + "\n\tServerGroup : " + serverGroup
+ " does not exist or is not enabled or is out of service. We will shutdown this client and remove it.");
serverGroupDisabled(serverGroup);
}
}
boolean updateAllEVCacheWriteClients = false;
for (Entry<ServerGroup, EVCacheServerGroupConfig> serverGroupEntry : instances.entrySet()) {
final ServerGroup serverGroup = serverGroupEntry.getKey();
final EVCacheServerGroupConfig config = serverGroupEntry.getValue();
final Set<InetSocketAddress> discoverdInstanceInServerGroup = config.getInetSocketAddress();
final String zone = serverGroup.getZone();
final Set<InetSocketAddress> discoveredHostsInServerGroup = (discoverdInstanceInServerGroup == null)
? Collections.<InetSocketAddress> emptySet() : discoverdInstanceInServerGroup;
if (log.isDebugEnabled()) log.debug("\n\tApp : " + _appName + "\n\tServerGroup : " + serverGroup
+ "\n\tSize : " + discoveredHostsInServerGroup.size()
+ "\n\tInstances in ServerGroup : " + discoveredHostsInServerGroup);
if (discoveredHostsInServerGroup.size() == 0 && memcachedInstancesByServerGroup.containsKey(serverGroup)) {
if (log.isDebugEnabled()) log.debug("\n\tApp : " + _appName + "\n\tServerGroup : " + serverGroup
+ " has no active servers. Cleaning up this ServerGroup.");
serverGroupDisabled(serverGroup);
continue;
}
boolean instanceChangeInServerGroup = force;
if (instanceChangeInServerGroup) {
if (log.isWarnEnabled()) log.warn("FORCE REFRESH :: AppName :" + _appName + "; ServerGroup : "
+ serverGroup + "; Changed : " + instanceChangeInServerGroup);
} else {
instanceChangeInServerGroup = haveInstancesInServerGroupChanged(serverGroup, discoveredHostsInServerGroup);
if (log.isDebugEnabled()) log.debug("\n\tApp : " + _appName + "\n\tServerGroup : " + serverGroup
+ "\n\tinstanceChangeInServerGroup : " + instanceChangeInServerGroup);
if (!instanceChangeInServerGroup) {
// quick exit as everything looks fine. No new instances
// found and were inactive
if (log.isDebugEnabled()) log.debug("AppName :" + _appName + "; ServerGroup : " + serverGroup
+ "; Changed : " + instanceChangeInServerGroup);
continue;
}
}
// Let us create a list of SocketAddress from the discovered
// instances in zone
final List<InetSocketAddress> memcachedSAInServerGroup = getMemcachedSocketAddressList(discoveredHostsInServerGroup);
if (memcachedSAInServerGroup.size() > 0) {
// now since there is a change with the instances in the
// zone. let us go ahead and create a new EVCacheClient with
// the new settings
final int poolSize = _poolSize.get();
final List<EVCacheClient> newClients = new ArrayList<EVCacheClient>(poolSize);
for (int i = 0; i < poolSize; i++) {
final int maxQueueSize = EVCacheConfig.getInstance().getPropertyRepository().get(_appName + ".max.queue.length", Integer.class).orElse(16384).get();
EVCacheClient client;
try {
client = new EVCacheClient(_appName, zone, i, config, memcachedSAInServerGroup, maxQueueSize,
_maxReadQueueSize, _readTimeout, _bulkReadTimeout, _opQueueMaxBlockTime, _operationTimeout, this, isDuet);
newClients.add(client);
final int id = client.getId();
if (log.isDebugEnabled()) log.debug("AppName :" + _appName + "; ServerGroup : " + serverGroup + "; intit : client.getId() : " + id);
lastReconcileTime = System.currentTimeMillis();
} catch (Exception e) {
incrementFailure(EVCacheMetricsFactory.INTERNAL_POOL_INIT_ERROR, config.getServerGroup());
log.error("Unable to create EVCacheClient for app - " + _appName + " and Server Group - " + serverGroup.getName(), e);
}
}
if (newClients.size() > 0) {
setupNewClientsByServerGroup(serverGroup, newClients);
updateAllEVCacheWriteClients = true;
}
}
}
if(updateAllEVCacheWriteClients) {
setupAllEVCacheWriteClientsArray();
}
// Check to see if a zone has been removed, if so remove them from
// the active list
if (memcachedInstancesByServerGroup.size() > instances.size()) {
if (log.isDebugEnabled()) log.debug("\n\tAppName :" + _appName + ";\n\tServerGroup Discovered : " + instances.keySet()
+ ";\n\tCurrent ServerGroup in EVCache Client : " + memcachedInstancesByServerGroup.keySet());
cleanupMemcachedInstances(false);
}
updateMemcachedReadInstancesByZone();
updateQueueStats();
if (_pingServers.get()) pingServers();
} catch (Throwable t) {
log.error("Exception while refreshing the Server list", t);
} finally {
EVCacheMetricsFactory.getInstance().getPercentileTimer(EVCacheMetricsFactory.INTERNAL_POOL_REFRESH, tagList, Duration.ofMillis(100)).record(System.currentTimeMillis() - start, TimeUnit.MILLISECONDS);
}
if (log.isDebugEnabled()) log.debug("refresh APP : " + _appName + "; DONE");
}
    /**
     * Rebuilds {@code allEVCacheWriteClients}: a circular iterator over arrays of write
     * clients, one array per pool-connection index, each array holding one client per
     * server group. A write operation can then pick one array and hit every server group
     * through a single connection index.
     */
    private void setupAllEVCacheWriteClientsArray() {
        final List<EVCacheClient[]> newClients = new ArrayList<EVCacheClient[]>(_poolSize.get());
        try {
            final int serverGroupSize = memcachedWriteInstancesByServerGroup.size();
            // ind = connection index within a server group's client list; i = slot in the per-index array.
            for(int ind = 0; ind < _poolSize.get(); ind++) {
                final EVCacheClient[] clientArr = new EVCacheClient[serverGroupSize];
                int i = 0;
                for (ServerGroup serverGroup : memcachedWriteInstancesByServerGroup.keySet()) {
                    final List<EVCacheClient> clients = memcachedWriteInstancesByServerGroup.get(serverGroup);
                    if(clients.size() > ind) {
                        clientArr[i++] = clients.get(ind); // frequently used usecase
                    } else {
                        // Server group has fewer clients than the configured pool size;
                        // fall back to its first client so the array slot is still usable.
                        log.warn("Incorrect pool size detected for AppName : " + _appName + "; PoolSize " + _poolSize.get() + "; serverGroup : " + serverGroup + "; ind : " + ind + "; i : " + i);
                        if(clients.size() > 0) {
                            clientArr[i++] = clients.get(0);
                        }
                    }
                }
                newClients.add(clientArr);
            }
            // Swap in the freshly built iterator atomically via reference assignment.
            this.allEVCacheWriteClients = new CircularIterator<EVCacheClient[]>(newClients);
        } catch (Throwable t) {
            log.error("Exception trying to create an array of writable EVCache Instances for App : " + _appName, t);
        }
    }
    /**
     * Publishes per-client read/write queue-length gauges and, when the
     * refresh-on-full-read-queue feature is enabled, reconnects any node whose read
     * queue has reached the configured threshold.
     */
    private void updateQueueStats() {
        for (ServerGroup serverGroup : memcachedInstancesByServerGroup.keySet()) {
            List<EVCacheClient> clients = memcachedInstancesByServerGroup.get(serverGroup);
            for(EVCacheClient client : clients) {
                getStatsGauge(EVCacheMetricsFactory.POOL_WRITE_Q_SIZE, client).set(Long.valueOf(client.getWriteQueueLength()));
                getStatsGauge(EVCacheMetricsFactory.POOL_READ_Q_SIZE, client).set(Long.valueOf(client.getReadQueueLength()));
                if(refreshConnectionOnReadQueueFull.get()) {
                    final Collection<MemcachedNode> allNodes = client.getNodeLocator().getAll();
                    for (MemcachedNode node : allNodes) {
                        if (node instanceof EVCacheNode) {
                            final EVCacheNode evcNode = ((EVCacheNode) node);
                            // Read queue saturated: count the event and force a reconnect of this node.
                            if(evcNode.getReadQueueSize() >= refreshConnectionOnReadQueueFullSize.get().intValue()) {
                                EVCacheMetricsFactory.getInstance().getCounter(EVCacheMetricsFactory.POOL_REFRESH_QUEUE_FULL, evcNode.getTags()).increment();
                                client.getEVCacheMemcachedClient().reconnectNode(evcNode);
                            }
                        }
                    }
                }
            }
        }
    }
public void pingServers() {
try {
final Map<ServerGroup, List<EVCacheClient>> allServers = getAllInstancesByZone();
for (Entry<ServerGroup, List<EVCacheClient>> entry : allServers.entrySet()) {
final List<EVCacheClient> listOfClients = entry.getValue();
for (EVCacheClient client : listOfClients) {
final Map<SocketAddress, String> versions = client.getVersions();
for (Entry<SocketAddress, String> vEntry : versions.entrySet()) {
if (log.isDebugEnabled()) log.debug("Host : " + vEntry.getKey() + " : " + vEntry.getValue());
}
}
}
if (duetClientPool != null)
duetClientPool.pingServers();
} catch (Throwable t) {
log.error("Error while pinging the servers", t);
}
}
    /**
     * Removes a server group that is no longer active from every client map, rebuilds
     * the write-client iterator, and shuts its clients down with a 30-second grace
     * period so in-flight operations can drain. Also propagated to the duet pool.
     *
     * @param serverGroup the server group being decommissioned
     */
    public void serverGroupDisabled(final ServerGroup serverGroup) {
        if (memcachedInstancesByServerGroup.containsKey(serverGroup)) {
            if (log.isDebugEnabled()) log.debug("\n\tApp : " + _appName + "\n\tServerGroup : " + serverGroup
                    + " has no active servers. Cleaning up this ServerGroup.");
            // Remove from all maps BEFORE shutting down, so no new traffic is routed
            // to clients that are about to close.
            final List<EVCacheClient> clients = memcachedInstancesByServerGroup.remove(serverGroup);
            memcachedReadInstancesByServerGroup.remove(serverGroup);
            memcachedWriteInstancesByServerGroup.remove(serverGroup);
            setupAllEVCacheWriteClientsArray();
            for (EVCacheClient client : clients) {
                if (log.isDebugEnabled()) log.debug("\n\tApp : " + _appName + "\n\tServerGroup : " + serverGroup
                        + "\n\tClient : " + client + " will be shutdown in 30 seconds.");
                client.shutdown(30, TimeUnit.SECONDS);
                client.getConnectionObserver().shutdown();
            }
        }
        if (duetClientPool != null)
            duetClientPool.serverGroupDisabled(serverGroup);
    }
public void refreshAsync(MemcachedNode node) {
if (log.isInfoEnabled()) log.info("Pool is being refresh as the EVCacheNode is not available. " + node.toString());
if(!_disableAsyncRefresh.get()) {
if (node instanceof EVCacheNode) {
final EVCacheNode evcNode = ((EVCacheNode) node);
EVCacheMetricsFactory.getInstance().getCounter(EVCacheMetricsFactory.POOL_REFRESH_ASYNC, evcNode.getTags()).increment();
}
boolean force = (System.currentTimeMillis() - lastReconcileTime) > ( manager.getDefaultRefreshInterval().get() * 1000 ) ? true : false;
if(!force) force = !node.isActive();
refreshPool(true, force);
}
if (duetClientPool != null)
duetClientPool.refreshAsync(node);
}
    /**
     * Scheduled/Runnable entry point: performs one pool refresh. Any failure is
     * logged at debug level and swallowed so the scheduling thread keeps running.
     */
    public void run() {
        try {
            refresh();
        } catch (Throwable t) {
            if (log.isDebugEnabled()) log.debug("Error Refreshing EVCache Instance list for " + _appName, t);
        }
    }
    /**
     * Shuts this pool down: marks it as shut down, disables (and gracefully closes)
     * every server group, then re-runs monitoring setup — which, with {@code _shutdown}
     * set, unregisters the pool's MBean.
     */
    void shutdown() {
        if (log.isDebugEnabled()) log.debug("EVCacheClientPool for App : " + _appName + " and Zone : " + _zone + " is being shutdown.")
;
        _shutdown = true;
        for(ServerGroup serverGroup : memcachedInstancesByServerGroup.keySet()) {
            if (log.isDebugEnabled()) log.debug("\nSHUTDOWN\n\tApp : " + _appName + "\n\tServerGroup : " + serverGroup);
            serverGroupDisabled(serverGroup);
        }
        setupMonitoring();
    }
    /**
     * Returns (creating and caching on first use) the Spectra gauge for a pool-config
     * metric, tagged with the app name and optionally the server group.
     * NOTE(review): the get/put on gaugeMap is not atomic, so two threads may build
     * the gauge concurrently — presumably benign since the registry is keyed by Id,
     * but confirm gaugeMap is a concurrent map.
     *
     * @param metric      config metric name
     * @param serverGroup server group to tag with, or null for an app-wide gauge
     */
    private Gauge getConfigGauge(String metric, ServerGroup serverGroup) {
        // Cache key also encodes write-only status so a mode flip creates a distinct entry.
        final String name = (serverGroup == null ? metric : metric + serverGroup.getName() + isInWriteOnly(serverGroup));
        Gauge gauge = gaugeMap.get(name );
        if(gauge != null) return gauge;
        final List<Tag> tags = new ArrayList<Tag>(5);
        EVCacheMetricsFactory.getInstance().addAppNameTags(tags, _appName);
        tags.add(new BasicTag(EVCacheMetricsFactory.CONFIG_NAME, metric));
        if(serverGroup != null) {
            tags.add(new BasicTag(EVCacheMetricsFactory.SERVERGROUP, serverGroup.getName()));
        }
        final Id id = EVCacheMetricsFactory.getInstance().getId(EVCacheMetricsFactory.INTERNAL_POOL_SG_CONFIG, tags);
        gauge = EVCacheMetricsFactory.getInstance().getRegistry().gauge(id);
        gaugeMap.put(name, gauge);
        return gauge;
    }
private Gauge getStatsGauge(String metric, EVCacheClient client) {
final String name = metric + client.getServerGroupName();
Gauge gauge = gaugeMap.get(name );
if(gauge != null) return gauge;
final List<Tag> tags = new ArrayList<Tag>(4);
EVCacheMetricsFactory.getInstance().addAppNameTags(tags, _appName);
tags.add(new BasicTag(EVCacheMetricsFactory.STAT_NAME, metric));
tags.add(new BasicTag(EVCacheMetricsFactory.CONNECTION_ID, String.valueOf(client.getId())));
tags.add(new BasicTag(EVCacheMetricsFactory.SERVERGROUP, client.getServerGroupName()));
final Id id = EVCacheMetricsFactory.getInstance().getId(EVCacheMetricsFactory.INTERNAL_STATS, tags);
gauge = EVCacheMetricsFactory.getInstance().getRegistry().gauge(id);
gaugeMap.put(name, gauge);
return gauge;
}
private void incrementFailure(String metric, ServerGroup serverGroup) {
final List<Tag> tags = new ArrayList<Tag>(4);
EVCacheMetricsFactory.getInstance().addAppNameTags(tags, _appName);
tags.add(new BasicTag(EVCacheMetricsFactory.CONFIG_NAME, metric));
tags.add(new BasicTag(EVCacheMetricsFactory.SERVERGROUP, serverGroup.getName()));
EVCacheMetricsFactory.getInstance().increment(EVCacheMetricsFactory.INTERNAL_POOL_INIT_ERROR, tags);
}
private void reportPoolConifg() {
final int size = getPoolSize();
for(ServerGroup key : memcachedInstancesByServerGroup.keySet()) {
getConfigGauge("poolSize", key).set(memcachedInstancesByServerGroup.get(key).size());
final EVCacheClient client = memcachedInstancesByServerGroup.get(key).get(0);
if(client != null) {
getConfigGauge("readTimeout", key).set(getReadTimeout().get());
getConfigGauge("bulkReadTimeout", key).set(getBulkReadTimeout().get());
getConfigGauge("numberOfServerGoups", key).set(memcachedInstancesByServerGroup.size());
getConfigGauge("maxReadQueueLength", key).set(_maxReadQueueSize.get());
getConfigGauge("instanceCount", key).set(client.getMemcachedNodesInZone().size());;
final EVCacheConnectionObserver connectionObserver = client.getConnectionObserver();
if(connectionObserver != null) {
final int activeServerCount = connectionObserver.getActiveServerCount();
final int inActiveServerCount = connectionObserver.getInActiveServerCount();
final int sizeInHashing = client.getNodeLocator().getAll().size();
getConfigGauge("activeServerCount", key).set(Long.valueOf(activeServerCount));
getConfigGauge("activeConnectionCount", key).set(Long.valueOf(activeServerCount * size));
getConfigGauge("inActiveServerCount", key).set(Long.valueOf(inActiveServerCount));
getConfigGauge("sizeInHashing", key).set(Long.valueOf(sizeInHashing));
}
final List<EVCacheClient> readClients = memcachedReadInstancesByServerGroup.get(key);
if (readClients != null && readClients.size() > 0) {
getConfigGauge(EVCacheMetricsFactory.POOL_READ_INSTANCES, key).set(Long.valueOf(readClients.get(0).getConnectionObserver().getActiveServerCount()));
}
final List<EVCacheClient> writeClients = memcachedWriteInstancesByServerGroup.get(key);
if (writeClients != null && writeClients.size() > 0) {
getConfigGauge(EVCacheMetricsFactory.POOL_WRITE_INSTANCES, key).set(Long.valueOf(writeClients.get(0).getConnectionObserver().getActiveServerCount()));
}
}
}
}
    /**
     * (Re)registers this pool's JMX MBean. An existing registration under the same
     * name is unregistered first; when the pool is shut down the MBean is only
     * removed, not re-registered. Failures are logged at debug level and ignored.
     */
    private void setupMonitoring() {
        try {
            final ObjectName mBeanName = ObjectName.getInstance("com.netflix.evcache:Group=" + _appName
                    + ",SubGroup=pool");
            final MBeanServer mbeanServer = ManagementFactory.getPlatformMBeanServer();
            if (mbeanServer.isRegistered(mBeanName)) {
                if (log.isDebugEnabled()) log.debug("MBEAN with name " + mBeanName + " has been registered. Will unregister the previous instance and register a new one.");
                mbeanServer.unregisterMBean(mBeanName);
            }
            // Skip re-registration once the pool has been shut down.
            if (!_shutdown) {
                mbeanServer.registerMBean(this, mBeanName);
            }
        } catch (Exception e) {
            if (log.isDebugEnabled()) log.debug("Exception", e);
        }
    }
public int getInstanceCount() {
int instances = 0;
for (ServerGroup serverGroup : memcachedInstancesByServerGroup.keySet()) {
instances += memcachedInstancesByServerGroup.get(serverGroup).get(0).getConnectionObserver().getActiveServerCount();
}
if (duetClientPool != null)
instances += duetClientPool.getInstanceCount();
return instances;
}
public Map<String, String> getInstancesByZone() {
Map<String, String> instanceMap = new HashMap<String, String>();
for (ServerGroup zone : memcachedInstancesByServerGroup.keySet()) {
final List<EVCacheClient> instanceList = memcachedInstancesByServerGroup.get(zone);
instanceMap.put(zone.toString(), instanceList.toString());
}
if (duetClientPool != null)
instanceMap.putAll(duetClientPool.getInstancesByZone());
return instanceMap;
}
public Map<String, Integer> getInstanceCountByZone() {
final Map<String, Integer> instancesByZone = new HashMap<String, Integer>(memcachedInstancesByServerGroup.size() * 2);
for (ServerGroup zone : memcachedInstancesByServerGroup.keySet()) {
instancesByZone.put(zone.getName(), Integer.valueOf(memcachedInstancesByServerGroup.get(zone).get(0).getConnectionObserver().getActiveServerCount()));
}
if (duetClientPool != null)
instancesByZone.putAll(duetClientPool.getInstanceCountByZone());
return instancesByZone;
}
public Map<String, String> getReadZones() {
final Map<String, String> instanceMap = new HashMap<String, String>();
for (ServerGroup key : memcachedReadInstancesByServerGroup.keySet()) {
instanceMap.put(key.getName(), memcachedReadInstancesByServerGroup.get(key).toString());
}
if (duetClientPool != null)
instanceMap.putAll(duetClientPool.getReadZones());
return instanceMap;
}
public Map<String, Integer> getReadInstanceCountByZone() {
final Map<String, Integer> instanceMap = new HashMap<String, Integer>();
for (ServerGroup key : memcachedReadInstancesByServerGroup.keySet()) {
instanceMap.put(key.getName(), Integer.valueOf(memcachedReadInstancesByServerGroup.get(key).get(0)
.getConnectionObserver().getActiveServerCount()));
}
if (duetClientPool != null)
instanceMap.putAll(duetClientPool.getReadInstanceCountByZone());
return instanceMap;
}
public Map<String, String> getWriteZones() {
final Map<String, String> instanceMap = new HashMap<String, String>();
for (ServerGroup key : memcachedWriteInstancesByServerGroup.keySet()) {
instanceMap.put(key.toString(), memcachedWriteInstancesByServerGroup.get(key).toString());
}
if (duetClientPool != null)
instanceMap.putAll(duetClientPool.getWriteZones());
return instanceMap;
}
    /** Read-only view of this pool's own (non-duet) clients keyed by server group. */
    private Map<ServerGroup, List<EVCacheClient>> getAllInstancesByZoneInternal() {
        return Collections.unmodifiableMap(memcachedInstancesByServerGroup);
    }

    /** All clients keyed by server group; merges the duet pool's map into an unmodifiable copy when a duet exists. */
    public Map<ServerGroup, List<EVCacheClient>> getAllInstancesByZone() {
        if (duetClientPool != null) {
            Map<ServerGroup, List<EVCacheClient>> allInstanceMap = new ConcurrentHashMap<>();
            allInstanceMap.putAll(getAllInstancesByZoneInternal());
            allInstanceMap.putAll(duetClientPool.getAllInstancesByZone());
            return Collections.unmodifiableMap(allInstanceMap);
        }
        return getAllInstancesByZoneInternal();
    }

    /** Package-private direct access to this pool's live (mutable) client map — callers must not modify it. */
    Map<ServerGroup, List<EVCacheClient>> getAllInstancesByServerGroupInternal() {
        return memcachedInstancesByServerGroup;
    }

    /** All clients by server group; returns a merged copy when a duet pool exists, otherwise the live map. */
    public Map<ServerGroup, List<EVCacheClient>> getAllInstancesByServerGroup() {
        if (duetClientPool == null) {
            return getAllInstancesByServerGroupInternal();
        }
        Map<ServerGroup, List<EVCacheClient>> allInstancesByServerGroup = new ConcurrentHashMap<>();
        allInstancesByServerGroup.putAll(getAllInstancesByServerGroupInternal());
        allInstancesByServerGroup.putAll(duetClientPool.getAllInstancesByServerGroup());
        return allInstancesByServerGroup;
    }
    /** This pool's own write-group active-server counts, keyed by server group (string form). */
    private Map<String, Integer> getWriteInstanceCountByZoneInternal() {
        final Map<String, Integer> instanceMap = new HashMap<String, Integer>();
        for (ServerGroup key : memcachedWriteInstancesByServerGroup.keySet()) {
            instanceMap.put(key.toString(), Integer.valueOf(memcachedWriteInstancesByServerGroup.get(key).get(0).getConnectionObserver().getActiveServerCount()));
        }
        return instanceMap;
    }

    /** Write-group active-server counts, merging in the duet pool's counts when present. */
    public Map<String, Integer> getWriteInstanceCountByZone() {
        Map<String, Integer> instanceMap = getWriteInstanceCountByZoneInternal();
        if (duetClientPool != null)
            instanceMap.putAll(duetClientPool.getWriteInstanceCountByZone());
        return instanceMap;
    }

    /** This pool's own zone → read-server-group mapping in string form. */
    private Map<String, String> getReadServerGroupByZoneInternal() {
        final Map<String, String> instanceMap = new HashMap<String, String>();
        for (String key : readServerGroupByZone.keySet()) {
            instanceMap.put(key, readServerGroupByZone.get(key).toString());
        }
        return instanceMap;
    }

    /** Zone → read-server-group mapping, merging in the duet pool's mapping when present. */
    public Map<String, String> getReadServerGroupByZone() {
        Map<String, String> instanceMap = getReadServerGroupByZoneInternal();
        if (duetClientPool != null)
            instanceMap.putAll(duetClientPool.getReadServerGroupByZone());
        return instanceMap;
    }
public void refreshPool() {
refreshPool(false, true);
if (duetClientPool != null)
duetClientPool.refreshPool(false, true);
}
    /**
     * Refreshes the pool, either inline or on the async-refresh executor.
     *
     * @param async when true, submit the refresh to the executor — but only if its
     *              queue is empty, so refresh requests do not pile up; otherwise run inline
     * @param force when true, rebuild clients even if the discovered instance set is unchanged
     */
    public void refreshPool(boolean async, boolean force) {
        if (log.isDebugEnabled()) log.debug("Refresh Pool : async : " + async + "; force : " + force);
        try {
            if(async && asyncRefreshExecutor.getQueue().size() == 0) {
                asyncRefreshExecutor.submit(new Runnable() {
                    @Override
                    public void run() {
                        try {
                            refresh(force);
                        } catch (Exception e) {
                            log.error(e.getMessage(), e);
                        }
                    }
                });
            } else {
                refresh(force);
            }
        } catch (Throwable t) {
            if (log.isDebugEnabled()) log.debug("Error Refreshing EVCache Instance list from MBean : " + _appName, t);
        }
        if (duetClientPool != null)
            duetClientPool.refreshPool(async, force);
    }
    /** String form of the fallback read instances; falls through to the duet pool only when this pool has none. */
    public String getFallbackServerGroup() {
        if (memcachedFallbackReadInstances.getSize() != 0 || duetClientPool == null)
            return memcachedFallbackReadInstances.toString();
        return duetClientPool.getFallbackServerGroup();
    }

    /** True when more than one fallback read target exists here, or the duet pool is primary and supports fallback. */
    public boolean supportsFallback() {
        return memcachedFallbackReadInstances.getSize() > 1 || (duetClientPool != null && duetPrimary.get() && duetClientPool.supportsFallback());
    }

    /** True when operation-logging sampling is enabled (rate property > 0). */
    public boolean isLogEventEnabled() {
        return (logOperations.get() > 0);
    }
public boolean shouldLogOperation(String key, String op) {
if (!isLogEventEnabled()) return false;
if (!logOperationCalls.get().contains(op)) return false;
return key.hashCode() % 1000 <= logOperations.get();
}
    /** String form of the local server-group iterator, deferring to the duet pool (or "NONE") when unset. */
    @Override
    public String getLocalServerGroupCircularIterator() {
        return (localServerGroupIterator == null) ? (duetClientPool == null ? "NONE" : duetClientPool.getLocalServerGroupCircularIterator()) : localServerGroupIterator.toString();
    }

    /** String form of the write-clients iterator, deferring to the duet pool (or "NONE") when unset. */
    @Override
    public String getEVCacheWriteClientsCircularIterator() {
        return (allEVCacheWriteClients == null) ? (duetClientPool == null ? "NONE" : duetClientPool.getEVCacheWriteClientsCircularIterator()) : allEVCacheWriteClients.toString();
    }

    /** Full human-readable dump of this pool's state (same as {@link #toString()}). */
    public String getPoolDetails() {
        return toString();
    }
    /**
     * Multi-line diagnostic dump of the pool's configuration, client maps and
     * iterators; the duet pool's dump (when present) is appended at the end.
     */
    @Override
    public String toString() {
        return "\nEVCacheClientPool [\n\t_appName=" + _appName + ",\n\t_zone=" + _zone
                + ",\n\tlocalServerGroupIterator=" + localServerGroupIterator + ",\n\t_poolSize=" + _poolSize + ",\n\t_readTimeout=" + _readTimeout
                + ",\n\t_bulkReadTimeout=" + _bulkReadTimeout + ",\n\tlogOperations=" + logOperations + ",\n\t_opQueueMaxBlockTime=" + _opQueueMaxBlockTime
                + ",\n\t_operationTimeout=" + _operationTimeout + ",\n\t_maxReadQueueSize=" + _maxReadQueueSize + ",\n\t_pingServers=" + _pingServers
                + ",\n\twriteOnlyFastPropertyMap=" + writeOnlyFastPropertyMap + ",\n\tnumberOfModOps=" + numberOfModOps.get() + ",\n\t_shutdown=" + _shutdown
                + ",\n\tmemcachedInstancesByServerGroup=" + memcachedInstancesByServerGroup + ",\n\tmemcachedReadInstancesByServerGroup=" + memcachedReadInstancesByServerGroup
                + ",\n\tmemcachedWriteInstancesByServerGroup=" + memcachedWriteInstancesByServerGroup + ",\n\treadServerGroupByZone=" + readServerGroupByZone
                + ",\n\tmemcachedFallbackReadInstances=" + memcachedFallbackReadInstances + "\n]"
                + ", \n\tallEVCacheWriteClients=" + allEVCacheWriteClients
                + "\n]" + (duetClientPool == null ? "" : duetClientPool.toString());
    }
    /** Combined pool size of this pool and (when configured) the duet pool. */
    public int getPoolSize() {
        return _poolSize.get() + (duetClientPool == null ? 0 : duetClientPool.getPoolSize());
    }

    /** Per-mille sampling rate property for operation logging. */
    public Property<Integer> getLogOperations() {
        return logOperations;
    }

    /** Max time (ms) an operation may block waiting for queue space. */
    public Property<Integer> getOpQueueMaxBlockTime() {
        return _opQueueMaxBlockTime;
    }

    /** Operation timeout property; the duet pool's value wins when it is primary. */
    public Property<Integer> getOperationTimeout() {
        if (duetClientPool !=null && duetPrimary.get()) {
            return duetClientPool.getOperationTimeout();
        }
        return _operationTimeout;
    }

    /** Max read-queue length before reads are rejected. */
    public Property<Integer> getMaxReadQueueSize() {
        return _maxReadQueueSize;
    }

    /** Whether servers are pinged during refresh. */
    public Property<Boolean> getPingServers() {
        return _pingServers;
    }

    /** Number of topology modifications observed by this pool. */
    public long getNumberOfModOps() {
        return numberOfModOps.get();
    }

    /** True once {@code shutdown()} has been called. */
    public boolean isShutdown() {
        return _shutdown;
    }

    /** Availability zone this pool runs in. */
    public String getZone() {
        return this._zone;
    }

    /** EVCache application name this pool serves. */
    public String getAppName() {
        return this._appName;
    }

    /** Manager that owns this pool. */
    public EVCacheClientPoolManager getEVCacheClientPoolManager() {
        return this.manager;
    }

    /** Per-server-group write-only flags, merged with the duet pool's map when present; unmodifiable. */
    public Map<ServerGroup, Property<Boolean>> getWriteOnlyFastPropertyMap() {
        if (duetClientPool != null) {
            Map<ServerGroup, Property<Boolean>> allMap = new ConcurrentHashMap<>();
            allMap.putAll(writeOnlyFastPropertyMap);
            allMap.putAll(duetClientPool.getWriteOnlyFastPropertyMap());
            return Collections.unmodifiableMap(allMap);
        }
        return Collections.unmodifiableMap(writeOnlyFastPropertyMap);
    }

    /** Read timeout property; the duet pool's value wins when it is primary. */
    public Property<Integer> getReadTimeout() {
        if (duetClientPool != null && duetPrimary.get()) {
            return duetClientPool.getReadTimeout();
        }
        return _readTimeout;
    }

    /** Bulk-read timeout property. */
    public Property<Integer> getBulkReadTimeout() {
        return _bulkReadTimeout;
    }
/*
* This method is helpful in cases where there is typically a large backlog of work queued up, and is
* expensive to loose all that work when a client is shut down.
* Block the thread until all the queues are processed or at most 30 seconds.
* Will return the count of items left in the queues. 0 means none left.
*/
public int join() {
int size = 0;
int counter = 0;
do {
for(List<EVCacheClient> clientList : getAllInstancesByServerGroup().values()) {
for(EVCacheClient client : clientList) {
size +=client.getWriteQueueLength();
size +=client.getReadQueueLength();
}
}
if(size > 0) {
try {
Thread.sleep(10);
} catch (InterruptedException e) {
log.error("");
}
}
if(counter++ > 3000) break;
} while(size > 0);
return size;
}
    /** Epoch millis of the last successful client reconciliation. */
    public long getLastReconcileTime() {
        return lastReconcileTime;
    }

    /** Set of operation names eligible for sampled logging. */
    public Property<Set<String>> getOperationToLog() {
        return logOperationCalls;
    }
}
| 86,014
| 50.816265
| 259
|
java
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.