code stringlengths 73 34.1k | label stringclasses 1 value |
|---|---|
// Creates a shadow copy of the given ZContext: it shares the underlying
// zmq context (the 'false' flag presumably marks the copy as non-owning --
// TODO confirm against the ZContext constructor) but tracks its own sockets,
// so it can be closed independently of the original.
// Socket-default options (linger and the HWM settings) are copied over.
public static ZContext shadow(ZContext ctx)
{
ZContext context = new ZContext(ctx.context, false, ctx.ioThreads);
context.linger = ctx.linger;
context.sndhwm = ctx.sndhwm;
context.rcvhwm = ctx.rcvhwm;
context.pipehwm = ctx.pipehwm;
return context;
} | java |
// Convenience wrapper: forks an attached runnable in a child thread using
// this context, returning the parent's end of the connecting PAIR pipe
// (see ZThread.fork for the actual mechanics).
public Socket fork(ZThread.IAttachedRunnable runnable, Object... args)
{
return ZThread.fork(this, runnable, args);
} | java |
public static ByteBuffer putUInt64(ByteBuffer buf, long value)
{
buf.put((byte) ((value >>> 56) & 0xff));
buf.put((byte) ((value >>> 48) & 0xff));
buf.put((byte) ((value >>> 40) & 0xff));
buf.put((byte) ((value >>> 32) & 0xff));
buf.put((byte) ((value >>> 24) & 0xff));
buf.put((byte) ((value >>> 16) & 0xff));
buf.put((byte) ((value >>> 8) & 0xff));
buf.put((byte) ((value) & 0xff));
return buf;
} | java |
public static Socket fork(ZContext ctx, IAttachedRunnable runnable, Object... args)
{
Socket pipe = ctx.createSocket(SocketType.PAIR);
if (pipe != null) {
pipe.bind(String.format("inproc://zctx-pipe-%d", pipe.hashCode()));
}
else {
return null;
}
// Connect child pipe to our pipe
ZContext ccontext = ZContext.shadow(ctx);
Socket cpipe = ccontext.createSocket(SocketType.PAIR);
if (cpipe == null) {
return null;
}
cpipe.connect(String.format("inproc://zctx-pipe-%d", pipe.hashCode()));
// Prepare child thread
Thread shim = new ShimThread(ccontext, runnable, args, cpipe);
shim.start();
return pipe;
} | java |
// Kicks off a TCP connect attempt. On synchronous success the connect
// event fires immediately; otherwise the fd is polled for connect
// completion. Any failure schedules an eventual reconnect.
private void startConnecting()
{
// Open the connecting socket.
try {
boolean rc = open();
// Connect may succeed in synchronous manner.
if (rc) {
handle = ioObject.addFd(fd);
connectEvent();
}
// Connection establishment may be delayed. Poll for its completion.
else {
handle = ioObject.addFd(fd);
ioObject.setPollConnect(handle);
socket.eventConnectDelayed(addr.toString(), -1);
}
}
catch (RuntimeException | IOException e) {
// Handle any other error condition by eventual reconnect.
// fd may be null if open() failed before creating the channel.
if (fd != null) {
close();
}
addReconnectTimer();
}
} | java |
// Schedules the reconnect timer with a (backing-off, randomized) interval
// and re-resolves the address so the next attempt can pick up a different
// DNS entry. Emits the connect-retried monitor event.
private void addReconnectTimer()
{
int rcIvl = getNewReconnectIvl();
ioObject.addTimer(rcIvl, RECONNECT_TIMER_ID);
// resolve address again to take into account other addresses
// besides the failing one (e.g. multiple dns entries).
try {
addr.resolve(options.ipv6);
}
catch (Exception ignored) {
// This will fail if the network goes away and the
// address cannot be resolved for some reason. Try
// not to fail as the event loop will quit
}
socket.eventConnectRetried(addr.toString(), rcIvl);
timerStarted = true;
} | java |
// Computes the next reconnect interval: current interval plus a random
// jitter bounded by the configured base interval. When a max interval is
// configured, the stored interval doubles (exponential backoff) up to
// that cap for subsequent calls.
// NOTE(review): Utils.randomInt() % options.reconnectIvl would throw
// ArithmeticException if reconnectIvl were 0 -- presumably callers
// guarantee it is positive; confirm against the options defaults.
private int getNewReconnectIvl()
{
// The new interval is the current interval + random value.
int interval = currentReconnectIvl + (Utils.randomInt() % options.reconnectIvl);
// Only change the current reconnect interval if the maximum reconnect
// interval was set and if it's larger than the reconnect interval.
if (options.reconnectIvlMax > 0 && options.reconnectIvlMax > options.reconnectIvl) {
// Calculate the next interval
currentReconnectIvl = Math.min(currentReconnectIvl * 2, options.reconnectIvlMax);
}
return interval;
} | java |
// Creates and configures the non-blocking SocketChannel and starts the
// connect. Returns true if the connect completed synchronously; false
// (with errno set to EINPROGRESS) if it is in progress.
// Throws IOException for unresolvable addresses or bad socket addresses.
private boolean open() throws IOException
{
assert (fd == null);
// Resolve the address
if (addr == null) {
throw new IOException("Null address");
}
addr.resolve(options.ipv6);
Address.IZAddress resolved = addr.resolved();
if (resolved == null) {
throw new IOException("Address not resolved");
}
SocketAddress sa = resolved.address();
if (sa == null) {
throw new IOException("Socket address not resolved");
}
// Create the socket, optionally through a user-supplied selector chooser.
if (options.selectorChooser == null) {
fd = SocketChannel.open();
}
else {
fd = options.selectorChooser.choose(resolved, options).openSocketChannel();
}
// On some systems, IPv4 mapping in IPv6 sockets is disabled by default.
// Switch it on in such cases.
// The method enableIpv4Mapping is empty. Still to be written
if (resolved.family() == StandardProtocolFamily.INET6) {
TcpUtils.enableIpv4Mapping(fd);
}
// Set the socket to non-blocking mode so that we get async connect().
TcpUtils.unblockSocket(fd);
// Set the socket buffer limits for the underlying socket.
if (options.sndbuf != 0) {
TcpUtils.setTcpSendBuffer(fd, options.sndbuf);
}
if (options.rcvbuf != 0) {
TcpUtils.setTcpReceiveBuffer(fd, options.rcvbuf);
}
// Set the IP Type-Of-Service priority for this socket
if (options.tos != 0) {
TcpUtils.setIpTypeOfService(fd, options.tos);
}
// TODO V4 Set a source address for conversations
if (resolved.sourceAddress() != null) {
// SocketChannel bind = channel.bind(resolved.sourceAddress());
// if (bind == null) {
// return false;
// }
}
// Connect to the remote peer.
boolean rc;
try {
rc = fd.connect(sa);
if (rc) {
// Connect was successful immediately.
}
else {
// Translate error codes indicating asynchronous connect has been
// launched to a uniform EINPROGRESS.
errno.set(ZError.EINPROGRESS);
}
}
catch (IllegalArgumentException e) {
// this will happen if sa is bad. Address validation is not documented but
// I've found that IAE is thrown in openjdk as well as on android.
throw new IOException(e.getMessage(), e);
}
return rc;
} | java |
// Completes an asynchronous connect. Returns the connected channel, or
// null if finishing the connect raised an I/O error.
private SocketChannel connect()
{
try {
// Async connect has finished. Check whether an error occurred
boolean finished = fd.finishConnect();
assert (finished);
return fd;
}
catch (IOException e) {
return null;
}
} | java |
// Closes the connecting channel and emits the matching monitor event
// (closed on success, close-failed with the error code otherwise).
// fd is always reset to null so a new attempt can be made.
protected void close()
{
assert (fd != null);
try {
fd.close();
socket.eventClosed(addr.toString(), fd);
}
catch (IOException e) {
socket.eventCloseFailed(addr.toString(), ZError.exccode(e));
}
fd = null;
} | java |
// Deprecated pass-through: changes the interval of a timer owned by this
// timer set. Kept for backward compatibility; use Timer.setInterval.
@Deprecated
public boolean setInterval(Timer timer, long interval)
{
assert (timer.parent == this);
return timer.setInterval(interval);
} | java |
// Returns the time (ms) until the earliest live timer expires, 0 if it is
// already due, or -1 when no live timers remain. Dead timers encountered
// while scanning the (sorted) entries are pruned along the way.
public long timeout()
{
final long now = now();
for (Entry<Timer, Long> entry : entries()) {
final Timer timer = entry.getKey();
final Long expiration = entry.getValue();
if (timer.alive) {
// Live timer, lets return the timeout
if (expiration - now > 0) {
return expiration - now;
}
else {
return 0;
}
}
// Remove it from the list of active timers.
timers.remove(expiration, timer);
}
// Wait forever as no timers are alive
return -1;
} | java |
// Fires all timers that are due, re-inserting each before invoking its
// handler (so periodic timers stay scheduled). Dead timers are pruned.
// Returns the number of handlers executed.
public int execute()
{
int executed = 0;
final long now = now();
for (Entry<Timer, Long> entry : entries()) {
final Timer timer = entry.getKey();
final Long expiration = entry.getValue();
// Dead timer, lets remove it and continue
if (!timer.alive) {
// Remove it from the list of active timers.
timers.remove(expiration, timer);
continue;
}
// Map is ordered, if we have to wait for current timer we can stop.
if (expiration - now > 0) {
break;
}
// Re-schedule before firing, then trigger the handler.
insert(timer);
timer.handler.time(timer.args);
++executed;
}
return executed;
} | java |
// Factory for a ZProxy using the default low-level ZmqPump as the actor
// implementation; all other arguments are forwarded unchanged.
public static ZProxy newProxy(ZContext ctx, String name, Proxy sockets, String motdelafin, Object... args)
{
return new ZProxy(ctx, name, sockets, new ZmqPump(), motdelafin, args);
} | java |
public String restart(ZMsg hot)
{
ZMsg msg = new ZMsg();
msg.add(RESTART);
final boolean cold = hot == null;
if (cold) {
msg.add(Boolean.toString(false));
}
else {
msg.add(Boolean.toString(true));
msg.append(hot);
}
String status = EXITED;
if (agent.send(msg)) {
status = status(false);
}
return status;
} | java |
// Sends the EXIT command, waits for the agent to acknowledge termination,
// then releases the agent's resources. Always reports EXITED.
public String exit()
{
agent.send(EXIT);
exit.awaitSilent();
agent.close();
return EXITED;
} | java |
// Returns the proxy status. A STATUS request is kept "in flight" between
// calls: we first read the answer to the previous request, then (in sync
// mode) wait for the answer to the request just sent and immediately
// refill the pipeline with another request. Any failure maps to EXITED.
public String status(boolean sync)
{
if (exit.isExited()) {
return EXITED;
}
try {
String status = recvStatus();
if (agent.send(STATUS) && sync) {
// wait for the response to emulate sync
status = recvStatus();
// AND refill a status
if (EXITED.equals(status) || !agent.send(STATUS)) {
return EXITED;
}
}
return status;
}
catch (ZMQException e) {
return EXITED;
}
} | java |
// Receives one status response from the agent: first checks the signal,
// then pops the status string from the reply message. Any failure along
// the way is reported as EXITED.
private String recvStatus()
{
if (!agent.sign()) {
return EXITED;
}
// receive the status response
final ZMsg msg = agent.recv();
if (msg == null) {
return EXITED;
}
String status = msg.popString();
msg.destroy();
return status;
} | java |
// Checks whether the raw 32-byte Curve public key is known, by Z85-encoding
// it and delegating to the string overload. Rejects keys of any other size.
public boolean containsPublicKey(byte[] publicKey)
{
Utils.checkArgument(
publicKey.length == 32,
"publickey needs to have a size of 32 bytes. got only " + publicKey.length);
return containsPublicKey(ZMQ.Curve.z85Encode(publicKey));
} | java |
// Checks whether the Z85-encoded (40-char) Curve public key is present in
// the key store, reloading the store from disk first if it changed.
public boolean containsPublicKey(String publicKey)
{
Utils.checkArgument(
publicKey.length() == 40,
"z85 publickeys should have a length of 40 bytes but got " + publicKey.length());
reloadIfNecessary();
return publicKeys.containsKey(publicKey);
} | java |
// Detects whether the certificate directory changed since the last scan:
// walks the tree comparing fingerprints, removing each visited entry from
// a working copy; leftovers in the copy mean files were deleted.
boolean checkForChanges()
{
// initialize with last checked files
final Map<File, byte[]> presents = new HashMap<>(fingerprints);
boolean modified = traverseDirectory(location, new IFileVisitor()
{
@Override
public boolean visitFile(File file)
{
return modified(presents.remove(file), file);
}
@Override
public boolean visitDir(File dir)
{
return modified(presents.remove(dir), dir);
}
});
// if some files remain, that means they have been deleted since last scan
return modified || !presents.isEmpty();
} | java |
// Configures CURVE authentication with the given certificate directory
// location; the command is forwarded to the ZAuth agent.
public ZAuth configureCurve(String location)
{
Objects.requireNonNull(location, "Location has to be supplied");
return send(Mechanism.CURVE.name(), location);
} | java |
// Fetches the next ZAP reply, optionally blocking. Returns null (with a
// console warning) if reply recording was never enabled.
public ZapReply nextReply(boolean wait)
{
if (!repliesEnabled) {
System.out.println("ZAuth: replies are disabled. Please use replies(true);");
return null;
}
return ZapReply.recv(replies, wait);
} | java |
// Sends the string on the socket using the library charset, delegating to
// the byte-array overload. Returns the number of bytes sent.
public static int send(SocketBase s, String str, int flags)
{
byte[] data = str.getBytes(CHARSET);
return send(s, data, data.length, flags);
} | java |
// Receives one message from the socket after validating it, or null when
// nothing could be received (e.g. EAGAIN in non-blocking mode).
public static Msg recv(SocketBase s, int flags)
{
checkSocket(s);
Msg msg = recvMsg(s, flags);
if (msg == null) {
return null;
}
// At the moment an oversized message is silently truncated.
// TODO: Build in a notification mechanism to report the overflows.
//int to_copy = nbytes < len_ ? nbytes : len_;
return msg;
} | java |
public static String getMessageMetadata(Msg msg, String property)
{
String data = null;
Metadata metadata = msg.getMetadata();
if (metadata != null) {
data = metadata.get(property);
}
return data;
} | java |
// Starts a blocking proxy between frontend and backend, optionally copying
// traffic to the capture socket. No control socket is used (null).
public static boolean proxy(SocketBase frontend, SocketBase backend, SocketBase capture)
{
Utils.checkArgument(frontend != null, "Frontend socket has to be present for proxy");
Utils.checkArgument(backend != null, "Backend socket has to be present for proxy");
return Proxy.proxy(frontend, backend, capture, null);
} | java |
// Starts the monitor agent. The recv() waits for the agent's
// acknowledgement so the call behaves synchronously. Idempotence is
// enforced with a console warning rather than an exception.
public final ZMonitor start()
{
if (started) {
System.out.println("ZMonitor: Unable to start while already started.");
return this;
}
agent.send(START);
agent.recv();
started = true;
return this;
} | java |
// Sets the agent's verbosity. Only allowed before start(); the two-part
// send (command frame + boolean frame) is acknowledged via recv().
public final ZMonitor verbose(boolean verbose)
{
if (started) {
System.out.println("ZMonitor: Unable to change verbosity while already started.");
return this;
}
agent.send(VERBOSE, true);
agent.send(Boolean.toString(verbose));
agent.recv();
return this;
} | java |
// Registers the given monitor events with the agent. Only allowed before
// start(); event names are serialized one frame each, acknowledged via recv().
public final ZMonitor add(Event... events)
{
if (started) {
System.out.println("ZMonitor: Unable to add events while already started.");
return this;
}
ZMsg msg = new ZMsg();
msg.add(ADD_EVENTS);
for (Event evt : events) {
msg.add(evt.name());
}
agent.send(msg);
agent.recv();
return this;
} | java |
// Fetches the next monitor event, optionally blocking. Returns null when
// the monitor has not been started or no event is available.
public final ZEvent nextEvent(boolean wait)
{
if (!started) {
System.out.println("ZMonitor: Start before getting events.");
return null;
}
ZMsg msg = agent.recv(wait);
if (msg == null) {
return null;
}
return new ZEvent(msg);
} | java |
// Sends the frame's payload as a new Msg with the given flags. On failure
// the socket's error state may be raised as an exception via mayRaise().
public boolean sendFrame(ZFrame frame, int flags)
{
final byte[] data = frame.getData();
final Msg msg = new Msg(data);
if (socketBase.send(msg, flags)) {
return true;
}
mayRaise();
return false;
} | java |
// Creates a connected pair of Pipe objects backed by two unidirectional
// ypipes (conflating or plain, per endpoint). Note the crossed wiring:
// pipe[0] reads from upipe1/writes upipe2, pipe[1] the reverse, and each
// endpoint's in-HWM is the peer's out-HWM.
public static Pipe[] pair(ZObject[] parents, int[] hwms, boolean[] conflates)
{
Pipe[] pipes = new Pipe[2];
// Creates two pipe objects. These objects are connected by two ypipes,
// each to pass messages in one direction.
YPipeBase<Msg> upipe1 = conflates[0] ? new YPipeConflate<>()
: new YPipe<Msg>(Config.MESSAGE_PIPE_GRANULARITY.getValue());
YPipeBase<Msg> upipe2 = conflates[1] ? new YPipeConflate<>()
: new YPipe<Msg>(Config.MESSAGE_PIPE_GRANULARITY.getValue());
pipes[0] = new Pipe(parents[0], upipe1, upipe2, hwms[1], hwms[0], conflates[0]);
pipes[1] = new Pipe(parents[1], upipe2, upipe1, hwms[0], hwms[1], conflates[1]);
pipes[0].setPeer(pipes[1]);
pipes[1].setPeer(pipes[0]);
return pipes;
} | java |
// Returns true if a regular message can be read from the pipe. Side
// effects: deactivates reading when the underlying pipe is empty, and if
// the next item is the termination delimiter, consumes it and starts the
// termination handshake (returning false).
public boolean checkRead()
{
if (!inActive) {
return false;
}
if (state != State.ACTIVE && state != State.WAITING_FOR_DELIMITER) {
return false;
}
// Check if there's an item in the pipe.
if (!inpipe.checkRead()) {
inActive = false;
return false;
}
// If the next item in the pipe is message delimiter,
// initiate termination process.
if (isDelimiter(inpipe.probe())) {
Msg msg = inpipe.read();
assert (msg != null);
processDelimiter();
return false;
}
return true;
} | java |
// Reads the next regular message from the pipe, or null when nothing is
// readable or termination begins. Credentials are stashed aside and
// skipped; the low-water mark triggers an activate-write to the peer
// every lwm messages so the writer can resume.
public Msg read()
{
if (!inActive) {
return null;
}
if (state != State.ACTIVE && state != State.WAITING_FOR_DELIMITER) {
return null;
}
while (true) {
Msg msg = inpipe.read();
if (msg == null) {
inActive = false;
return null;
}
// If this is a credential, save a copy and receive next message.
if (msg.isCredential()) {
credential = Blob.createBlob(msg);
continue;
}
// If delimiter was read, start termination process of the pipe.
if (msg.isDelimiter()) {
processDelimiter();
return null;
}
// Only count complete logical messages (not parts or identities).
if (!msg.hasMore() && !msg.isIdentity()) {
msgsRead++;
}
if (lwm > 0 && msgsRead % lwm == 0) {
sendActivateWrite(peer, msgsRead);
}
return msg;
}
} | java |
// Returns true if a message can be written to the pipe. Writing is only
// possible in the ACTIVE state; hitting the high-water mark deactivates
// the write side until the peer drains the pipe.
public boolean checkWrite()
{
if (!outActive || state != State.ACTIVE) {
return false;
}
// TODO DIFF V4 small change, it is done like this in 4.2.2
boolean full = !checkHwm();
if (full) {
outActive = false;
return false;
}
return true;
} | java |
// Writes one message part into the outbound pipe if the pipe accepts
// writes. Only final parts of non-identity messages bump the written
// counter used for HWM accounting.
public boolean write(Msg msg)
{
if (!checkWrite()) {
return false;
}
boolean more = msg.hasMore();
boolean identity = msg.isIdentity();
outpipe.write(msg, more);
if (!more && !identity) {
msgsWritten++;
}
return true;
} | java |
// Removes an incomplete (multipart) message from the outbound pipe by
// unwriting parts until none remain; every unwritten part must be a
// non-final part, otherwise a complete message would be lost.
public void rollback()
{
// Remove incomplete message from the outbound pipe.
Msg msg;
if (outpipe != null) {
while ((msg = outpipe.unwrite()) != null) {
assert (msg.hasMore());
}
}
} | java |
// Flushes pending writes to the peer. If the underlying flush reports the
// reader as asleep, wake it with an activate-read command. No-op once the
// terminate-ack has been sent (the peer is gone).
public void flush()
{
// The peer does not exist anymore at this point.
if (state == State.TERM_ACK_SENT) {
return;
}
if (outpipe != null && !outpipe.flush()) {
sendActivateRead(peer);
}
} | java |
// Starts (or continues) asynchronous pipe termination. 'delay' decides
// whether pending inbound messages may still be consumed first. The
// branch taken depends on the current handshake state; afterwards the
// write side is stopped and a delimiter is pushed so the peer knows
// where the message stream ends.
public void terminate(boolean delay)
{
// Overload the value specified at pipe creation.
this.delay = delay;
// If terminate was already called, we can ignore the duplicit invocation.
if (state == State.TERM_REQ_SENT_1 || state == State.TERM_REQ_SENT_2) {
return;
}
// If the pipe is in the final phase of async termination, it's going to
// closed anyway. No need to do anything special here.
else if (state == State.TERM_ACK_SENT) {
return;
}
// The simple sync termination case. Ask the peer to terminate and wait
// for the ack.
else if (state == State.ACTIVE) {
sendPipeTerm(peer);
state = State.TERM_REQ_SENT_1;
}
// There are still pending messages available, but the user calls
// 'terminate'. We can act as if all the pending messages were read.
else if (state == State.WAITING_FOR_DELIMITER && !this.delay) {
outpipe = null;
sendPipeTermAck(peer);
state = State.TERM_ACK_SENT;
}
// If there are pending messages still available, do nothing.
else if (state == State.WAITING_FOR_DELIMITER) {
// do nothing
}
// We've already got delimiter, but not term command yet. We can ignore
// the delimiter and ack synchronously terminate as if we were in
// active state.
else if (state == State.DELIMITER_RECEIVED) {
sendPipeTerm(peer);
state = State.TERM_REQ_SENT_1;
}
// There are no other states.
else {
assert (false);
}
// Stop outbound flow of messages.
outActive = false;
if (outpipe != null) {
// Drop any unfinished outbound messages.
rollback();
// Write the delimiter into the pipe. Note that watermarks are not
// checked; thus the delimiter can be written even when the pipe is full.
Msg msg = new Msg();
msg.initDelimiter();
outpipe.write(msg, false);
flush();
}
} | java |
// Handles the termination delimiter read from the inbound pipe: either
// just records that it arrived (ACTIVE), or -- if we were waiting for it
// to finish a termination -- drops the outpipe and acks the peer.
private void processDelimiter()
{
assert (state == State.ACTIVE || state == State.WAITING_FOR_DELIMITER);
if (state == State.ACTIVE) {
state = State.DELIMITER_RECEIVED;
}
else {
outpipe = null;
sendPipeTermAck(peer);
state = State.TERM_ACK_SENT;
}
} | java |
// Handles a transport hiccup by abandoning the current inbound pipe
// (ownership transfers to the peer for disposal) and creating a fresh
// one of the same kind, then notifying the peer so it can reattach.
public void hiccup()
{
// If termination is already under way do nothing.
if (state != State.ACTIVE) {
return;
}
// We'll drop the pointer to the inpipe. From now on, the peer is
// responsible for deallocating it.
inpipe = null;
// Create new inpipe.
if (conflate) {
inpipe = new YPipeConflate<>();
}
else {
inpipe = new YPipe<>(Config.MESSAGE_PIPE_GRANULARITY.getValue());
}
inActive = true;
// Notify the peer about the hiccup.
sendHiccup(peer, inpipe);
} | java |
// Discards any partially-written outbound message on the current pipe and
// resets the multipart-send state. Always reports success.
protected boolean rollback()
{
if (currentOut != null) {
currentOut.rollback();
currentOut = null;
moreOut = false;
}
return true;
} | java |
// Fills a buffer with encoded message data for transmission. Returns the
// number of bytes produced. Uses the caller's buffer when supplied via
// 'data', otherwise the encoder's internal one. A whole-message zero-copy
// fast path hands out writeBuf directly when the pending chunk alone
// would fill the buffer.
@Override
public final int encode(ValueReference<ByteBuffer> data, int size)
{
int bufferSize = size;
ByteBuffer buf = data.get();
if (buf == null) {
// No caller buffer: use the internal one and its capacity.
buf = this.buffer;
bufferSize = this.bufferSize;
buffer.clear();
}
if (inProgress == null) {
return 0;
}
int pos = 0;
buf.limit(buf.capacity());
while (pos < bufferSize) {
// If there are no more data to return, run the state machine.
// If there are still no data, return what we already have
// in the buffer.
if (toWrite == 0) {
if (newMsgFlag) {
inProgress = null;
break;
}
next();
}
// If there are no data in the buffer yet and we are able to
// fill whole buffer in a single go, let's use zero-copy.
// There's no disadvantage to it as we cannot stuck multiple
// messages into the buffer anyway. Note that subsequent
// write(s) are non-blocking, thus each single write writes
// at most SO_SNDBUF bytes at once not depending on how large
// is the chunk returned from here.
// As a consequence, large messages being sent won't block
// other engines running in the same I/O thread for excessive
// amounts of time.
if (pos == 0 && data.get() == null && toWrite >= bufferSize) {
writeBuf.limit(writeBuf.capacity());
data.set(writeBuf);
pos = toWrite;
writeBuf = null;
toWrite = 0;
return pos;
}
// Copy data to the buffer. If the buffer is full, return.
// The limit dance caps the transfer at toCopy bytes, and the
// position delta gives the bytes actually copied.
int toCopy = Math.min(toWrite, bufferSize - pos);
int limit = writeBuf.limit();
writeBuf.limit(Math.min(writeBuf.capacity(), writeBuf.position() + toCopy));
int current = buf.position();
buf.put(writeBuf);
toCopy = buf.position() - current;
writeBuf.limit(limit);
pos += toCopy;
toWrite -= toCopy;
}
data.set(buf);
return pos;
} | java |
// Configures the encoder's next state-machine step: the bytes to emit
// (or none), how many of them, the follow-up step, and whether a new
// message starts afterwards.
private void nextStep(byte[] buf, int toWrite, Runnable next, boolean newMsgFlag)
{
if (buf != null) {
writeBuf = ByteBuffer.wrap(buf);
writeBuf.limit(toWrite);
}
else {
writeBuf = null;
}
this.toWrite = toWrite;
this.next = next;
this.newMsgFlag = newMsgFlag;
} | java |
// Schedules a timer to fire on 'sink' after 'timeout' ms. Must run on the
// poller's own worker thread. 'changed' makes an in-progress
// executeTimers() loop rescan the timer set.
public void addTimer(long timeout, IPollEvents sink, int id)
{
assert (Thread.currentThread() == worker);
final long expiration = clock() + timeout;
TimerInfo info = new TimerInfo(sink, id);
timers.insert(expiration, info);
changed = true;
} | java |
// Cancels the timer identified by (sink, id). The timer is only flagged
// as cancelled; actual removal is deferred to the execution loop so it is
// safe to call from within a timer handler.
public void cancelTimer(IPollEvents sink, int id)
{
assert (Thread.currentThread() == worker);
TimerInfo copy = new TimerInfo(sink, id);
// Complexity of this operation is O(n). We assume it is rarely used.
TimerInfo timerInfo = timers.find(copy);
if (timerInfo != null) {
// let's defer the removal during the loop
timerInfo.cancelled = true;
}
} | java |
// Fires all due timers (skipping and pruning cancelled ones) and returns
// the delay in ms until the next timer, or 0 when none remain. If a
// handler added a timer ('changed'), the whole pass restarts recursively
// to pick it up. Must run on the worker thread.
protected long executeTimers()
{
assert (Thread.currentThread() == worker);
changed = false;
// Fast track.
if (timers.isEmpty()) {
return 0L;
}
// Get the current time.
long current = clock();
// Execute the timers that are already due.
for (Entry<TimerInfo, Long> entry : timers.entries()) {
final TimerInfo timerInfo = entry.getKey();
if (timerInfo.cancelled) {
timers.remove(entry.getValue(), timerInfo);
continue;
}
// If we have to wait to execute the item, same will be true about
// all the following items (multimap is sorted). Thus we can stop
// checking the subsequent timers and return the time to wait for
// the next timer (at least 1ms).
final Long key = entry.getValue();
if (key > current) {
return key - current;
}
// Remove it from the list of active timers.
timers.remove(key, timerInfo);
// Trigger the timer.
timerInfo.sink.timerEvent(timerInfo.id);
}
// Remove empty list object
for (Entry<TimerInfo, Long> entry : timers.entries()) {
final Long key = entry.getValue();
if (!timers.hasValues(key)) {
timers.remove(key);
}
}
if (changed) {
return executeTimers();
}
// There are no more timers.
return 0L;
} | java |
// Begins shutdown of this object: forwards the termination request (with
// linger) to every owned object, registers the number of acks to expect,
// then checks whether termination can complete immediately.
@Override
protected void processTerm(int linger)
{
// Double termination should never happen.
assert (!terminating);
// Send termination request to all owned objects.
for (Own it : owned) {
sendTerm(it, linger);
}
registerTermAcks(owned.size());
owned.clear();
// Start termination process and check whether by chance we cannot
// terminate immediately.
terminating = true;
checkTermAcks();
} | java |
// Removes all subscriptions of the given pipe from the trie, invoking
// 'func' for every prefix that becomes unsubscribed. Entry point for the
// recursive rmHelper starting at the trie root with an empty prefix.
public boolean rm(Pipe pipe, IMtrieHandler func, XPub pub)
{
assert (pipe != null);
assert (func != null);
return rmHelper(pipe, new byte[0], 0, 0, func, pub);
} | java |
// Removes a single subscription carried in 'msg' for the pipe. The topic
// starts at byte 1 (byte 0 presumably holds the subscribe/unsubscribe
// command flag -- confirm against the XSUB wire format).
public boolean rm(Msg msg, Pipe pipe)
{
assert (msg != null);
assert (pipe != null);
return rmHelper(msg, 1, msg.size() - 1, pipe);
} | java |
// Walks the trie along the message 'data', invoking 'func' for every pipe
// subscribed to each matching prefix (including the empty prefix at the
// root). Stops at the end of the data or when no child node matches the
// next byte.
public void match(ByteBuffer data, int size, IMtrieHandler func, XPub pub)
{
assert (data != null);
assert (func != null);
assert (pub != null);
Mtrie current = this;
int idx = 0;
while (true) {
// Signal the pipes attached to this node.
if (current.pipes != null) {
for (Pipe it : current.pipes) {
func.invoke(it, null, 0, pub);
}
}
// If we are at the end of the message, there's nothing more to match.
if (size == 0) {
break;
}
// If there are no subnodes in the trie, return.
if (current.count == 0) {
break;
}
byte c = data.get(idx);
// If there's one subnode (optimisation).
if (current.count == 1) {
if (c != current.min) {
break;
}
current = current.next[0];
idx++;
size--;
continue;
}
// If there are multiple subnodes: children are indexed densely by
// (byte value - min); out-of-range or missing child means no match.
if (c < current.min || c >= current.min + current.count) {
break;
}
if (current.next[c - current.min] == null) {
break;
}
current = current.next[c - current.min];
idx++;
size--;
}
} | java |
// Closes the listening channel and reports the outcome through the
// socket's monitor events; fd is reset so the listener can be reopened.
private void close()
{
assert (fd != null);
try {
fd.close();
socket.eventClosed(endpoint, fd);
}
catch (IOException e) {
socket.eventCloseFailed(endpoint, ZError.exccode(e));
}
fd = null;
} | java |
// Accepts one incoming TCP connection, applies the accept filters and the
// per-connection socket options, and returns the channel (or null when
// the peer is rejected by a filter).
// NOTE(review): ServerSocketChannel.accept() can return null when no
// connection is pending on a non-blocking channel; the code below would
// then NPE -- presumably this is only called after a readiness event,
// confirm with the caller.
private SocketChannel accept() throws IOException
{
// The situation where connection cannot be accepted due to insufficient
// resources is considered valid and treated by ignoring the connection.
// Accept one connection and deal with different failure modes.
assert (fd != null);
SocketChannel sock = fd.accept();
if (!options.tcpAcceptFilters.isEmpty()) {
boolean matched = false;
for (TcpAddress.TcpAddressMask am : options.tcpAcceptFilters) {
if (am.matchAddress(address.address())) {
matched = true;
break;
}
}
if (!matched) {
try {
sock.close();
}
catch (IOException e) {
// best-effort close of a rejected connection; nothing to do
}
return null;
}
}
if (options.tos != 0) {
TcpUtils.setIpTypeOfService(sock, options.tos);
}
// Set the socket buffer limits for the underlying socket.
if (options.sndbuf != 0) {
TcpUtils.setTcpSendBuffer(sock, options.sndbuf);
}
if (options.rcvbuf != 0) {
TcpUtils.setTcpReceiveBuffer(sock, options.rcvbuf);
}
if (!isWindows) {
TcpUtils.setReuseAddress(sock, true);
}
return sock;
} | java |
// Adds a new pipe to the distributor. The list is partitioned as
// [matching | active | eligible | rest]; the swap moves the new pipe into
// the correct partition. During a multipart send ('more') the pipe only
// becomes eligible, otherwise active as well.
public void attach(Pipe pipe)
{
// If we are in the middle of sending a message, we'll add new pipe
// into the list of eligible pipes. Otherwise we add it to the list
// of active pipes.
if (more) {
pipes.add(pipe);
Collections.swap(pipes, eligible, pipes.size() - 1);
eligible++;
}
else {
pipes.add(pipe);
Collections.swap(pipes, active, pipes.size() - 1);
active++;
eligible++;
}
} | java |
// Marks the pipe as matching for the current message by swapping it into
// the matching partition at the front of the list. No-ops when the pipe
// already matches or is not eligible.
public void match(Pipe pipe)
{
int idx = pipes.indexOf(pipe);
// If pipe is already matching do nothing.
if (idx < matching) {
return;
}
// If the pipe isn't eligible, ignore it.
if (idx >= eligible) {
return;
}
// Mark the pipe as matching.
Collections.swap(pipes, idx, matching);
matching++;
} | java |
public void terminated(Pipe pipe)
{
// Remove the pipe from the list; adjust number of matching, active and/or
// eligible pipes accordingly.
if (pipes.indexOf(pipe) < matching) {
Collections.swap(pipes, pipes.indexOf(pipe), matching - 1);
matching--;
}
if (pipes.indexOf(pipe) < active) {
Collections.swap(pipes, pipes.indexOf(pipe), active - 1);
active--;
}
if (pipes.indexOf(pipe) < eligible) {
Collections.swap(pipes, pipes.indexOf(pipe), eligible - 1);
eligible--;
}
pipes.remove(pipe);
} | java |
// Re-activates a pipe that became writable again: first promote it from
// the passive tail into the eligible partition; if no multipart send is
// in flight, promote it further into the active partition.
public void activated(Pipe pipe)
{
// Move the pipe from passive to eligible state.
Collections.swap(pipes, pipes.indexOf(pipe), eligible);
eligible++;
// If there's no message being sent at the moment, move it to
// the active state.
if (!more) {
Collections.swap(pipes, eligible - 1, active);
active++;
}
} | java |
// Sends one message part to every matching pipe. When this part completes
// a multipart message, all eligible pipes become active again for the
// next message. Always reports success (drops are silent).
public boolean sendToMatching(Msg msg)
{
// Is this end of a multipart message?
boolean msgMore = msg.hasMore();
// Push the message to matching pipes.
distribute(msg);
// If mutlipart message is fully sent, activate all the eligible pipes.
if (!msgMore) {
active = eligible;
}
more = msgMore;
return true;
} | java |
// Pushes the message to every matching pipe, dropping it silently if none
// match. A failed write() swaps the failed pipe out of the matching
// partition and decrements 'matching', so the same index must be retried
// (it now holds a different pipe).
private void distribute(Msg msg)
{
// If there are no matching pipes available, simply drop the message.
if (matching == 0) {
return;
}
// TODO isVsm
// Push copy of the message to each matching pipe.
for (int idx = 0; idx < matching; ++idx) {
if (!write(pipes.get(idx), msg)) {
--idx; // Retry last write because index will have been swapped
}
}
} | java |
// Writes the message to a single pipe. On failure the pipe is demoted out
// of the matching, active and eligible partitions via three swaps (exact
// swap order matters for the partition invariants). Complete messages are
// flushed immediately.
private boolean write(Pipe pipe, Msg msg)
{
if (!pipe.write(msg)) {
Collections.swap(pipes, pipes.indexOf(pipe), matching - 1);
matching--;
Collections.swap(pipes, pipes.indexOf(pipe), active - 1);
active--;
Collections.swap(pipes, active, eligible - 1);
eligible--;
return false;
}
if (!msg.hasMore()) {
pipe.flush();
}
return true;
} | java |
// Registers the channel for all event kinds (input, output and error)
// with the given handler. Convenience overload of the full register.
public final boolean register(final SelectableChannel channel, final EventsHandler handler)
{
return register(channel, handler, IN | OUT | ERR);
} | java |
public final boolean unregister(final Object socketOrChannel)
{
if (socketOrChannel == null) {
return false;
}
CompositePollItem items = this.items.remove(socketOrChannel);
boolean rc = items != null;
if (rc) {
all.remove(items);
}
return rc;
} | java |
// Polls all registered items for up to 'timeout'. Returns the raw ready
// count when dispatching is disabled; otherwise dispatches the events to
// their handlers and returns the count, or -1 if a handler failed.
protected int poll(final long timeout, final boolean dispatchEvents)
{
// get all the raw items
final Set<PollItem> pollItems = new HashSet<>();
for (CompositePollItem it : all) {
pollItems.add(it.item());
}
// polling time
final int rc = poll(selector, timeout, pollItems);
if (!dispatchEvents) {
// raw result
return rc;
}
if (dispatch(all, pollItems.size())) {
// returns event counts after dispatch if everything went fine
return rc;
}
// error in dispatching
return -1;
} | java |
// Low-level poll: adapts the item collection to the array-based zmq.ZMQ
// poll call on the given selector with the given timeout.
protected int poll(final Selector selector, final long tout, final Collection<zmq.poll.PollItem> items)
{
final int size = items.size();
return zmq.ZMQ.poll(selector, items.toArray(new PollItem[size]), size, tout);
} | java |
// Dispatches ready events to each holder's handler (falling back to the
// global handler). Iterates over an array snapshot so handlers may
// unregister during dispatch. A holder carries either a socket or a raw
// channel, never both. Returns false as soon as a handler asks to stop.
protected boolean dispatch(final Collection<? extends ItemHolder> all, int size)
{
ItemHolder[] array = all.toArray(new ItemHolder[all.size()]);
// protected against handlers unregistering during this loop
for (ItemHolder holder : array) {
EventsHandler handler = holder.handler();
if (handler == null) {
handler = globalHandler;
}
if (handler == null) {
// no handler, short-circuit
continue;
}
final PollItem item = holder.item();
final int events = item.readyOps();
if (events <= 0) {
// no events, short-circuit
continue;
}
final Socket socket = holder.socket();
final SelectableChannel channel = holder.item().getRawSocket();
if (socket != null) {
assert (channel == null);
// dispatch on socket
if (!handler.events(socket, events)) {
return false;
}
}
if (channel != null) {
// dispatch on channel
assert (socket == null);
if (!handler.events(channel, events)) {
return false;
}
}
}
return true;
} | java |
// Tells whether the registered socket/channel was polled for readability
// and is currently readable. False when it is unknown or not subscribed
// to read events.
public boolean readable(final Object socketOrChannel)
{
final PollItem it = filter(socketOrChannel, READABLE);
if (it == null) {
return false;
}
return it.isReadable();
} | java |
// Tells whether the registered socket/channel was polled for writability
// and is currently writable. False when it is unknown or not subscribed
// to write events.
public boolean writable(final Object socketOrChannel)
{
final PollItem it = filter(socketOrChannel, WRITABLE);
if (it == null) {
return false;
}
return it.isWritable();
} | java |
// Tells whether the registered socket/channel was polled for errors and
// is currently in error state. False when it is unknown or not
// subscribed to error events.
public boolean error(final Object socketOrChannel)
{
final PollItem it = filter(socketOrChannel, ERR);
if (it == null) {
return false;
}
return it.isError();
} | java |
// Adds a holder under the given key, deriving the key from the holder's
// socket or channel when none is supplied. Holders for the same key are
// aggregated into one CompositePollItem; the flat 'all' list mirrors the
// map's values.
protected boolean add(Object socketOrChannel, final ItemHolder holder)
{
if (socketOrChannel == null) {
Socket socket = holder.socket();
SelectableChannel ch = holder.item().getRawSocket();
if (ch == null) {
// not a channel
assert (socket != null);
socketOrChannel = socket;
}
else if (socket == null) {
// not a socket
socketOrChannel = ch;
}
}
assert (socketOrChannel != null);
CompositePollItem aggregate = items.get(socketOrChannel);
if (aggregate == null) {
aggregate = new CompositePollItem(socketOrChannel);
items.put(socketOrChannel, aggregate);
}
final boolean rc = aggregate.holders.add(holder);
if (rc) {
all.add(aggregate);
}
return rc;
} | java |
// Returns all registered aggregate items, refreshing each one's fallback
// handler with the current global handler first.
protected Collection<? extends ItemHolder> items()
{
for (CompositePollItem item : all) {
item.handler(globalHandler);
}
return all;
} | java |
// Returns the holders registered for the given socket/channel, or an
// empty set when the key is unknown.
protected Iterable<ItemHolder> items(final Object socketOrChannel)
{
final CompositePollItem aggregate = items.get(socketOrChannel);
if (aggregate == null) {
return Collections.emptySet();
}
return aggregate.holders;
} | java |
// Resolves the poll item for the given key, but only when it was
// registered for (at least one of) the requested event kinds; null in
// every other case.
protected PollItem filter(final Object socketOrChannel, int events)
{
if (socketOrChannel == null) {
return null;
}
CompositePollItem item = items.get(socketOrChannel);
if (item == null) {
return null;
}
PollItem pollItem = item.item();
if (pollItem == null) {
return null;
}
if (pollItem.hasEvent(events)) {
return pollItem;
}
return null;
} | java |
// Reads a length-prefixed short string (1-byte length) at the needle's
// current position and advances past it.
// NOTE(review): forward(value.length() + 1) equates character count with
// byte count -- correct only for single-byte encodings; confirm against
// Wire.getShortString.
public String getShortString()
{
String value = Wire.getShortString(needle, needle.position());
forward(value.length() + 1);
return value;
} | java |
// Reads a length-prefixed long string (4-byte length) at the needle's
// current position and advances past it.
// NOTE(review): forward(value.length() + 4) equates character count with
// byte count -- correct only for single-byte encodings; confirm against
// Wire.getLongString.
public String getLongString()
{
String value = Wire.getLongString(needle, needle.position());
forward(value.length() + 4);
return value;
} | java |
// Serializes a string collection as a 1-byte element count followed by
// each string. A null collection is encoded as an empty one (count 0);
// collections of 256+ elements are rejected since the count must fit in
// one byte.
public void putList(Collection<String> elements)
{
if (elements == null) {
putNumber1(0);
}
else {
Utils.checkArgument(elements.size() < 256, "Collection has to be smaller than 256 elements");
putNumber1(elements.size());
for (String string : elements) {
putString(string);
}
}
} | java |
// Serializes a string map as a 1-byte entry count followed by one
// "key=value" string per entry. Because '=' is the key/value separator,
// neither side may contain it. A null map encodes as count 0; maps of
// 256+ entries are rejected.
public void putMap(Map<String, String> map)
{
if (map == null) {
putNumber1(0);
}
else {
Utils.checkArgument(map.size() < 256, "Map has to be smaller than 256 elements");
putNumber1(map.size());
for (Entry<String, String> entry : map.entrySet()) {
if (entry.getKey().contains("=")) {
throw new IllegalArgumentException("Keys cannot contain '=' sign. " + entry);
}
if (entry.getValue().contains("=")) {
throw new IllegalArgumentException("Values cannot contain '=' sign. " + entry);
}
String val = entry.getKey() + "=" + entry.getValue();
putString(val);
}
}
} | java |
// Appends a value to the chunked queue: stores it at the current back
// slot, advances back to the pre-allocated end slot, and when the end
// chunk fills up links in the next chunk -- reusing the spare chunk kept
// from a previous pop when one is available, allocating otherwise.
public void push(T val)
{
backChunk.values[backPos] = val;
backChunk = endChunk;
backPos = endPos;
if (++endPos != size) {
return;
}
// End chunk is full: attach a new chunk (spare if reusable).
Chunk<T> sc = spareChunk;
if (sc != beginChunk) {
spareChunk = spareChunk.next;
endChunk.next = sc;
sc.prev = endChunk;
}
else {
endChunk.next = new Chunk<>(size, memoryPtr);
memoryPtr += size;
endChunk.next.prev = endChunk;
}
endChunk = endChunk.next;
endPos = 0;
} | java |
// Reverts the last push: moves both the 'back' and 'end' positions one
// slot backwards, stepping into the previous chunk when a position was at
// a chunk boundary. An end chunk abandoned this way is dropped, not kept
// as a spare (see comment below).
public void unpush()
{
// First, move 'back' one position backwards.
if (backPos > 0) {
--backPos;
}
else {
backPos = size - 1;
backChunk = backChunk.prev;
}
// Now, move 'end' position backwards. Note that obsolete end chunk
// is not used as a spare chunk. The analysis shows that doing so
// would require free and atomic operation per chunk deallocated
// instead of a simple free.
if (endPos > 0) {
--endPos;
}
else {
endPos = size - 1;
endChunk = endChunk.prev;
endChunk.next = null;
}
} | java |
// Removes and returns the front value. The slot is nulled so the value
// can be garbage-collected; when the begin chunk is exhausted we advance
// to the next chunk and detach the old one.
public T pop()
{
T val = beginChunk.values[beginPos];
beginChunk.values[beginPos] = null;
beginPos++;
if (beginPos == size) {
beginChunk = beginChunk.next;
beginChunk.prev = null;
beginPos = 0;
}
return val;
} | java |
private void rebuild()
{
pollact = null;
pollSize = pollers.size();
if (pollset != null) {
pollset.close();
}
pollset = context.poller(pollSize);
assert (pollset != null);
pollact = new SPoller[pollSize];
int itemNbr = 0;
for (SPoller poller : pollers) {
pollset.register(poller.item);
pollact[itemNbr] = poller;
itemNbr++;
}
dirty = false;
} | java |
public int addPoller(PollItem pollItem, IZLoopHandler handler, Object arg)
{
if (pollItem.getRawSocket() == null && pollItem.getSocket() == null) {
return -1;
}
SPoller poller = new SPoller(pollItem, handler, arg);
pollers.add(poller);
dirty = true;
if (verbose) {
System.out.printf(
"I: zloop: register %s poller (%s, %s)\n",
pollItem.getSocket() != null ? pollItem.getSocket().getType() : "RAW",
pollItem.getSocket(),
pollItem.getRawSocket());
}
return 0;
} | java |
public int removeTimer(Object arg)
{
Objects.requireNonNull(arg, "Argument has to be supplied");
// We cannot touch self->timers because we may be executing that
// from inside the poll loop. So, we hold the arg on the zombie
// list, and process that list when we're done executing timers.
zombies.add(arg);
if (verbose) {
System.out.printf("I: zloop: cancel timer\n");
}
return 0;
} | java |
public ANRWatchDog setANRListener(ANRListener listener) {
if (listener == null) {
_anrListener = DEFAULT_ANR_LISTENER;
} else {
_anrListener = listener;
}
return this;
} | java |
public ANRWatchDog setANRInterceptor(ANRInterceptor interceptor) {
if (interceptor == null) {
_anrInterceptor = DEFAULT_ANR_INTERCEPTOR;
} else {
_anrInterceptor = interceptor;
}
return this;
} | java |
public ANRWatchDog setInterruptionListener(InterruptionListener listener) {
if (listener == null) {
_interruptionListener = DEFAULT_INTERRUPTION_LISTENER;
} else {
_interruptionListener = listener;
}
return this;
} | java |
private static Type processTypeForDescendantLookup(Type type) {
if (type instanceof ParameterizedType) {
return ((ParameterizedType) type).getRawType();
} else {
return type;
}
} | java |
private static <T> Stream<T> generateStream(T seed, Predicate<? super T> hasNext, UnaryOperator<T> next) {
final Spliterator<T> spliterator = Spliterators.spliteratorUnknownSize(new Iterator<T>() {
private T last = seed;
@Override
public boolean hasNext() {
return hasNext.test(last);
}
@Override
public T next() {
final T current = last;
last = next.apply(last);
return current;
}
}, Spliterator.ORDERED);
return StreamSupport.stream(spliterator, false);
} | java |
private static Set<TsBeanModel> writeBeanAndParentsFieldSpecs(
Writer writer, Settings settings, TsModel model, Set<TsBeanModel> emittedSoFar, TsBeanModel bean) {
if (emittedSoFar.contains(bean)) {
return new HashSet<>();
}
final TsBeanModel parentBean = getBeanModelByType(model, bean.getParent());
final Set<TsBeanModel> emittedBeans = parentBean != null
? writeBeanAndParentsFieldSpecs(writer, settings, model, emittedSoFar, parentBean)
: new HashSet<TsBeanModel>();
final String parentClassName = parentBean != null
? getBeanModelClassName(parentBean) + "Fields"
: "Fields";
writer.writeIndentedLine("");
writer.writeIndentedLine(
"class " + getBeanModelClassName(bean) + "Fields extends " + parentClassName + " {");
writer.writeIndentedLine(
settings.indentString + "constructor(parent?: Fields, name?: string) { super(parent, name); }");
for (TsPropertyModel property : bean.getProperties()) {
writeBeanProperty(writer, settings, model, bean, property);
}
writer.writeIndentedLine("}");
emittedBeans.add(bean);
return emittedBeans;
} | java |
private static boolean isOriginalTsType(TsType type) {
if (type instanceof TsType.BasicType) {
TsType.BasicType basicType = (TsType.BasicType)type;
return !(basicType.name.equals("null") || basicType.name.equals("undefined"));
}
return true;
} | java |
private static TsType extractOriginalTsType(TsType type) {
if (type instanceof TsType.OptionalType) {
return extractOriginalTsType(((TsType.OptionalType)type).type);
}
if (type instanceof TsType.UnionType) {
TsType.UnionType union = (TsType.UnionType)type;
List<TsType> originalTypes = new ArrayList<>();
for (TsType curType : union.types) {
if (isOriginalTsType(curType)) {
originalTypes.add(curType);
}
}
return originalTypes.size() == 1
? extractOriginalTsType(originalTypes.get(0))
: type;
}
if (type instanceof TsType.BasicArrayType) {
return extractOriginalTsType(((TsType.BasicArrayType)type).elementType);
}
return type;
} | java |
public static int findUnlinked(int pos, int end, DBIDArrayIter ix, PointerHierarchyRepresentationBuilder builder) {
while(pos < end) {
if(!builder.isLinked(ix.seek(pos))) {
return pos;
}
++pos;
}
return -1;
} | java |
private DoubleObjPair<Polygon> buildHullsRecursively(Cluster<Model> clu, Hierarchy<Cluster<Model>> hier, Map<Object, DoubleObjPair<Polygon>> hulls, Relation<? extends NumberVector> coords) {
final DBIDs ids = clu.getIDs();
FilteredConvexHull2D hull = new FilteredConvexHull2D();
for(DBIDIter iter = ids.iter(); iter.valid(); iter.advance()) {
hull.add(coords.get(iter).toArray());
}
double weight = ids.size();
if(hier != null) {
final int numc = hier.numChildren(clu);
if(numc > 0) {
for(It<Cluster<Model>> iter = hier.iterChildren(clu); iter.valid(); iter.advance()) {
final Cluster<Model> iclu = iter.get();
DoubleObjPair<Polygon> poly = hulls.get(iclu);
if(poly == null) {
poly = buildHullsRecursively(iclu, hier, hulls, coords);
}
// Add inner convex hull to outer convex hull.
for(ArrayListIter<double[]> vi = poly.second.iter(); vi.valid(); vi.advance()) {
hull.add(vi.get());
}
weight += poly.first / numc;
}
}
}
DoubleObjPair<Polygon> pair = new DoubleObjPair<>(weight, hull.getHull());
hulls.put(clu, pair);
return pair;
} | java |
public static final Color getColorForValue(double val) {
// Color positions
double[] pos = new double[] { 0.0, 0.6, 0.8, 1.0 };
// Colors at these positions
Color[] cols = new Color[] { new Color(0.0f, 0.0f, 0.0f, 0.6f), new Color(0.0f, 0.0f, 1.0f, 0.8f), new Color(1.0f, 0.0f, 0.0f, 0.9f), new Color(1.0f, 1.0f, 0.0f, 1.0f) };
assert (pos.length == cols.length);
if(val < pos[0]) {
val = pos[0];
}
// Linear interpolation:
for(int i = 1; i < pos.length; i++) {
if(val <= pos[i]) {
Color prev = cols[i - 1];
Color next = cols[i];
final double mix = (val - pos[i - 1]) / (pos[i] - pos[i - 1]);
final int r = (int) ((1 - mix) * prev.getRed() + mix * next.getRed());
final int g = (int) ((1 - mix) * prev.getGreen() + mix * next.getGreen());
final int b = (int) ((1 - mix) * prev.getBlue() + mix * next.getBlue());
final int a = (int) ((1 - mix) * prev.getAlpha() + mix * next.getAlpha());
Color col = new Color(r, g, b, a);
return col;
}
}
return cols[cols.length - 1];
} | java |
/**
 * Shows a save dialog for the given plot, lets the user choose file, format
 * and output size, and writes the image in the chosen format. Errors are
 * reported to the user via {@code showError} rather than thrown.
 *
 * @param plot the plot to save
 * @param width default image width offered in the options panel
 * @param height default image height offered in the options panel
 * @return the JFileChooser result code (APPROVE_OPTION, CANCEL_OPTION or ERROR_OPTION)
 */
public static int showSaveDialog(SVGPlot plot, int width, int height) {
  JFileChooser fc = new JFileChooser(new File("."));
  fc.setDialogTitle(DEFAULT_TITLE);
  // fc.setFileFilter(new ImageFilter());
  // Accessory panel lets the user pick format, width, height and quality.
  SaveOptionsPanel optionsPanel = new SaveOptionsPanel(fc, width, height);
  fc.setAccessory(optionsPanel);
  int ret = fc.showSaveDialog(null);
  if(ret == JFileChooser.APPROVE_OPTION) {
    fc.setDialogTitle("Saving... Please wait.");
    File file = fc.getSelectedFile();
    // The options panel may override format and requested size.
    String format = optionsPanel.getSelectedFormat();
    width = optionsPanel.getSelectedWidth();
    height = optionsPanel.getSelectedHeight();
    if(format == null || AUTOMAGIC_FORMAT.equals(format)) {
      // Fall back to guessing the format from the file extension.
      format = guessFormat(file.getName());
    }
    try {
      if(format == null) {
        showError(fc, "Error saving image.", "File format not recognized.");
      }
      else if("jpeg".equals(format) || "jpg".equals(format)) {
        float quality = optionsPanel.getJPEGQuality();
        plot.saveAsJPEG(file, width, height, quality);
      }
      else if("png".equals(format)) {
        plot.saveAsPNG(file, width, height);
      }
      else if("ps".equals(format)) {
        plot.saveAsPS(file);
      }
      else if("eps".equals(format)) {
        plot.saveAsEPS(file);
      }
      else if("pdf".equals(format)) {
        plot.saveAsPDF(file);
      }
      else if("svg".equals(format)) {
        plot.saveAsSVG(file);
      }
      else {
        showError(fc, "Error saving image.", "Unsupported format: " + format);
      }
    }
    // Incompatible Batik/JPEG writer combinations surface as linkage errors.
    catch(java.lang.IncompatibleClassChangeError e) {
      showError(fc, "Error saving image.", "It seems that your Java version is incompatible with this version of Batik and Jpeg writing. Sorry.");
    }
    // Optional output backends (e.g. Apache FOP) may be missing at runtime.
    catch(ClassNotFoundException e) {
      showError(fc, "Error saving image.", "A class was not found when saving this image. Maybe installing Apache FOP will help (for PDF, PS and EPS output).\n" + e.toString());
    }
    catch(TransformerFactoryConfigurationError | Exception e) {
      LOG.exception(e);
      showError(fc, "Error saving image.", e.toString());
    }
  }
  else if(ret == JFileChooser.ERROR_OPTION) {
    showError(fc, "Error in file dialog.", "Unknown Error.");
  }
  else if(ret == JFileChooser.CANCEL_OPTION) {
    // do nothing - except return result
  }
  return ret;
}
public static String guessFormat(String name) {
String ext = FileUtil.getFilenameExtension(name);
for(String format : FORMATS) {
if(format.equalsIgnoreCase(ext)) {
return ext;
}
}
return null;
} | java |
/**
 * Returns the shared adapter for FeatureVector instances.
 *
 * The prototype parameter is never dereferenced here; it only serves to
 * bind the type parameter F at the call site.
 * NOTE(review): the unchecked cast assumes FEATUREVECTORADAPTER works for
 * any feature type F - confirm the shared adapter is generic-agnostic.
 *
 * @param <F> feature type of the vector
 * @param prototype prototype vector, used for type inference only
 * @return the shared adapter, cast to the requested feature type
 */
@SuppressWarnings("unchecked")
public static <F> FeatureVectorAdapter<F> featureVectorAdapter(FeatureVector<F> prototype) {
    return (FeatureVectorAdapter<F>) FEATUREVECTORADAPTER;
}
public static <A> int getIndexOfMaximum(A array, NumberArrayAdapter<?, A> adapter) throws IndexOutOfBoundsException {
final int size = adapter.size(array);
int index = 0;
double max = adapter.getDouble(array, 0);
for (int i = 1; i < size; i++) {
double val = adapter.getDouble(array, i);
if (val > max) {
max = val;
index = i;
}
}
return index;
} | java |
public byte[] asByteArray(NumberVector vector) {
final long[] longValueList = new long[dimensionality];
for(int dim = 0; dim < dimensionality; ++dim) {
final double minValue = minValues[dim];
final double maxValue = maxValues[dim];
double dimValue = vector.doubleValue(dim);
dimValue = (dimValue - minValue) / (maxValue - minValue);
longValueList[dim] = (long) (dimValue * (Long.MAX_VALUE));
}
final byte[] bytes = new byte[Long.SIZE * dimensionality * (Long.SIZE / Byte.SIZE)];
int shiftCounter = 0;
for(int i = 0; i < Long.SIZE; ++i) {
for(int dim = 0; dim < dimensionality; ++dim) {
long byteValue = longValueList[dim];
int localShift = shiftCounter % Byte.SIZE;
bytes[(bytes.length - 1) - (shiftCounter / Byte.SIZE)] |= ((byteValue >> i) & 0x01) << localShift;
shiftCounter++;
}
}
return bytes;
} | java |
/**
 * Runs the SOD (Subspace Outlier Degree) algorithm on the given relation.
 * For each object a neighborhood is retrieved via the instantiated
 * similarity query; dimensions whose variance within that neighborhood is
 * below {@code alpha} times the average variance form the relevant
 * subspace, and the score is the object's subspace outlier degree relative
 * to the neighborhood centroid in that subspace.
 *
 * @param relation the data relation to process
 * @return outlier result with per-object SOD scores, plus an optional
 *         child relation of SOD models when model output is enabled
 */
public OutlierResult run(Relation<V> relation) {
  SimilarityQuery<V> snnInstance = similarityFunction.instantiate(relation);
  FiniteProgress progress = LOG.isVerbose() ? new FiniteProgress("Assigning Subspace Outlier Degree", relation.size(), LOG) : null;
  WritableDoubleDataStore sod_scores = DataStoreUtil.makeDoubleStorage(relation.getDBIDs(), DataStoreFactory.HINT_STATIC);
  // Model storage is only allocated when model output was requested.
  WritableDataStore<SODModel> sod_models = models ? DataStoreUtil.makeStorage(relation.getDBIDs(), DataStoreFactory.HINT_STATIC, SODModel.class) : null;
  DoubleMinMax minmax = new DoubleMinMax();
  for(DBIDIter iter = relation.iterDBIDs(); iter.valid(); iter.advance()) {
    DBIDs neighborhood = getNearestNeighbors(relation, snnInstance, iter);
    double[] center;
    long[] weightVector = null;
    double sod = 0.;
    if(neighborhood.size() > 0) {
      center = Centroid.make(relation, neighborhood).getArrayRef();
      // Note: per-dimension variances; no covariances.
      double[] variances = computePerDimensionVariances(relation, center, neighborhood);
      double expectationOfVariance = Mean.of(variances);
      // Relevant subspace: dimensions with variance clearly below average.
      weightVector = BitsUtil.zero(variances.length);
      for(int d = 0; d < variances.length; d++) {
        if(variances[d] < alpha * expectationOfVariance) {
          BitsUtil.setI(weightVector, d);
        }
      }
      sod = subspaceOutlierDegree(relation.get(iter), center, weightVector);
    }
    else {
      // No neighbors: the score stays 0 and the object is its own center;
      // the weight vector remains null in the stored model.
      center = relation.get(iter).toArray();
    }
    if(sod_models != null) {
      sod_models.put(iter, new SODModel(center, weightVector));
    }
    sod_scores.putDouble(iter, sod);
    minmax.put(sod);
    LOG.incrementProcessed(progress);
  }
  LOG.ensureCompleted(progress);
  // combine results.
  OutlierScoreMeta meta = new BasicOutlierScoreMeta(minmax.getMin(), minmax.getMax());
  OutlierResult sodResult = new OutlierResult(meta, new MaterializedDoubleRelation("Subspace Outlier Degree", "sod-outlier", sod_scores, relation.getDBIDs()));
  if(sod_models != null) {
    // Attach the models as a child relation of the outlier result.
    sodResult.addChildResult(new MaterializedRelation<>("Subspace Outlier Model", "sod-outlier", new SimpleTypeInformation<>(SODModel.class), sod_models, relation.getDBIDs()));
  }
  return sodResult;
}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.