code
stringlengths 73
34.1k
| label
stringclasses 1
value |
|---|---|
/**
 * Retrieves a session from the backing store, optionally treating the call
 * as a client access (which refreshes, validates, ref-counts and fires
 * access events).
 *
 * @param id                    the session id to look up
 * @param version               session version; -1 means "no specific version"
 * @param isSessionAccess       true when this is a real client access rather
 *                              than an internal lookup
 * @param forceSessionRetrieval true to force retrieval from the store
 * @param xdCorrelator          opaque correlator passed through to the store
 * @return the session, or null when not found or no longer valid
 */
protected Object getSession(String id, int version, boolean isSessionAccess, boolean forceSessionRetrieval, Object xdCorrelator) {
if (isSessionAccess) {
// Refresh before retrieval; the versionless overload is used when the
// caller did not pin a specific version (-1).
if (version == -1) {
_store.refreshSession(id, xdCorrelator);
} else {
_store.refreshSession(id, version, xdCorrelator);
}
}
ISession iSession = getSessionFromStore(id, version, isSessionAccess, forceSessionRetrieval, xdCorrelator);
if (iSession != null) {
if (isSessionAccess) {
// Re-validate against the session's own last-access time; an expired
// session is reported to the caller as absent (null).
boolean stillValid = _store.checkSessionStillValid(iSession, iSession.getLastAccessedTime());
if (stillValid) {
// Ref count is incremented BEFORE the access event is dispatched,
// so listeners always see a pinned session.
iSession.incrementRefCount();
_sessionEventDispatcher.sessionAccessed(iSession);
} else {
iSession = null;
}
}
} else {
if (isSessionAccess) {
// Only genuine client accesses report unknown keys.
_sessionEventDispatcher.sessionAccessUnknownKey(id);
}
}
return iSession;
}
|
java
|
/**
 * Makes a defensive copy of a credential token.
 *
 * @param credToken the token bytes to copy; may be {@code null}
 * @return a newly allocated array with identical contents, or {@code null}
 *         when {@code credToken} is {@code null}
 */
public static byte[] copyCredToken(byte[] credToken) {
    if (credToken == null) {
        return null;
    }
    // clone() handles both the empty and non-empty cases, replacing the
    // hand-rolled allocate-then-arraycopy sequence.
    return credToken.clone();
}
|
java
|
/**
 * Makes a defensive (shallow) copy of a certificate chain. The array is
 * copied; the certificate objects themselves are shared, as before.
 *
 * @param certChain the chain to copy; may be {@code null}
 * @return a newly allocated array with the same elements, or {@code null}
 *         when {@code certChain} is {@code null}
 */
public static X509Certificate[] copyCertChain(X509Certificate[] certChain) {
    if (certChain == null) {
        return null;
    }
    // clone() handles both the empty and non-empty cases, replacing the
    // hand-rolled allocate-then-arraycopy sequence.
    return certChain.clone();
}
|
java
|
/**
 * Waits for the chains in {@code waitingChainNames} to stop, polling the
 * channel framework once per second until either every chain has stopped
 * or {@code quiesceTimeout} milliseconds have elapsed.
 *
 * @param quiesceTimeout maximum time to wait, in milliseconds
 */
public void waitOnChains(long quiesceTimeout) {
    ChannelFramework cf = ChannelFrameworkFactory.getChannelFramework();
    int elapsedTime = 0;
    // Fix: this must LOOP until the chains stop or the timeout expires.
    // The original "if" performed at most one one-second poll even though
    // elapsedTime was being accumulated for a timeout comparison.
    while (waitingChainNames.size() > 0 && elapsedTime < quiesceTimeout) {
        if (TraceComponent.isAnyTracingEnabled() && tc.isEventEnabled()) {
            Tr.event(this, tc, "Waiting on " + waitingChainNames.size() + " chain(s) to stop");
        }
        // Drop any chains that have stopped since the last poll.
        Iterator<String> iter = waitingChainNames.iterator();
        while (iter.hasNext()) {
            if (!cf.isChainRunning(iter.next()))
                iter.remove();
        }
        try {
            Thread.sleep(1000);
        } catch (InterruptedException ie) {
            // Deliberately ignored: the quiesce wait is best-effort and
            // bounded by quiesceTimeout regardless.
        }
        elapsedTime += 1000;
    }
}
|
java
|
/**
 * Tears down JMF state for a connection by discarding the schema set that
 * was negotiated over it.
 *
 * @param conn the connection whose schema state is being dropped
 */
static void closeLink(CommsConnection conn) {
    if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled()) {
        SibTr.entry(tc, "closeLink", conn);
    }
    // Forget the schema ids previously exchanged on this connection.
    conn.setSchemaSet(null);
    if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled()) {
        SibTr.exit(tc, "closeLink");
    }
}
|
java
|
/**
 * Processes an incoming handshake: records the peer's schema ids on the
 * connection and returns our own registered schema ids for the reply.
 *
 * @param conn the connection being handshaken
 * @param data the peer's encoded schema id list
 * @return the encoded list of all locally registered schema ids
 * @throws JMFException if the handshake data cannot be decoded
 */
static byte[] receiveHandshake(CommsConnection conn, byte[] data) throws JMFException {
    if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled()) {
        SibTr.entry(tc, "receiveHandshake", conn);
    }
    // Decode the peer's ids and remember them against this connection.
    Coder incoming = new Coder(data);
    conn.setSchemaSet(makeSchemaIdSet(incoming));
    // Our reply: every schema currently in the local registry.
    byte[] localIds = makeSchemaIdList(JMFRegistry.instance.retrieveAll());
    if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled()) {
        SibTr.exit(tc, "receiveHandshake");
    }
    return localIds;
}
|
java
|
/**
 * Processes a batch of schema definitions received from the peer, adding
 * them to the registry and to the connection's known-schema set.
 *
 * @param conn the connection the schemas arrived on
 * @param data the encoded schema definition list
 * @throws JMFException if a schema cannot be decoded
 * @throws IllegalStateException if the connection has no schema set or the
 *         connection was dropped while fetching it
 */
static void receiveSchemas(CommsConnection conn, byte[] data) throws JMFException {
    if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled()) {
        SibTr.entry(tc, "receiveSchemas", conn);
    }
    final SchemaSet knownSchemas;
    try {
        knownSchemas = (SchemaSet) conn.getSchemaSet();
        if (knownSchemas == null) {
            if (TraceComponent.isAnyTracingEnabled() && tc.isDebugEnabled()) {
                SibTr.debug(tc, "getSchemaSet() returned null for CommsConnection: " + conn);
            }
            throw new IllegalStateException("CommsConnection returned null SchemaSet");
        }
    } catch (SIConnectionDroppedException e) {
        // No FFDC code needed - this is not an unexpected condition when a connection fails during start of a new conversation on the connection
        throw new IllegalStateException("CommsConnection threw exception", e);
    }
    // Decode the definitions and merge them into the registry + known set.
    addSchemaDefinitions(knownSchemas, new Coder(data));
    if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled()) {
        SibTr.exit(tc, "receiveSchemas");
    }
}
|
java
|
/**
 * Ensures the peer has every schema needed to decode a message, sending
 * any it does not yet know about and recording them as known.
 *
 * @param conn    the connection to the peer
 * @param schemas the schemas the message requires
 * @throws SIConnectionLostException        if the send fails
 * @throws SIConnectionUnavailableException if the connection is unusable
 * @throws IllegalStateException            if the connection has no schema set
 */
static void sendSchemas(CommsConnection conn, JMFSchema[] schemas)
    throws SIConnectionLostException, SIConnectionUnavailableException {
    if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled()) {
        SibTr.entry(tc, "sendSchemas", conn);
    }
    // The set of schema ids the recipient is already known to hold.
    SchemaSet knownIds = (SchemaSet) conn.getSchemaSet();
    if (knownIds == null) {
        if (TraceComponent.isAnyTracingEnabled() && tc.isDebugEnabled()) {
            SibTr.debug(tc, "getSchemaSet() returned null for CommsConnection: " + conn);
        }
        throw new IllegalStateException("CommsConnection returned null SchemaSet");
    }
    // Collect the schemas the peer is missing.
    JMFSchema[] toSend = new JMFSchema[schemas.length];
    int missingCount = 0;
    for (JMFSchema candidate : schemas) {
        if (!knownIds.contains(candidate.getLongID())) {
            toSend[missingCount++] = candidate;
        }
    }
    if (missingCount > 0) {
        // Ship the missing definitions, then mark them as known so we never
        // send them again on this connection.
        conn.sendMFPSchema(makeSchemaDefinitionList(toSend, missingCount));
        for (int i = 0; i < missingCount; i++) {
            knownIds.add(toSend[i].getLongID());
        }
    }
    if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled()) {
        SibTr.exit(tc, "sendSchemas");
    }
}
// Takes an encoded list of 8-byte schema IDs and returns the corresponding
// encoded list of schema definitions. Malformed or empty input yields an
// empty byte array.
static byte[] getEncodedSchemataByEncodedIDs(byte[] encodedSchemaIDs) {
    // Need at least one complete 8-byte id to do anything useful.
    if (encodedSchemaIDs == null || encodedSchemaIDs.length < 8) {
        return new byte[0];
    }
    int idCount = encodedSchemaIDs.length / 8;
    long[] decodedIds = new long[idCount];
    // Decode each fixed-width id in turn; any trailing partial bytes
    // (length not a multiple of 8) are ignored, as before.
    for (int i = 0, pos = 0; i < idCount; i++, pos += 8) {
        decodedIds[i] = ArrayUtil.readLong(encodedSchemaIDs, pos);
    }
    // Delegate to the id-based lookup to produce the encoded schemas.
    return getEncodedSchemataBySchemaIDs(decodedIds);
}
// Gets an encoded list of schema definitions from a list of schema IDs.
// IDs with no registered schema are reported via FFDC and skipped rather
// than failing the whole request (the created exception is deliberately
// never thrown).
static byte[] getEncodedSchemataBySchemaIDs(long[] schemaIDs) {
    if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled()) SibTr.entry(tc, "getEncodedSchemataBySchemaIDs", schemaIDs);
    JMFSchema[] temp = new JMFSchema[schemaIDs.length];
    int j = 0;
    // Look each id up in the registry; collect only the ones we can resolve.
    for (int i = 0; i < schemaIDs.length; i++) {
        JMFSchema schema = JSRegistry.instance.retrieve(schemaIDs[i]);
        if (schema == null) {
            if (TraceComponent.isAnyTracingEnabled() && tc.isDebugEnabled()) SibTr.debug(tc, "Unable to retrieve message schema "+schemaIDs[i]);
            MessageDecodeFailedException e = new MessageDecodeFailedException("No schema registered for schema id "+schemaIDs[i]);
            // Fix: the FFDC probe string previously named a non-existent
            // method ("getEncodedSchemasBySchemaIDs"); it now matches this
            // method's actual name.
            FFDCFilter.processException(e, "com.ibm.ws.sib.mfp.impl.SchemaManager.getEncodedSchemataBySchemaIDs", "281");
        }
        else{
            temp[j++] = schema;
        }
    }
    // Compact the found schemas into an exactly-sized array.
    JMFSchema[] found = new JMFSchema[j];
    System.arraycopy(temp,0,found,0,j);
    if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled()) SibTr.exit(tc, "getEncodedSchemataBySchemaIDs");
    // Encode the list and return it.
    return makeSchemaDefinitionList(found, j);
}
/*
 * Helper methods
 */
// Creates the encoded list of schema ids: one 8-byte long per schema,
// preceded by the standard Coder header.
private static byte[] makeSchemaIdList(JMFSchema[] schemas) {
    Coder coder = new Coder(ArrayUtil.LONG_SIZE * schemas.length, schemas.length);
    // Write each id at the coder's running offset.
    for (int idx = 0; idx < coder.count; idx++) {
        ArrayUtil.writeLong(coder.buffer, coder.offset, schemas[idx].getID());
        coder.offset += ArrayUtil.LONG_SIZE;
    }
    if (TraceComponent.isAnyTracingEnabled() && tc.isDebugEnabled()) {
        SibTr.debug(tc, "Encoding " + coder.count + " schema ids for handshake");
        SibTr.bytes(tc, coder.buffer, Coder.HDR_LENGTH);
    }
    return coder.buffer;
}
// Create an initial Set of incoming schema ids decoded from a handshake.
private static SchemaSet makeSchemaIdSet(Coder coder) {
    // Fix: added the missing space so the trace reads "N schema ids"
    // rather than "Nschema ids", matching the encode-side message.
    if (TraceComponent.isAnyTracingEnabled() && tc.isDebugEnabled())
        SibTr.debug(tc, "Decoding " + coder.count + " schema ids from handshake");
    if (TraceComponent.isAnyTracingEnabled() && tc.isDebugEnabled())
        SibTr.bytes(tc, coder.buffer, Coder.HDR_LENGTH);
    SchemaSet result = new SchemaSet();
    // Each id is a fixed-width long at the coder's running offset.
    for (int i = 0; i < coder.count; i++) {
        long id = ArrayUtil.readLong(coder.buffer, coder.offset);
        coder.offset += ArrayUtil.LONG_SIZE;
        result.add(Long.valueOf(id));
    }
    return result;
}
// Create the encoded list of schema definitions: for each schema an int
// length prefix followed by its serialized form, wrapped in a Coder header.
private static byte[] makeSchemaDefinitionList(JMFSchema[] schemas, int count) {
    // Calculate the buffer size needed to encode all the schema definitions.
    int length = 0;
    for (int i = 0; i < count; i++)
        length += ArrayUtil.INT_SIZE + schemas[i].toByteArray().length;
    // And encode them.
    Coder coder = new Coder(length, count);
    for (int i = 0; i < coder.count; i++) {
        byte[] b = schemas[i].toByteArray();
        if (TraceComponent.isAnyTracingEnabled() && tc.isDebugEnabled()) SibTr.debug(tc, " Encoded Schema "+ debugSchema(schemas[i]) + " for transmission");
        ArrayUtil.writeInt(coder.buffer, coder.offset, b.length);
        coder.offset += ArrayUtil.INT_SIZE;
        System.arraycopy(b, 0, coder.buffer, coder.offset, b.length);
        coder.offset += b.length;
    }
    // Fix: added the missing space so the trace reads "N new schema
    // definitions" rather than "Nnew schema definitions".
    if (TraceComponent.isAnyTracingEnabled() && tc.isDebugEnabled())
        SibTr.debug(tc, "Encoding " + coder.count + " new schema definitions");
    return coder.buffer;
}
|
java
|
/**
 * Decodes the schema definitions in the coder, registering each with the
 * JMF registry and adding its id to the connection's known-schema set.
 *
 * @param ids   the known-schema set for the connection
 * @param coder the encoded schema definitions (length-prefixed entries)
 * @throws JMFException if a schema cannot be created or registered
 */
private static void addSchemaDefinitions(SchemaSet ids, Coder coder) throws JMFException {
    // Fix: added the missing space so the trace reads "N new schema
    // definitions" rather than "Nnew schema definitions".
    if (TraceComponent.isAnyTracingEnabled() && tc.isDebugEnabled())
        SibTr.debug(tc, "Decoding " + coder.count + " new schema definitions");
    // Decode the new schemas and add them to the registry and the list of known
    // schemas for this connection.
    for (int i = 0; i < coder.count; i++) {
        int length = ArrayUtil.readInt(coder.buffer, coder.offset);
        coder.offset += ArrayUtil.INT_SIZE;
        JMFSchema schema = JMFRegistry.instance.createJMFSchema(coder.buffer, coder.offset, length);
        JMFRegistry.instance.register(schema);
        ids.add(schema.getLongID());
        if (TraceComponent.isAnyTracingEnabled() && tc.isDebugEnabled()) SibTr.debug(tc, " Added Schema "+ debugSchema(schema));
        coder.offset += length;
    }
}
|
java
|
/**
 * Scrubs the temporary subject (principals plus public and private
 * credentials) under a privileged action, then drops the reference so the
 * subject can be collected.
 */
public void cleanUpSubject() {
    if (temporarySubject != null) {
        // Removal may require security permissions, so run it privileged.
        PrivilegedAction<Object> scrub = new PrivilegedAction<Object>() {
            @Override
            public Object run() {
                removeSubjectPrincipals();
                removeSubjectPublicCredentials();
                removeSubjectPrivateCredentials();
                return null;
            }
        };
        AccessController.doPrivileged(scrub);
    }
    // Drop the reference whether or not any scrubbing was needed.
    temporarySubject = null;
}
|
java
|
/**
 * Notification that payloadSize bytes of previously-unwritten payload have
 * now been written to the underlying recovery log. Decreases the unwritten
 * total and propagates the adjustment to the owning recovery unit, adding
 * the header size when the unwritten total drops to zero.
 *
 * @param payloadSize the number of payload bytes just written
 */
protected void payloadWritten(int payloadSize)
{
if (tc.isEntryEnabled()) Tr.entry(tc, "payloadWritten", new Object[] {this, new Integer(payloadSize)});
// Track the unwritten payload decrease directly. We take no account for this classes header
// values in this figure. The total payload remains unchanged since we are not removing the
// corresponding payload, just writing it to the underlying recovery log.
_unwrittenDataSize -= payloadSize;
// When writing existing payload, if the resulting unwritten data size has gone back down to
// zero then there will be no further need to account for the unwritten data header.
// When we pass on this payload adjustment we must account for the header size.
if (_unwrittenDataSize == 0)
{
_recUnit.payloadWritten(payloadSize + HEADER_SIZE);
}
else
{
_recUnit.payloadWritten(payloadSize);
}
if (tc.isDebugEnabled()) Tr.debug(tc, "unwrittenDataSize = " + _unwrittenDataSize + " totalDataSize = " + _totalDataSize);
if (tc.isEntryEnabled()) Tr.exit(tc, "payloadWritten");
}
|
java
|
/**
 * Notification that payload has been deleted. Decreases both the total and
 * unwritten figures and propagates the adjustment to the owning recovery
 * unit, adding the header size to whichever figure has dropped to zero
 * (the header need no longer be accounted for in that case).
 *
 * @param totalPayloadSize     total payload bytes removed
 * @param unwrittenPayloadSize the unwritten portion of those bytes
 */
protected void payloadDeleted(int totalPayloadSize, int unwrittenPayloadSize)
{
if (tc.isEntryEnabled()) Tr.entry(tc, "payloadDeleted", new Object[] {this, new Integer(totalPayloadSize), new Integer(unwrittenPayloadSize)});
// Track the payload decreases directly. We take no account for this classes header values
// in these figures.
_totalDataSize -= totalPayloadSize;
_unwrittenDataSize -= unwrittenPayloadSize;
// When removing existing payload, if the resulting unwritten data size has gone back down to
// zero then there will be no further need to account for the unwritten data header.
// When we pass on this payload adjustment we must account for the header size.
// (The unwrittenPayloadSize != 0 guard avoids adding the header when this
// delete did not touch any unwritten data.)
if (_unwrittenDataSize == 0 && (unwrittenPayloadSize != 0))
{
unwrittenPayloadSize += HEADER_SIZE;
}
// When removing existing payload, if the resulting written data size has gone back down to
// zero then there will be no further need to account for the written data header.
// When we pass on this payload adjustment we must account for the header size.
if (_totalDataSize == 0)
{
totalPayloadSize += HEADER_SIZE;
}
_recUnit.payloadDeleted(totalPayloadSize, unwrittenPayloadSize);
if (tc.isDebugEnabled()) Tr.debug(tc, "unwrittenDataSize = " + _unwrittenDataSize + " totalDataSize = " + _totalDataSize);
if (tc.isEntryEnabled()) Tr.exit(tc, "payloadDeleted");
}
|
java
|
/**
 * Returns the home BeanId for this bean, creating and caching it from the
 * bean metadata's J2EE name on first use.
 *
 * @return the (lazily created) BeanId
 */
public BeanId getBeanId() {
    if (ivBeanId != null) {
        return ivBeanId;
    }
    // First call: build the home id (no primary key, not a home instance).
    ivBeanId = new BeanId(ivBMD.j2eeName, null, false);
    return ivBeanId;
}
|
java
|
/**
 * Prompts the user twice for the text to encode, using the standard
 * "encode" message keys for the prompts and error cases.
 *
 * @param stdin  wrapped console to read from
 * @param stdout stream to write prompts to
 * @return the entered text
 */
protected String promptForText(com.ibm.ws.security.audit.reader.utils.ConsoleWrapper stdin, PrintStream stdout) {
    // Delegate to the general prompt routine with the encode message keys:
    // first prompt, confirmation prompt, read-error and mismatch messages.
    return promptForText(stdin,
                         stdout,
                         "encode.enterText",
                         "encode.reenterText",
                         "encode.readError",
                         "encode.entriesDidNotMatch");
}
|
java
|
/**
 * Reads a system property under a privileged action.
 *
 * @param name the property name
 * @return the property value, or {@code null} if a SecurityException
 *         prevented the read (logged at FINE)
 */
public static String getPropertyOrNull(String name) {
    try {
        final SystemPropertyAction lookup = new SystemPropertyAction(name);
        return AccessController.doPrivileged(lookup);
    } catch (SecurityException ex) {
        // Denied by the security manager: log and report "not available".
        LOG.log(Level.FINE, "SecurityException raised getting property " + name, ex);
        return null;
    }
}
|
java
|
/**
 * Returns a copy of this MatchTarget via {@link Object#clone()}.
 *
 * @return the cloned MatchTarget
 * @throws IllegalStateException if clone is unsupported (should not happen)
 */
public MatchTarget duplicate()
{
  try
  {
    return (MatchTarget) clone();
  }
  catch (CloneNotSupportedException e)
  {
    // No FFDC Code Needed.
    // FFDC driven by wrapper class.
    FFDC.processException(cclass,
                          "com.ibm.ws.sib.matchspace.MatchTarget.duplicate",
                          e,
                          "1:112:1.15");
    // Should not happen.
    // Fix: chain the CloneNotSupportedException as the cause so the real
    // failure is not lost from the stack trace.
    throw new IllegalStateException(e);
  }
}
|
java
|
/**
 * Builds the JNDI binding object for a single EJB interface.
 *
 * @param hr             the home record for the bean
 * @param homeSet        the home wrapper set (unused by this binding type)
 * @param interfaceName  fully qualified name of the bound interface
 * @param interfaceIndex index of the interface within the home
 * @param local          true for a local interface, false for remote
 * @return a new EJBBinding for the interface
 */
@Override
public EJBBinding createBindingObject(HomeRecord hr,
                                      HomeWrapperSet homeSet,
                                      String interfaceName,
                                      int interfaceIndex,
                                      boolean local) {
    // The home wrapper set is not needed to construct the binding.
    EJBBinding binding = new EJBBinding(hr, interfaceName, interfaceIndex, local);
    return binding;
}
|
java
|
/**
 * Returns the binding object to use for java: namespace bindings. This
 * binding type reuses the already-created binding unchanged.
 *
 * @param hr             the home record for the bean (unused)
 * @param homeSet        the home wrapper set (unused)
 * @param interfaceName  fully qualified name of the bound interface (unused)
 * @param interfaceIndex index of the interface within the home (unused)
 * @param local          true for a local interface (unused)
 * @param bindingObject  the binding created for the interface
 * @return {@code bindingObject} unchanged
 */
@Override
public EJBBinding createJavaBindingObject(HomeRecord hr,
                                          HomeWrapperSet homeSet,
                                          String interfaceName,
                                          int interfaceIndex,
                                          boolean local,
                                          EJBBinding bindingObject) {
    // No java:-specific wrapping is required for this binding type.
    return bindingObject;
}
|
java
|
/**
 * Loads a class using a child-first strategy for packages this loader
 * owns: classes in {@code packageList} packages are looked up on this
 * loader's classpath before delegating to the parent. All other classes
 * go straight to the parent.
 *
 * @param className the binary name of the class to load
 * @param resolve   legacy flag, ignored (see comment at the end)
 * @return the loaded class
 * @throws ClassNotFoundException if neither this loader nor the parent can
 *         find the class
 */
@Override
protected Class<?> loadClass(String className, boolean resolve)
throws ClassNotFoundException {
Class<?> loadedClass = null;
// Synchronize so the findLoadedClass/findClass pair is atomic for this
// loader; parent delegation below happens outside the lock.
synchronized (this) {
loadedClass = findLoadedClass(className);
if (loadedClass == null) {
// Derive the package name; default-package classes use "".
int index = className.lastIndexOf('.');
String packageName = index > 0 ? className.substring(0, index) : "";
if (packageList.contains(packageName)) {
try {
// first check our classpath
loadedClass = findClass(className);
} catch (ClassNotFoundException cnfe) {
// ignore this since we'll try the parent next
}
}
}
}
if (null == loadedClass) {
// then the parent classpath
loadedClass = super.loadClass(className, resolve);
}
// The resolve parameter is a legacy parameter that is effectively
// never used as of JDK 1.1 (see footnote 1 of section 5.3.2 of the 2nd
// edition of the JVM specification). The only caller of this method is
// is java.lang.ClassLoader.loadClass(String), and that method always
// passes false, so we ignore the parameter.
return loadedClass;
}
|
java
|
/**
 * Converts an X.500 attribute value string to its DER representation.
 * A leading '#' marks a hex-encoded DER value; EmailAddress values become
 * IA5Strings; otherwise the narrowest string type that can represent the
 * value is chosen (Printable, then UTF8, then BMP).
 *
 * @param oid   the attribute type being converted
 * @param value the string form of the value
 * @return the DER object for the value
 * @throws RuntimeException if a hex-encoded value cannot be recoded
 */
public DERObject getConvertedValue(
    DERObjectIdentifier oid,
    String value)
{
    // '#' prefix: the remainder is a hex-encoded DER object.
    if (value.length() != 0 && value.charAt(0) == '#') {
        try {
            return convertHexEncoded(value, 1);
        } catch (IOException e) {
            throw new RuntimeException("can't recode value for oid " + oid.getId(), e);
        }
    }
    // Email addresses are always IA5 (ASCII) strings per RFC 5280.
    if (oid.equals(X509Name.EmailAddress)) {
        return new DERIA5String(value);
    }
    // Otherwise pick the tightest encoding that fits the characters.
    if (canBePrintable(value)) {
        return new DERPrintableString(value);
    }
    if (canBeUTF8(value)) {
        return new DERUTF8String(value);
    }
    return new DERBMPString(value);
}
|
java
|
/**
 * ClassFileTransformer hook: instruments probe-candidate classes by
 * hooking their static initializer; everything else is left untouched.
 *
 * @param loader              defining loader (null for bootstrap classes)
 * @param className           internal-form class name
 * @param classBeingRedefined class if this is a redefinition, else null
 * @param protectionDomain    protection domain of the class
 * @param classfileBuffer     the original class bytes
 * @return transformed bytes, or null to keep the original class unchanged
 * @throws IllegalClassFormatException never thrown directly here
 */
@Override
public byte[] transform(ClassLoader loader,
                        String className,
                        Class<?> classBeingRedefined,
                        ProtectionDomain protectionDomain,
                        byte[] classfileBuffer) throws IllegalClassFormatException {
    // Skip bootstrap-loaded classes (unless explicitly included) and
    // VM-internal classes such as reflection support classes.
    final boolean skipBootstrap = loader == null && !includeBootstrap;
    if (skipBootstrap || probeManagerImpl.isExcludedClass(className)) {
        return null;
    }
    // Returning null tells the JVM to keep the original bytes; only probe
    // candidates are rewritten.
    if (!probeManagerImpl.isProbeCandidate(className)) {
        return null;
    }
    return transformCandidate(classfileBuffer);
}
|
java
|
/**
 * Administers new minimum and maximum store file sizes. Validates that
 * min <= max and that max still covers the space already used, grows the
 * physical file now if the new minimum requires it, then persists the new
 * limits in the store header.
 *
 * Synchronized so flush() is locked out while the sizes change.
 *
 * @param newMinimumStoreFileSize new minimum file size, in bytes
 * @param newMaximumStoreFileSize new maximum file size, in bytes
 * @throws IllegalArgumentException      if minimum exceeds maximum
 * @throws StoreFileSizeTooSmallException if maximum is below current usage
 * @throws PermanentIOException          if growing the file fails
 * @throws ObjectManagerException        on other store errors
 */
public synchronized void setStoreFileSize(long newMinimumStoreFileSize
, long newMaximumStoreFileSize)
throws ObjectManagerException
{
if (Tracing.isAnyTracingEnabled() && trace.isEntryEnabled())
trace.entry(this,
cclass,
"setStoreFileSize",
new Object[] { new Long(newMinimumStoreFileSize), new Long(newMaximumStoreFileSize) }
);
// Synchronized so we have locked out flush();
if (newMinimumStoreFileSize > newMaximumStoreFileSize) {
if (Tracing.isAnyTracingEnabled() && trace.isEntryEnabled())
trace.exit(this, cclass
, "setStoreFileSize"
);
throw new IllegalArgumentException(newMinimumStoreFileSize + ">" + newMaximumStoreFileSize);
} // if (newStoreFileSize...
// Check that the new MaximumStoreFileSize is still bigger than the existing
// contents if the ObjectStore.
if (newMaximumStoreFileSize < storeFileSizeUsed) {
if (Tracing.isAnyTracingEnabled() && trace.isEntryEnabled())
trace.exit(this, cclass
, "setStoreFileSize"
, new Object[] { new Long(newMaximumStoreFileSize), new Long(storeFileSizeAllocated), new Long(storeFileSizeUsed) }
);
throw new StoreFileSizeTooSmallException(this
, newMaximumStoreFileSize
, storeFileSizeAllocated
, storeFileSizeUsed);
} // if (newStoreFileSize...
// If we are expanding the minimum file size grab the disk space now.
// If we fail before storing the new administered values the space will be release next time
// we open().
if (newMinimumStoreFileSize > storeFileSizeAllocated) {
try {
setStoreFileSizeInternalWithException(newMinimumStoreFileSize);
} catch (java.io.IOException exception)
{
// No FFDC Code Needed.
ObjectManager.ffdc.processException(this, cclass, "setStoreFileSize", exception, "1:349:1.57");
if (Tracing.isAnyTracingEnabled() && trace.isEntryEnabled())
trace.exit(this, cclass, "setStoreFileSize");
throw new PermanentIOException(this, exception);
}
} // if ( newMinimumStoreFileSize > storeFileSizeAllocated).
// Validation passed and space (if needed) is secured: commit the new
// limits and persist them in the store header before re-evaluating
// whether allocation may proceed.
minimumStoreFileSize = newMinimumStoreFileSize;
maximumStoreFileSize = newMaximumStoreFileSize;
writeHeader();
force();
setAllocationAllowed();
if (Tracing.isAnyTracingEnabled() && trace.isEntryEnabled())
trace.exit(this,
cclass,
"setStoreFileSize");
}
|
java
|
/**
 * Sets the size of the in-memory cache of ManagedObjects and persists the
 * new size in the store header. The existing cache array is replaced, so
 * any previously cached soft references are discarded.
 *
 * @param cachedManagedObjectsSize the new cache capacity
 * @throws ObjectManagerException if the header cannot be written
 */
public synchronized void setCachedManagedObjectsSize(int cachedManagedObjectsSize)
throws ObjectManagerException
{
this.cachedManagedObjectsSize = cachedManagedObjectsSize;
// Raw SoftReference array to match the field's declared type.
cachedManagedObjects = new java.lang.ref.SoftReference[cachedManagedObjectsSize];
// Persist the new size so it survives restart.
writeHeader();
force();
}
|
java
|
/**
 * Re-evaluates whether new allocations may be made in this ObjectStore and
 * sets {@code allocationAllowed} plus the reservation thresholds
 * accordingly. Pessimistically assumes the entire log contents might need
 * writing to this store, grows the store file if required (up to the
 * maximum), and triggers a checkpoint when the store is effectively full.
 * Also releases at least one blocked paced reservation request.
 *
 * @throws ObjectManagerException if resizing the store file fails
 */
protected void setAllocationAllowed()
throws ObjectManagerException {
final String methodName = "setAllocationAllowed";
if (Tracing.isAnyTracingEnabled() && trace.isEntryEnabled())
trace.entry(this, cclass, methodName, new Object[] { new Long(storeFileSizeAllocated),
new Long(storeFileSizeUsed) });
// Users of the store should have reserved space in the store for Objects that are allocated before
// they finally add them to the store. We have already reserved enough space for a complete replacement
// of the directory which also implies there is already enough space to delete any number of
// ManagedObjects in the store.
// Calculate how much file space we need to continue storing ManagedObjects.
// We make the pessimistic assumption that the entire contents of the log might need to be written
// to this ObjectStore.
// We have to take into account any pending request to increase the size of the log file, because this
// would allow applications to over commit the store if they used the extra log space when it did not
// exist in the Object Store.
long storeFileSizeRequired = storeFileSizeAllocated;
if (simulateFullReservedSize > 0) {
// We are simulating a full filesystem (test support): disallow all
// allocation and reservations.
allocationAllowed = false;
reservationThreshold = 0;
}
else
{
// Do we have enough space to accommodate the whole log in the store
// plus the reserved space in the store?
long currentReservedSize = reservedSize.get();
long pesimisticSpaceRequired = Math.max(objectManagerState.logOutput.getLogFileSize(),
objectManagerState.logOutput.getLogFileSizeRequested())
+ directoryReservedSize
+ currentReservedSize;
long largestFreeSpace = 0;
if (freeSpaceByLength.size() > 0) {
largestFreeSpace = ((FreeSpace) freeSpaceByLength.last()).length;
}
if (pesimisticSpaceRequired <= largestFreeSpace) {
// we can at least fit the whole log plus reserved space in the largest free space entry
// so we are ok
allocationAllowed = true;
}
else
{
storeFileSizeRequired = storeFileSizeUsed +
pesimisticSpaceRequired;
// See if we need to change the size of the store file.
if (storeFileSizeRequired <= storeFileSizeAllocated) {
// we have allocated more than is required, its just not in the free space map yet
allocationAllowed = true;
} else if (storeFileSizeRequired <= maximumStoreFileSize) {
// we need more than we have allocated, and that is less than the maximum allowed, try grow to the required...
allocationAllowed = setStoreFileSizeInternal(storeFileSizeRequired);
} else {
// We need greater than the maximum allowed, make sure we have all the space we are
// allowed to have.
if (storeFileSizeAllocated < maximumStoreFileSize)
setStoreFileSizeInternal(maximumStoreFileSize);
// TODO We could still allow allocation until we really are full when the reservation scheme will catch it!
allocationAllowed = false;
// Request a checkpoint, see if we can clear some space.
numberOfStoreFullCheckpointsTriggered++;
objectManagerState.requestCheckpoint(persistent);
} // if ( storeFileSizeRequired...
// Recompute the reservation thresholds from the (possibly resized)
// allocation figures.
reservationThreshold = storeFileSizeAllocated - storeFileSizeUsed - directoryReservedSize;
reservationCheckpointThreshold = Math.min(reservationThreshold,
currentReservedSize + reservationCheckpointMaximum);
}
}
// Release at least one blocked reservation request.
if (reservationPacing) {
synchronized (reservationPacingLock) {
reservationPacing = false;
reservationPacingLock.notify();
} // synchronized (reservationPacingLock).
}
if (Tracing.isAnyTracingEnabled() && trace.isEntryEnabled())
trace.exit(this,
cclass,
methodName,
new Object[] { new Boolean(allocationAllowed), new Long(storeFileSizeRequired) });
}
|
java
|
/**
 * Reserves (or with a negative delta, releases) space in the store for
 * ManagedObjects that will be added later. Above the checkpoint threshold
 * a checkpoint is requested and, if {@code paced}, the caller may be made
 * to wait on {@code reservationPacingLock} until space is released. Above
 * the reservation threshold the store file is grown if possible; if the
 * space still cannot be found the reservation is rolled back and
 * ObjectStoreFullException is thrown (suppressed during log replay and for
 * negative deltas).
 *
 * @param deltaSize bytes to reserve (negative to release)
 * @param paced     true if this request may be blocked for pacing
 * @throws ObjectStoreFullException     if the reservation cannot be honoured
 * @throws UnexpectedExceptionException if the pacing wait is interrupted
 * @throws ObjectManagerException       on other store errors
 */
public final void reserve(int deltaSize, boolean paced)
throws ObjectManagerException
{
final String methodName = "reserve";
if (Tracing.isAnyTracingEnabled() && trace.isEntryEnabled())
trace.entry(this,
cclass,
methodName,
new Object[] { new Integer(deltaSize), new Boolean(paced) });
long newReservedSize = reservedSize.addAndGet(deltaSize);
if (newReservedSize > reservationCheckpointThreshold) {
numberOfReservationCheckpointsTriggered++;
objectManagerState.requestCheckpoint(persistent);
// Pacing: slow paced requests.
// ----------------------------
if (paced) {
synchronized (reservationPacingLock) {
// TODO not during restart!
// TODO Need to release all waiters at shutdown.
while (newReservedSize > reservationCheckpointThreshold
&& reservationPacing
&& paced) {
if (Tracing.isAnyTracingEnabled() && trace.isDebugEnabled())
trace.debug(this, cclass, methodName, new Object[] { "wait:728",
new Long(newReservedSize),
new Long(reservationCheckpointThreshold),
new Long(reservationThreshold),
new Boolean(reservationPacing) });
try {
// Back out our reservation while we wait so blocked space is
// not counted against the threshold; the finally block re-adds
// it before the loop condition is re-tested.
newReservedSize = reservedSize.addAndGet(-deltaSize);
reservationPacingLock.wait();
} catch (InterruptedException exception) {
// No FFDC Code Needed.
ObjectManager.ffdc.processException(this, cclass, methodName, exception, "1:739:1.57");
if (Tracing.isAnyTracingEnabled() && trace.isEntryEnabled())
trace.exit(this, cclass, methodName, exception);
throw new UnexpectedExceptionException(this, exception);
} finally {
newReservedSize = reservedSize.addAndGet(deltaSize);
} // try...
} // while (newReservedSize > reservationCheckpointThreshold...
// If we have a big enough backlog of reserved space in the store,
// make subsequent paced requests also wait,
// except during restart or shutdown.
if (newReservedSize > reservationCheckpointThreshold
&& (objectManagerState.state == ObjectManagerState.stateColdStarted
|| objectManagerState.state == ObjectManagerState.stateWarmStarted)) {
reservationPacing = true;
} else {
// Allow at least one more waiter to proceed.
reservationPacing = false;
reservationPacingLock.notify();
} // if (newReservedSize...
} // synchronized (reservationPacingLock).
} // if (paced)..
// Expand the file size if necessary.
// ----------------------------------
if (newReservedSize > reservationThreshold) {
synchronized (this) {
// Try extending the file.
// TODO setAllocationAllowed() will release another paced reservation.
setAllocationAllowed();
if (newReservedSize > (storeFileSizeAllocated - storeFileSizeUsed - directoryReservedSize)) {
// Suppress storeFull exceptions during recovery.
// Also suppress the exception if we are releasing space by using a negative delta size.
// During commit surplus space is released into the store. If the directory depth has
// increased meanwhile this may mean that there is still insufficient space in the store to allow
// further reservation requests, but we still allow negative requests.
if (objectManagerState.getObjectManagerStateState() == ObjectManagerState.stateReplayingLog
|| deltaSize <= 0) {
if (Tracing.isAnyTracingEnabled() && trace.isDebugEnabled())
trace.debug(this, cclass, methodName, new Object[] { "Objectstorefull exception supressed:783",
new Long(newReservedSize) });
} else {
// We can't make the reservation so take it back.
reservedSize.addAndGet(-deltaSize);
if (Tracing.isAnyTracingEnabled() && trace.isEntryEnabled())
trace.exit(this, cclass, methodName, new Object[] { new Long(newReservedSize) });
throw new ObjectStoreFullException(this, null); // TODO objectToStore
} // if (objectManagerState...
} // if (reservedSize > (storeFileSizeAllocated - storeFileSizeUsed)).
} // synchronized (this).
} // if (newReservedSize > reservationThreshold).
} else if (reservationPacing) {
// We are below the limit so release any paced reservations.
synchronized (reservationPacingLock) {
pacedReservationsReleased++;
reservationPacing = false;
reservationPacingLock.notify();
} // synchronized (reservationPacingLock).
} // if (newReservedSize > reservationCheckpointThreshold).
if (Tracing.isAnyTracingEnabled() && trace.isEntryEnabled())
trace.exit(this, cclass, methodName);
}
|
java
|
/**
 * Test support: simulates a full (or no-longer-full) ObjectStore. When
 * {@code isFull} is true, a checkpoint is forced and then all remaining
 * free space is reserved so subsequent reservations fail; when false, the
 * simulated reservation is handed back and a checkpoint is requested.
 *
 * @param isFull true to simulate a full store, false to undo it
 * @throws ObjectManagerException if the checkpoint wait fails
 */
public void simulateFull(boolean isFull)
throws ObjectManagerException {
final String methodName = "simulateFull";
if (Tracing.isAnyTracingEnabled() && trace.isEntryEnabled())
trace.entry(this, cclass, methodName, new Object[] { new Boolean(isFull) });
if (isFull) {
// Clear as much space as we can.
objectManagerState.waitForCheckpoint(true);
// Reserve all of the available space.
synchronized (this) {
long available = storeFileSizeAllocated - storeFileSizeUsed - directoryReservedSize - reservedSize.get();
// NOTE(review): the (int) cast truncates "available" when it exceeds
// Integer.MAX_VALUE, under-reserving for stores larger than 2GB of
// free space — verify reservedSize's type and widen if it accepts long.
long newReservedSize = reservedSize.addAndGet((int) available);
simulateFullReservedSize = simulateFullReservedSize + available;
if (Tracing.isAnyTracingEnabled() && trace.isDebugEnabled())
trace.debug(this, cclass, methodName, new Object[] { "isFull:834",
new Long(available),
new Long(newReservedSize),
new Long(simulateFullReservedSize) });
} // synchronized (this).
} else {
synchronized (this) {
// NOTE(review): same potential (int) truncation as above when handing
// back a simulated reservation larger than Integer.MAX_VALUE.
reservedSize.addAndGet((int) -simulateFullReservedSize);
simulateFullReservedSize = 0;
} // synchronized (this).
objectManagerState.waitForCheckpoint(true);
} // if (isFull).
if (Tracing.isAnyTracingEnabled() && trace.isEntryEnabled())
trace.exit(this, cclass, methodName, new Object[] { new Long(simulateFullReservedSize) });
}
|
java
|
/**
 * Writes the modified directory, free space map and header to disk. For
 * STRATEGY_SAVE_ONLY_ON_SHUTDOWN stores, flushing only happens at shutdown
 * and is skipped (with a warning) while transactions are still active.
 * Synchronized: only one flush runs at a time, and setStoreFileSize() is
 * locked out for the duration.
 *
 * @throws ObjectManagerException if any of the writes fail
 */
public synchronized void flush()
throws ObjectManagerException
{
final String methodName = "flush";
if (Tracing.isAnyTracingEnabled() && trace.isEntryEnabled())
trace.entry(this,
cclass,
methodName);
if (storeStrategy == STRATEGY_SAVE_ONLY_ON_SHUTDOWN) {
// Since we only flush on shutdown make sure we are shutting down, otherwise just return.
if (objectManagerState.getObjectManagerStateState() != ObjectManagerState.stateStopped) {
if (Tracing.isAnyTracingEnabled() && trace.isEntryEnabled())
trace.exit(this,
cclass,
methodName);
return;
}
// If there are any active transactions it is not safe to flush.
if (objectManagerState.getTransactionIterator().hasNext()) {
trace.warning(this,
cclass,
methodName,
"ObjectStore_UnsafeToFlush",
this
);
if (Tracing.isAnyTracingEnabled() && trace.isEntryEnabled())
trace.exit(this,
cclass,
methodName);
return;
}
} // if ( storeStrategy == STRATEGY_SAVE_ONLY_ON_SHUTDOWN ).
// Debug freespace list
// if (Tracing.isAnyTracingEnabled() && trace.isDebugEnabled()) trace.debug(this, cclass, methodName, "START of Flush: newFreeSpace.size() = "+newFreeSpace.size());
// if (Tracing.isAnyTracingEnabled() && trace.isDebugEnabled()) trace.debug(this, cclass, methodName, "START of Flush: freeSpaceByLength.size() = "+freeSpaceByLength.size());
// We are single threaded through flush, we are now about to make
// updates to the directory and free space map, which must be consistent.
// Also reserve space in the file for the directory updates and free space map.
updateDirectory();
// Release any space into the free space pool.
if (Tracing.isAnyTracingEnabled() && trace.isDebugEnabled())
trace.debug(this,
cclass,
methodName,
"Release new free space");
// Free up space that was released by directory changes, replaced or deleted Objects.
freeAllocatedSpace(newFreeSpace);
if (gatherStatistics) {
long now = System.currentTimeMillis();
releasingEntrySpaceMilliseconds += now - lastFlushMilliseconds;
lastFlushMilliseconds = now;
} // if (gatherStatistics).
// Write the modified parts of the directory to disk.
directory.write();
if (gatherStatistics) {
long now = System.currentTimeMillis();
directoryWriteMilliseconds += now - lastFlushMilliseconds;
lastFlushMilliseconds = now;
} // if (gatherStatistics).
// Write the free space map to disk.
writeFreeSpace();
// Force the data to disk then force the header, if we can make the assumption that
// these writes go to disk first, this first force is unnecessary.
if (storeStrategy == STRATEGY_KEEP_ALWAYS)
force();
writeHeader();
if (storeStrategy == STRATEGY_KEEP_ALWAYS)
force();
// Debug freespace list
// if (Tracing.isAnyTracingEnabled() && trace.isDebugEnabled()) trace.debug(this, cclass, methodName, "END of Flush: newFreeSpace.size() = "+newFreeSpace.size());
// if (Tracing.isAnyTracingEnabled() && trace.isDebugEnabled()) trace.debug(this, cclass, methodName, "END of Flush: freeSpaceByLength.size() = "+freeSpaceByLength.size());
// Defect 573905
// Reset the newFreeSpace after we have finished with it to release the
// memory it uses while we are not flushing.
newFreeSpace.clear();
if (Tracing.isAnyTracingEnabled() && trace.isEntryEnabled())
trace.exit(this,
cclass,
methodName);
}
|
java
|
/**
 * Flushes the checkpointed directory state to the store file.
 *
 * Writes the captured set of checkpoint ManagedObjects, removes deleted
 * Tokens from the in-memory directory (noting their store areas as new free
 * space), then reserves file space for the updated directory and the
 * free-space map, and finally recomputes the allocation thresholds.
 *
 * NOTE(review): assumes the checkpoint manager has quiesced transactions
 * before calling (see comment below) — confirm against the caller.
 *
 * @throws ObjectManagerException if directory update or space allocation fails.
 */
private void updateDirectory()
throws ObjectManagerException
{
final String methodName = "updateDirectory";
if (Tracing.isAnyTracingEnabled() && trace.isEntryEnabled())
trace.entry(this,
cclass,
methodName);
// TODO See below for how to deal with out of disk space conditions.
// // Keep the current disk location of the directory.
// long currentDirectoryRootAddress = ((Directory.Node)directory.root).byteAddress;
// long currentDirectoryRootLength = ((Directory.Node)directory.root).length;
// long currentStoreFileSizeUsed = storeFileSizeUsed;
//
// long newFreeSpaceLength;
// long newFreeSpaceByteAddress;
// try {
// Capture the ManagedObjects to write and delete. The checkpoint manager has does not
// call flush until all transactions have completd their checkpoint activity.
// If we have not seen any chekpoint updates truncate the write and delete sets now.
if (checkpointManagedObjectsToWrite == null)
captureCheckpointManagedObjects();
// Defect 573905
// Free space clear moved to end of flush.
// The reserved space we will release once the flush has written all it needs to the disk.
checkpointReleaseSize = 0;
if (gatherStatistics) // Start the clock.
lastFlushMilliseconds = System.currentTimeMillis();
// 1) Update the directory.
// ------------------------
// New write updates are not updating checkpointManagedObjectsToWrite, we have a safe set.
for (java.util.Iterator managedObjectIterator = checkpointManagedObjectsToWrite.values().iterator(); managedObjectIterator.hasNext();) {
ManagedObject managedObject = (ManagedObject) managedObjectIterator.next();
cache(managedObject);
write(managedObject);
} // For ... checkpointManagedObjectsToWrite.
// Drop the reference so the captured set can be garbage collected and a
// fresh set is captured on the next checkpoint.
checkpointManagedObjectsToWrite = null;
if (gatherStatistics) {
long now = System.currentTimeMillis();
writingMilliseconds += now - lastFlushMilliseconds;
lastFlushMilliseconds = now;
} // if (gatherStatistics).
// Remove Objects from the directory and make a note of any free space that we can release
// once we have allocated all of the new space we will need.
for (java.util.Iterator tokenIterator = checkpointTokensToDelete.values().iterator(); tokenIterator.hasNext();) {
Token token = (Token) tokenIterator.next();
// Delete the object by removing it from the directory, we dont touch
// the data on the disk.
Directory.StoreArea storeArea = (Directory.StoreArea) directory.removeEntry(new Long(token.storedObjectIdentifier));
// Did we ever write this?
if (storeArea != null)
newFreeSpace.add(storeArea);
} // For ... checkpointTokensToDelete.
checkpointTokensToDelete = null;
if (gatherStatistics) {
long now = System.currentTimeMillis();
removingEntriesMilliseconds += now - lastFlushMilliseconds;
lastFlushMilliseconds = now;
} // if (gatherStatistics).
// 2) Allocate new space for the directory and the free space map.
// ---------------------------------------------------------------
if (Tracing.isAnyTracingEnabled() && trace.isDebugEnabled())
trace.debug(this,
cclass,
methodName,
"Reserve space for the direcory and new free space map");
// All changes have been made to the directory, now reserve space in the file for any updates
// and release any space which will become free.
directory.reserveSpace();
if (gatherStatistics) {
long now = System.currentTimeMillis();
allocatingEntrySpaceMilliseconds += now - lastFlushMilliseconds;
lastFlushMilliseconds = now;
} // if (gatherStatistics).
// The previously written free-space map area itself becomes reusable space.
if (freeSpaceStoreArea != null && freeSpaceStoreArea.length != 0)
newFreeSpace.add(freeSpaceStoreArea);
// Make a worst case assumption that no new free space will merge with the
// existing free space.
long newFreeSpaceLength = (freeSpaceByLength.size() + newFreeSpace.size()) * freeSpaceEntryLength;
FreeSpace newFreeSpaceArea = allocateSpace(newFreeSpaceLength);
//TODO The following code segment will restore the ObjectStore to the state ir was in before we ran out
//TODO of disk space, however the serialized bytes will now have been released from the ManagedObjects.
// } catch (ObjectStoreFullException objectStoreFullException) {
// // No FFDC code needed.
// if (Tracing.isAnyTracingEnabled() && trace.isEventEnabled())
// trace.event(this,
// cclass,
// methodName,
// objectStoreFullException);
//
// // Not enough space was available to make the update.
// // Perhaps insufficient reservation() was done?
// // Put things back the way they were.
// managedObjectsToWrite.putAll(checkpointManagedObjectsToWrite);
// tokensToDelete.putAll(checkpointTokensToDelete);
// // Revert to the disk copy of the directory, if there is none
// // the length will be zero so we will create a new empty directory.
// directory = readDirectory(directory.getMinimumNodeSize(),
// currentDirectoryRootAddress,
// currentDirectoryRootLength);
//
// storeFileSizeUsed = currentStoreFileSizeUsed;
//
// if (Tracing.isAnyTracingEnabled() && trace.isEntryEnabled())
// trace.exit(this,
// cclass,
// methodName,
// new Object[]{objectStoreFullException});
// throw objectStoreFullException;
// } // try...
// All space allocation is now done.
// Record where the free-space map will live so it can be reclaimed on the
// next flush.
freeSpaceStoreArea = directory.makeStoreArea(freeSpaceIdentifier,
newFreeSpaceArea.address,
newFreeSpaceArea.length);
// Adjust the space we need to safely remove entries from the directory.
// The worstCaseDirectorySpace is an over estimate of the space needed so we might
// prematurely stop allocations.
directoryReservedSize = directory.spaceRequired();
// Give back all space that was previously reserved, but which has now been written.
reservedSize.addAndGet(-checkpointReleaseSize);
// See if the store is too now full to allow further ManagedObject allocation.
// Also set the reservation thresholds.
setAllocationAllowed();
if (Tracing.isAnyTracingEnabled() && trace.isEntryEnabled())
trace.exit(this,
cclass,
methodName);
}
|
java
|
/**
 * Allocates a contiguous area of the store file of the requested length.
 *
 * First tries a best-fit search of the free-space map (smallest entry that
 * accommodates the request); if a slot is found it is either consumed whole
 * (when the remainder would be smaller than minimumFreeSpaceEntrySize) or
 * shrunk in place. Otherwise the allocation is taken from the end of the
 * file, extending the file if necessary.
 *
 * @param lengthRequired number of bytes to allocate.
 * @return the allocated FreeSpace descriptor (address + length).
 * @throws ObjectManagerException including ObjectStoreFullException when the
 *         file cannot be extended within maximumStoreFileSize.
 */
FreeSpace allocateSpace(long lengthRequired)
throws ObjectManagerException
{
final String methodName = "allocateSpace";
if (Tracing.isAnyTracingEnabled() && trace.isEntryEnabled())
trace.entry(this,
cclass,
methodName,
new Object[] { new Long(lengthRequired) });
// Find some free space, first search the free space map for the smallest
// entry that will accommodate the request.
FreeSpace freeSpace;
// tailSet returns entries with length >= lengthRequired (sorted by length).
java.util.SortedSet tailSet = freeSpaceByLength.tailSet(new FreeSpace(0, lengthRequired));
if (!tailSet.isEmpty()) {
// Use free space in the body of file.
freeSpace = (FreeSpace) tailSet.first();
tailSet.remove(freeSpace);
long remainingLength = freeSpace.length - lengthRequired;
if (remainingLength < minimumFreeSpaceEntrySize) {
// All of this slot used up, also remove it from the address map.
// Unlink from the doubly-linked by-address list.
if (freeSpace.prev != null) {
freeSpace.prev.next = freeSpace.next;
}
else {
freeSpaceByAddressHead = freeSpace.next;
}
if (freeSpace.next != null) {
freeSpace.next.prev = freeSpace.prev;
}
// Return without any links
freeSpace.prev = freeSpace.next = null;
// Debug freespace list
// if (Tracing.isAnyTracingEnabled() && trace.isDebugEnabled()) trace.debug(this, cclass, methodName, "REMOVE from freespace list");
} else {
// Partially used, reduce the length, but leave it in place in the address Map.
// Allocate the end portion of the space found to a new FreeSpace area and
// add the remaining piece back into the length map, where its key will have changed.
freeSpace.length = remainingLength;
freeSpaceByLength.add(freeSpace);
freeSpace = new FreeSpace(freeSpace.address + remainingLength, lengthRequired);
} // if (remainingLength == 0).
} else {
// Add to end of file.
freeSpace = new FreeSpace(storeFileSizeUsed, lengthRequired);
long newStoreFileSizeUsed = storeFileSizeUsed + lengthRequired;
// Check to see if we need to extended the file.
if (newStoreFileSizeUsed > storeFileSizeAllocated) {
storeFileExtendedDuringAllocation++;
// This should not occur if we are using STRATEGY_KEEP_ALWAYS because we
// have already allocated enough space to store all the objects currently
// in the log.
if (storeStrategy == STRATEGY_KEEP_ALWAYS
&& (objectManagerState.logFileType == ObjectManager.LOG_FILE_TYPE_FILE
|| objectManagerState.logFileType == ObjectManager.LOG_FILE_TYPE_CLEAR)) {
// Unexpected extension under KEEP_ALWAYS: record an FFDC incident but
// continue — the extension itself is still attempted below.
ObjectManager.ffdc.processException(this,
cclass,
methodName,
new Exception("Extended allocated file"),
"1:1437:1.57",
new Object[] { new Long(newStoreFileSizeUsed), new Long(storeFileSizeAllocated) });
if (Tracing.isAnyTracingEnabled() && trace.isDebugEnabled())
trace.debug(this, cclass, methodName, new Object[] { "STRATEGY_KEEP_ALWAYS:1440",
new Long(newStoreFileSizeUsed),
new Long(storeFileSizeAllocated) });
} // if ( objectManagerState.logFileType...
if (newStoreFileSizeUsed <= maximumStoreFileSize
&& setStoreFileSizeInternal(newStoreFileSizeUsed)) {
} else {
// Cannot grow the file: block further allocations and fail this one.
allocationAllowed = false;
if (Tracing.isAnyTracingEnabled() && trace.isEntryEnabled())
trace.exit(this,
cclass,
methodName,
new Object[] { "ObjectStoreFull" });
throw new ObjectStoreFullException(this,
null);
}
} // if (newStoreFileSizeUsed > storeFileSizeAllocated).
storeFileSizeUsed = newStoreFileSizeUsed;
} // if (!tailMap.isEmpty()).
if (Tracing.isAnyTracingEnabled() && trace.isEntryEnabled())
trace.exit(this,
cclass,
methodName,
new Object[] { freeSpace });
return freeSpace;
}
|
java
|
/**
 * Returns the feature definitions for the named product.
 * Routes to the core product, the user extension, or a named product
 * extension depending on {@code productName}.
 */
public Map<String, ProvisioningFeatureDefinition> getFeatureDefinitions(String productName) {
    if (productName.equals(CORE_PRODUCT_NAME)) {
        return getCoreProductFeatureDefinitions();
    }
    if (productName.equals(USR_PRODUCT_EXT_NAME)) {
        return getUsrProductFeatureDefinitions();
    }
    return getProductExtFeatureDefinitions(productName);
}
|
java
|
/**
 * Reads all supported core feature manifests (platform manifests excluded),
 * keyed by symbolic name.
 *
 * @return a sorted map of symbolic name to feature definition (possibly empty).
 * @throws FeatureToolException if the feature directory is missing or a
 *         manifest cannot be read.
 */
public Map<String, ProvisioningFeatureDefinition> getCoreFeatureDefinitionsExceptPlatform() {
    Map<String, ProvisioningFeatureDefinition> result = new TreeMap<String, ProvisioningFeatureDefinition>();
    File coreFeatureDir = getCoreFeatureDir();
    // The feature directory may not exist if the packaged server had no
    // features installed when minified.
    if (!coreFeatureDir.isDirectory() && !coreFeatureDir.mkdir()) {
        throw new FeatureToolException("Unable to find or create feature directory: " + coreFeatureDir,
                                       MessageFormat.format(NLS.messages.getString("tool.feature.dir.not.found"), coreFeatureDir),
                                       null,
                                       ReturnCode.MISSING_CONTENT);
    }
    File[] manifests = coreFeatureDir.listFiles(MFFilter);
    if (manifests == null) {
        return result;
    }
    for (File manifest : manifests) {
        ProvisioningFeatureDefinition fd;
        try {
            fd = new SubsystemFeatureDefinitionImpl(ExtensionConstants.CORE_EXTENSION, manifest);
        } catch (IOException e) {
            // TODO: PROPER NLS MESSAGE
            throw new FeatureToolException("Unable to read core feature manifest: " + manifest,
                                           (String) null,
                                           e,
                                           ReturnCode.BAD_FEATURE_DEFINITION);
        }
        if (fd.isSupportedFeatureVersion()) {
            // Keyed by symbolic name because it is compared against
            // FeatureResource symbolic names.
            result.put(fd.getSymbolicName(), fd);
        }
    }
    return result;
}
|
java
|
/**
 * Reads all core product feature definitions: supported feature manifests
 * from the feature directory plus kernel platform manifests, keyed by
 * symbolic name.
 *
 * @return a sorted map of symbolic name to feature definition.
 * @throws FeatureToolException if the feature directory is missing or a
 *         manifest cannot be read.
 */
private Map<String, ProvisioningFeatureDefinition> getCoreProductFeatureDefinitions() {
    Map<String, ProvisioningFeatureDefinition> result = new TreeMap<String, ProvisioningFeatureDefinition>();
    File coreFeatureDir = getCoreFeatureDir();
    // The feature directory may not exist if the packaged server had no
    // features installed when minified.
    if (!coreFeatureDir.isDirectory() && !coreFeatureDir.mkdir()) {
        throw new FeatureToolException("Unable to find or create feature directory: " + coreFeatureDir,
                                       MessageFormat.format(NLS.messages.getString("tool.feature.dir.not.found"), coreFeatureDir),
                                       null,
                                       ReturnCode.MISSING_CONTENT);
    }
    File platformDir = getCorePlatformDir();
    // First pass: ordinary core feature manifests (version-filtered).
    File[] featureManifests = coreFeatureDir.listFiles(MFFilter);
    if (featureManifests != null) {
        for (File manifest : featureManifests) {
            ProvisioningFeatureDefinition fd;
            try {
                fd = new SubsystemFeatureDefinitionImpl(ExtensionConstants.CORE_EXTENSION, manifest);
            } catch (IOException e) {
                // TODO: PROPER NLS MESSAGE
                throw new FeatureToolException("Unable to read core feature manifest: " + manifest,
                                               (String) null,
                                               e,
                                               ReturnCode.BAD_FEATURE_DEFINITION);
            }
            if (fd.isSupportedFeatureVersion()) {
                // Keyed by symbolic name because it is compared against
                // FeatureResource symbolic names.
                result.put(fd.getSymbolicName(), fd);
            }
        }
    }
    // Second pass: kernel platform manifests (no version filter applies).
    File[] platformManifests = platformDir.listFiles(MFFilter);
    if (platformManifests != null) {
        for (File manifest : platformManifests) {
            try {
                ProvisioningFeatureDefinition fd = new KernelFeatureListDefinition(manifest);
                result.put(fd.getSymbolicName(), fd);
            } catch (IOException e) {
                // TODO: PROPER NLS MESSAGE
                throw new FeatureToolException("Unable to read core manifest: " + manifest,
                                               (String) null,
                                               e,
                                               ReturnCode.BAD_FEATURE_DEFINITION);
            }
        }
    }
    return result;
}
|
java
|
/**
 * Reads the user-extension feature definitions, keyed by symbolic name.
 *
 * NOTE: returns {@code null} (not an empty map) when the user directory or
 * its feature subdirectory does not exist — callers must handle null.
 *
 * @throws FeatureToolException if a user feature manifest cannot be read.
 */
private Map<String, ProvisioningFeatureDefinition> getUsrProductFeatureDefinitions() {
    File userDir = Utils.getUserDir();
    if (userDir == null || !userDir.exists()) {
        return null;
    }
    File userFeatureDir = new File(userDir, USER_FEATURE_DIR);
    if (!userFeatureDir.exists()) {
        return null;
    }
    Map<String, ProvisioningFeatureDefinition> features = new TreeMap<String, ProvisioningFeatureDefinition>();
    File[] manifests = userFeatureDir.listFiles(MFFilter);
    if (manifests != null) {
        for (File manifest : manifests) {
            try {
                ProvisioningFeatureDefinition fd = new SubsystemFeatureDefinitionImpl(USR_PRODUCT_EXT_NAME, manifest);
                features.put(fd.getSymbolicName(), fd);
            } catch (IOException e) {
                // TODO: PROPER NLS MESSAGE
                throw new FeatureToolException("Unable to read feature manifest from user extension: " + manifest,
                                               (String) null,
                                               e,
                                               ReturnCode.BAD_FEATURE_DEFINITION);
            }
        }
    }
    return features;
}
|
java
|
/**
 * Returns the install location for the named product, or {@code null} when
 * the product extension is unknown.
 */
public String getProdFeatureLocation(String productName) {
    if (productName.equals(CORE_PRODUCT_NAME)) {
        return Utils.getInstallDir().getAbsolutePath();
    }
    if (productName.equals(USR_PRODUCT_EXT_NAME)) {
        return Utils.getUserDir().getAbsolutePath();
    }
    // Product extensions are resolved lazily from the extension properties.
    readProductExtFeatureLocations();
    if (productExtNameInfoMap.containsKey(productName)) {
        return productExtNameInfoMap.get(productName).getLocation();
    }
    return null;
}
|
java
|
/**
 * Returns the product ID for a product extension, or {@code null} for the
 * core product, the user extension, or an unknown extension name.
 */
public String getProdFeatureId(String productName) {
    if (productName.equals(CORE_PRODUCT_NAME) || productName.equals(USR_PRODUCT_EXT_NAME)) {
        return null;
    }
    readProductExtFeatureLocations();
    if (productExtNameInfoMap.containsKey(productName)) {
        return productExtNameInfoMap.get(productName).getProductID();
    }
    return null;
}
|
java
|
/**
 * Returns the core feature directory under the install root.
 *
 * @throws RuntimeException if the install directory cannot be located.
 */
public File getCoreFeatureDir() {
    File installDir = Utils.getInstallDir();
    if (installDir == null) {
        throw new RuntimeException("Feature Directory not found");
    }
    return new File(installDir, FEATURE_DIR);
}
|
java
|
/**
 * Returns the core platform directory under the install root.
 *
 * @throws RuntimeException if the install directory cannot be located.
 */
public File getCorePlatformDir() {
    File installDir = Utils.getInstallDir();
    if (installDir == null) {
        throw new RuntimeException("Platform Directory not found");
    }
    return new File(installDir, PLATFORM_DIR);
}
|
java
|
/**
 * Returns the core asset directory under the install root.
 *
 * @throws RuntimeException if the install directory cannot be located.
 */
public File getCoreAssetDir() {
    File installDir = Utils.getInstallDir();
    if (installDir == null) {
        throw new RuntimeException("Asset Directory not found");
    }
    return new File(installDir, ASSET_DIR);
}
|
java
|
/**
 * Looks up the content-based bundle repository registered for the given feature.
 *
 * NOTE(review): the locService parameter is unused here — presumably retained
 * for interface compatibility; confirm before removing.
 */
public ContentBasedLocalBundleRepository getBundleRepository(String featureName, WsLocationAdmin locService) {
return BundleRepositoryRegistry.getRepositoryHolder(featureName).getBundleRepository();
}
|
java
|
/**
 * Normalizes a charset token from a Content-Type header to its canonical
 * charset name, caching successful lookups.
 *
 * @param enc   raw charset token (may be null, quoted, or carry trailing
 *              ";..." parameters from broken parsers)
 * @param deflt value returned when {@code enc} is null or effectively empty
 * @return the canonical charset name, {@code deflt} when no usable token was
 *         supplied, or {@code null} when the token names an illegal or
 *         unsupported charset.
 */
@FFDCIgnore({IllegalCharsetNameException.class, UnsupportedCharsetException.class})
public static String mapCharset(String enc, String deflt) {
    if (enc == null) {
        return deflt;
    }
    // Older versions of tomcat don't properly parse ContentType headers with
    // parameters following the charset token, so strip anything after ";".
    int semi = enc.indexOf(";");
    String candidate = (semi == -1) ? enc : enc.substring(0, semi);
    // Charsets can be quoted, but they can't contain escaped quotes or
    // anything like that, so a simple pattern strip is sufficient.
    candidate = charsetPattern.matcher(candidate).replaceAll("").trim();
    if (candidate.isEmpty()) {
        return deflt;
    }
    String mapped = encodings.get(candidate);
    if (mapped != null) {
        return mapped;
    }
    try {
        mapped = Charset.forName(candidate).name();
    } catch (IllegalCharsetNameException icne) {
        return null;
    } catch (UnsupportedCharsetException uce) {
        return null;
    }
    // Another thread may have populated the cache concurrently; prefer the
    // value that won the race so callers see a consistent instance.
    String raced = encodings.putIfAbsent(candidate, mapped);
    return (raced != null) ? raced : mapped;
}
|
java
|
/**
 * One-time fixture: configures LDAP properties, installs the internal
 * security feature, starts the server, waits for the userRegistry
 * application and the security service, then opens a servlet connection.
 *
 * @throws Exception if server start or connection setup fails.
 */
@BeforeClass
public static void setUp() throws Exception {
// Add LDAP variables to bootstrap properties file
LDAPUtils.addLDAPVariables(server);
Log.info(c, "setUp", "Starting the server... (will wait for userRegistry servlet to start)");
server.copyFileToLibertyInstallRoot("lib/features", "internalfeatures/securitylibertyinternals-1.0.mf");
server.addInstalledAppForValidation("userRegistry");
server.startServer(c.getName() + ".log");
//Make sure the application has come up before proceeding
assertNotNull("Application userRegistry does not appear to have started.",
server.waitForStringInLog("CWWKZ0001I:.*userRegistry"));
assertNotNull("Security service did not report it was ready",
server.waitForStringInLog("CWWKS0008I"));
assertNotNull("Server did not came up",
server.waitForStringInLog("CWWKF0011I"));
Log.info(c, "setUp", "Creating servlet connection the server");
servlet = new UserRegistryServletConnection(server.getHostname(), server.getHttpDefaultPort());
// Best-effort retry: the realm can be slow to initialize right after start,
// so wait briefly and ask once more before tests run.
if (servlet.getRealm() == null) {
Thread.sleep(5000);
servlet.getRealm();
}
}
|
java
|
/**
 * Verifies that a valid user's security name resolves to the expected LDAP
 * distinguished name (DN comparison is order/whitespace tolerant).
 *
 * @throws Exception if the servlet call fails.
 */
@Test
public void getUserSecurityName() throws Exception {
String user = "vmmtestuser";
String securityName = "cn=vmmtestuser,cn=users,dc=secfvt2,dc=austin,dc=ibm,dc=com";
Log.info(c, "getUserSecurityName", "Checking with a valid user.");
LDAPFatUtils.assertDNsEqual("User security name didn't match expected value.", securityName, servlet.getUserSecurityName(user));
}
|
java
|
/**
 * Primes this reader with any request body data already buffered by the
 * service context, updating the running byte count.
 *
 * Best-effort: an IOException from the service context is deliberately
 * swallowed (trace only) and leaves {@code this.buffer} unset; subsequent
 * reads handle the missing buffer.
 */
public void initialRead() {
try {
this.buffer = this.isc.getRequestBodyBuffer();
if (null != this.buffer) {
if (TraceComponent.isAnyTracingEnabled() && tc.isDebugEnabled()) {
Tr.debug(tc, "Buffer returned from getRequestBodyBuffer : " + this.buffer);
}
// record the new amount of data read from the channel
this.bytesRead += this.buffer.remaining();
}
} catch (IOException e) {
// Deliberately not rethrown: this is an opportunistic initial read.
if (TraceComponent.isAnyTracingEnabled() && tc.isDebugEnabled()) {
Tr.debug(tc, "Exception encountered during initialRead : " + e);
}
}
}
|
java
|
/**
 * Encodes the given plaintext password.
 *
 * @param stderr       stream to which any failure is reported before rethrow
 * @param plaintext    password text to encode
 * @param encodingType encoding algorithm; when null the product default is used
 * @param properties   additional encoding properties
 * @return the encoded password
 * @throws InvalidPasswordEncodingException   if the encoding type is invalid
 * @throws UnsupportedCryptoAlgorithmException if the algorithm is unsupported
 */
private String encode(PrintStream stderr, String plaintext, String encodingType,
                      Map<String, String> properties) throws InvalidPasswordEncodingException, UnsupportedCryptoAlgorithmException {
    try {
        return PasswordUtil.encode(plaintext,
                                   encodingType == null ? PasswordUtil.getDefaultEncoding() : encodingType,
                                   properties);
    } catch (InvalidPasswordEncodingException | UnsupportedCryptoAlgorithmException e) {
        // Report on the tool's error stream, then propagate (multi-catch with
        // precise rethrow replaces the previous duplicated catch blocks).
        e.printStackTrace(stderr);
        throw e;
    }
}
|
java
|
/**
 * Builds the localized description text for the custom encryption options.
 *
 * @param customInfoArray JSON array of custom encryption descriptors, each
 *                        carrying "name", "featurename" and "description".
 * @return the concatenated, formatted description text.
 */
protected String getDescription(JSONArray customInfoArray) {
    // StringBuilder: this buffer is method-local, so StringBuffer's
    // synchronization was pure overhead.
    StringBuilder sb = new StringBuilder();
    sb.append(getMessage("encode.option-custom.encryption"));
    for (int i = 0; i < customInfoArray.size(); i++) {
        JSONObject customInfo = (JSONObject) customInfoArray.get(i);
        String name = (String) customInfo.get("name");
        sb.append(getMessage("encode.option-desc.custom.feature", name));
        sb.append((String) customInfo.get("featurename"));
        sb.append(getMessage("encode.option-desc.custom.description", name));
        sb.append((String) customInfo.get("description"));
    }
    return sb.toString();
}
|
java
|
/**
 * Locates the iFix readme (.txt) payload inside the archive.
 *
 * @param artifactMetadata archive metadata to search.
 * @throws RepositoryArchiveEntryNotFoundException if no .txt entry exists
 *         (message fix: a separator was missing between "archive" and the path).
 */
private void extractFiles(ArtifactMetadata artifactMetadata) throws RepositoryArchiveIOException, RepositoryArchiveEntryNotFoundException, RepositoryArchiveException {
    _readmePayload = artifactMetadata.getFileWithExtension(".txt");
    if (_readmePayload == null) {
        throw new RepositoryArchiveEntryNotFoundException("Unable to find iFix readme .txt file in archive "
                + artifactMetadata.getArchive().getAbsolutePath(), artifactMetadata.getArchive(), "*.txt");
    }
}
|
java
|
/**
 * Returns the fix identifier from the parsed iFix XML.
 *
 * @throws RepositoryArchiveInvalidEntryException if no parsed object was supplied.
 */
private String getFixId(IFixInfo iFixInfo, ExtractedFileInformation xmlInfo) throws RepositoryArchiveInvalidEntryException {
    // Guard against a missing parsed XML object before dereferencing it.
    if (iFixInfo == null) {
        throw new RepositoryArchiveInvalidEntryException("Null XML object provided", xmlInfo.getSourceArchive(), xmlInfo.getSelectedPathFromArchive());
    }
    return iFixInfo.getId();
}
|
java
|
/**
 * Collects the APAR display ids this iFix resolves.
 *
 * @return the list of APAR ids (empty when the "resolves" node lists no problems).
 * @throws RepositoryArchiveInvalidEntryException if the document, its
 *         "resolves" node, or any APAR id is missing.
 */
private List<String> getProvides(IFixInfo iFixInfo, ParserBase.ExtractedFileInformation xmlInfo) throws RepositoryArchiveInvalidEntryException {
    if (iFixInfo == null) {
        throw new RepositoryArchiveInvalidEntryException("Null document provided", xmlInfo.getSourceArchive(), xmlInfo.getSelectedPathFromArchive());
    }
    Resolves resolves = iFixInfo.getResolves();
    if (resolves == null) {
        throw new RepositoryArchiveInvalidEntryException("Document does not contain a \"resolves\" node", xmlInfo.getSourceArchive(), xmlInfo.getSelectedPathFromArchive());
    }
    List<String> aparIds = new ArrayList<String>();
    List<Problem> problems = resolves.getProblems();
    if (problems == null) {
        return aparIds;
    }
    for (Problem problem : problems) {
        String displayId = problem.getDisplayId();
        if (displayId == null) {
            throw new RepositoryArchiveInvalidEntryException("Unexpected null getting APAR id", xmlInfo.getSourceArchive(), xmlInfo.getSelectedPathFromArchive());
        }
        aparIds.add(displayId);
    }
    return aparIds;
}
|
java
|
/**
 * Reads the Applies-To main attribute from a jar's manifest.
 *
 * @param file the jar to inspect.
 * @return the Applies-To value (last occurrence wins), or {@code null} when
 *         the jar has no manifest or no Applies-To attribute.
 * @throws RepositoryArchiveIOException if the jar or its manifest cannot be read.
 */
private String parseManifestForAppliesTo(File file) throws RepositoryArchiveIOException {
    Manifest mf;
    try (JarFile jar = new JarFile(file)) {
        try {
            mf = jar.getManifest();
        } catch (IOException ioe) {
            throw new RepositoryArchiveIOException("Error getting manifest from jar " + jar.getName(), new File(jar.getName()), ioe);
        }
    } catch (IOException ioe) {
        throw new RepositoryArchiveIOException("Unable to create JarFile from path " +
                                               file, new File(file.getName()), ioe);
    }
    // FIX: JarFile.getManifest() returns null for jars without a manifest;
    // previously this fell through to mf.getMainAttributes() and threw NPE.
    if (mf == null) {
        return null;
    }
    String appliesTo = null;
    Attributes mainattrs = mf.getMainAttributes();
    // Scan the main attributes for the Applies-To header.
    for (Object at : mainattrs.keySet()) {
        String attribName = ((Attributes.Name) at).toString();
        if (APPLIES_TO.equals(attribName)) {
            appliesTo = (String) mainattrs.get(at);
        }
    }
    return appliesTo;
}
|
java
|
/**
 * Decodes 4 big-endian bytes starting at {@code offset} into an int.
 */
public static int bytesToInt(byte[] bytes, int offset) {
    int result = 0;
    for (int i = offset; i < offset + 4; ++i) {
        result = (result << 8) | (bytes[i] & 0xFF);
    }
    return result;
}
|
java
|
/**
 * Decodes 2 big-endian bytes starting at {@code offset} into a short.
 */
public static short bytesToShort(byte[] bytes, int offset) {
    // High byte first, then low byte.
    return (short) (((bytes[offset] & 0xFF) << 8) | (bytes[offset + 1] & 0xFF));
}
|
java
|
/**
 * Decodes 8 big-endian bytes starting at {@code offset} into a long.
 */
public static long bytesToLong(byte[] bytes, int offset) {
    long result = 0L;
    // Most significant byte lands at shift 56, least significant at 0.
    for (int shift = 56, i = offset; shift >= 0; shift -= 8, ++i) {
        result |= (bytes[i] & 0xFFL) << shift;
    }
    return result;
}
|
java
|
/**
 * Decodes 2 big-endian bytes starting at {@code offset} into a char.
 */
public static char bytesToChar(byte[] bytes, int offset) {
    // High byte first, then low byte.
    return (char) (((bytes[offset] & 0xFF) << 8) | (bytes[offset + 1] & 0xFF));
}
|
java
|
/**
 * Encodes an int as 4 big-endian bytes into {@code bytes} at {@code offset}.
 */
public static void intToBytes(int value, byte[] bytes, int offset) {
    int v = value;
    // Walk backwards, peeling off the low byte each step.
    for (int i = offset + 3; i >= offset; --i) {
        bytes[i] = (byte) v;
        v >>>= 8;
    }
}
|
java
|
/**
 * Encodes a short as 2 big-endian bytes into {@code bytes} at {@code offset}.
 */
public static void shortToBytes(short value, byte[] bytes, int offset) {
    bytes[offset] = (byte) (value >>> 8);
    bytes[offset + 1] = (byte) value;
}
|
java
|
/**
 * Encodes a long as 8 big-endian bytes into {@code bytes} at {@code offset}.
 */
public static void longToBytes(long value, byte[] bytes, int offset) {
    // Most significant byte first.
    for (int shift = 56, i = offset; shift >= 0; shift -= 8, ++i) {
        bytes[i] = (byte) (value >>> shift);
    }
}
|
java
|
/**
 * Decodes a base-128 varint (7 data bits per byte, LSB group first, 0x80 as
 * the continuation flag) starting at {@code offset} into a long.
 *
 * @throws IllegalStateException if no terminating byte is found within the
 *         maximum 64-bit encoding length.
 */
public static long varIntBytesToLong(byte[] bytes, int offset) {
    long result = 0L;
    for (int shift = 0; shift < 64; shift += 7) {
        byte b = bytes[offset++];
        result |= (long) (b & 0x7F) << shift;
        if ((b & 0x80) == 0) {
            return result;
        }
    }
    throw new IllegalStateException("Varint representation is invalid or exceeds 64-bit value");
}
|
java
|
/**
 * Decodes a base-128 varint starting at {@code offset} into an int
 * (protobuf-style unrolled decode: up to 5 data bytes for the 32-bit value).
 *
 * Negative ints are written as 64-bit varints, so after the fifth byte the
 * decoder may consume up to 5 further continuation bytes whose payload is
 * discarded (only the low 32 bits are kept).
 *
 * @throws IllegalStateException if no terminating byte appears within the
 *         maximum encoding length.
 */
public static int varIntBytesToInt(byte[] bytes, int offset) {
byte tmp = bytes[offset++];
// Fast path: a single byte with the continuation bit clear.
if (tmp >= 0) {
return tmp;
}
int result = tmp & 0x7f;
if ((tmp = bytes[offset++]) >= 0) {
result |= tmp << 7;
} else {
result |= (tmp & 0x7f) << 7;
if ((tmp = bytes[offset++]) >= 0) {
result |= tmp << 14;
} else {
result |= (tmp & 0x7f) << 14;
if ((tmp = bytes[offset++]) >= 0) {
result |= tmp << 21;
} else {
result |= (tmp & 0x7f) << 21;
result |= (tmp = bytes[offset++]) << 28;
if (tmp < 0) {
// Discard upper 32 bits.
for (int i = 0; i < 5; i++) {
if (bytes[offset++] >= 0) {
return result;
}
}
//Should never happen since we wrote the varint value. If this occurs due to an internal bug
//this exception is caught and wrapped further up the chain.
throw new IllegalStateException("Varint representation is invalid or exceeds 32-bit value");
}
}
}
}
return result;
}
|
java
|
/**
 * Encodes {@code v} as a base-128 varint (7 bits per byte, LSB group first,
 * 0x80 continuation flag) into {@code bytes} starting at {@code offset}.
 * (Also fixes the parameter-name typo "offest".)
 *
 * @return the index just past the last byte written.
 */
public static int writeLongAsVarIntBytes(long v, byte[] bytes, int offset) {
    int pos = offset;
    // Emit continuation bytes while more than 7 significant bits remain.
    while ((v & ~0x7FL) != 0) {
        bytes[pos++] = (byte) ((v & 0x7F) | 0x80);
        v >>>= 7;
    }
    // Final byte: continuation bit clear.
    bytes[pos++] = (byte) v;
    return pos;
}
|
java
|
/**
 * Encodes {@code intVal} as a base-128 varint into {@code bytes} starting at
 * {@code offset}. Negative values use the full 5-byte unsigned encoding.
 * (The previous single-byte pre-check duplicated the loop's first iteration
 * and has been removed; behavior is unchanged.)
 *
 * @return the index just past the last byte written.
 */
public static int writeIntAsVarIntBytes(int intVal, byte[] bytes, int offset) {
    int pos = offset;
    int v = intVal;
    // Emit continuation bytes while more than 7 significant bits remain
    // (unsigned shift so negative values terminate after 5 bytes).
    while ((v & ~0x7F) != 0) {
        bytes[pos++] = (byte) ((v & 0x7F) | 0x80);
        v >>>= 7;
    }
    bytes[pos++] = (byte) v;
    return pos;
}
|
java
|
/**
 * Renders a byte array for logging, truncating to the first 1000 bytes so
 * huge payloads cannot flood the output.
 */
public static String limitedBytesToString(byte[] bytes) {
    if (bytes.length > 1000) {
        return Arrays.toString(Arrays.copyOf(bytes, 1000));
    }
    return Arrays.toString(bytes);
}
|
java
|
/**
 * Wraps a throwable in a RESTHandlerJsonException whose message is the
 * JSON-serialized form of the throwable.
 *
 * @param e         the throwable to serialize
 * @param converter converter to use; when null one is borrowed from the pool
 * @param status    HTTP status code for the resulting exception
 * @return the JSON-bearing exception (falls back to the raw message on
 *         serialization failure).
 */
public static RESTHandlerJsonException createRESTHandlerJsonException(Throwable e, JSONConverter converter, int status) {
    try {
        // Borrow a converter when the caller didn't supply one.
        if (converter == null) {
            converter = JSONConverter.getConverter();
        }
        // Serialize into a fresh stream so partially-written data can never
        // leak into the response.
        ByteArrayOutputStream serialized = new ByteArrayOutputStream();
        converter.writeThrowable(serialized, e);
        return new RESTHandlerJsonException(serialized.toString("UTF-8"), status, true);
    } catch (IOException conversionFailure) {
        // Should never happen because our JSONErrorOutputStream shouldn't
        // throw IOException; fall back to the plain exception text.
        return new RESTHandlerJsonException(e.getMessage(), status, true);
    } finally {
        // Always return the (possibly borrowed) converter to the pool.
        JSONConverter.returnConverter(converter);
    }
}
|
java
|
/**
 * Notifies the host infrastructure that an endpoint listener has started.
 * The alternate host selector takes precedence when configured; otherwise
 * the default virtual host (if any) is informed.
 */
public static synchronized void notifyStarted(HttpEndpointImpl endpoint, String resolvedHostName, int port, boolean isHttps) {
    if (TraceComponent.isAnyTracingEnabled() && tc.isEventEnabled()) {
        Tr.event(tc, "Notify endpoint started: " + endpoint, resolvedHostName, port, isHttps, defaultHost.get(), alternateHostSelector);
    }
    if (alternateHostSelector.get() != null) {
        alternateHostSelector.get().alternateNotifyStarted(endpoint, resolvedHostName, port, isHttps);
    } else if (defaultHost.get() != null) {
        defaultHost.get().listenerStarted(endpoint, resolvedHostName, port, isHttps);
    }
}
|
java
|
/**
 * Notifies the host infrastructure that an endpoint listener has stopped.
 * The alternate host selector takes precedence when configured; otherwise
 * the default virtual host (if any) is informed.
 */
public static synchronized void notifyStopped(HttpEndpointImpl endpoint, String resolvedHostName, int port, boolean isHttps) {
    if (TraceComponent.isAnyTracingEnabled() && tc.isEventEnabled()) {
        Tr.event(tc, "Notify endpoint stopped: " + endpoint, resolvedHostName, port, isHttps, defaultHost.get(), alternateHostSelector);
    }
    if (alternateHostSelector.get() != null) {
        alternateHostSelector.get().alternateNotifyStopped(endpoint, resolvedHostName, port, isHttps);
    } else if (defaultHost.get() != null) {
        defaultHost.get().listenerStopped(endpoint, resolvedHostName, port, isHttps);
    }
}
|
java
|
/**
 * Tracks a buffer in this object's buffer set. The Hashtable (keyed and
 * valued by the buffer itself) acts as a synchronized identity set and is
 * created lazily on first use.
 *
 * NOTE(review): the lazy null-check itself is not thread-safe — presumably
 * this method is only invoked from a single thread; confirm.
 */
public void addWsByteBuffer(WsByteBuffer buffer) {
if (this.allWsByteBuffers == null) {
this.allWsByteBuffers = new Hashtable<WsByteBuffer, WsByteBuffer>();
}
this.allWsByteBuffers.put(buffer, buffer);
}
|
java
|
/**
 * Records an owner name. The Hashtable (keyed and valued by the name) acts
 * as a synchronized set and is created lazily on first use.
 *
 * NOTE(review): the lazy null-check itself is not thread-safe — presumably
 * this method is only invoked from a single thread; confirm.
 */
public void addOwner(String owner) {
if (this.owners == null) {
this.owners = new Hashtable<String, String>();
}
this.owners.put(owner, owner);
}
|
java
|
/** Returns the configured input mapping for the unique user id, or its default. */
public String getInputUniqueUserId(String inputVirtualRealm) {
    return getInputMapping(inputVirtualRealm, Service.CONFIG_DO_UNIQUE_USER_ID_MAPPING,
                           UNIQUE_USER_ID_DEFAULT);
}
|
java
|
/** Returns the configured output mapping for the unique user id, or its default. */
public String getOutputUniqueUserId(String inputVirtualRealm) {
    return getOutputMapping(inputVirtualRealm, Service.CONFIG_DO_UNIQUE_USER_ID_MAPPING,
                            UNIQUE_USER_ID_DEFAULT);
}
|
java
|
/** Returns the configured input mapping for the user security name, or its default. */
public String getInputUserSecurityName(String inputVirtualRealm) {
    return getInputMapping(inputVirtualRealm, Service.CONFIG_DO_USER_SECURITY_NAME_MAPPING,
                           INPUT_USER_SECURITY_NAME_DEFAULT);
}
|
java
|
/** Returns the configured input mapping for the user display name, or its default. */
public String getInputUserDisplayName(String inputVirtualRealm) {
    return getInputMapping(inputVirtualRealm, Service.CONFIG_DO_USER_DISPLAY_NAME_MAPPING,
                           USER_DISPLAY_NAME_DEFAULT);
}
|
java
|
/** Returns the configured output mapping for the user display name, or its default. */
public String getOutputUserDisplayName(String inputVirtualRealm) {
    return getOutputMapping(inputVirtualRealm, Service.CONFIG_DO_USER_DISPLAY_NAME_MAPPING,
                            USER_DISPLAY_NAME_DEFAULT);
}
|
java
|
/** Returns the configured input mapping for the unique group id, or its default. */
public String getInputUniqueGroupId(String inputVirtualRealm) {
    return getInputMapping(inputVirtualRealm, Service.CONFIG_DO_UNIQUE_GROUP_ID_MAPPING,
                           INPUT_UNIQUE_GROUP_ID_DEFAULT);
}
|
java
|
/** Returns the configured output mapping for the unique group id, or its default. */
public String getOutputUniqueGroupId(String inputVirtualRealm) {
    return getOutputMapping(inputVirtualRealm, Service.CONFIG_DO_UNIQUE_GROUP_ID_MAPPING,
                            OUTPUT_UNIQUE_GROUP_ID_DEFAULT);
}
|
java
|
/** Returns the configured input mapping for the group security name, or its default. */
public String getInputGroupSecurityName(String inputVirtualRealm) {
    return getInputMapping(inputVirtualRealm, Service.CONFIG_DO_GROUP_SECURITY_NAME_MAPPING,
                           INPUT_GROUP_SECURITY_NAME_DEFAULT);
}
|
java
|
/** Returns the configured output mapping for the group security name, or its default. */
public String getOutputGroupSecurityName(String inputVirtualRealm) {
    return getOutputMapping(inputVirtualRealm, Service.CONFIG_DO_GROUP_SECURITY_NAME_MAPPING,
                            OUTPUT_GROUP_SECURITY_NAME_DEFAULT);
}
|
java
|
/** Returns the configured input mapping for the group display name, or its default. */
public String getInputGroupDisplayName(String inputVirtualRealm) {
    return getInputMapping(inputVirtualRealm, Service.CONFIG_DO_GROUP_DISPLAY_NAME_MAPPING,
                           GROUP_DISPLAY_NAME_DEFAULT);
}
|
java
|
/** Returns the configured output mapping for the group display name, or its default. */
public String getOutputGroupDisplayName(String inputVirtualRealm) {
    return getOutputMapping(inputVirtualRealm, Service.CONFIG_DO_GROUP_DISPLAY_NAME_MAPPING,
                            GROUP_DISPLAY_NAME_DEFAULT);
}
|
java
|
/**
 * Resolves an input mapping property for the realm, falling back to the
 * supplied default when the realm is unknown, the property is unset/empty,
 * or the lookup fails.
 */
@FFDCIgnore(Exception.class)
private String getInputMapping(String inputVirtualRealm, String inputProperty, String inputDefaultProperty) {
    String methodName = "getInputMapping";
    RealmConfig realmConfig = mappingUtils.getCoreConfiguration().getRealmConfig(inputVirtualRealm);
    if (realmConfig == null) {
        // No realm configuration exists: nothing to look up.
        return inputDefaultProperty;
    }
    try {
        String mapped = realmConfig.getURMapInputPropertyInRealm(inputProperty);
        if (mapped == null || mapped.isEmpty()) {
            return inputDefaultProperty;
        }
        return mapped;
    } catch (Exception toCatch) {
        // Deliberate best-effort: any lookup failure falls back to the default.
        if (tc.isDebugEnabled()) {
            Tr.debug(tc, methodName + " " + toCatch.getMessage(), toCatch);
        }
        return inputDefaultProperty;
    }
}
|
java
|
/**
 * Converts or coerces a SelectItem's value so it can be compared against the
 * submitted component value.
 *
 * When a Converter is present and the item value is a String, the converter
 * wins. Otherwise the item value is coerced to the component value's type
 * per the EL coercion rules; coercion failures are intentionally swallowed
 * because the uncoerced value may still compare successfully.
 *
 * @return the converted/coerced item value, or the original item value when
 *         no conversion applied or coercion failed.
 */
private static Object _convertOrCoerceValue(FacesContext facesContext,
UIComponent uiComponent, Object value, SelectItem selectItem,
Converter converter)
{
Object itemValue = selectItem.getValue();
if (converter != null && itemValue instanceof String)
{
itemValue = converter.getAsObject(facesContext, uiComponent,
(String) itemValue);
}
else
{
// The javadoc of UISelectOne/UISelectMany says :
// "... Before comparing each option, coerce the option value type
// to the type of this component's value following the
// Expression Language coercion rules ..."
// If the coercion fails, just return the value without coerce,
// because it could be still valid the comparison for that value.
// and swallow the exception, because its information is no relevant
// on this context.
try
{
if (value instanceof java.lang.Enum)
{
// Values from an enum are a special case. There is one syntax were the
// particular enumeration is extended using something like
// SOMEVALUE { ... }, usually to override toString() method. In this case,
// value.getClass is not the target enum class, so we need to get the
// right one from super class.
Class targetClass = value.getClass();
if (targetClass != null && !targetClass.isEnum())
{
targetClass = targetClass.getSuperclass();
}
itemValue = _ClassUtils.convertToTypeNoLogging(facesContext, itemValue, targetClass);
}
else
{
itemValue = _ClassUtils.convertToTypeNoLogging(facesContext, itemValue, value.getClass());
}
}
catch (IllegalArgumentException e)
{
// Deliberately ignored: keep the unconverted item value (see note above).
//itemValue = selectItem.getValue();
}
catch (Exception e)
{
// Deliberately ignored: keep the unconverted item value (see note above).
//itemValue = selectItem.getValue();
}
}
return itemValue;
}
|
java
|
/**
 * Returns the MessageType singleton corresponding to the given byte value.
 *
 * @param aValue the value of the required message type; assumed non-null and
 *               within the bounds of the static {@code set} table — no range
 *               or null check is performed here, so an invalid value throws
 *               NullPointerException / ArrayIndexOutOfBoundsException
 * @return the MessageType with the given integer value
 */
public final static MessageType getMessageType(Byte aValue) {
    if (TraceComponent.isAnyTracingEnabled() && tc.isDebugEnabled()) SibTr.debug(tc,"Value = " + aValue);
    // Direct index into the static lookup table of MessageType instances.
    return set[aValue.intValue()];
}
|
java
|
/**
 * Dispatches a fringe rebalance to the handler for the tree's K factor.
 * Only the listed K factors are supported; anything else is reported via
 * error() with the same diagnostic text as before.
 *
 * @param kFactor the K factor (fringe size parameter) of the tree
 * @param stack   the stack of nodes visited on the way to the fringe
 * @param fpoint  the fringe balance point
 * @param fpidx   index of fpoint within the stack
 * @param maxBal  maximum allowed fringe imbalance, passed to the handler
 */
void balance(
    int kFactor,
    NodeStack stack,
    GBSNode fpoint,
    int fpidx,
    int maxBal)
{
    /* The parent of the balance point immediately precedes it on the stack. */
    GBSNode bparent = stack.node(fpidx - 1);
    if (kFactor == 2)
        balance2(stack, bparent, fpoint, fpidx, maxBal);
    else if (kFactor == 4)
        balance4(stack, bparent, fpoint, fpidx, maxBal);
    else if (kFactor == 6)
        balance6(stack, bparent, fpoint, fpidx, maxBal);
    else if (kFactor == 8)
        balance8(stack, bparent, fpoint, fpidx, maxBal);
    else if (kFactor == 12)
        balance12(stack, bparent, fpoint, fpidx, maxBal);
    else if (kFactor == 16)
        balance16(stack, bparent, fpoint, fpidx, maxBal);
    else if (kFactor == 24)
        balance24(stack, bparent, fpoint, fpidx, maxBal);
    else if (kFactor == 32)
        balance32(stack, bparent, fpoint, fpidx, maxBal);
    else
        error("Unknown K factor in fringe balance: " + kFactor);
}
|
java
|
/**
 * Fringe balance for K factor 2 (full fringe is 2k-1 = 3 nodes).
 * Rotates the 3-node right chain rooted at fpoint into a 2-level sub-tree,
 * then invokes the height balancer if a height balance point was recorded
 * higher up the stack.
 *
 * @param stack   path of nodes from the root to the fringe
 * @param bparent parent of the fringe balance point
 * @param fpoint  fringe balance point (node A in the diagram below)
 * @param fpidx   index within stack of fpoint
 * @param maxBal  maximum allowed fringe imbalance (unused for k = 2)
 */
private void balance2(
    NodeStack stack,
    GBSNode bparent, /* Parent of fringe balance point */
    GBSNode fpoint, /* Fringe balance point */
    int fpidx, /* Index within stack of fpoint */
    int maxBal) /* Maximum allowed fringe imbalance */
{
    /*
    k = 2, 2k-1 = 5, k-1 = 1
    [A-B-C] [3 children]
    becomes:
    *----B----*
    | |
    A C
    -------------------------------
    *----B----*
    | |
    A C - D - E [3 children]
    becomes:
    *----B----*
    | |
    A *--D--*
    | |
    C E
    */
    GBSNode a = fpoint;
    GBSNode b = a.rightChild();
    // B replaces A as bparent's child on whichever side A occupied.
    if (bparent.rightChild() == a)
        bparent.setRightChild(b);
    else
        bparent.setLeftChild(b);
    // A becomes B's left child and gives up its old right link to B.
    b.setLeftChild(a);
    a.clearRightChild();
    // If deep enough in the tree and a height balance point was recorded,
    // propagate the height change upward from the new sub-tree root B.
    if ((fpidx > 1) && (stack.balancePointIndex() > -1))
    {
        GBSNode qpoint = b;
        stack.setNode(fpidx, qpoint);
        GBSInsertHeight.singleInstance().balance(stack, qpoint);
    }
}
|
java
|
/**
 * Fringe balance for K factor 4 (full fringe is 2k-1 = 7 nodes).
 * At imbalance k-1 = 3 a single rotation reshapes the 3-node chain; at
 * imbalance 5 the full 7-node fringe is reorganized into a balanced
 * height-3 sub-tree and the height balancer is invoked to propagate any
 * height change upward. Any other maxBal value is reported via error().
 *
 * @param stack   path of nodes from the root to the fringe
 * @param bparent parent of the fringe balance point
 * @param fpoint  fringe balance point
 * @param fpidx   index within stack of fpoint
 * @param maxBal  maximum allowed fringe imbalance (3 or 5 here)
 */
private void balance4(
    NodeStack stack,
    GBSNode bparent, /* Parent of fringe balance point */
    GBSNode fpoint, /* Fringe balance point */
    int fpidx, /* Index within stack of fpoint */
    int maxBal) /* Maximum allowed fringe imbalance */
{
    /*
    k = 4, 2k-1 = 7, k-1 = 3
    [A-B-C] [3 children]
    becomes:
    *----B----*
    | |
    A C
    -------------------------------
    *----B----*
    | |
    A C - D - E - F - G [5 children]
    becomes:
    *------D------*
    | |
    *--B--* *--F--*
    | | | |
    A C E G */
    if (maxBal == 3)
    {
        // First trigger: rotate the 3-node chain A-B-C into a 2-level sub-tree.
        GBSNode a = fpoint;
        GBSNode b = a.rightChild();
        if (bparent.rightChild() == a)
            bparent.setRightChild(b);
        else
            bparent.setLeftChild(b);
        b.setLeftChild(a);
        a.clearRightChild();
    }
    else
    {
        if (maxBal != 5)
            error("fringeBalance4: maxBal != 5, maxBal = " + maxBal);
        // Second trigger: reshape the full fringe around new sub-tree root D.
        GBSNode t0_top = stack.node(fpidx-2);
        GBSNode c = fpoint;
        GBSNode d = c.rightChild();
        GBSNode e = d.rightChild();
        GBSNode f = e.rightChild();
        d.setChildren(bparent, f);
        c.clearRightChild();
        f.setLeftChild(e);
        e.clearRightChild();
        // Hook D into the grandparent on whichever side bparent occupied.
        if (t0_top.rightChild() == bparent)
            t0_top.setRightChild(d);
        else
            t0_top.setLeftChild(d);
        // Propagate the height change upward if a balance point was recorded.
        if ((fpidx > 2) && (stack.balancePointIndex() > -1))
        {
            GBSNode qpoint = d;
            stack.setNode(fpidx-1, qpoint);
            GBSInsertHeight.singleInstance().balance(stack, qpoint);
        }
    }
}
|
java
|
/**
 * Fringe balance for K factor 6 (full fringe is 2k-1 = 11 nodes).
 * At imbalance k-1 = 5 the 5-node chain is reshaped into a height-3
 * sub-tree; at imbalance 8 the fringe is reorganized around new root F and
 * the height balancer is invoked to propagate any height change upward.
 * Any other maxBal value is reported via error().
 *
 * @param stack   path of nodes from the root to the fringe
 * @param bparent parent of the fringe balance point
 * @param fpoint  fringe balance point
 * @param fpidx   index within stack of fpoint
 * @param maxBal  maximum allowed fringe imbalance (5 or 8 here)
 */
private void balance6(
    NodeStack stack,
    GBSNode bparent, /* Parent of fringe balance point */
    GBSNode fpoint, /* Fringe balance point */
    int fpidx, /* Index within stack of fpoint */
    int maxBal) /* Maximum allowed fringe imbalance */
{
    /*
    k = 6, 2k-1 = 11, k-1 = 5
    [A-B-C-D-E] [5 children]
    becomes:
    *--C--*
    | |
    *-B D-*
    | |
    A E
    -------------------------------
    *--C--*
    | | [8 children]
    *-B D-*
    | |
    A E - F - G - H - I - J - K
    becomes:
    *-------F-------*
    | |
    *--C--* *--I--*
    | | | |
    *-B D-* *-H J-*
    | | | |
    A E G K */
    if (maxBal == 5)
    {
        // First trigger: reshape the 5-node chain A..E around new root C.
        GBSNode a = fpoint;
        GBSNode b = a.rightChild();
        GBSNode c = b.rightChild();
        if (bparent.rightChild() == a)
            bparent.setRightChild(c);
        else
            bparent.setLeftChild(c);
        c.setLeftChild(b);
        b.setChildren(a, null);
        a.clearRightChild();
    }
    else
    {
        if (maxBal != 8)
            error("fringeBalance6: maxBal != 8, maxBal = " + maxBal);
        // Second trigger: walk the right chain E..I and rebuild around F.
        GBSNode t0_top = stack.node(fpidx-2);
        GBSNode c = bparent;
        GBSNode d = fpoint;
        GBSNode e = d.rightChild();
        GBSNode f = e.rightChild();
        GBSNode g = f.rightChild();
        GBSNode h = g.rightChild();
        GBSNode i = h.rightChild();
        f.setChildren(c, i);
        i.setLeftChild(h);
        h.setChildren(g, null);
        g.clearRightChild();
        e.clearRightChild();
        // Hook F into the grandparent on whichever side C occupied.
        if (t0_top.rightChild() == c)
            t0_top.setRightChild(f);
        else
            t0_top.setLeftChild(f);
        // Propagate the height change upward if a balance point was recorded.
        if ((fpidx > 2) && (stack.balancePointIndex() > -1))
        {
            GBSNode qpoint = f;
            stack.setNode(fpidx-1, qpoint);
            GBSInsertHeight.singleInstance().balance(stack, qpoint);
        }
    }
}
|
java
|
/**
 * Fringe balance for K factor 8 (full fringe is 2k-1 = 15 nodes).
 * At imbalance k-1 = 7 the 7-node chain is reshaped into a balanced
 * height-3 sub-tree rooted at D; at imbalance 9 the full 15-node fringe is
 * reorganized around new root H and the height balancer is invoked to
 * propagate any height change upward. Any other maxBal value is reported
 * via error().
 *
 * @param stack   path of nodes from the root to the fringe
 * @param bparent parent of the fringe balance point
 * @param fpoint  fringe balance point
 * @param fpidx   index within stack of fpoint
 * @param maxBal  maximum allowed fringe imbalance (7 or 9 here)
 */
private void balance8(
    NodeStack stack,
    GBSNode bparent, /* Parent of fringe balance point */
    GBSNode fpoint, /* Fringe balance point */
    int fpidx, /* Index within stack of fpoint */
    int maxBal) /* Maximum allowed fringe imbalance */
{
    /*
    k = 8, 2k-1 = 15, k-1 = 7
    [A-B-C-D-E-F-G] [7 children]
    becomes:
    *------D------*
    | |
    *--B--* *--F--*
    | | | |
    A C E G
    -------------------------------------------
    *------D------*
    | |
    *--B--* *--F--* [9 children]
    | | | |
    A C E G - H - I - J - K - L - M - N - O
    becomes:
    *--------H--------*
    | |
    *---D---* *---L---*
    | | | |
    *-B-* *-F-* *-J-* *-N-*
    | | | | | | | |
    A C E G I K M O */
    if (maxBal == 7)
    {
        // First trigger: reshape the 7-node chain A..G around new root D.
        GBSNode a = fpoint;
        GBSNode b = a.rightChild();
        GBSNode c = b.rightChild();
        GBSNode d = c.rightChild();
        GBSNode e = d.rightChild();
        GBSNode f = e.rightChild();
        d.setLeftChild(b);
        d.setRightChild(f);
        b.setLeftChild(a);
        a.clearRightChild();
        c.clearRightChild();
        f.setLeftChild(e);
        e.clearRightChild();
        if (bparent.rightChild() == a)
            bparent.setRightChild(d);
        else
            bparent.setLeftChild(d);
    }
    else
    {
        if (maxBal != 9)
            error("fringeBalance8: maxBal != 9, maxBal = " + maxBal);
        // Second trigger: walk the right chain G..N and rebuild around H.
        GBSNode t0_top = stack.node(fpidx-3);
        GBSNode d = stack.node(fpidx-2);
        GBSNode g = fpoint;
        GBSNode h = g.rightChild();
        GBSNode i = h.rightChild();
        GBSNode j = i.rightChild();
        GBSNode k = j.rightChild();
        GBSNode l = k.rightChild();
        GBSNode m = l.rightChild();
        GBSNode n = m.rightChild();
        h.setLeftChild(d);
        h.setRightChild(l);
        l.setLeftChild(j);
        l.setRightChild(n);
        j.setLeftChild(i);
        i.clearRightChild();
        k.clearRightChild();
        n.setLeftChild(m);
        m.clearRightChild();
        g.clearRightChild();
        // Hook H into the great-grandparent on whichever side D occupied.
        if (t0_top.rightChild() == d)
            t0_top.setRightChild(h);
        else
            t0_top.setLeftChild(h);
        // Propagate the height change upward if a balance point was recorded.
        if ((fpidx > 3) && (stack.balancePointIndex() > -1))
        {
            GBSNode qpoint = h;
            stack.setNode(fpidx-2, qpoint);
            GBSInsertHeight.singleInstance().balance(stack, qpoint);
        }
    }
}
|
java
|
/**
 * Declarative Services bind method for the optional, dynamically bound base
 * ContextService instance. Discards the cached thread context configurations
 * under the write lock — presumably they are recomputed lazily on next use
 * (TODO confirm against the reader side).
 *
 * @param ref reference to the newly bound base instance service
 */
@Reference(name = BASE_INSTANCE,
           service = ContextService.class,
           cardinality = ReferenceCardinality.OPTIONAL,
           policy = ReferencePolicy.DYNAMIC,
           policyOption = ReferencePolicyOption.GREEDY,
           target = "(id=unbound)")
protected void setBaseInstance(ServiceReference<ContextService> ref) {
    final java.util.concurrent.locks.Lock writeLock = lock.writeLock();
    writeLock.lock();
    try {
        // Invalidate the cached configuration derived from the old base instance.
        threadContextConfigurations = null;
    } finally {
        writeLock.unlock();
    }
}
|
java
|
/**
 * Declarative Services bind method for the mandatory, statically bound
 * thread context manager.
 *
 * @param svc the bound WSContextService; the component.name target filter
 *            pins this to the concrete ThreadContextManager component, so
 *            the downcast is expected to be safe
 */
@Reference(name = THREAD_CONTEXT_MANAGER,
           service = WSContextService.class,
           cardinality = ReferenceCardinality.MANDATORY,
           policy = ReferencePolicy.STATIC,
           target = "(component.name=com.ibm.ws.context.manager)")
protected void setThreadContextManager(WSContextService svc) {
    this.threadContextMgr = (ThreadContextManager) svc;
}
|
java
|
/**
 * Declarative Services unbind method for the base ContextService instance.
 * Discards the cached thread context configurations under the write lock,
 * mirroring the bind method.
 *
 * @param ref reference to the base instance service being unbound
 */
protected void unsetBaseInstance(ServiceReference<ContextService> ref) {
    final java.util.concurrent.locks.Lock writeLock = lock.writeLock();
    writeLock.lock();
    try {
        // Invalidate configuration derived from the departing base instance.
        threadContextConfigurations = null;
    } finally {
        writeLock.unlock();
    }
}
|
java
|
/**
 * Completes this asynchronous operation: cancels the async timer, creates a
 * CompleteRunnable (which drives the async listeners and releases the
 * request) and runs it immediately unless a dispatch is still in progress.
 * If another thread already holds the async lock (error processing already
 * invoked), this is either a no-op or throws IllegalStateException,
 * depending on the THROW_EXCEPTION_WHEN_UNABLE_TO_COMPLETE_OR_DISPATCH
 * custom property.
 */
@Override
public synchronized void complete() {
    if (com.ibm.ejs.ras.TraceComponent.isAnyTracingEnabled() && logger.isLoggable(Level.FINEST)) {
        logger.entering(CLASS_NAME, "complete",this);
    }
    if (!lockHeldByDifferentThread())
    {
        //We can't call this or WebContainer won't know to run the complete runnable which kicks off the async listeners
        // WebContainerRequestState.getInstance(true).setAsyncMode(false);
        if (!completePending){
            //Move this inside of if block because it complete is called
            //after a previous complete, you can get a NPE because the
            //request may have already been cleaned up. If complete is not
            //pending, then it shouldn't have been cleaned up.
            if (com.ibm.ws.webcontainer.osgi.WebContainer.getServletContainerSpecLevel() > 31) {
                //setAsyncStarted(false) upon exit service()
                WebContainerRequestState reqState = WebContainerRequestState.getInstance(true);
                reqState.setAttribute("webcontainer.resetAsyncStartedOnExit", "true");
            }
            else {
                iExtendedRequest.setAsyncStarted(false);
            }
            createNewAsyncServletReeentrantLock();
            cancelAsyncTimer();
            completeRunnable = new CompleteRunnable(iExtendedRequest,this);
            completePending = true;
            // If no dispatch is in flight, run the complete runnable now;
            // otherwise the dispatching thread will pick it up.
            if (!dispatching){
                executeNextRunnable();
            }
        }
    } else {
        if (WCCustomProperties.THROW_EXCEPTION_WHEN_UNABLE_TO_COMPLETE_OR_DISPATCH) {
            throw new IllegalStateException(nls.getString("AsyncContext.lock.already.held", "Unable to obtain the lock. Error processing has already been invoked by another thread."));
        }
    }
    dispatchURI = null;
    if (com.ibm.ejs.ras.TraceComponent.isAnyTracingEnabled() && logger.isLoggable(Level.FINEST)) {
        logger.exiting(CLASS_NAME, "complete",this);
    }
}
|
java
|
/**
 * Dispatches this async request to the given path within the supplied
 * ServletContext. Arms a DispatchRunnable that runs immediately unless the
 * original dispatch is still in progress.
 *
 * @param context the servlet context to resolve the dispatch path against
 * @param path    the path to dispatch to
 * @throws IllegalStateException if complete() has already been called, if
 *         dispatch was already used for this async cycle, or (when the
 *         THROW_EXCEPTION_WHEN_UNABLE_TO_COMPLETE_OR_DISPATCH custom
 *         property is set) if another thread holds the async lock
 */
@Override
public synchronized void dispatch(ServletContext context, String path) throws IllegalStateException {
    if (com.ibm.ejs.ras.TraceComponent.isAnyTracingEnabled() && logger.isLoggable(Level.FINEST)) {
        logger.entering(CLASS_NAME, "dispatch(ctx,path)",new Object [] {this,context,path});
    }
    if (!lockHeldByDifferentThread()){
        if (completePending) {
            throw new IllegalStateException(nls.getString("called.dispatch.after.complete"));
        } else if (!dispatchAllowed){
            throw new IllegalStateException(nls.getString("trying.to.call.dispatch.twice.for.the.same.async.operation"));
        }
        // PI28910, the effect of calling dispatch should not take effect until the calling
        // thread completes so it is too early to set asyncStarted to false
        // iExtendedRequest.setAsyncStarted(false);
        createNewAsyncServletReeentrantLock();
        //cancel timer inside lock so we don't kick off timeout events after we've decided to dispatch
        cancelAsyncTimer();
        WebAppRequestDispatcher requestDispatcher = (WebAppRequestDispatcher) context.getRequestDispatcher(path);
        dispatchRunnable = new DispatchRunnable(requestDispatcher, this);
        dispatchPending = true;
        // Only one dispatch is permitted per async cycle.
        dispatchAllowed = false;
        if (com.ibm.ejs.ras.TraceComponent.isAnyTracingEnabled() && logger.isLoggable(Level.FINEST)) {
            logger.logp(Level.FINEST, CLASS_NAME, "dispatch(ctx,path)", "dispatching -->" + dispatching);
        }
        // If the original dispatch has already unwound, run the new dispatch
        // now; otherwise the dispatching thread will pick it up.
        if (!dispatching) {
            executeNextRunnable();
        }
    } else {
        if (WCCustomProperties.THROW_EXCEPTION_WHEN_UNABLE_TO_COMPLETE_OR_DISPATCH) {
            throw new IllegalStateException(nls.getString("AsyncContext.lock.already.held", "Unable to obtain the lock. Error processing has already been invoked by another thread."));
        }
    }
    dispatchURI = null;
    if (com.ibm.ejs.ras.TraceComponent.isAnyTracingEnabled() && logger.isLoggable(Level.FINEST)) {
        logger.exiting(CLASS_NAME, "dispatch(ctx,path)",this);
    }
}
|
java
|
/**
 * Commits the resource using the one-phase optimization
 * (XAResource.commit with onePhase = true).
 * On success the completion direction (_completedCommit) and vote are
 * recorded; on failure the XA return code is captured in _completionXARC
 * before the exception is rethrown.
 *
 * @throws XAException propagated from the resource manager
 */
public final void commit_one_phase() throws XAException
{
    if (tc.isEntryEnabled()) Tr.entry(tc, "commit_one_phase", _resource);
    if (tcSummary.isDebugEnabled()) Tr.debug(tcSummary, "commit_one_phase", this);

    //
    // Commit the one-phase resource.
    //
    try
    {
        _resource.commit(_xid, true);

        // Record the completion direction and Automatic vote.
        _completedCommit = true;
        _vote = JTAResourceVote.commit;
    }
    catch(XAException xae)
    {
        // Record the completion XA return code
        _completionXARC = xae.errorCode;
        throw xae;
    }
    finally
    {
        if (tc.isEntryEnabled()) Tr.exit(tc, "commit_one_phase", _completionXARC);
        if (tcSummary.isDebugEnabled()) Tr.debug(tcSummary, "commit_one_phase result: " +
                                                 XAReturnCodeHelper.convertXACode(_completionXARC));
    }
}
|
java
|
/**
 * Rolls back the resource's transaction branch (XAResource.rollback).
 * On success the rollback vote is recorded (_completedCommit keeps its
 * default of false); on failure the XA return code is captured in
 * _completionXARC before the exception is rethrown.
 *
 * @throws XAException propagated from the resource manager
 */
public final void rollback() throws XAException
{
    if (tc.isEntryEnabled()) Tr.entry(tc, "rollback", _resource);
    if (tcSummary.isDebugEnabled()) Tr.debug(tcSummary, "rollback", this);

    try
    {
        _resource.rollback(_xid);

        // Record the vote.
        // _completedCommit defaults to false.
        _vote = JTAResourceVote.rollback;
    }
    catch(XAException xae)
    {
        // Record the completion XA return code
        _completionXARC = xae.errorCode;
        throw xae;
    }
    finally
    {
        // Include the completion return code on exit, consistent with
        // commit_one_phase's exit trace.
        if (tc.isEntryEnabled()) Tr.exit(tc, "rollback", _completionXARC);
        if (tcSummary.isDebugEnabled()) Tr.debug(tcSummary, "rollback result: " +
                                                 XAReturnCodeHelper.convertXACode(_completionXARC));
    }
}
|
java
|
/**
 * forget() is a no-op for this resource: only entry/exit trace is produced.
 * (In the XA protocol forget is only meaningful after a heuristic
 * completion, which this implementation does not retain state for.)
 *
 * @throws XAException declared for interface compatibility; never thrown here
 */
public final void forget() throws XAException
{
    if (tc.isEntryEnabled())
    {
        Tr.entry(tc, "forget", _resource);
        Tr.exit(tc, "forget");
    }
}
|
java
|
/**
 * Verifies the HttpAuthenticationMechanism (HAM) configuration of each web
 * module in the application:
 * 1. a module may declare at most one HAM, and
 * 2. a module that declares a HAM must not also declare a non-defaulted
 *    login-config authentication method.
 * Each violation is logged (Tr.error) and reported as a DeploymentException.
 *
 * @throws DeploymentException if a module has multiple HAMs or a
 *         conflicting login configuration
 */
private void verifyConfiguration() throws DeploymentException {
    Map<URL, ModuleMetaData> mmds = getModuleMetaDataMap();
    if (mmds != null) {
        for (Map.Entry<URL, ModuleMetaData> entry : mmds.entrySet()) {
            ModuleMetaData mmd = entry.getValue();
            // Only web modules carry HAM / login-config metadata.
            if (mmd instanceof WebModuleMetaData) {
                String j2eeModuleName = mmd.getJ2EEName().getModule();
                Map<Class<?>, Properties> authMechs = getAuthMechs(j2eeModuleName);
                if (authMechs != null && !authMechs.isEmpty()) {
                    // make sure that only one HAM.
                    if (authMechs.size() != 1) {
                        String appName = mmd.getJ2EEName().getApplication();
                        String authMechNames = getAuthMechNames(authMechs);
                        Tr.error(tc, "JAVAEESEC_CDI_ERROR_MULTIPLE_HTTPAUTHMECHS", j2eeModuleName, appName, authMechNames);
                        String msg = Tr.formatMessage(tc, "JAVAEESEC_CDI_ERROR_MULTIPLE_HTTPAUTHMECHS", j2eeModuleName, appName, authMechNames);
                        throw new DeploymentException(msg);
                    }
                    // A HAM exists: a non-defaulted login-config in web.xml
                    // would conflict with it.
                    SecurityMetadata smd = (SecurityMetadata) ((WebModuleMetaData) mmd).getSecurityMetaData();
                    if (smd != null) {
                        LoginConfiguration lc = smd.getLoginConfiguration();
                        if (lc != null && !lc.isAuthenticationMethodDefaulted()) {
                            String appName = mmd.getJ2EEName().getApplication();
                            String msg = Tr.formatMessage(tc, "JAVAEESEC_CDI_ERROR_LOGIN_CONFIG_EXISTS", j2eeModuleName, appName);
                            Tr.error(tc, "JAVAEESEC_CDI_ERROR_LOGIN_CONFIG_EXISTS", j2eeModuleName, appName);
                            throw new DeploymentException(msg);
                        }
                    }
                }
            }
        }
    }
}
|
java
|
/**
 * Determines which module the given class belongs to by matching its class
 * file location against the registered module locations. Only file-protocol
 * locations are considered. When no module matches, the class file location
 * itself is used as the module name.
 *
 * @param klass     the class whose module is wanted
 * @param moduleMap map of module name to module properties (with location)
 * @return the matching module name, or the class file location if none match
 */
private String getModuleFromClass(Class<?> klass, Map<String, ModuleProperties> moduleMap) {
    final String classFile = getClassFileLocation(klass);
    if (tc.isDebugEnabled()) {
        Tr.debug(tc, "File name : " + classFile);
    }
    for (Map.Entry<String, ModuleProperties> entry : moduleMap.entrySet()) {
        final URL moduleLocation = entry.getValue().getLocation();
        final String modulePath = moduleLocation.getFile();
        if (tc.isDebugEnabled()) {
            Tr.debug(tc, "location : " + modulePath);
        }
        if ("file".equals(moduleLocation.getProtocol()) && classFile.startsWith(modulePath)) {
            if (tc.isDebugEnabled()) {
                Tr.debug(tc, "module name from the list : " + entry.getKey());
            }
            return entry.getKey();
        }
    }
    // No module location is a prefix of the class file path: fall back.
    if (tc.isDebugEnabled()) {
        Tr.debug(tc, "no match. use filename as module name : " + classFile);
    }
    return classFile;
}
|
java
|
/**
 * Builds the properties for the container-provided
 * BasicAuthenticationMechanism from the global web application security
 * configuration. Only the realm name is carried; when it is not configured,
 * the properties are left empty so the default realm applies downstream.
 *
 * @return the (possibly empty) Properties for the mechanism
 * @throws Exception propagated from configuration access
 */
private Properties getGlobalLoginBasicProps() throws Exception {
    final Properties basicProps = new Properties();
    final String realm = getWebAppSecurityConfig().getBasicAuthRealmName();
    if (realm != null) {
        if (tc.isDebugEnabled()) {
            Tr.debug(tc, "The container provided BasicAuthenticationMechanism will be used with the realm name : " + realm);
        }
        basicProps.put(JavaEESecConstants.REALM_NAME, realm);
    } else if (tc.isDebugEnabled()) {
        Tr.debug(tc, "basicAuthenticationMechanismRealmName is not set. the default value " + JavaEESecConstants.DEFAULT_REALM + " is used.");
    }
    return basicProps;
}
|
java
|
/**
 * Builds the properties for the container-provided
 * FormAuthenticationMechanism from the global web application security
 * configuration: login page URL, error page URL, and the login form context
 * root. Missing or inconsistent values are reported via Tr.error but do not
 * abort property construction.
 *
 * @return the populated Properties for the mechanism
 * @throws Exception propagated from configuration access
 */
private Properties getGlobalLoginFormProps() throws Exception {
    WebAppSecurityConfig webAppSecConfig = getWebAppSecurityConfig();
    String loginURL = webAppSecConfig.getLoginFormURL();
    String errorURL = webAppSecConfig.getLoginErrorURL();
    if (loginURL == null || loginURL.isEmpty()) {
        Tr.error(tc, "JAVAEESEC_CDI_ERROR_NO_URL", "loginFormURL");
    }
    if (errorURL == null || errorURL.isEmpty()) {
        Tr.error(tc, "JAVAEESEC_CDI_ERROR_NO_URL", "loginErrorURL");
    }
    String contextRoot = webAppSecConfig.getLoginFormContextRoot();
    if (contextRoot == null) {
        // if a context root is not set, use the first path element of the login page.
        contextRoot = getFirstPathElement(loginURL);
        if (tc.isDebugEnabled()) {
            Tr.debug(tc, "loginFormContextRoot is not set, use the first element of loginURL : " + contextRoot);
        }
    } else {
        // An explicit context root must prefix both the login and error URLs.
        if (!validateContextRoot(contextRoot, loginURL)) {
            Tr.error(tc, "JAVAEESEC_CDI_ERROR_INVALID_CONTEXT_ROOT", contextRoot, loginURL, "loginFormURL");
        }
        if (!validateContextRoot(contextRoot, errorURL)) {
            Tr.error(tc, "JAVAEESEC_CDI_ERROR_INVALID_CONTEXT_ROOT", contextRoot, errorURL, "loginErrorURL");
        }
    }
    // adjust the login and error url which need to be relative path from the context root.
    loginURL = FixUpUrl(loginURL, contextRoot);
    errorURL = FixUpUrl(errorURL, contextRoot);
    if (tc.isDebugEnabled()) {
        Tr.debug(tc, "The container provided FormAuthenticationMechanism will be used with the following attributes. login page : " + loginURL + ", error page : " + errorURL + ", context root : " + contextRoot);
    }
    Properties props = new Properties();
    if (loginURL != null) {
        props.put(JavaEESecConstants.LOGIN_TO_CONTINUE_LOGINPAGE, loginURL);
    }
    if (errorURL != null) {
        props.put(JavaEESecConstants.LOGIN_TO_CONTINUE_ERRORPAGE, errorURL);
    }
    props.put(JavaEESecConstants.LOGIN_TO_CONTINUE_USEFORWARDTOLOGIN, true);
    props.put(JavaEESecConstants.LOGIN_TO_CONTINUE_USE_GLOBAL_LOGIN, true);
    if (contextRoot != null) {
        props.put(JavaEESecConstants.LOGIN_TO_CONTINUE_LOGIN_FORM_CONTEXT_ROOT, contextRoot);
    }
    return props;
}
|
java
|
/**
 * Initializes this object's metadata map, creating an empty map when the
 * caller passes null, and stamps it with a freshly generated request ID.
 *
 * @param metadata caller-supplied metadata map, or null for an empty one
 */
private void initialize(Map<String, Object> metadata) {
    final Map<String, Object> meta =
        (metadata != null) ? metadata : new HashMap<String, Object>();
    meta.put(REQUEST_ID, generateRequestID());
    this.metadata = meta;
}
|
java
|
/**
 * Registers an observer while enforcing that at most one connection factory
 * may observe this connection manager. A second registration is immediately
 * rolled back, and depending on the configured onError behavior
 * (ignore/warn/fail) an UnsupportedOperationException carrying message
 * CARDINALITY_ERROR_J2CA8040 may be thrown.
 *
 * @param observer the connection factory service registering interest;
 *                 assumed to be an AbstractConnectionFactoryService
 */
@Override
public void addObserver(Observer observer) {
    super.addObserver(observer);
    if (countObservers() > 1) {
        // Roll back the registration: only a single observer is permitted.
        super.deleteObserver(observer);
        AbstractConnectionFactoryService cfSvc = (AbstractConnectionFactoryService) observer;
        Object[] params = new Object[] { CONNECTION_MANAGER, name, cfSvc.getConfigElementName() };
        // ignoreWarnOrFail returns null when configured to ignore or warn.
        RuntimeException failure = connectorSvc.ignoreWarnOrFail(tc, null, UnsupportedOperationException.class, "CARDINALITY_ERROR_J2CA8040", params);
        if (failure != null)
            throw failure;
    }
}
|
java
|
/**
 * Destroys the connection factories managed by this service: unregisters
 * the pool manager MBean, shuts the pool manager down, and clears the
 * connection-factory-key-to-connection-manager map. Shutdown failures are
 * FFDC-logged and swallowed so teardown always completes. All mutation is
 * performed under the write lock.
 */
@Override
public void destroyConnectionFactories() {
    final boolean trace = TraceComponent.isAnyTracingEnabled();
    if (trace && tc.isEntryEnabled()) {
        final String pmName;
        if (pm != null)
            pmName = pm.getUniqueId();
        else
            pmName = "factory name not avaiable";
        Tr.entry(this, tc, "destroyConnectionFactories", pmName);
    }
    lock.writeLock().lock();
    try {
        // Unregister the MBean first so no management calls race the shutdown.
        if (pmMBean != null) {
            pmMBean.unregister();
            pmMBean = null;
        }
        if (pm != null) {
            try {
                pm.serverShutDown();
                pm = null;
                cfKeyToCM.clear();
            } catch (Throwable x) {
                // Best-effort teardown: record the failure but keep going.
                FFDCFilter.processException(x, getClass().getName(), "263", this);
                if (TraceComponent.isAnyTracingEnabled() && tc.isDebugEnabled())
                    Tr.debug(this, tc, x.getMessage(), CommonFunction.stackTraceToString(x));
            }
        }
    } finally {
        lock.writeLock().unlock();
    }
    if (trace && tc.isEntryEnabled())
        Tr.exit(this, tc, "destroyConnectionFactories");
}
|
java
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.