focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
/**
 * Hash code combining the topic id and the topic partition, using the
 * conventional multiplier of 31 (matches Objects.hash over the same fields).
 */
@Override
public int hashCode() {
    final int prime = 31;
    int hash = prime + topicId.hashCode();
    hash = hash * prime + topicPartition.hashCode();
    return hash;
}
// Verifies the hashCode contract of TopicIdPartition:
//  - equals Objects.hash(topicId, topicPartition) (same 31-based combination),
//  - equal instances hash equally (including instances with a null topic name),
//  - distinct partitions / topic ids produce different hashes (not required by the
//    contract in general, but expected for these specific fixtures).
@Test
public void testHashCode() {
    assertEquals(Objects.hash(topicIdPartition0.topicId(), topicIdPartition0.topicPartition()),
        topicIdPartition0.hashCode());
    assertEquals(topicIdPartition0.hashCode(), topicIdPartition1.hashCode());
    // A null topic name is represented by a TopicPartition with a null topic.
    assertEquals(Objects.hash(topicIdPartitionWithNullTopic0.topicId(), new TopicPartition(null, partition1)),
        topicIdPartitionWithNullTopic0.hashCode());
    assertEquals(topicIdPartitionWithNullTopic0.hashCode(), topicIdPartitionWithNullTopic1.hashCode());
    assertNotEquals(topicIdPartition0.hashCode(), topicIdPartition2.hashCode());
    assertNotEquals(topicIdPartition0.hashCode(), topicIdPartitionWithNullTopic0.hashCode());
    assertNotEquals(topicIdPartitionWithNullTopic0.hashCode(), topicIdPartitionWithNullTopic2.hashCode());
}
/**
 * Returns the state directory for the given task, creating it (and its parent
 * directory) on demand when persistent stores are enabled.
 *
 * @param taskId the task whose state directory is requested
 * @return the task directory; when {@code hasPersistentStores} is false it is
 *         returned without being created
 * @throws ProcessorStateException if a directory cannot be created, or if a
 *         regular file already occupies the task directory path
 */
public File getOrCreateDirectoryForTask(final TaskId taskId) {
    final File taskParentDir = getTaskDirectoryParentName(taskId);
    final File taskDir = new File(taskParentDir, StateManagerUtil.toTaskDirString(taskId));
    if (hasPersistentStores) {
        if (!taskDir.exists()) {
            synchronized (taskDirCreationLock) {
                // to avoid a race condition, we need to check again if the directory does not exist:
                // otherwise, two threads might pass the outer `if` (and enter the `then` block),
                // one blocks on `synchronized` while the other creates the directory,
                // and the blocking one fails when trying to create it after it's unblocked
                if (!taskParentDir.exists() && !taskParentDir.mkdir()) {
                    throw new ProcessorStateException(
                        String.format("Parent [%s] of task directory [%s] doesn't exist and couldn't be created",
                            taskParentDir.getPath(), taskDir.getPath()));
                }
                if (!taskDir.exists() && !taskDir.mkdir()) {
                    throw new ProcessorStateException(
                        String.format("task directory [%s] doesn't exist and couldn't be created",
                            taskDir.getPath()));
                }
            }
        } else if (!taskDir.isDirectory()) {
            // The path exists but is a plain file: the state directory is unusable.
            throw new ProcessorStateException(
                String.format("state directory [%s] can't be created as there is an existing file with the same name",
                    taskDir.getPath()));
        }
    }
    return taskDir;
}
// Points STATE_DIR_CONFIG at a path whose parent ("foo") does not exist yet and
// verifies that getOrCreateDirectoryForTask creates the full directory chain.
@Test
public void shouldCreateDirectoriesIfParentDoesntExist() {
    final File tempDir = TestUtils.tempDirectory();
    final File stateDir = new File(new File(tempDir, "foo"), "state-dir");
    final StateDirectory stateDirectory = new StateDirectory(
        new StreamsConfig(new Properties() {
            {
                put(StreamsConfig.APPLICATION_ID_CONFIG, applicationId);
                put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "dummy:1234");
                put(StreamsConfig.STATE_DIR_CONFIG, stateDir.getPath());
            }
        }),
        time, true, false);
    final File taskDir = stateDirectory.getOrCreateDirectoryForTask(new TaskId(0, 0));
    assertTrue(stateDir.exists());
    assertTrue(taskDir.exists());
}
/**
 * Copies the readable properties of {@code source} into a freshly created
 * instance of {@code tClass}.
 *
 * @param source           the bean to copy from; a null source yields a null result
 * @param tClass           the target bean class to instantiate
 * @param ignoreProperties property names to skip during the copy
 * @return a new populated instance of {@code tClass}, or null when source is null
 */
public static <T> T copyProperties(Object source, Class<T> tClass, String... ignoreProperties) {
    if (source == null) {
        return null;
    }
    final T target = ReflectUtil.newInstanceIfPossible(tClass);
    final CopyOptions options = CopyOptions.create().setIgnoreProperties(ignoreProperties);
    copyProperties(source, target, options);
    return target;
}
// Copying from a null source must return null rather than an empty target bean.
@Test
public void copyNullTest() {
    assertNull(BeanUtil.copyProperties(null, Food.class));
}
/**
 * Convenience overload: analyzes the statement with the boolean flag of the
 * two-argument {@code analyze} fixed to {@code false}.
 */
public Analysis analyze(Statement statement) {
    return analyze(statement, false);
}
// Smoke test: EXPLAIN ANALYZE with both FORMAT JSON and TYPE DISTRIBUTED options
// must be accepted by the analyzer without throwing.
@Test
public void testExplainAnalyzeFormatJsonTypeDistributed() {
    analyze("EXPLAIN ANALYZE (format JSON, type DISTRIBUTED) SELECT * FROM t1");
}
/**
 * Removes the config entries with the given ids inside a single transaction:
 * deletes the rows, deletes their tags, and records a "D" (delete) history
 * entry for each removed config.
 *
 * @param ids     ids to remove; null entries are stripped first
 * @param srcIp   originating client ip, stored in the history record
 * @param srcUser originating user, stored in the history record
 * @return the config entries that were found and removed, or null when {@code ids}
 *         is null/empty
 */
@Override
public List<ConfigInfo> removeConfigInfoByIds(final List<Long> ids, final String srcIp, final String srcUser) {
    if (CollectionUtils.isEmpty(ids)) {
        return null;
    }
    // NOTE(review): this mutates the caller's list in place to drop null ids —
    // confirm callers do not rely on the original contents.
    ids.removeAll(Collections.singleton(null));
    return tjt.execute(new TransactionCallback<List<ConfigInfo>>() {
        // Single timestamp shared by all history records of this batch.
        final Timestamp time = new Timestamp(System.currentTimeMillis());

        @Override
        public List<ConfigInfo> doInTransaction(TransactionStatus status) {
            try {
                String idsStr = StringUtils.join(ids, StringUtils.COMMA);
                List<ConfigInfo> configInfoList = findConfigInfosByIds(idsStr);
                if (!CollectionUtils.isEmpty(configInfoList)) {
                    // Delete rows first, then clean up tags and write history per entry.
                    removeConfigInfoByIdsAtomic(idsStr);
                    for (ConfigInfo configInfo : configInfoList) {
                        removeTagByIdAtomic(configInfo.getId());
                        historyConfigInfoPersistService.insertConfigHistoryAtomic(configInfo.getId(), configInfo,
                            srcIp, srcUser, time, "D");
                    }
                }
                return configInfoList;
            } catch (CannotGetJdbcConnectionException e) {
                // Connection loss is fatal for this operation; log and let the
                // transaction roll back.
                LogUtil.FATAL_LOG.error("[db-error] " + e, e);
                throw e;
            }
        }
    });
}
// Mocks two existing config rows and verifies that removeConfigInfoByIds
// (1) deletes the rows, (2) deletes each row's tags, and (3) writes one "D"
// history record per removed config.
@Test
void testRemoveConfigInfoByIds() {
    //mock exist config info
    List<ConfigInfo> configInfos = new ArrayList<>();
    configInfos.add(new ConfigInfo("data1", "group", "tenant", "app", "content"));
    configInfos.add(new ConfigInfo("data2", "grou2", "tenan2", "app2", "content2"));
    List<Long> deleteIds = Arrays.asList(12344L, 3456789L);
    configInfos.get(0).setId(12344L);
    configInfos.get(1).setId(3456789L);
    Mockito.when(jdbcTemplate.query(anyString(), eq(deleteIds.toArray()), eq(CONFIG_INFO_ROW_MAPPER)))
        .thenReturn(configInfos);
    String srcIp = "srcIp1234";
    String srcUser = "srcUser";
    externalConfigInfoPersistService.removeConfigInfoByIds(deleteIds, srcIp, srcUser);
    //expect delete to be invoked
    Mockito.verify(jdbcTemplate, times(1)).update(anyString(), eq(deleteIds.get(0)), eq(deleteIds.get(1)));
    //expect delete tags to be invoked
    Mockito.verify(jdbcTemplate, times(1)).update(anyString(), eq(deleteIds.get(0)));
    Mockito.verify(jdbcTemplate, times(1)).update(anyString(), eq(deleteIds.get(1)));
    //expect insert delete history
    Mockito.verify(historyConfigInfoPersistService, times(1))
        .insertConfigHistoryAtomic(eq(configInfos.get(0).getId()), eq(configInfos.get(0)), eq(srcIp), eq(srcUser),
            any(), eq("D"));
    Mockito.verify(historyConfigInfoPersistService, times(1))
        .insertConfigHistoryAtomic(eq(configInfos.get(1).getId()), eq(configInfos.get(1)), eq(srcIp), eq(srcUser),
            any(), eq("D"));
}
/**
 * Starts the embedded database server, ensuring the configured data directory
 * exists first.
 *
 * @throws IllegalStateException if the data directory is missing and cannot
 *         be created (the original code silently ignored {@code mkdirs()}
 *         failures and would fail later with a less clear error)
 */
@Override
public void start() {
    File dbHome = new File(getRequiredSetting(PATH_DATA.getKey()));
    // mkdirs() returns false on failure; the trailing exists() re-check tolerates
    // a concurrent creation of the same directory by another process.
    if (!dbHome.exists() && !dbHome.mkdirs() && !dbHome.exists()) {
        throw new IllegalStateException("Unable to create database directory: " + dbHome.getAbsolutePath());
    }
    startServer(dbHome);
}
// With a valid data path and JDBC URL but no embedded-port setting, start()
// must fail fast with an IllegalArgumentException naming the missing property.
@Test
public void start_fails_with_IAE_if_embedded_port_settings_is_not_set() throws IOException {
    settings
        .setProperty(PATH_DATA.getKey(), temporaryFolder.newFolder().getAbsolutePath())
        .setProperty(JDBC_URL.getKey(), "jdbc url");
    assertThatThrownBy(() -> underTest.start())
        .isInstanceOf(IllegalArgumentException.class)
        .hasMessage("Missing property " + JDBC_EMBEDDED_PORT.getKey());
}
/**
 * Builds a Derby-flavored paged query for config rows of a given group and
 * tenant, using OFFSET ... ROWS FETCH NEXT ... ROWS ONLY pagination.
 *
 * @param context carries the GROUP_ID / TENANT_ID parameters and the page window
 * @return the SQL plus its positional parameter list
 */
@Override
public MapperResult findConfigInfoBaseByGroupFetchRows(MapperContext context) {
    final String sql = "SELECT id,data_id,group_id,content FROM config_info WHERE group_id=? AND tenant_id=?"
            + " OFFSET " + context.getStartRow() + " ROWS FETCH NEXT " + context.getPageSize() + " ROWS ONLY";
    return new MapperResult(sql,
            CollectionUtils.list(context.getWhereParameter(FieldConstant.GROUP_ID),
                    context.getWhereParameter(FieldConstant.TENANT_ID)));
}
// Checks both the generated Derby pagination SQL and the positional parameter
// list (group id, tenant id) produced by the mapper.
@Test
void testFindConfigInfoBaseByGroupFetchRows() {
    context.putWhereParameter(FieldConstant.GROUP_ID, groupId);
    MapperResult mapperResult = configInfoMapperByDerby.findConfigInfoBaseByGroupFetchRows(context);
    assertEquals(mapperResult.getSql(),
        "SELECT id,data_id,group_id,content FROM config_info WHERE group_id=? AND tenant_id=? "
            + "OFFSET " + startRow + " ROWS FETCH NEXT " + pageSize + " ROWS ONLY");
    assertArrayEquals(new Object[] {groupId, tenantId}, mapperResult.getParamList().toArray());
}
/**
 * Structural match test: another type matches this struct when it is also a
 * struct with the same number of fields and each field matches pairwise by
 * type and by case-insensitive name. Pseudo types define their own matching
 * and are delegated to.
 */
@Override
public boolean matchesType(Type t) {
    if (t.isPseudoType()) {
        // Pseudo types (wildcards) decide the match themselves.
        return t.matchesType(this);
    }
    if (!t.isStructType()) {
        return false;
    }
    final StructType other = (StructType) t;
    final int fieldCount = fields.size();
    if (fieldCount != other.fields.size()) {
        return false;
    }
    for (int i = 0; i < fieldCount; i++) {
        // Type is checked before name, mirroring the strictness order.
        if (!fields.get(i).getType().matchesType(other.fields.get(i).getType())
                || !StringUtils.equalsIgnoreCase(fields.get(i).getName(), other.fields.get(i).getName())) {
            return false;
        }
    }
    return true;
}
// The ANY_STRUCT pseudo type must match itself, the ANY_ELEMENT pseudo type,
// and any concrete struct instance.
@Test
public void testTypeMatch() {
    Assert.assertTrue(Type.ANY_STRUCT.matchesType(Type.ANY_STRUCT));
    Assert.assertTrue(Type.ANY_STRUCT.matchesType(Type.ANY_ELEMENT));
    StructType structType = new StructType(Lists.newArrayList(Type.BIGINT, Type.DOUBLE));
    Assert.assertTrue(Type.ANY_STRUCT.matchesType(structType));
}
/**
 * Suspicion level at the given time; delegates to the phi accrual failure
 * detector value for that timestamp.
 */
@Override
public double suspicionLevel(long timestamp) {
    return phi(timestamp);
}
// Before any heartbeat has been recorded the detector must report zero
// suspicion (the member is not suspected).
@Test
public void nonSuspected_beforeFirstHeartbeat() {
    double suspicionLevel = failureDetector.suspicionLevel(Clock.currentTimeMillis());
    assertEquals(0, suspicionLevel, 0d);
}
/**
 * Parses a query string into a name-to-values map, accumulating repeated
 * parameter names into arrays. Two special cases return the whole query under
 * a single blank (" ") key: payloads that look like XML/SOAP, and payloads
 * that are not key=value shaped.
 *
 * @param query the raw query string / post payload
 * @return map of decoded parameter names to their decoded values
 */
public static Map<String, String[]> getQueryMap(String query) {
    Map<String, String[]> map = new HashMap<>();
    String[] params = query.split(PARAM_CONCATENATE);
    for (String param : params) {
        String[] paramSplit = param.split("=");
        if (paramSplit.length == 0) {
            continue; // We found no key-/value-pair, so continue on the next param
        }
        String name = decodeQuery(paramSplit[0]);
        // hack for SOAP request (generally)
        if (name.trim().startsWith("<?")) { // $NON-NLS-1$
            // XML payload: store it verbatim under a blank key and stop parsing.
            map.put(" ", new String[] {query}); //blank name // $NON-NLS-1$
            return map;
        }
        // the post payload is not key=value
        if((param.startsWith("=") && paramSplit.length == 1) || paramSplit.length > 2) {
            map.put(" ", new String[] {query}); //blank name // $NON-NLS-1$
            return map;
        }
        String value = "";
        if(paramSplit.length>1) {
            value = decodeQuery(paramSplit[1]);
        }
        // Append to any values already collected for this name. Note the new
        // value goes to the LAST slot and existing values are copied in front.
        String[] known = map.get(name);
        if(known == null) {
            known = new String[] {value};
        } else {
            String[] tmp = new String[known.length+1];
            tmp[tmp.length-1] = value;
            System.arraycopy(known, 0, tmp, 0, known.length);
            known = tmp;
        }
        map.put(name, known);
    }
    return map;
}
// Regression test for bug 52491: a SOAP/XML payload must not be split into
// key=value pairs — the whole payload is returned under a single blank key.
@Test
void testGetQueryMapBug52491() {
    String query = "<envelope><header><context><conversationId>119</conversationId></context></header>"
        + "<body><call component=\"OrderTransfer\" method=\"getSourceManifestID\" id=\"2\">\n"
        + "<params></params><refs></refs></call></body></envelope>";
    Map<String, String[]> params = RequestViewHTTP.getQueryMap(query);
    Assertions.assertNotNull(params);
    Assertions.assertEquals(1, params.size());
    Map.Entry<String, String[]> param1 = params.entrySet().iterator().next();
    Assertions.assertNotNull(param1);
    Assertions.assertEquals(1, param1.getValue().length);
    Assertions.assertEquals(query, param1.getValue()[0]);
    Assertions.assertTrue(StringUtils.isBlank(param1.getKey()));
}
/**
 * Compares two logical schemas, key columns first. A key mismatch is reported
 * with a "Key columns must be identical." prefix and short-circuits the value
 * column comparison.
 *
 * @return an error description when the schemas differ, otherwise empty
 */
@VisibleForTesting
static Optional<String> checkSchemas(
    final LogicalSchema schema,
    final LogicalSchema other
) {
    final Optional<String> keyMismatch = checkSchemas(schema.key(), other.key(), "key ")
        .map(msg -> "Key columns must be identical. " + msg);

    return keyMismatch.isPresent()
        ? keyMismatch
        : checkSchemas(schema.columns(), other.columns(), "");
}
// Changing a key column's type (INTEGER -> STRING) between two schemas must be
// reported as a key-column mismatch by checkSchemas.
@Test
public void shouldEnforceNoTypeChangeKey() {
    // Given:
    final LogicalSchema someSchema = LogicalSchema.builder()
        .keyColumn(ColumnName.of("k0"), SqlTypes.INTEGER)
        .valueColumn(ColumnName.of("f0"), SqlTypes.BIGINT)
        .build();
    final LogicalSchema otherSchema = LogicalSchema.builder()
        .keyColumn(ColumnName.of("k0"), SqlTypes.STRING)
        .valueColumn(ColumnName.of("f0"), SqlTypes.BIGINT)
        .build();

    // When:
    final Optional<String> s = StructuredDataSource.checkSchemas(someSchema, otherSchema);

    // Then:
    assertThat(s.isPresent(), is(true));
    assertThat(s.get(), containsString("The following key columns are changed, missing or reordered: [`k0` INTEGER KEY]"));
}
/** Human-readable description of this computation step, used in logs/progress. */
@Override
public String getDescription() {
    return "Load duplications";
}
// Pins the step description string so log/progress output stays stable.
@Test
public void verify_description() {
    assertThat(underTest.getDescription()).isEqualTo("Load duplications");
}
/**
 * Publishes a previously claimed record by flipping the sign of its stored
 * (negative, i.e. claimed) length to a positive value with an ordered write,
 * making the message visible to consumers.
 *
 * @param index index at which the record body starts (as returned by the claim)
 */
public void commit(final int index) {
    final int recordIndex = computeRecordIndex(index);
    final AtomicBuffer buffer = this.buffer;
    // Throws if the claimed space was already committed/aborted; returns the
    // stored (claimed) length.
    final int recordLength = verifyClaimedSpaceNotReleased(buffer, recordIndex);
    // Ordered write ensures the record payload is visible before the length flip.
    buffer.putIntOrdered(lengthOffset(recordIndex), -recordLength);
}
// commit must read the claimed (negative) length and publish the record by
// writing its negation with putIntOrdered, in that order.
@Test
void commitPublishesMessageByInvertingTheLengthValue() {
    final int index = 128;
    final int recordIndex = index - HEADER_LENGTH;
    final int recordLength = -19;
    when(buffer.getInt(lengthOffset(recordIndex))).thenReturn(recordLength);
    ringBuffer.commit(index);
    final InOrder inOrder = inOrder(buffer);
    inOrder.verify(buffer).getInt(lengthOffset(recordIndex));
    inOrder.verify(buffer).putIntOrdered(lengthOffset(recordIndex), -recordLength);
    inOrder.verifyNoMoreInteractions();
}
/**
 * Converts a JDBC result value to the requested type. The instanceof/type
 * checks are order-sensitive: identity and temporal checks run before the
 * generic Number/Date branches, and URL/boolean/String targets are handled
 * specially before falling back to a plain cast.
 *
 * @param value       the raw value (may be null)
 * @param convertType the requested target type (must not be null)
 * @return the converted value
 * @throws SQLFeatureNotSupportedException when convertType is null or the
 *         value cannot be cast to the requested type
 */
public static Object convertValue(final Object value, final Class<?> convertType) throws SQLFeatureNotSupportedException {
    ShardingSpherePreconditions.checkNotNull(convertType, () -> new SQLFeatureNotSupportedException("Type can not be null"));
    if (null == value) {
        // Nulls get a type-appropriate default (e.g. false/0 for primitives).
        return convertNullValue(convertType);
    }
    if (value.getClass() == convertType) {
        return value;
    }
    // LocalDateTime/Timestamp must be handled before the java.util.Date branch.
    if (value instanceof LocalDateTime) {
        return convertLocalDateTimeValue((LocalDateTime) value, convertType);
    }
    if (value instanceof Timestamp) {
        return convertTimestampValue((Timestamp) value, convertType);
    }
    if (URL.class.equals(convertType)) {
        return convertURL(value);
    }
    if (value instanceof Number) {
        return convertNumberValue(value, convertType);
    }
    if (value instanceof Date) {
        return convertDateValue((Date) value, convertType);
    }
    if (value instanceof byte[]) {
        return convertByteArrayValue((byte[]) value, convertType);
    }
    if (boolean.class.equals(convertType)) {
        return convertBooleanValue(value);
    }
    if (String.class.equals(convertType)) {
        return value.toString();
    }
    try {
        return convertType.cast(value);
    } catch (final ClassCastException ignored) {
        // Cast failure is surfaced as a JDBC "getObject with type" limitation.
        throw new SQLFeatureNotSupportedException("getObject with type");
    }
}
// Exercises the numeric/string/boolean conversion matrix of convertValue:
// primitives, boxed types, BigDecimal narrowing, Date identity, and toString.
@Test
void assertConvertNumberValueSuccess() throws SQLException {
    assertThat(ResultSetUtils.convertValue("1", String.class), is("1"));
    assertTrue((boolean) ResultSetUtils.convertValue(1, boolean.class));
    assertThat(ResultSetUtils.convertValue((byte) 1, byte.class), is((byte) 1));
    assertThat(ResultSetUtils.convertValue((short) 1, short.class), is((short) 1));
    assertThat(ResultSetUtils.convertValue(new BigDecimal("1"), int.class), is(1));
    assertThat(ResultSetUtils.convertValue(new BigDecimal("1"), long.class), is(1L));
    assertThat(ResultSetUtils.convertValue(new BigDecimal("1"), double.class), is(1.0D));
    assertThat(ResultSetUtils.convertValue(new BigDecimal("1"), float.class), is(1.0F));
    assertThat(ResultSetUtils.convertValue(new BigDecimal("1"), BigDecimal.class), is(new BigDecimal("1")));
    assertThat(ResultSetUtils.convertValue((short) 1, BigDecimal.class), is(new BigDecimal("1")));
    assertThat(ResultSetUtils.convertValue(new Date(0L), Date.class), is(new Date(0L)));
    assertThat(ResultSetUtils.convertValue((short) 1, Object.class), is(Short.valueOf("1")));
    assertThat(ResultSetUtils.convertValue((short) 1, String.class), is("1"));
    assertThat(ResultSetUtils.convertValue(1, Byte.class), is(Byte.valueOf("1")));
    assertThat(ResultSetUtils.convertValue(1, Short.class), is(Short.valueOf("1")));
    assertThat(ResultSetUtils.convertValue(1, Long.class), is(Long.valueOf("1")));
    assertThat(ResultSetUtils.convertValue(1, Double.class), is(Double.valueOf("1")));
    assertThat(ResultSetUtils.convertValue(1, Float.class), is(Float.valueOf("1")));
}
/**
 * Serializes this MySQL ERR packet. The wire layout is fixed and order-matters:
 * 1-byte header, 2-byte error code, the '#' SQLSTATE marker, the 5-character
 * SQLSTATE, then the error message running to end of packet.
 */
@Override
protected void write(final MySQLPacketPayload payload) {
    payload.writeInt1(HEADER);
    payload.writeInt2(errorCode);
    payload.writeStringFix(SQL_STATE_MARKER);
    payload.writeStringFix(sqlState);
    payload.writeStringEOF(errorMessage);
}
// Builds an ERR packet from ER_NO_DB_ERROR (1046 / 3D000) and verifies each
// field is written in the exact MySQL wire order.
@Test
void assertWrite() {
    new MySQLErrPacket(new SQLException(MySQLVendorError.ER_NO_DB_ERROR.getReason(),
        MySQLVendorError.ER_NO_DB_ERROR.getSqlState().getValue(),
        MySQLVendorError.ER_NO_DB_ERROR.getVendorCode())).write(payload);
    verify(payload).writeInt1(MySQLErrPacket.HEADER);
    verify(payload).writeInt2(1046);
    verify(payload).writeStringFix("#");
    verify(payload).writeStringFix("3D000");
    verify(payload).writeStringEOF("No database selected");
}
/**
 * Returns (creating and caching on first use) the proxy implementing the given
 * PipelineOptions sub-interface, backed by this invocation handler.
 *
 * Uses double-checked locking: the unsynchronized cache lookup is the fast
 * path; the cache is re-checked under the lock before building a new proxy.
 *
 * @param iface the options interface to expose; must be an interface
 * @return the cached or newly created proxy for {@code iface}
 */
<T extends PipelineOptions> T as(Class<T> iface) {
    checkNotNull(iface);
    checkArgument(iface.isInterface(), "Not an interface: %s", iface);
    T existingOption = computedProperties.interfaceToProxyCache.getInstance(iface);
    if (existingOption == null) {
        synchronized (this) {
            // double check
            existingOption = computedProperties.interfaceToProxyCache.getInstance(iface);
            if (existingOption == null) {
                // Validates the interface shape against the already-known ones
                // before generating the proxy class.
                Registration<T> registration =
                    PipelineOptionsFactory.CACHE
                        .get()
                        .validateWellFormed(iface, computedProperties.knownInterfaces);
                List<PropertyDescriptor> propertyDescriptors = registration.getPropertyDescriptors();
                Class<T> proxyClass = registration.getProxyClass();
                existingOption =
                    InstanceBuilder.ofType(proxyClass)
                        .fromClass(proxyClass)
                        .withArg(InvocationHandler.class, this)
                        .build();
                // Publish the new proxy by swapping in an updated immutable snapshot.
                computedProperties =
                    computedProperties.updated(iface, existingOption, propertyDescriptors);
            }
        }
    }
    return existingOption;
}
// A value set through one sub-interface must be visible through a sibling
// sub-interface, since both proxies share the same underlying handler state.
@Test
public void testAsSiblingRetainsSuperInterfaceValues() throws Exception {
    ProxyInvocationHandler handler = new ProxyInvocationHandler(Maps.newHashMap());
    SubClass extended = handler.as(SubClass.class);
    extended.setString("parentValue");
    Sibling sibling = extended.as(Sibling.class);
    assertEquals("parentValue", sibling.getString());
}
/**
 * Converts a PMML {@code Apply} node into its KiePMML counterpart, copying the
 * function name, nested expressions, extensions, map-missing-to / default
 * values, and the optional invalid-value-treatment method.
 *
 * @param apply the source PMML Apply element
 * @return the built KiePMMLApply with a random UUID as name
 */
static KiePMMLApply getKiePMMLApply(final Apply apply) {
    // The treatment enum is optional; translate it to its string form when present.
    String invalidValueTreatment = null;
    if (apply.getInvalidValueTreatment() != null) {
        invalidValueTreatment = apply.getInvalidValueTreatment().value();
    }
    final List<KiePMMLExpression> kiePMMLExpressions = getKiePMMLExpressions(apply.getExpressions());
    return KiePMMLApply.builder(UUID.randomUUID().toString(),
                                getKiePMMLExtensions(apply.getExtensions()),
                                apply.getFunction())
            .withKiePMMLExpressions(kiePMMLExpressions)
            .withMapMissingTo(apply.getMapMissingTo())
            .withDefaultValue(apply.getDefaultValue())
            .withInvalidValueTreatmentMethod(invalidValueTreatment)
            .build();
}
// Round-trip check: convert a randomly populated Apply and verify every field
// carried over via the shared commonVerifyKiePMMLApply helper.
@Test
void getKiePMMLApply() {
    Apply toConvert = getRandomApply();
    KiePMMLApply retrieved = KiePMMLApplyInstanceFactory.getKiePMMLApply(toConvert);
    commonVerifyKiePMMLApply(retrieved, toConvert);
}
/**
 * Translates an ONOS outbound packet into one or more P4Runtime packet-out
 * operations. Only OUTPUT instructions are supported; logical ports are
 * handled specially: TABLE re-injects the packet through the pipeline, FLOOD
 * expands to one operation per switch port, and any other logical port is
 * rejected.
 *
 * @throws PiInterpreterException if the treatment has non-OUTPUT instructions
 *         or outputs to an unsupported logical port
 */
@Override
public Collection<PiPacketOperation> mapOutboundPacket(OutboundPacket packet) throws PiInterpreterException {
    DeviceId deviceId = packet.sendThrough();
    TrafficTreatment treatment = packet.treatment();
    // fabric.p4 supports only OUTPUT instructions.
    List<Instructions.OutputInstruction> outInstructions = treatment
        .allInstructions()
        .stream()
        .filter(i -> i.type().equals(OUTPUT))
        .map(i -> (Instructions.OutputInstruction) i)
        .collect(toList());
    if (treatment.allInstructions().size() != outInstructions.size()) {
        // There are other instructions that are not of type OUTPUT.
        throw new PiInterpreterException("Treatment not supported: " + treatment);
    }
    ImmutableList.Builder<PiPacketOperation> builder = ImmutableList.builder();
    for (Instructions.OutputInstruction outInst : outInstructions) {
        if (outInst.port().equals(TABLE)) {
            // Logical port. Forward using the switch tables like a regular packet.
            builder.add(createPiPacketOperation(packet.data(), -1, true));
        } else if (outInst.port().equals(FLOOD)) {
            // Logical port. Create a packet operation for each switch port.
            final DeviceService deviceService = handler().get(DeviceService.class);
            for (Port port : deviceService.getPorts(packet.sendThrough())) {
                builder.add(createPiPacketOperation(packet.data(), port.number().toLong(), false));
            }
        } else if (outInst.port().isLogical()) {
            throw new PiInterpreterException(format(
                "Output on logical port '%s' not supported", outInst.port()));
        } else {
            // Send as-is to given port bypassing all switch tables.
            builder.add(createPiPacketOperation(packet.data(), outInst.port().toLong(), false));
        }
    }
    return builder.build();
}
// An output to the logical TABLE port must produce exactly one packet-out with
// the DO_FORWARDING metadata set to 1 (re-inject through the pipeline).
@Test
public void testMapOutboundPacketWithForwarding() throws Exception {
    PortNumber outputPort = PortNumber.TABLE;
    TrafficTreatment outputTreatment = DefaultTrafficTreatment.builder()
        .setOutput(outputPort)
        .build();
    ByteBuffer data = ByteBuffer.allocate(64);
    OutboundPacket outPkt = new DefaultOutboundPacket(DEVICE_ID, outputTreatment, data);
    Collection<PiPacketOperation> result = interpreter.mapOutboundPacket(outPkt);
    assertEquals(result.size(), 1);
    ImmutableList.Builder<PiPacketMetadata> builder = ImmutableList.builder();
    builder.add(PiPacketMetadata.builder()
        .withId(FabricConstants.DO_FORWARDING)
        .withValue(ImmutableByteSequence.copyFrom(1)
            .fit(1))
        .build());
    PiPacketOperation expectedPktOp = PiPacketOperation.builder()
        .withType(PiPacketOperationType.PACKET_OUT)
        .withData(ImmutableByteSequence.copyFrom(data))
        .withMetadatas(builder.build())
        .build();
    assertEquals(expectedPktOp, result.iterator().next());
}
/**
 * Renders the ICMP header fields (type, code, checksum) as their decimal
 * string values.
 */
@Override
public String toString() {
    // String.valueOf widens byte/short to int and produces the same decimal
    // text as Byte.toString / Short.toString.
    return toStringHelper(getClass())
            .add("icmpType", String.valueOf(icmpType))
            .add("icmpCode", String.valueOf(icmpCode))
            .add("checksum", String.valueOf(checksum))
            .toString();
}
// Deserializes a known ICMP header and checks that toString includes each
// field rendered as "name=value".
@Test
public void testToStringIcmp() throws Exception {
    ICMP icmp = deserializer.deserialize(headerBytes, 0, headerBytes.length);
    String str = icmp.toString();
    assertTrue(StringUtils.contains(str, "icmpType=" + icmpType));
    assertTrue(StringUtils.contains(str, "icmpCode=" + icmpCode));
    assertTrue(StringUtils.contains(str, "checksum=" + checksum));
}
/**
 * Loads and parses the client-side Hadoop configuration files (core-site.xml
 * and hdfs-site.xml), storing the parsed maps in {@code mCoreConf} /
 * {@code mHdfsConf}.
 *
 * @return a result built from the state/message/advice accumulated while
 *         accessing and parsing the two files
 */
protected ValidationTaskResult loadHdfsConfig() {
    final Pair<String, String> confPaths = getHdfsConfPaths();
    mCoreConf = accessAndParseConf("core-site.xml", confPaths.getFirst());
    mHdfsConf = accessAndParseConf("hdfs-site.xml", confPaths.getSecond());
    // mState/mMsg/mAdvice are updated as a side effect of accessAndParseConf.
    return new ValidationTaskResult(mState, getName(), mMsg.toString(), mAdvice.toString());
}
// When neither core-site.xml nor hdfs-site.xml is found under the configured
// directory, the task is SKIPPED and both files are mentioned in the result
// and the advice.
@Test
public void missingBoth() {
    CONF.set(PropertyKey.UNDERFS_HDFS_CONFIGURATION, "/conf/");
    HdfsConfValidationTask task = new HdfsConfValidationTask("hdfs://namenode:9000/alluxio", CONF);
    ValidationTaskResult result = task.loadHdfsConfig();
    assertEquals(result.getState(), ValidationUtils.State.SKIPPED);
    assertThat(result.getResult(), containsString("hdfs-site.xml is not configured"));
    assertThat(result.getResult(), containsString("core-site.xml is not configured"));
    assertThat(result.getAdvice(), containsString("hdfs-site.xml"));
    assertThat(result.getAdvice(), containsString("core-site.xml"));
}
/**
 * Builds the final path from the forward and backward shortest-path-tree
 * entries that meet at a common node: forward part first, then the meeting
 * point, then the backward part.
 *
 * @param fwdEntry forward SPT entry at the meeting node (null => no path found)
 * @param bwdEntry backward SPT entry at the meeting node (null => no path found)
 * @param weight   total path weight to record on the result
 * @return the extracted path; unmodified (not-found) path when either entry is null
 * @throws IllegalStateException when the two entries do not share an adjacent node
 */
@Override
public Path extract(SPTEntry fwdEntry, SPTEntry bwdEntry, double weight) {
    if (fwdEntry == null || bwdEntry == null) {
        // path not found
        return path;
    }
    if (fwdEntry.adjNode != bwdEntry.adjNode)
        throw new IllegalStateException("forward and backward entries must have same adjacent nodes, fwdEntry:" + fwdEntry + ", bwdEntry:" + bwdEntry);
    StopWatch sw = new StopWatch().start();
    // Order matters: forward half, meeting point, then backward half.
    extractFwdPath(fwdEntry);
    processMeetingPoint(fwdEntry, bwdEntry);
    extractBwdPath(bwdEntry);
    setExtractionTime(sw.stop().getNanos());
    path.setFound(true);
    path.setWeight(weight);
    return path;
}
// Extracts a path over a single edge 1->2 and checks node order and distance.
@Test
public void testExtract() {
    Graph graph = createGraph();
    graph.edge(1, 2).setDistance(10).set(speedEnc, 60, 60);
    SPTEntry fwdEntry = new SPTEntry(0, 2, 0, new SPTEntry(1, 10));
    SPTEntry bwdEntry = new SPTEntry(2, 0);
    Path p = DefaultBidirPathExtractor.extractPath(graph, new SpeedWeighting(speedEnc), fwdEntry, bwdEntry, 0);
    assertEquals(IntArrayList.from(1, 2), p.calcNodes());
    assertEquals(10, p.getDistance(), 1e-4);
}
/** This dialect/adapter reports no support for subqueries inside IN clauses. */
@Override
public boolean supportsSubqueriesInIns() {
    return false;
}
// Pins the metadata capability flag: subqueries in IN are not supported.
@Test
void assertSupportsSubqueriesInIns() {
    assertFalse(metaData.supportsSubqueriesInIns());
}
/**
 * Reads the next 8 bytes as a double and advances the stream offset.
 *
 * @return the double at the current offset
 * @throws EOFException if fewer than 8 bytes remain
 */
@Override
public double readDouble() throws EOFException {
    // Double.BYTES == 8: require a full value before reading.
    if (availableLong() < Double.BYTES) {
        throw new EOFException();
    }
    final double value = _dataBuffer.getDouble(_currentOffset);
    _currentOffset += Double.BYTES;
    return value;
}
// Reading a double must return the value at offset 0 and advance the stream
// offset by exactly Double.BYTES.
@Test
void testReadDouble() throws EOFException {
    double read = _dataBufferPinotInputStream.readDouble();
    assertEquals(read, _byteBuffer.getDouble(0));
    assertEquals(_dataBufferPinotInputStream.getCurrentOffset(), Double.BYTES);
}
/**
 * Adds another QuotaCounts into this one in place: both the combined
 * namespace/storage-space counters and the per-storage-type counters.
 *
 * @param that the counts to add
 * @return this instance, for chaining
 */
public QuotaCounts add(QuotaCounts that) {
    nsSsCounts = modify(nsSsCounts, counts -> counts.add(that.nsSsCounts));
    tsCounts = modify(tsCounts, counts -> counts.add(that.tsCounts));
    return this;
}
// Adding populated counts into an empty QuotaCounts must carry over namespace,
// storage space, and every per-storage-type counter.
@Test
public void testAdd() throws Exception {
    QuotaCounts qc1 = new QuotaCounts.Builder().build();
    QuotaCounts qc2 = new QuotaCounts.Builder().nameSpace(1).storageSpace(512)
        .typeSpaces(5).build();
    qc1.add(qc2);
    assertEquals(1, qc1.getNameSpace());
    assertEquals(512, qc1.getStorageSpace());
    for (StorageType type : StorageType.values()) {
        assertEquals(5, qc1.getTypeSpace(type));
    }
}
/**
 * Sends an UnregisterBroker request for the given broker id to the least
 * loaded node and exposes the outcome as a future.
 *
 * Error handling: NONE completes the future; REQUEST_TIMED_OUT is re-thrown
 * from the response handler (which makes the admin client retry the call
 * until the deadline) rather than failing the future; any other error fails
 * the future.
 *
 * @param brokerId id of the broker to unregister
 * @param options  timeout options for the call
 * @return result wrapping a future that completes when the broker is unregistered
 */
@Override
public UnregisterBrokerResult unregisterBroker(int brokerId, UnregisterBrokerOptions options) {
    final KafkaFutureImpl<Void> future = new KafkaFutureImpl<>();
    final long now = time.milliseconds();
    final Call call = new Call("unregisterBroker", calcDeadlineMs(now, options.timeoutMs()),
        new LeastLoadedNodeProvider()) {

        @Override
        UnregisterBrokerRequest.Builder createRequest(int timeoutMs) {
            UnregisterBrokerRequestData data = new UnregisterBrokerRequestData().setBrokerId(brokerId);
            return new UnregisterBrokerRequest.Builder(data);
        }

        @Override
        void handleResponse(AbstractResponse abstractResponse) {
            final UnregisterBrokerResponse response = (UnregisterBrokerResponse) abstractResponse;
            Errors error = Errors.forCode(response.data().errorCode());
            switch (error) {
                case NONE:
                    future.complete(null);
                    break;
                case REQUEST_TIMED_OUT:
                    // Thrown (not completed) so the call machinery retries it.
                    throw error.exception();
                default:
                    log.error("Unregister broker request for broker ID {} failed: {}", brokerId, error.message());
                    future.completeExceptionally(error.exception());
                    break;
            }
        }

        @Override
        void handleFailure(Throwable throwable) {
            future.completeExceptionally(throwable);
        }
    };
    runnable.call(call, now);
    return new UnregisterBrokerResult(future);
}
// A non-retriable UNKNOWN_SERVER_ERROR response must fail the result future
// with the corresponding exception type.
@Test
public void testUnregisterBrokerFailure() {
    int nodeId = 1;
    try (final AdminClientUnitTestEnv env = mockClientEnv()) {
        env.kafkaClient().setNodeApiVersions(
            NodeApiVersions.create(ApiKeys.UNREGISTER_BROKER.id, (short) 0, (short) 0));
        env.kafkaClient().prepareResponse(prepareUnregisterBrokerResponse(Errors.UNKNOWN_SERVER_ERROR, 0));
        UnregisterBrokerResult result = env.adminClient().unregisterBroker(nodeId);
        // Validate response
        assertNotNull(result.all());
        TestUtils.assertFutureThrows(result.all(), Errors.UNKNOWN_SERVER_ERROR.exception().getClass());
    }
}
/**
 * Convenience factory: returns the named metric with the default COUNT unit.
 */
public static Metric metric(String name) {
    return MetricsImpl.metric(name, Unit.COUNT);
}
// Registers a user metric inside a stateful mapping stage and verifies the
// job-level metric sums to the expected accumulated value (0+1+2+3+4 = 10).
@Test
public void customUnit() {
    pipeline.readFrom(TestSources.items(0L, 1L, 2L, 3L, 4L))
        .mapStateful(LongAccumulator::new, (acc, i) -> {
            acc.add(i);
            Metric metric = Metrics.metric("sum", Unit.COUNT);
            metric.set(acc.get());
            return acc.get();
        })
        .writeTo(Sinks.noop());
    Job job = runPipeline(pipeline.toDag());
    JobMetricsChecker checker = new JobMetricsChecker(job);
    checker.assertSummedMetricValue("sum", 10);
}
/**
 * Maps an uncaught exception to an HTTP response. Dispatch order matters:
 * WebApplicationException passes through its own response (logged when 5xx),
 * AuthenticationException becomes 401, ValidationException becomes a redirect
 * or a content-negotiated 400; everything else is unexpected, gets logged,
 * and is rendered as a content-negotiated 500 (with a federation-specific
 * message for FederationException).
 */
@Override
public Response toResponse(Throwable exception) {
    debugLog(exception);

    if (exception instanceof WebApplicationException w) {
        var res = w.getResponse();
        if (res.getStatus() >= 500) {
            // Server-side failures are worth a full log entry.
            log(w);
        }
        return res;
    }

    if (exception instanceof AuthenticationException) {
        return Response.status(Status.UNAUTHORIZED).build();
    }

    if (exception instanceof ValidationException ve) {
        if (ve.seeOther() != null) {
            // Validation may redirect instead of erroring out.
            return Response.seeOther(ve.seeOther()).build();
        }
        return buildContentNegotiatedErrorResponse(ve.localizedMessage(), Status.BAD_REQUEST);
    }

    // the remaining exceptions are unexpected, let's log them
    log(exception);

    if (exception instanceof FederationException fe) {
        var errorMessage = new Message(FEDERATION_ERROR_MESSAGE, fe.reason().name());
        return buildContentNegotiatedErrorResponse(errorMessage, Status.INTERNAL_SERVER_ERROR);
    }

    var status = Status.INTERNAL_SERVER_ERROR;
    var errorMessage = new Message(SERVER_ERROR_MESSAGE, (String) null);
    return buildContentNegotiatedErrorResponse(errorMessage, status);
}
// When the client prefers JSON, a ValidationException must map to a 400 with
// an application/json body containing the Problem entity.
@Test
void toResponse_withJson() {
    when(headers.getAcceptableMediaTypes())
        .thenReturn(
            List.of(
                MediaType.APPLICATION_JSON_TYPE,
                MediaType.TEXT_HTML_TYPE,
                MediaType.WILDCARD_TYPE));
    var msg = "Ooops! An error :/";

    // when
    var res = mapper.toResponse(new ValidationException(new Message(msg)));

    // then
    assertEquals(400, res.getStatus());
    assertEquals(MediaType.APPLICATION_JSON_TYPE, res.getMediaType());
    assertEquals(new Problem("/server_error", msg), res.getEntity());
}
/**
 * Scans the entity files under {@code dir}, keeps only entities of the given
 * type that pass every configured filter, orders them by created time
 * (descending), and returns up to {@code filters.getLimit()} of them.
 *
 * @param dir            directory containing per-entity storage files
 * @param entityType     only entities of this type are returned
 * @param filters        created-time range, relation/info/config/metric/event
 *                       filters, and the result limit
 * @param dataToRetrieve controls which fields are copied onto returned entities
 * @return the selected entities (a HashSet, so iteration order is unspecified)
 * @throws IOException on filesystem errors while listing or reading files
 */
private Set<TimelineEntity> getEntities(Path dir, String entityType,
    TimelineEntityFilters filters, TimelineDataToRetrieve dataToRetrieve)
    throws IOException {
    // First sort the selected entities based on created/start time.
    // TreeMap with a reversed Long comparator => newest creation time first.
    Map<Long, Set<TimelineEntity>> sortedEntities = new TreeMap<>(
        new Comparator<Long>() {
            @Override
            public int compare(Long l1, Long l2) {
                return l2.compareTo(l1);
            }
        }
    );
    dir = getNormalPath(dir);
    if (dir != null) {
        RemoteIterator<LocatedFileStatus> fileStatuses = fs.listFiles(dir, false);
        if (fileStatuses != null) {
            while (fileStatuses.hasNext()) {
                LocatedFileStatus locatedFileStatus = fileStatuses.next();
                Path entityFile = locatedFileStatus.getPath();
                // Skip anything that is not a timeline-service storage file.
                if (!entityFile.getName()
                    .contains(TIMELINE_SERVICE_STORAGE_EXTENSION)) {
                    continue;
                }
                try (BufferedReader reader = new BufferedReader(
                    new InputStreamReader(fs.open(entityFile), StandardCharsets.UTF_8))) {
                    TimelineEntity entity = readEntityFromFile(reader);
                    if (!entity.getType().equals(entityType)) {
                        continue;
                    }
                    if (!isTimeInRange(entity.getCreatedTime(),
                        filters.getCreatedTimeBegin(),
                        filters.getCreatedTimeEnd())) {
                        continue;
                    }
                    // Each filter group is applied only when it is present and
                    // non-empty; failing any group drops the entity.
                    if (filters.getRelatesTo() != null &&
                        !filters.getRelatesTo().getFilterList().isEmpty() &&
                        !TimelineStorageUtils.matchRelatesTo(entity,
                            filters.getRelatesTo())) {
                        continue;
                    }
                    if (filters.getIsRelatedTo() != null &&
                        !filters.getIsRelatedTo().getFilterList().isEmpty() &&
                        !TimelineStorageUtils.matchIsRelatedTo(entity,
                            filters.getIsRelatedTo())) {
                        continue;
                    }
                    if (filters.getInfoFilters() != null &&
                        !filters.getInfoFilters().getFilterList().isEmpty() &&
                        !TimelineStorageUtils.matchInfoFilters(entity,
                            filters.getInfoFilters())) {
                        continue;
                    }
                    if (filters.getConfigFilters() != null &&
                        !filters.getConfigFilters().getFilterList().isEmpty() &&
                        !TimelineStorageUtils.matchConfigFilters(entity,
                            filters.getConfigFilters())) {
                        continue;
                    }
                    if (filters.getMetricFilters() != null &&
                        !filters.getMetricFilters().getFilterList().isEmpty() &&
                        !TimelineStorageUtils.matchMetricFilters(entity,
                            filters.getMetricFilters())) {
                        continue;
                    }
                    if (filters.getEventFilters() != null &&
                        !filters.getEventFilters().getFilterList().isEmpty() &&
                        !TimelineStorageUtils.matchEventFilters(entity,
                            filters.getEventFilters())) {
                        continue;
                    }
                    // Copy only the requested fields onto the returned entity.
                    TimelineEntity entityToBeReturned = createEntityToBeReturned(
                        entity, dataToRetrieve.getFieldsToRetrieve());
                    Set<TimelineEntity> entitiesCreatedAtSameTime =
                        sortedEntities.get(entityToBeReturned.getCreatedTime());
                    if (entitiesCreatedAtSameTime == null) {
                        entitiesCreatedAtSameTime = new HashSet<TimelineEntity>();
                    }
                    entitiesCreatedAtSameTime.add(entityToBeReturned);
                    sortedEntities.put(entityToBeReturned.getCreatedTime(),
                        entitiesCreatedAtSameTime);
                }
            }
        }
    }
    // Flatten in descending created-time order and stop at the limit. Note the
    // limit may cut within a same-created-time bucket (HashSet order).
    Set<TimelineEntity> entities = new HashSet<TimelineEntity>();
    long entitiesAdded = 0;
    for (Set<TimelineEntity> entitySet : sortedEntities.values()) {
        for (TimelineEntity entity : entitySet) {
            entities.add(entity);
            ++entitiesAdded;
            if (entitiesAdded >= filters.getLimit()) {
                return entities;
            }
        }
    }
    return entities;
}
// Verifies the entity limit: with limit=2 the two newest entities (by created
// time, descending) are returned; with limit=3 one of four entities is still
// dropped even though two share the same created time.
@Test
void testGetEntitiesWithLimit() throws Exception {
    Set<TimelineEntity> result = reader.getEntities(
        new TimelineReaderContext("cluster1", "user1", "flow1", 1L, "app1", "app", null),
        new TimelineEntityFilters.Builder().entityLimit(2L).build(),
        new TimelineDataToRetrieve());
    assertEquals(2, result.size());
    // Needs to be rewritten once hashcode and equals for
    // TimelineEntity is implemented
    // Entities with id_1 and id_4 should be returned,
    // based on created time, descending.
    for (TimelineEntity entity : result) {
        if (!entity.getId().equals("id_1") && !entity.getId().equals("id_4")) {
            fail("Entity not sorted by created time");
        }
    }
    result = reader.getEntities(
        new TimelineReaderContext("cluster1", "user1", "flow1", 1L, "app1", "app", null),
        new TimelineEntityFilters.Builder().entityLimit(3L).build(),
        new TimelineDataToRetrieve());
    // Even though 2 entities out of 4 have same created time, one entity
    // is left out due to limit
    assertEquals(3, result.size());
}
/**
 * Two QueuePath instances are equal when both their parent and leaf parts are
 * equal (null-safe). Uses an exact class check, not instanceof, which keeps
 * equals symmetric in the presence of subclasses.
 */
@Override
public boolean equals(Object o) {
    if (this == o) {
        return true;
    }
    if (o == null || getClass() != o.getClass()) {
        return false;
    }
    final QueuePath that = (QueuePath) o;
    return Objects.equals(parent, that.parent)
        && Objects.equals(leaf, that.leaf);
}
/** Checks QueuePath value equality (including the empty path) and inequality against null. */
@Test
public void testEquals() {
    QueuePath queuePath = new QueuePath(TEST_QUEUE);
    QueuePath queuePathSame = new QueuePath(TEST_QUEUE);
    QueuePath empty = new QueuePath("");
    QueuePath emptySame = new QueuePath("");
    Assert.assertEquals(queuePath, queuePathSame);
    Assert.assertEquals(empty, emptySame);
    Assert.assertNotEquals(null, queuePath);
}
/**
 * Looks up the raw subscription metadata this member advertised for the given
 * protocol name.
 *
 * @param protocolName name of the rebalance protocol to look up
 * @return the member-provided metadata bytes for that protocol
 * @throws IllegalArgumentException if the member did not advertise the protocol
 */
public byte[] metadata(String protocolName) {
    for (final JoinGroupRequestProtocol supported : supportedProtocols) {
        if (supported.name().equals(protocolName)) {
            return supported.metadata();
        }
    }
    throw new IllegalArgumentException("Member does not support protocol " + protocolName);
}
/** metadata() must throw IllegalArgumentException for a protocol the member never advertised. */
@Test
public void testMetadataRaisesOnUnsupportedProtocol() {
    ClassicGroupMember member = new ClassicGroupMember(
            "member",
            Optional.of("group-instance-id"),
            "client-id",
            "client-host",
            10,
            4500,
            "generic",
            new JoinGroupRequestProtocolCollection(), // no protocols supported at all
            EMPTY_ASSIGNMENT
    );
    assertThrows(IllegalArgumentException.class, () ->
            member.metadata("unknown")
    );
}
/**
 * Decodes a raw message whose payload is expected to be a JSON object.
 *
 * @param rawMessage raw message to decode
 * @return the parsed message, or {@code null} if the payload is not valid JSON
 */
@Nullable
@Override
public Message decode(@Nonnull RawMessage rawMessage) {
    final byte[] payload = rawMessage.getPayload();
    final Map<String, Object> event;
    try {
        event = objectMapper.readValue(payload, TypeReferences.MAP_STRING_OBJECT);
    } catch (IOException e) {
        // Pass the exception to the logger as well; the original call dropped it,
        // hiding the stack trace explaining why decoding failed.
        LOG.error("Couldn't decode raw message {}", rawMessage, e);
        return null;
    }
    return parseEvent(event);
}
/** Decoding a generic beat with DigitalOcean cloud metadata maps all beat_meta_cloud_* fields. */
@Test
public void decodeMessagesHandlesGenericBeatWithCloudDigitalOcean() throws Exception {
    final Message message = codec.decode(messageFromJson("generic-with-cloud-digital-ocean.json"));
    assertThat(message).isNotNull();
    assertThat(message.getMessage()).isEqualTo("null");
    assertThat(message.getSource()).isEqualTo("unknown");
    assertThat(message.getTimestamp()).isEqualTo(new DateTime(2016, 4, 1, 0, 0, DateTimeZone.UTC));
    assertThat(message.getField("facility")).isEqualTo("genericbeat");
    assertThat(message.getField("beat_foo")).isEqualTo("bar");
    assertThat(message.getField("beat_meta_cloud_provider")).isEqualTo("digitalocean");
    assertThat(message.getField("beat_meta_cloud_instance_id")).isEqualTo("1234567");
    assertThat(message.getField("beat_meta_cloud_region")).isEqualTo("nyc2");
}
/**
 * Executes a single transition of the slot state machine by dispatching to the
 * handler for the current {@code dynamicState.state}.
 *
 * @param dynamicState current slot state
 * @param staticState  immutable slot configuration and collaborators
 * @return the next dynamic state produced by the state handler
 * @throws IllegalStateException if the current state has no handler
 */
static DynamicState stateMachineStep(DynamicState dynamicState, StaticState staticState) throws Exception {
    LOG.debug("STATE {}", dynamicState.state);
    switch (dynamicState.state) {
        case EMPTY:
            return handleEmpty(dynamicState, staticState);
        case RUNNING:
            return handleRunning(dynamicState, staticState);
        case WAITING_FOR_WORKER_START:
            return handleWaitingForWorkerStart(dynamicState, staticState);
        case KILL_BLOB_UPDATE:
            return handleKillBlobUpdate(dynamicState, staticState);
        case KILL_AND_RELAUNCH:
            return handleKillAndRelaunch(dynamicState, staticState);
        case KILL:
            return handleKill(dynamicState, staticState);
        case WAITING_FOR_BLOB_LOCALIZATION:
            return handleWaitingForBlobLocalization(dynamicState, staticState);
        case WAITING_FOR_BLOB_UPDATE:
            return handleWaitingForBlobUpdate(dynamicState, staticState);
        default:
            throw new IllegalStateException("Code not ready to handle a state of " + dynamicState.state);
    }
}
/**
 * Drives the slot state machine from RUNNING through KILL (kill, then forceKill)
 * to EMPTY once all worker processes report dead, and verifies EMPTY is stable.
 * Uses simulated time; each step blocks ~1000ms.
 */
@Test
public void testRunningToEmpty() throws Exception {
    try (SimulatedTime ignored = new SimulatedTime(1010)) {
        int port = 8080;
        String cTopoId = "CURRENT";
        List<ExecutorInfo> cExecList = mkExecutorInfoList(1, 2, 3, 4, 5);
        LocalAssignment cAssignment = mkLocalAssignment(cTopoId, cExecList, mkWorkerResources(100.0, 100.0, 100.0));
        Container cContainer = mock(Container.class);
        LSWorkerHeartbeat chb = mkWorkerHB(cTopoId, port, cExecList, Time.currentTimeSecs());
        when(cContainer.readHeartbeat()).thenReturn(chb);
        // processes stay alive for two polls, then report dead on the third
        when(cContainer.areAllProcessesDead()).thenReturn(false, false, true);
        AsyncLocalizer localizer = mock(AsyncLocalizer.class);
        BlobChangingCallback cb = mock(BlobChangingCallback.class);
        ContainerLauncher containerLauncher = mock(ContainerLauncher.class);
        ISupervisor iSuper = mock(ISupervisor.class);
        LocalState state = mock(LocalState.class);
        SlotMetrics slotMetrics = new SlotMetrics(new StormMetricsRegistry());
        StaticState staticState = new StaticState(localizer, 5000, 120000, 1000, 1000,
                containerLauncher, "localhost", port, iSuper, state, cb, null, null, slotMetrics);
        DynamicState dynamicState = new DynamicState(cAssignment, cContainer, null, slotMetrics);
        // step 1: RUNNING with no assignment -> KILL, polite kill issued
        DynamicState nextState = Slot.stateMachineStep(dynamicState, staticState);
        assertEquals(MachineState.KILL, nextState.state);
        verify(cContainer).kill();
        verify(localizer, never()).requestDownloadTopologyBlobs(null, port, cb);
        assertNull(nextState.pendingDownload, "pendingDownload not set properly");
        assertNull(nextState.pendingLocalization);
        assertTrue(Time.currentTimeMillis() > 1000);
        // step 2: still alive -> force kill
        nextState = Slot.stateMachineStep(nextState, staticState);
        assertEquals(MachineState.KILL, nextState.state);
        verify(cContainer).forceKill();
        assertNull(nextState.pendingDownload, "pendingDownload not set properly");
        assertNull(nextState.pendingLocalization);
        assertTrue(Time.currentTimeMillis() > 2000);
        // step 3: processes dead -> EMPTY, container cleaned up and slot released
        nextState = Slot.stateMachineStep(nextState, staticState);
        assertEquals(MachineState.EMPTY, nextState.state);
        verify(cContainer).cleanUp();
        verify(localizer).releaseSlotFor(cAssignment, port);
        assertNull(nextState.container);
        assertNull(nextState.currentAssignment);
        assertTrue(Time.currentTimeMillis() > 2000);
        // EMPTY is stable across further steps
        nextState = Slot.stateMachineStep(nextState, staticState);
        assertEquals(MachineState.EMPTY, nextState.state);
        assertNull(nextState.container);
        assertNull(nextState.currentAssignment);
        assertTrue(Time.currentTimeMillis() > 3000);
        nextState = Slot.stateMachineStep(nextState, staticState);
        assertEquals(MachineState.EMPTY, nextState.state);
        assertNull(nextState.container);
        assertNull(nextState.currentAssignment);
        assertTrue(Time.currentTimeMillis() > 3000);
    }
}
/**
 * Returns the configured SAML user-name attribute.
 *
 * @throws IllegalArgumentException if the attribute is not configured
 */
String getUserName() {
    return configuration.get(USER_NAME_ATTRIBUTE).orElseThrow(() -> new IllegalArgumentException("User name attribute is missing"));
}
/** getUserName() returns the value of the sonar.auth.saml.user.name setting. */
@Test
public void return_user_name_attribute() {
    settings.setProperty("sonar.auth.saml.user.name", "userName");
    assertThat(underTest.getUserName()).isEqualTo("userName");
}
/**
 * Validates {@code options} against {@code klass}. Delegates to the three-argument
 * overload with the final flag set to {@code false}.
 */
public static <T extends PipelineOptions> T validate(Class<T> klass, PipelineOptions options) {
    return validate(klass, options, false);
}
/** Setting one option out of multiple required groups is enough for validation to pass. */
@Test
public void testWhenOneOfMultipleRequiredGroupsIsSetIsValid() {
    MultiGroupRequired multiGroupRequired = PipelineOptionsFactory.as(MultiGroupRequired.class);
    multiGroupRequired.setRunner(CrashingRunner.class);
    multiGroupRequired.setFoo("eggs");
    // must not throw
    PipelineOptionsValidator.validate(MultiGroupRequired.class, multiGroupRequired);
}
/**
 * Resolves a Format from the supplied FormatInfo. The format name is matched
 * case-insensitively, and the info's properties are validated against the
 * resolved format before it is returned.
 *
 * @param formatInfo the format name and properties to resolve
 * @return the resolved format
 */
public static Format of(final FormatInfo formatInfo) {
    final String formatName = formatInfo.getFormat().toUpperCase();
    final Format resolved = fromName(formatName);
    resolved.validateProperties(formatInfo.getProperties());
    return resolved;
}
/** Each format rejects properties it does not support, naming the offending config. */
@Test
public void shouldThrowWhenCreatingFromUnsupportedProperty() {
    // Given:
    final FormatInfo format = FormatInfo.of("JSON", ImmutableMap.of("KEY_SCHEMA_ID", "1"));
    final FormatInfo kafkaFormat = FormatInfo.of("KAFKA", ImmutableMap.of("VALUE_SCHEMA_ID", "1"));
    final FormatInfo delimitedFormat = FormatInfo.of("delimited", ImmutableMap.of("KEY_SCHEMA_ID", "123"));
    // When:
    final Exception e = assertThrows(
            KsqlException.class,
            () -> FormatFactory.of(format)
    );
    final Exception kafkaException = assertThrows(
            KsqlException.class,
            () -> FormatFactory.of(kafkaFormat)
    );
    final Exception delimitedException = assertThrows(
            KsqlException.class,
            () -> FormatFactory.of(delimitedFormat)
    );
    // Then:
    assertThat(e.getMessage(), containsString("JSON does not support the following configs: [KEY_SCHEMA_ID]"));
    assertThat(kafkaException.getMessage(), containsString("KAFKA does not support the following configs: [VALUE_SCHEMA_ID]"));
    assertThat(delimitedException.getMessage(), containsString("DELIMITED does not support the following configs: [KEY_SCHEMA_ID]"));
}
/** Returns the set's current capacity (grows as elements are added), not its element count. */
public int capacity() {
    return capacity;
}
/** Capacity doubles (8 -> 16) once the 0.9 load factor is exceeded by inserting 10 elements. */
@Test
public void testCapacityIncreasesIfNeeded() {
    final OAHashSet<Integer> set = new OAHashSet<>(8, 0.9F);
    assertEquals(8, set.capacity());
    populateSet(set, 10);
    assertEquals(16, set.capacity());
}
/** Delegates key enumeration to the wrapped implementation. */
@Override
public List<K> keys() {
    return delegate.keys();
}
/** The propagation's keys are exactly the plain B3 keys — no baggage keys added. */
@Test
void keysDontIncludeBaggage() {
    assertThat(factory.get().keys())
            .isEqualTo(B3Propagation.B3_STRING.keys());
}
/**
 * Returns the HTTP status code of the given response.
 *
 * @param response the servlet response to inspect
 */
public abstract int status(HttpServletResponse response);
/** Repeated status lookups on the same response return the same value (caching is safe). */
@Test
void servlet25_status_cached() {
    HttpServletResponseImpl response = new HttpServletResponseImpl();
    assertThat(servlet25.status(response))
            .isEqualTo(200);
    assertThat(servlet25.status(response))
            .isEqualTo(200);
}
/**
 * Handles the exchange by assembling the plugin chain, optionally subscribing
 * on the configured scheduler.
 *
 * NOTE(review): after(exchange) executes in the finally block when the Mono is
 * assembled and returned, i.e. before the reactive pipeline actually runs to
 * completion on subscription — confirm this eager ordering is intended.
 */
@Override
public Mono<Void> handle(@NonNull final ServerWebExchange exchange) {
    try {
        before(exchange);
        Mono<Void> execute = new DefaultShenyuPluginChain(plugins).execute(exchange);
        if (scheduled) {
            return execute.subscribeOn(scheduler);
        }
        return execute;
    } finally {
        after(exchange);
    }
}
/** handle() completes the returned Mono for a plain GET exchange with context attributes set. */
@Test
public void handle() {
    final ServerWebExchange exchange = MockServerWebExchange.from(MockServerHttpRequest.get("localhost")
            .remoteAddress(new InetSocketAddress(8090))
            .build());
    exchange.getAttributes().put(Constants.CONTEXT, mock(ShenyuContext.class));
    exchange.getAttributes().put(Constants.PARAM_TRANSFORM, "{key:value}");
    Mono<Void> handle = shenyuWebHandler.handle(exchange);
    StepVerifier.create(handle).expectSubscription().verifyComplete();
}
/**
 * Returns the classes to analyze for the given test class, caching per test class.
 * On a cache miss, imports either through the FOREVER location cache or a fresh
 * lazy import, depending on the request's cache mode.
 *
 * NOTE(review): containsKey/get/put on cachedByTest is a check-then-act sequence;
 * presumably access is single-threaded or recomputation is idempotent — confirm.
 */
JavaClasses getClassesToAnalyzeFor(Class<?> testClass, ClassAnalysisRequest classAnalysisRequest) {
    checkNotNull(testClass);
    checkNotNull(classAnalysisRequest);
    if (cachedByTest.containsKey(testClass)) {
        return cachedByTest.get(testClass);
    }
    LocationsKey locations = RequestedLocations.by(classAnalysisRequest, testClass).asKey();
    JavaClasses classes = classAnalysisRequest.getCacheMode() == FOREVER
            ? cachedByLocations.getUnchecked(locations).get()
            : new LazyJavaClasses(locations.locations, locations.importOptionTypes).get();
    cachedByTest.put(testClass, classes);
    return classes;
}
/** With wholeClasspath=true the importer is asked for locations spanning the whole classpath. */
@Test
public void if_whole_classpath_is_set_true_then_the_whole_classpath_is_imported() {
    TestAnalysisRequest defaultOptions = new TestAnalysisRequest().withWholeClasspath(true);
    Class<?>[] expectedImportResult = new Class[]{getClass()};
    doReturn(new ClassFileImporter().importClasses(expectedImportResult))
            .when(cacheClassFileImporter).importClasses(anySet(), anyCollection());
    JavaClasses classes = cache.getClassesToAnalyzeFor(TestClass.class, defaultOptions);
    assertThatTypes(classes).matchExactly(expectedImportResult);
    verify(cacheClassFileImporter).importClasses(anySet(), locationCaptor.capture());
    // spot-check that well-known classpath entries were requested
    assertThat(locationCaptor.getValue())
            .has(locationContaining("archunit"))
            .has(locationContaining("asm"))
            .has(locationContaining("google"))
            .has(locationContaining("mockito"));
}
/** Creates a new rule instance that pushes filter predicates into table layouts. */
public PickTableLayoutForPredicate pickTableLayoutForPredicate() {
    return new PickTableLayoutForPredicate(metadata);
}
/**
 * The rule rewrites filter-over-tablescan into a constrained table scan carrying
 * the predicate as a layout constraint; checked for two equivalent plan-builder spellings.
 */
@Test
public void ruleAddedTableLayoutToFilterTableScan() {
    Map<String, Domain> filterConstraint = ImmutableMap.<String, Domain>builder()
            .put("orderstatus", singleValue(createVarcharType(1), utf8Slice("F")))
            .build();
    tester().assertThat(pickTableLayout.pickTableLayoutForPredicate())
            .on(p -> {
                p.variable("orderstatus", createVarcharType(1));
                return p.filter(p.rowExpression("orderstatus = CAST ('F' AS VARCHAR(1))"),
                        p.tableScan(
                                ordersTableHandle,
                                ImmutableList.of(p.variable("orderstatus", createVarcharType(1))),
                                ImmutableMap.of(p.variable("orderstatus", createVarcharType(1)),
                                        new TpchColumnHandle("orderstatus", createVarcharType(1)))));
            })
            .matches(
                    constrainedTableScanWithTableLayout("orders", filterConstraint, ImmutableMap.of("orderstatus", "orderstatus")));
    // same assertion, building the variables via the static helper rather than the builder
    tester().assertThat(pickTableLayout.pickTableLayoutForPredicate())
            .on(p -> {
                p.variable("orderstatus", createVarcharType(1));
                return p.filter(p.rowExpression("orderstatus = CAST ('F' AS VARCHAR(1))"),
                        p.tableScan(
                                ordersTableHandle,
                                ImmutableList.of(variable("orderstatus", createVarcharType(1))),
                                ImmutableMap.of(variable("orderstatus", createVarcharType(1)),
                                        new TpchColumnHandle("orderstatus", createVarcharType(1)))));
            })
            .matches(
                    constrainedTableScanWithTableLayout("orders", filterConstraint, ImmutableMap.of("orderstatus", "orderstatus")));
}
/**
 * Writes the modification timestamp for the file via a WebDAV PROPPATCH, using
 * custom properties, and records the resulting attributes on the transfer status.
 * No-op when the status carries no modification time.
 */
@Override
public void setTimestamp(final Path file, final TransferStatus status) throws BackgroundException {
    try {
        if(null != status.getModified()) {
            final DavResource resource = this.getResource(file);
            session.getClient().patch(new DAVPathEncoder().encode(file),
                    this.getCustomProperties(resource, status.getModified()), Collections.emptyList(), this.getCustomHeaders(file, status));
            status.setResponse(new DAVAttributesFinderFeature(session).toAttributes(resource).withModificationDate(
                    Timestamp.toSeconds(status.getModified())));
        }
    }
    catch(SardineException e) {
        throw new DAVExceptionMappingService().map("Failure to write attributes of {0}", e, file);
    }
    catch(IOException e) {
        throw new DefaultIOExceptionMappingService().map(e, file);
    }
}
/**
 * Setting a folder timestamp is persisted (rounded to 5000 here), and creating a
 * file inside the folder implicitly bumps the folder's modification date again.
 */
@Test
public void testSetTimestampFolderExplicitImplicit() throws Exception {
    final Path folder = new DAVDirectoryFeature(session).mkdir(new Path(new DefaultHomeFinderService(session).find(),
            new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory)), new TransferStatus());
    new DAVTimestampFeature(session).setTimestamp(folder, 5100L);
    assertEquals(5000L, new DAVAttributesFinderFeature(session).find(folder).getModificationDate());
    assertEquals(5000L, new DefaultAttributesFinderFeature(session).find(folder).getModificationDate());
    Thread.sleep(1000L);
    final Path file = new DAVTouchFeature(session).touch(new Path(folder,
            new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file)), new TransferStatus());
    assertNotEquals(5000L, new DAVAttributesFinderFeature(session).find(folder).getModificationDate());
    assertNotEquals(5000L, new DefaultAttributesFinderFeature(session).find(folder).getModificationDate());
    new DAVDeleteFeature(session).delete(Arrays.asList(file, folder), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
/** Returns an unpooled duplicate sharing this buffer's content, after an accessibility check. */
@Override
public ByteBuf duplicate() {
    ensureAccessible();
    return new UnpooledDuplicatedByteBuf(this);
}
/** retainedDuplicate() must fail once the underlying buffer has been released. */
@Test
public void testRetainedDuplicateAfterReleaseDuplicate() {
    ByteBuf buf = newBuffer(1);
    ByteBuf buf2 = buf.duplicate();
    assertRetainedDuplicateFailAfterRelease(buf, buf2);
}
/**
 * Builds a Kafka-backed SQL table from the table definition. Topics and bootstrap
 * servers are merged from the location string and the "topics"/"bootstrap_servers"
 * properties. Nested schemas produce a NestedPayloadKafkaTable; flat schemas default
 * to CSV or use the configured payload format.
 */
@Override
public BeamSqlTable buildBeamSqlTable(Table table) {
    Schema schema = table.getSchema();
    ObjectNode properties = table.getProperties();
    Optional<ParsedLocation> parsedLocation = Optional.empty();
    if (!Strings.isNullOrEmpty(table.getLocation())) {
        parsedLocation = Optional.of(parseLocation(checkArgumentNotNull(table.getLocation())));
    }
    List<String> topics = mergeParam(parsedLocation.map(loc -> loc.topic), (ArrayNode) properties.get("topics"));
    List<String> allBootstrapServers =
            mergeParam(
                    parsedLocation.map(loc -> loc.brokerLocation),
                    (ArrayNode) properties.get("bootstrap_servers"));
    String bootstrapServers = String.join(",", allBootstrapServers);
    Optional<String> payloadFormat =
            properties.has("format")
                    ? Optional.of(properties.get("format").asText())
                    : Optional.empty();
    if (Schemas.isNestedSchema(schema)) {
        Optional<PayloadSerializer> serializer =
                payloadFormat.map(
                        format ->
                                PayloadSerializers.getSerializer(
                                        format,
                                        checkArgumentNotNull(schema.getField(PAYLOAD_FIELD).getType().getRowSchema()),
                                        TableUtils.convertNode2Map(properties)));
        return new NestedPayloadKafkaTable(schema, bootstrapServers, topics, serializer);
    } else {
        /*
         * CSV is handled separately because multiple rows can be produced from a single message, which
         * adds complexity to payload extraction. It remains here and as the default because it is the
         * historical default, but it will not be extended to support attaching extended attributes to
         * rows.
         */
        if (payloadFormat.orElse("csv").equals("csv")) {
            return new BeamKafkaCSVTable(schema, bootstrapServers, topics);
        }
        // payloadFormat.get() is safe here: an absent format takes the csv branch above.
        PayloadSerializer serializer =
                PayloadSerializers.getSerializer(
                        payloadFormat.get(), schema, TableUtils.convertNode2Map(properties));
        return new PayloadSerializerKafkaTable(schema, bootstrapServers, topics, serializer);
    }
}
/** A nested-bytes schema yields a NestedPayloadKafkaTable with location-derived broker/topic. */
@Test
public void testBuildBeamSqlNestedBytesTable() {
    Table table = mockNestedBytesTable("hello");
    BeamSqlTable sqlTable = provider.buildBeamSqlTable(table);
    assertNotNull(sqlTable);
    assertTrue(sqlTable instanceof NestedPayloadKafkaTable);
    BeamKafkaTable kafkaTable = (BeamKafkaTable) sqlTable;
    assertEquals(LOCATION_BROKER, kafkaTable.getBootstrapServers());
    assertEquals(ImmutableList.of(LOCATION_TOPIC), kafkaTable.getTopics());
}
/**
 * Parses a JSON response body into the list of config files it describes.
 *
 * @param responseBody JSON of the shape {"files": [...]}
 * @return the config file list built from the parsed response
 */
@Override
public ConfigFileList responseMessageForConfigFiles(String responseBody) {
    final ConfigFilesResponseMessage parsed =
            codec.getGson().fromJson(responseBody, ConfigFilesResponseMessage.class);
    return ConfigFileList.from(parsed.getFiles());
}
/** A well-formed empty file list parses without errors. */
@Test
public void shouldNotHaveErrorsWhenValidJSON() {
    assertFalse(handler.responseMessageForConfigFiles("{\"files\": []}").hasErrors());
}
/**
 * Runs the pipeline on the direct runner: round-trips options through JSON (to
 * validate serializability), rewrites the pipeline, builds the execution graph,
 * starts the parallel executor, and optionally blocks until completion.
 * Metrics support is enabled only for the duration of the run.
 */
@Override
public DirectPipelineResult run(Pipeline pipeline) {
    try {
        options =
                MAPPER
                        .readValue(MAPPER.writeValueAsBytes(options), PipelineOptions.class)
                        .as(DirectOptions.class);
    } catch (IOException e) {
        throw new IllegalArgumentException(
                "PipelineOptions specified failed to serialize to JSON.", e);
    }
    performRewrites(pipeline);
    MetricsEnvironment.setMetricsSupported(true);
    try {
        DirectGraphVisitor graphVisitor = new DirectGraphVisitor();
        pipeline.traverseTopologically(graphVisitor);
        @SuppressWarnings("rawtypes")
        KeyedPValueTrackingVisitor keyedPValueVisitor = KeyedPValueTrackingVisitor.create();
        pipeline.traverseTopologically(keyedPValueVisitor);
        DisplayDataValidator.validatePipeline(pipeline);
        DisplayDataValidator.validateOptions(options);
        ExecutorService metricsPool =
                Executors.newCachedThreadPool(
                        new ThreadFactoryBuilder()
                                .setThreadFactory(MoreExecutors.platformThreadFactory())
                                .setDaemon(false) // otherwise you say you want to leak, please don't!
                                .setNameFormat("direct-metrics-counter-committer")
                                .build());
        DirectGraph graph = graphVisitor.getGraph();
        EvaluationContext context =
                EvaluationContext.create(
                        clockSupplier.get(),
                        Enforcement.bundleFactoryFor(enabledEnforcements, graph),
                        graph,
                        keyedPValueVisitor.getKeyedPValues(),
                        metricsPool);
        TransformEvaluatorRegistry registry =
                TransformEvaluatorRegistry.javaSdkNativeRegistry(context, options);
        PipelineExecutor executor =
                ExecutorServiceParallelExecutor.create(
                        options.getTargetParallelism(),
                        registry,
                        Enforcement.defaultModelEnforcements(enabledEnforcements),
                        context,
                        metricsPool);
        executor.start(graph, RootProviderRegistry.javaNativeRegistry(context, options));
        DirectPipelineResult result = new DirectPipelineResult(executor, context);
        if (options.isBlockOnRun()) {
            try {
                result.waitUntilFinish();
            } catch (UserCodeException userException) {
                // unwrap user-code failures so callers see the root cause
                throw new PipelineExecutionException(userException.getCause());
            } catch (Throwable t) {
                if (t instanceof RuntimeException) {
                    throw (RuntimeException) t;
                }
                throw new RuntimeException(t);
            }
        }
        return result;
    } finally {
        MetricsEnvironment.setMetricsSupported(false);
    }
}
/**
 * A Pipeline object can be run twice; each run re-executes every DoFn, so the
 * per-element counter accumulates across both runs (3 elements x 2 runs = 6).
 */
@Test
public void reusePipelineSucceeds() throws Throwable {
    Pipeline p = getPipeline();
    changed = new AtomicInteger(0);
    PCollection<KV<String, Long>> counts =
            p.apply(Create.of("foo", "bar", "foo", "baz", "bar", "foo"))
                    .apply(
                            MapElements.via(
                                    new SimpleFunction<String, String>() {
                                        @Override
                                        public String apply(String input) {
                                            return input;
                                        }
                                    }))
                    .apply(Count.perElement());
    PCollection<String> countStrs =
            counts.apply(
                    MapElements.via(
                            new SimpleFunction<KV<String, Long>, String>() {
                                @Override
                                public String apply(KV<String, Long> input) {
                                    return String.format("%s: %s", input.getKey(), input.getValue());
                                }
                            }));
    counts.apply(
            ParDo.of(
                    new DoFn<KV<String, Long>, Void>() {
                        @ProcessElement
                        public void updateChanged(ProcessContext c) {
                            changed.getAndIncrement();
                        }
                    }));
    PAssert.that(countStrs).containsInAnyOrder("baz: 1", "bar: 2", "foo: 3");
    DirectPipelineResult result = (DirectPipelineResult) p.run();
    result.waitUntilFinish();
    DirectPipelineResult otherResult = (DirectPipelineResult) p.run();
    otherResult.waitUntilFinish();
    assertThat("Each element should have been processed twice", changed.get(), equalTo(6));
}
/** Delegates paged article lookup to the mapper. */
@Override
public PageResult<ArticleDO> getArticlePage(ArticlePageReqVO pageReqVO) {
    return articleMapper.selectPage(pageReqVO);
}
/**
 * Template test for paged article queries: inserts one matching row plus one
 * non-matching row per filter field, then asserts only the matching row is returned.
 */
@Test
@Disabled // TODO replace the null values below with real ones, then remove @Disabled
public void testGetArticlePage() {
    // mock data
    ArticleDO dbArticle = randomPojo(ArticleDO.class, o -> { // matched by the query below
        o.setCategoryId(null);
        o.setTitle(null);
        o.setAuthor(null);
        o.setPicUrl(null);
        o.setIntroduction(null);
        o.setBrowseCount(null);
        o.setSort(null);
        o.setStatus(null);
        o.setSpuId(null);
        o.setRecommendHot(null);
        o.setRecommendBanner(null);
        o.setContent(null);
        o.setCreateTime(null);
    });
    articleMapper.insert(dbArticle);
    // test non-matching categoryId
    articleMapper.insert(cloneIgnoreId(dbArticle, o -> o.setCategoryId(null)));
    // test non-matching title
    articleMapper.insert(cloneIgnoreId(dbArticle, o -> o.setTitle(null)));
    // test non-matching author
    articleMapper.insert(cloneIgnoreId(dbArticle, o -> o.setAuthor(null)));
    // test non-matching picUrl
    articleMapper.insert(cloneIgnoreId(dbArticle, o -> o.setPicUrl(null)));
    // test non-matching introduction
    articleMapper.insert(cloneIgnoreId(dbArticle, o -> o.setIntroduction(null)));
    // test non-matching browseCount
    articleMapper.insert(cloneIgnoreId(dbArticle, o -> o.setBrowseCount(null)));
    // test non-matching sort
    articleMapper.insert(cloneIgnoreId(dbArticle, o -> o.setSort(null)));
    // test non-matching status
    articleMapper.insert(cloneIgnoreId(dbArticle, o -> o.setStatus(null)));
    // test non-matching spuId
    articleMapper.insert(cloneIgnoreId(dbArticle, o -> o.setSpuId(null)));
    // test non-matching recommendHot
    articleMapper.insert(cloneIgnoreId(dbArticle, o -> o.setRecommendHot(null)));
    // test non-matching recommendBanner
    articleMapper.insert(cloneIgnoreId(dbArticle, o -> o.setRecommendBanner(null)));
    // test non-matching content
    articleMapper.insert(cloneIgnoreId(dbArticle, o -> o.setContent(null)));
    // test non-matching createTime
    articleMapper.insert(cloneIgnoreId(dbArticle, o -> o.setCreateTime(null)));
    // prepare parameters
    ArticlePageReqVO reqVO = new ArticlePageReqVO();
    reqVO.setCategoryId(null);
    reqVO.setTitle(null);
    reqVO.setAuthor(null);
    reqVO.setStatus(null);
    reqVO.setSpuId(null);
    reqVO.setRecommendHot(null);
    reqVO.setRecommendBanner(null);
    reqVO.setCreateTime(buildBetweenTime(2023, 2, 1, 2023, 2, 28));
    // invoke
    PageResult<ArticleDO> pageResult = articleService.getArticlePage(reqVO);
    // assert
    assertEquals(1, pageResult.getTotal());
    assertEquals(1, pageResult.getList().size());
    assertPojoEquals(dbArticle, pageResult.getList().get(0));
}
/** Whether property matching for this type is case sensitive. */
public boolean isCaseSensitive() {
    return caseSensitive;
}
/** Case sensitivity defaults to false and round-trips through the setter. */
@Test
public void testIsCaseSensitive() {
    PropertyType instance = new PropertyType();
    assertFalse(instance.isCaseSensitive());
    instance.setCaseSensitive(true);
    assertTrue(instance.isCaseSensitive());
}
/** Returns the held KsqlConfig; the internal-representation exposure is deliberate (see annotation). */
@Override
@SuppressFBWarnings(value = "EI_EXPOSE_REP")
public KsqlConfig getKsqlConfig() {
    return ksqlConfig;
}
/** When a config record already exists in the topic, it is merged into the returned KsqlConfig. */
@Test
public void shouldMergeExistingConfigIfExists() {
    // Given:
    addPollResult(KafkaConfigStore.CONFIG_MSG_KEY, savedProperties);
    expectRead(consumerBefore);
    // When:
    final KsqlConfig mergedConfig = getKsqlConfig();
    // Then:
    verifyMergedConfig(mergedConfig);
}
/**
 * Evaluates the predicate against the given input values: false when the named
 * parameter is absent, otherwise the result of evaluating its value.
 *
 * @param values input values keyed by parameter name
 * @return the predicate result
 */
@Override
public boolean evaluate(Map<String, Object> values) {
    if (!values.containsKey(name)) {
        return false;
    }
    logger.debug("found matching parameter, evaluating... ");
    return evaluation(values.get(name));
}
/** IN predicate on a string set: false for missing/unmatched values, true for a set member. */
@Test
void evaluateStringIn() {
    ARRAY_TYPE arrayType = ARRAY_TYPE.STRING;
    List<Object> values = getObjects(arrayType, 4);
    KiePMMLSimpleSetPredicate kiePMMLSimpleSetPredicate = getKiePMMLSimpleSetPredicate(values, arrayType, IN_NOTIN.IN);
    Map<String, Object> inputData = new HashMap<>();
    inputData.put("FAKE", "NOT"); // predicate's parameter name absent entirely
    assertThat(kiePMMLSimpleSetPredicate.evaluate(inputData)).isFalse();
    inputData.put(SIMPLE_SET_PREDICATE_NAME, "NOT"); // present but not in the set
    assertThat(kiePMMLSimpleSetPredicate.evaluate(inputData)).isFalse();
    inputData.put(SIMPLE_SET_PREDICATE_NAME, values.get(0)); // in the set
    assertThat(kiePMMLSimpleSetPredicate.evaluate(inputData)).isTrue();
}
/**
 * Derives an annotated cluster state from the current node states and params:
 * computes each node's effective state, takes down under-available groups,
 * marks the whole cluster DOWN if warranted, and infers the distribution bit
 * count. Per-node and cluster-level reasons are collected for annotation.
 */
static AnnotatedClusterState generatedStateFrom(final Params params) {
    final ContentCluster cluster = params.cluster;
    final ClusterState workingState = ClusterState.emptyState();
    final Map<Node, NodeStateReason> nodeStateReasons = new HashMap<>();
    for (final NodeInfo nodeInfo : cluster.getNodeInfos()) {
        final NodeState nodeState = computeEffectiveNodeState(nodeInfo, params, nodeStateReasons);
        workingState.setNodeState(nodeInfo.getNode(), nodeState);
    }
    takeDownGroupsWithTooLowAvailability(workingState, nodeStateReasons, params);
    final Optional<ClusterStateReason> reasonToBeDown = clusterDownReason(workingState, params);
    if (reasonToBeDown.isPresent()) {
        workingState.setClusterState(State.DOWN);
    }
    workingState.setDistributionBits(inferDistributionBitCount(cluster, workingState, params));
    return new AnnotatedClusterState(workingState, reasonToBeDown, nodeStateReasons);
}
/** A wanted MAINTENANCE state wins over the premature-crash-count threshold being exceeded. */
@Test
void exceeded_crash_count_does_not_override_wanted_maintenance_state() {
    final ClusterFixture fixture = ClusterFixture.forFlatCluster(5)
            .bringEntireClusterUp()
            .proposeStorageNodeWantedState(1, State.MAINTENANCE);
    final ClusterStateGenerator.Params params = fixture.generatorParams().maxPrematureCrashes(10);
    final NodeInfo nodeInfo = fixture.cluster.getNodeInfo(new Node(NodeType.STORAGE, 1));
    nodeInfo.setPrematureCrashCount(11); // above the threshold of 10
    final AnnotatedClusterState state = ClusterStateGenerator.generatedStateFrom(params);
    assertThat(state.toString(), equalTo("distributor:5 storage:5 .1.s:m"));
}
/** Parses a partial XML string (UTF-8) into an instance of the requested config class. */
public <T> T fromXmlPartial(String partial, Class<T> o) throws Exception {
    return fromXmlPartial(toInputStream(partial, UTF_8), o);
}
/** A pipeline definition parsed from partial XML exposes its name, stage count and job config. */
@Test
void shouldLoadPipelineFromXmlPartial() throws Exception {
    String pipelineXmlPartial =
            """
            <pipeline name="pipeline">
              <materials>
                <hg url="/hgrepo"/>
              </materials>
              <stage name="mingle">
                <jobs>
                  <job name="functional">
                    <artifacts>
                      <log src="artifact1.xml" dest="cruise-output" />
                    </artifacts>
                  </job>
                </jobs>
              </stage>
            </pipeline>
            """;
    PipelineConfig pipeline = xmlLoader.fromXmlPartial(pipelineXmlPartial, PipelineConfig.class);
    assertThat(pipeline.name()).isEqualTo(new CaseInsensitiveString("pipeline"));
    assertThat(pipeline.size()).isEqualTo(1);
    assertThat(pipeline.findBy(new CaseInsensitiveString("mingle")).jobConfigByInstanceName("functional", true)).isNotNull();
}
/**
 * Loads the metrics configuration for the given prefix, trying the
 * prefix-specific properties file first and falling back to the default file.
 */
static MetricsConfig create(String prefix) {
    return loadFirst(prefix, "hadoop-metrics2-" + StringUtils.toLowerCase(prefix) + ".properties", DEFAULT_FILE_NAME);
}
/** A missing properties file yields an empty (not failing) configuration. */
@Test
public void testMissingFiles() {
    MetricsConfig config = MetricsConfig.create("JobTracker", "non-existent.properties");
    assertTrue(config.isEmpty());
}
/**
 * Runs the paged operation: repeatedly fetches pages and applies the operation
 * to each item until the source is exhausted, {@code maxPages} pages have been
 * processed, or {@code maxTime} milliseconds have elapsed (checked between pages).
 *
 * <p>When {@code swallowExceptions} is set, per-item exceptions are counted and
 * logged instead of propagating; otherwise the first exception is rethrown.
 * A summary is reported via {@code finalReport} in all completion paths.
 */
public void execute() {
    // Parameterized logging avoids string concatenation when debug is disabled.
    logger.debug("[{}] Starting execution of paged operation. maximum time: {}, maximum pages: {}",
            getOperationName(), maxTime, maxPages);
    long startTime = System.currentTimeMillis();
    long executionTime = 0;
    int i = 0;
    int exceptionsSwallowedCount = 0;
    int operationsCompleted = 0;
    Set<String> exceptionsSwallowedClasses = new HashSet<>();
    while (i < maxPages && executionTime < maxTime) {
        Collection<T> page = fetchPage();
        if (page == null || page.size() == 0) {
            break; // source exhausted
        }
        for (T item : page) {
            try {
                doOperation(item);
                operationsCompleted++;
            } catch (Exception e) {
                if (swallowExceptions) {
                    exceptionsSwallowedCount++;
                    exceptionsSwallowedClasses.add(e.getClass().getName());
                    logger.debug("Swallowing exception {}", e.getMessage(), e);
                } else {
                    logger.debug("Rethrowing exception {}", e.getMessage());
                    throw e;
                }
            }
        }
        i++;
        executionTime = System.currentTimeMillis() - startTime;
    }
    finalReport(operationsCompleted, exceptionsSwallowedCount, exceptionsSwallowedClasses);
}
/** The time budget stops paging: no two consecutive fetches both exceed maxTime. */
@Test(timeout = 1000L)
@Ignore
public void execute_nonzerotime() {
    Long timeMillis = 200L;
    CountingPageOperation op = new CountingPageOperation(Integer.MAX_VALUE, timeMillis);
    op.execute();
    assertFalse("last fetch time " + op.getTimeToLastFetch() + ""
            + " and previous fetch time " + op.getTimeToPreviousFetch()
            + " exceed max time" + timeMillis,
            op.getTimeToLastFetch() > timeMillis && op.getTimeToPreviousFetch() > timeMillis);
}
/**
 * Writes the raw value-and-timestamp to the wrapped store, then logs the change.
 * For tombstones (null value) the record context's timestamp is logged, since no
 * timestamp can be extracted from the value itself.
 */
@Override
public void put(final Bytes key, final byte[] valueAndTimestamp) {
    wrapped().put(key, valueAndTimestamp);
    log(key, rawValue(valueAndTimestamp), valueAndTimestamp == null ? context.timestamp() : timestamp(valueAndTimestamp));
}
/** A stored value is readable back through get(). */
@Test
public void shouldReturnValueOnGetWhenExists() {
    store.put(hello, rawWorld);
    assertThat(store.get(hello), equalTo(rawWorld));
}
/**
 * Decrements the reference count via the atomic updater and delegates the
 * result (whether the count reached zero) to handleRelease.
 */
@Override
public boolean release() {
    return handleRelease(updater.release(this));
}
/** Releasing past zero throws IllegalReferenceCountException with the exact count/decrement message. */
@Test
public void testReleaseErrorMessage() {
    AbstractReferenceCountedByteBuf referenceCounted = newReferenceCounted();
    assertTrue(referenceCounted.release());
    try {
        referenceCounted.release(1);
        fail("IllegalReferenceCountException didn't occur");
    } catch (IllegalReferenceCountException e) {
        assertEquals("refCnt: 0, decrement: 1", e.getMessage());
    }
}
/**
 * Registers per-segment/column/partition delay suppliers for a table, creating
 * the table's delay tracker on first use. Guarded by the instance lock.
 *
 * @param tableName            table the suppliers belong to
 * @param segmentName          segment being tracked
 * @param columnName           column being tracked
 * @param partition            partition id
 * @param numDocsDelaySupplier supplier of the current indexing delay in documents
 * @param timeMsDelaySupplier  supplier of the current indexing delay in milliseconds
 */
public void registerDelaySuppliers(String tableName, String segmentName, String columnName, int partition,
    Supplier<Integer> numDocsDelaySupplier, Supplier<Long> timeMsDelaySupplier) {
  _lock.lock();
  try {
    // computeIfAbsent replaces the getOrDefault/put pair: same behavior (put of an
    // existing value was a no-op), one map lookup fewer, and clearer intent.
    _tableToPartitionToDelayMs.computeIfAbsent(tableName, TableDelay::new)
        .registerDelaySuppliers(segmentName, columnName, partition, numDocsDelaySupplier, timeMsDelaySupplier);
  } finally {
    _lock.unlock();
  }
}
/** Registering suppliers for two partitions creates docs and ms gauges for each partition. */
@Test
public void testRegistersGaugesPerPartition() {
    _realtimeLuceneIndexingDelayTracker.registerDelaySuppliers("table1", "segment1", "column1", 1, () -> 0, () -> 0L);
    _realtimeLuceneIndexingDelayTracker.registerDelaySuppliers("table1", "segment1", "column1", 2, () -> 0, () -> 0L);
    verify(_serverMetrics).setOrUpdatePartitionGauge(eq("table1"), eq(1), eq(ServerGauge.LUCENE_INDEXING_DELAY_DOCS), Mockito.any());
    verify(_serverMetrics).setOrUpdatePartitionGauge(eq("table1"), eq(1), eq(ServerGauge.LUCENE_INDEXING_DELAY_MS), Mockito.any());
    verify(_serverMetrics).setOrUpdatePartitionGauge(eq("table1"), eq(2), eq(ServerGauge.LUCENE_INDEXING_DELAY_DOCS), Mockito.any());
    verify(_serverMetrics).setOrUpdatePartitionGauge(eq("table1"), eq(2), eq(ServerGauge.LUCENE_INDEXING_DELAY_MS), Mockito.any());
}
/**
 * Handles a REST request, delegating to the underlying RestLi server unless the
 * request is multipart (in which case isMultipart reports the error via the callback).
 */
@Override
public void handleRequest(RestRequest request, RequestContext requestContext, Callback<RestResponse> callback) {
    //This code path cannot accept content types or accept types that contain
    //multipart/related. This is because these types of requests will usually have very large payloads and therefore
    //would degrade server performance since RestRequest reads everything into memory.
    if (!isMultipart(request, requestContext, callback)) {
        _restRestLiServer.handleRequest(request, requestContext, callback);
    }
}
/** Verifies that an unexpected resource exception surfaces as a RestException whose ErrorResponse body carries the default internal-error message, for both the REST and stream request paths. */
@Test(dataProvider = "restOrStream") public void testInternalErrorMessage(final RestOrStream restOrStream) throws Exception { final StatusCollectionResource statusResource = getMockResource(StatusCollectionResource.class); EasyMock.expect(statusResource.get(eq(1L))).andThrow(new IllegalArgumentException("oops")).once(); replay(statusResource); Callback<RestResponse> restResponseCallback = new Callback<RestResponse>() { @Override public void onSuccess(RestResponse restResponse) { fail(); } @Override public void onError(Throwable e) { assertTrue(e instanceof RestException); RestException restException = (RestException) e; RestResponse restResponse = restException.getResponse(); try { ErrorResponse responseBody = DataMapUtils.read(restResponse.getEntity().asInputStream(), ErrorResponse.class, restResponse.getHeaders()); assertEquals(responseBody.getMessage(), ErrorResponseBuilder.DEFAULT_INTERNAL_ERROR_MESSAGE); EasyMock.verify(statusResource); EasyMock.reset(statusResource); } catch (Exception e2) { fail(e2.toString()); } } }; if (restOrStream == RestOrStream.REST) { RestRequest request = new RestRequestBuilder(new URI("/statuses/1")) .setHeader(RestConstants.HEADER_RESTLI_PROTOCOL_VERSION, AllProtocolVersions.BASELINE_PROTOCOL_VERSION.toString()).build(); _server.handleRequest(request, new RequestContext(), restResponseCallback); } else { StreamRequest streamRequest = new StreamRequestBuilder(new URI("/statuses/1")) .setHeader(RestConstants.HEADER_RESTLI_PROTOCOL_VERSION, AllProtocolVersions.BASELINE_PROTOCOL_VERSION.toString()) .build(EntityStreams.emptyStream()); Callback<StreamResponse> callback = new Callback<StreamResponse>() { @Override public void onSuccess(StreamResponse streamResponse) { fail(); } @Override public void onError(Throwable e) { Messages.toRestException((StreamException) e, new Callback<RestException>() { @Override public void onError(Throwable e) { Assert.fail(); } @Override public void onSuccess(RestException result) { 
restResponseCallback.onError(result); } }); } }; _server.handleRequest(streamRequest, new RequestContext(), callback); } }
/**
 * Builds the serialized Iceberg metadata spec for a table snapshot: resolves the snapshot
 * (falling back to the current one when snapshotId is -1, returning an EMPTY spec when the
 * table has no snapshots), serializes the native table with any caching FileIO unwrapped,
 * and collects data/delete manifest splits filtered by the optional serialized predicate.
 * Column stats are loaded only when collection is enabled or equality deletes may be present.
 *
 * NOTE(review): nativeTable.snapshot(snapshotId) may return null for an unknown snapshot id,
 * which would NPE at snapshot.dataManifests(...) below — confirm callers always pass a valid id.
 */
@Override public SerializedMetaSpec getSerializedMetaSpec(String dbName, String tableName, long snapshotId, String serializedPredicate, MetadataTableType metadataTableType) { List<RemoteMetaSplit> remoteMetaSplits = new ArrayList<>(); IcebergTable icebergTable = (IcebergTable) getTable(dbName, tableName); org.apache.iceberg.Table nativeTable = icebergTable.getNativeTable(); if (snapshotId == -1) { Snapshot currentSnapshot = nativeTable.currentSnapshot(); if (currentSnapshot == null) { return IcebergMetaSpec.EMPTY; } else { snapshotId = nativeTable.currentSnapshot().snapshotId(); } } Snapshot snapshot = nativeTable.snapshot(snapshotId); Expression predicate = Expressions.alwaysTrue(); if (!Strings.isNullOrEmpty(serializedPredicate)) { predicate = SerializationUtil.deserializeFromBase64(serializedPredicate); } FileIO fileIO = nativeTable.io(); if (fileIO instanceof IcebergCachingFileIO) { fileIO = ((IcebergCachingFileIO) fileIO).getWrappedIO(); } String serializedTable = SerializationUtil.serializeToBase64(new SerializableTable(nativeTable, fileIO)); if (IcebergMetaSplit.onlyNeedSingleSplit(metadataTableType)) { return new IcebergMetaSpec(serializedTable, List.of(IcebergMetaSplit.placeholderSplit()), false); } List<ManifestFile> dataManifests = snapshot.dataManifests(nativeTable.io()); List<ManifestFile> matchingDataManifests = filterManifests(dataManifests, nativeTable, predicate); for (ManifestFile file : matchingDataManifests) { remoteMetaSplits.add(IcebergMetaSplit.from(file)); } List<ManifestFile> deleteManifests = snapshot.deleteManifests(nativeTable.io()); List<ManifestFile> matchingDeleteManifests = filterManifests(deleteManifests, nativeTable, predicate); if (metadataTableType == MetadataTableType.FILES || metadataTableType == MetadataTableType.PARTITIONS) { for (ManifestFile file : matchingDeleteManifests) { remoteMetaSplits.add(IcebergMetaSplit.from(file)); } return new IcebergMetaSpec(serializedTable, remoteMetaSplits, false); } boolean loadColumnStats = 
enableCollectColumnStatistics(ConnectContext.get()) || (!matchingDeleteManifests.isEmpty() && mayHaveEqualityDeletes(snapshot) && catalogProperties.enableDistributedPlanLoadColumnStatsWithEqDelete()); return new IcebergMetaSpec(serializedTable, remoteMetaSplits, loadColumnStats); }
/** Verifies that getSerializedMetaSpec with snapshotId -1 resolves the current snapshot, returns an IcebergMetaSpec with one manifest split, and skips column stats when they are disabled. */
@Test public void testGetMetaSpec(@Mocked LocalMetastore localMetastore, @Mocked TemporaryTableMgr temporaryTableMgr) { mockedNativeTableG.newAppend().appendFile(FILE_B_5).commit(); new MockUp<IcebergHiveCatalog>() { @Mock org.apache.iceberg.Table getTable(String dbName, String tableName) throws StarRocksConnectorException { return mockedNativeTableG; } }; IcebergHiveCatalog icebergHiveCatalog = new IcebergHiveCatalog(CATALOG_NAME, new Configuration(), DEFAULT_CONFIG); CachingIcebergCatalog cachingIcebergCatalog = new CachingIcebergCatalog( CATALOG_NAME, icebergHiveCatalog, DEFAULT_CATALOG_PROPERTIES, Executors.newSingleThreadExecutor()); IcebergMetadata metadata = new IcebergMetadata(CATALOG_NAME, HDFS_ENVIRONMENT, cachingIcebergCatalog, Executors.newSingleThreadExecutor(), Executors.newSingleThreadExecutor(), new IcebergCatalogProperties(DEFAULT_CONFIG)); ConnectContext.get().getSessionVariable().setEnableIcebergColumnStatistics(false); MetadataMgr metadataMgr = new MetadataMgr(localMetastore, temporaryTableMgr, null, null); new MockUp<MetadataMgr>() { @Mock public Optional<ConnectorMetadata> getOptionalMetadata(String catalogName) { return Optional.of(metadata); } }; SerializedMetaSpec metaSpec = metadataMgr.getSerializedMetaSpec( "catalog", "db", "tg", -1, null, MetadataTableType.LOGICAL_ICEBERG_METADATA); Assert.assertTrue(metaSpec instanceof IcebergMetaSpec); IcebergMetaSpec icebergMetaSpec = metaSpec.cast(); List<RemoteMetaSplit> splits = icebergMetaSpec.getSplits(); Assert.assertFalse(icebergMetaSpec.loadColumnStats()); Assert.assertEquals(1, splits.size()); }
boolean shouldRetry(GetQueryExecutionResponse getQueryExecutionResponse) { String stateChangeReason = getQueryExecutionResponse.queryExecution().status().stateChangeReason(); if (this.retry.contains("never")) { LOG.trace("AWS Athena start query execution detected error ({}), marked as not retryable", stateChangeReason); return false; } if (this.retry.contains("always")) { LOG.trace("AWS Athena start query execution detected error ({}), marked as retryable", stateChangeReason); return true; } // Generic errors happen sometimes in Athena. It's possible that a retry will fix the problem. if (stateChangeReason != null && stateChangeReason.contains("GENERIC_INTERNAL_ERROR") && (this.retry.contains("generic") || this.retry.contains("retryable"))) { LOG.trace("AWS Athena start query execution detected generic error ({}), marked as retryable", stateChangeReason); return true; } // Resource exhaustion happens sometimes in Athena. It's possible that a retry will fix the problem. if (stateChangeReason != null && stateChangeReason.contains("exhausted resources at this scale factor") && (this.retry.contains("exhausted") || this.retry.contains("retryable"))) { LOG.trace("AWS Athena start query execution detected resource exhaustion error ({}), marked as retryable", stateChangeReason); return true; } return false; }
/** Verifies that shouldRetry returns true for any failure (even with a null state-change reason) when the retry mode is "always". */
@Test void shouldRetryReturnsTrueWhenRetryIsAlways() { Athena2QueryHelper helper = athena2QueryHelperWithRetry("always"); assertTrue(helper.shouldRetry(newGetQueryExecutionResponse(QueryExecutionState.FAILED, null))); }
/**
 * Synchronously consumes a single message through the registered listener for direct
 * (admin-triggered) consumption: maps the listener status to a CMResult, records a
 * CR_THROW_EXCEPTION result with the exception summary when the listener throws, and
 * reports the time spent. The result is always non-ordered with auto-commit enabled.
 */
@Override public ConsumeMessageDirectlyResult consumeMessageDirectly(MessageExt msg, String brokerName) { ConsumeMessageDirectlyResult result = new ConsumeMessageDirectlyResult(); result.setOrder(false); result.setAutoCommit(true); List<MessageExt> msgs = new ArrayList<>(); msgs.add(msg); MessageQueue mq = new MessageQueue(); mq.setBrokerName(brokerName); mq.setTopic(msg.getTopic()); mq.setQueueId(msg.getQueueId()); ConsumeConcurrentlyContext context = new ConsumeConcurrentlyContext(mq); this.defaultMQPushConsumerImpl.resetRetryAndNamespace(msgs, this.consumerGroup); final long beginTime = System.currentTimeMillis(); log.info("consumeMessageDirectly receive new message: {}", msg); try { ConsumeConcurrentlyStatus status = this.messageListener.consumeMessage(msgs, context); if (status != null) { switch (status) { case CONSUME_SUCCESS: result.setConsumeResult(CMResult.CR_SUCCESS); break; case RECONSUME_LATER: result.setConsumeResult(CMResult.CR_LATER); break; default: break; } } else { result.setConsumeResult(CMResult.CR_RETURN_NULL); } } catch (Throwable e) { result.setConsumeResult(CMResult.CR_THROW_EXCEPTION); result.setRemark(UtilAll.exceptionSimpleDesc(e)); log.warn("consumeMessageDirectly exception: {} Group: {} Msgs: {} MQ: {}", UtilAll.exceptionSimpleDesc(e), ConsumeMessagePopConcurrentlyService.this.consumerGroup, msgs, mq, e); } result.setSpentTimeMills(System.currentTimeMillis() - beginTime); log.info("consumeMessageDirectly Result: {}", result); return result; }
/** Verifies that a listener throwing a RuntimeException results in CR_THROW_EXCEPTION from consumeMessageDirectly. */
@Test public void testConsumeMessageDirectlyWithCrThrowException() { when(messageListener.consumeMessage(any(), any(ConsumeConcurrentlyContext.class))).thenThrow(new RuntimeException("exception")); ConsumeMessageDirectlyResult actual = popService.consumeMessageDirectly(createMessageExt(), defaultBroker); assertEquals(CMResult.CR_THROW_EXCEPTION, actual.getConsumeResult()); }
/**
 * Returns true when every element of {@code c} is present in this set, comparing by the
 * serialized (encoded) form used for storage. An empty collection is trivially contained.
 *
 * @throws NullPointerException if {@code c} is null
 */
@Override
public boolean containsAll(Collection<?> c) {
    checkNotNull(c, "Collection cannot be internal.");
    // An element is present iff its encoded representation is in the backing item set;
    // allMatch short-circuits on the first missing element, like the original loop.
    return c.stream().allMatch(item -> items.contains(serializer.encode(item)));
}
/** Verifies containsAll: two identically-filled sets are mutual subsets, and removing one element breaks containment. */
@Test public void testContainsAll() throws Exception { //Test contains with short circuiting
    Set<Integer> integersToCheck = Sets.newHashSet(); fillSet(10, integersToCheck); fillSet(10, set); assertTrue("The sets should be identical so mutual subsets.", set.containsAll(integersToCheck)); set.remove(9); assertFalse("The set should contain one fewer value.", set.containsAll(integersToCheck)); }
/**
 * Runs one data-collection pass, recording any failure in lastCollectorException instead of
 * propagating it, and tracks the elapsed duration in lastCollectDuration.
 *
 * @param javaInformationsList JVM information snapshots to collect from (must not be null)
 */
public void collectWithoutErrors(List<JavaInformations> javaInformationsList) {
    assert javaInformationsList != null;
    final long start = System.currentTimeMillis();
    try {
        estimatedMemorySize = collect(javaInformationsList);
        lastCollectorException = null;
    } catch (final Throwable t) { // NOPMD
        lastCollectorException = t;
        // include cause in message for debugging logs in the report
        LOG.warn("exception while collecting data: " + t, t);
    }
    // note: the "new JavaInformations" from collectLocalContextWithoutErrors is not included
    // in the collect duration, but it takes less than 1 ms (without a database)
    lastCollectDuration = Math.max(0, System.currentTimeMillis() - start);
}
/** Exercises collectWithoutErrors across multiple counters and collection passes, then verifies collect duration, counter JRobins, per-request JRobins (only test5 survives), and that unknown JRobin names return null. */
@Test public void testCollectWithoutErrors() throws IOException { final Counter counter = createCounter(); final Counter jspCounter = new Counter(Counter.JSP_COUNTER_NAME, null); final Counter strutsCounter = new Counter(Counter.STRUTS_COUNTER_NAME, null); final Counter jobCounter = new Counter(Counter.JOB_COUNTER_NAME, null); final Collector collector = new Collector(TEST, List.of(counter, jspCounter, strutsCounter, jobCounter)); if (collector.getCounters().isEmpty()) { fail("getCounters"); } counter.addRequest("test1", 0, 0, 0, false, 1000); counter.addRequest("test5", 10000, 200, 200, true, 10000); jspCounter.addRequest("test2", 0, 0, 0, false, 0); strutsCounter.addRequest("test3", 0, 0, 0, false, 0); jobCounter.addRequest("test4", 0, 0, 0, false, 0); collector.collectWithoutErrors(Collections.singletonList(new JavaInformations(null, true))); counter.addRequest("test2", 0, 0, 0, false, 1000); counter.addRequest("test3", 1000, 500, 500, false, 1000); counter.addRequest("test4", 10000, 200, 200, true, 10000); counter.addRequest("test5", 10000, 200, 200, true, 10000); counter.addRequest("test5", 10000, 200, 200, true, 10000); collector .collectWithoutErrors(Collections.singletonList(new JavaInformations(null, false))); final Counter buildsCounter = new Counter(Counter.BUILDS_COUNTER_NAME, null); new Collector(TEST, Collections.singletonList(buildsCounter)) .collectWithoutErrors(Collections.singletonList(new JavaInformations(null, false))); setProperty(Parameter.NO_DATABASE, "true"); try { new Collector(TEST, Collections.singletonList(counter)).collectWithoutErrors( Collections.singletonList(new JavaInformations(null, false))); } finally { setProperty(Parameter.NO_DATABASE, null); } if (collector.getLastCollectDuration() == 0) { fail("getLastCollectDuration"); } if (collector.getCounterJRobins().isEmpty()) { fail("getCounterJRobins"); } final Range range = Period.JOUR.getRange(); for (final JRobin jrobin : collector.getCounterJRobins()) { final JRobin robin = 
collector.getJRobin(jrobin.getName()); assertNotNull("getJRobin non null", robin); jrobin.graph(range, 80, 80); robin.deleteFile(); } for (final JRobin jrobin : collector.getOtherJRobins()) { final JRobin robin = collector.getJRobin(jrobin.getName()); assertNotNull("getJRobin non null", robin); robin.deleteFile(); } for (final CounterRequest request : counter.getRequests()) { final JRobin robin = collector.getJRobin(request.getId()); if ("test5".equals(request.getName())) { assertNotNull("getJRobin non null", robin); robin.deleteFile(); } else { assertNull("getJRobin null", robin); } } assertNull("getJRobin null", collector.getJRobin("n'importe quoi")); }
/**
 * Executes an SMPP replace-short-message command for the exchange: builds a ReplaceSm from the
 * exchange, sends it through the SMPP session, wraps any failure in SmppException, and writes
 * the message id back to the result message under SmppConstants.ID.
 *
 * @throws SmppException when the underlying session call fails
 */
@Override public void execute(Exchange exchange) throws SmppException { byte[] message = getShortMessage(exchange.getIn()); ReplaceSm replaceSm = createReplaceSmTempate(exchange); replaceSm.setShortMessage(message); if (log.isDebugEnabled()) { log.debug("Sending replacement command for a short message for exchange id '{}' and message id '{}'", exchange.getExchangeId(), replaceSm.getMessageId()); } try { session.replaceShortMessage( replaceSm.getMessageId(), TypeOfNumber.valueOf(replaceSm.getSourceAddrTon()), NumberingPlanIndicator.valueOf(replaceSm.getSourceAddrNpi()), replaceSm.getSourceAddr(), replaceSm.getScheduleDeliveryTime(), replaceSm.getValidityPeriod(), new RegisteredDelivery(replaceSm.getRegisteredDelivery()), replaceSm.getSmDefaultMsgId(), replaceSm.getShortMessage()); } catch (Exception e) { throw new SmppException(e); } if (log.isDebugEnabled()) { log.debug("Sent replacement command for a short message for exchange id '{}' and message id '{}'", exchange.getExchangeId(), replaceSm.getMessageId()); } Message rspMsg = ExchangeHelper.getResultMessage(exchange); rspMsg.setHeader(SmppConstants.ID, replaceSm.getMessageId()); }
/** Verifies that with the SMSC-default data coding (0x00), bytes outside the 7-bit ASCII range are narrowed to '?' before the replace-short-message call. */
@Test public void bodyWithSmscDefaultDataCodingNarrowedToCharset() throws Exception { final int dataCoding = 0x00; /* SMSC-default */ byte[] body = { (byte) 0xFF, 'A', 'B', (byte) 0x00, (byte) 0xFF, (byte) 0x7F, 'C', (byte) 0xFF }; byte[] bodyNarrowed = { '?', 'A', 'B', '\0', '?', (byte) 0x7F, 'C', '?' }; Exchange exchange = new DefaultExchange(new DefaultCamelContext(), ExchangePattern.InOut); exchange.getIn().setHeader(SmppConstants.COMMAND, "ReplaceSm"); exchange.getIn().setHeader(SmppConstants.DATA_CODING, dataCoding); exchange.getIn().setBody(body); command.execute(exchange); verify(session).replaceShortMessage((String) isNull(), eq(TypeOfNumber.UNKNOWN), eq(NumberingPlanIndicator.UNKNOWN), eq("1616"), (String) isNull(), (String) isNull(), eq(new RegisteredDelivery(SMSCDeliveryReceipt.SUCCESS_FAILURE)), eq((byte) 0), eq(bodyNarrowed)); }
/**
 * Returns the migration steps supplied at construction time.
 * Note: the internal list is returned directly, not a defensive copy.
 */
@Override public List<RegisteredMigrationStep> readAll() { return steps; }
/** Verifies that readAll() yields exactly the steps passed to the constructor, identified by migration numbers 1, 2 and 8. */
@Test public void readAll_iterates_over_all_steps_in_constructor_list_argument() { verifyContainsNumbers(underTest.readAll(), 1L, 2L, 8L); }
/**
 * Validates a source connector configuration, layering exactly-once-support and
 * transaction-boundary checks on top of the base validation.
 *
 * @return the validated config values, including any errors added by the extra checks
 */
@Override
protected Map<String, ConfigValue> validateSourceConnectorConfig(SourceConnector connector, ConfigDef configDef, Map<String, String> config) {
    final Map<String, ConfigValue> validated = super.validateSourceConnectorConfig(connector, configDef, config);
    // Both checks append their error messages to the validated values in place.
    validateSourceConnectorExactlyOnceSupport(config, validated, connector);
    validateSourceConnectorTransactionBoundary(config, validated, connector);
    return validated;
}
/** Verifies that an invalid exactly-once-support value produces exactly one validation error mentioning the allowed (case-insensitive) values. */
@Test public void testExactlyOnceSourceSupportValidationHandlesInvalidValuesGracefully() { herder = exactlyOnceHerder(); Map<String, String> config = new HashMap<>(); config.put(SourceConnectorConfig.EXACTLY_ONCE_SUPPORT_CONFIG, "invalid"); SourceConnector connectorMock = mock(SourceConnector.class); Map<String, ConfigValue> validatedConfigs = herder.validateSourceConnectorConfig( connectorMock, SourceConnectorConfig.configDef(), config); List<String> errors = validatedConfigs.get(SourceConnectorConfig.EXACTLY_ONCE_SUPPORT_CONFIG).errorMessages(); assertFalse(errors.isEmpty()); assertTrue( errors.get(0).contains("String must be one of (case insensitive): "), "Error message did not contain expected text: " + errors.get(0)); assertEquals(1, errors.size()); }
/**
 * Returns the catalog name for the given column. This metadata implementation exposes no
 * catalog, so the result is always the empty string.
 *
 * @param column 1-based column index; only column 1 is supported
 * @throws IllegalArgumentException if {@code column} is not 1
 */
@Override
public String getCatalogName(final int column) {
    Preconditions.checkArgument(column == 1);
    return "";
}
/** Verifies that getCatalogName(1) returns the empty string. */
@Test void assertGetCatalogName() throws SQLException { assertThat(actualMetaData.getCatalogName(1), is("")); }
/**
 * Defines an unbound boolean feature flag by delegating to the generic define() with the
 * UnboundBooleanFlag constructor as factory.
 *
 * @param flagId             unique flag identifier
 * @param defaultValue       value used when no raw flag is available
 * @param owners             responsible owners
 * @param createdAt          creation date (yyyy-MM-dd)
 * @param expiresAt          expiry date (yyyy-MM-dd)
 * @param description        human-readable description
 * @param modificationEffect what changing the flag does at runtime
 * @param dimensions         fetch-vector dimensions the flag varies over
 */
public static UnboundBooleanFlag defineFeatureFlag(String flagId, boolean defaultValue, List<String> owners, String createdAt, String expiresAt, String description, String modificationEffect, Dimension... dimensions) { return define(UnboundBooleanFlag::new, flagId, defaultValue, owners, createdAt, expiresAt, description, modificationEffect, dimensions); }
/**
 * Verifies a bound boolean flag: default value when no raw flag exists, the default and
 * pre-bound fetch-vector dimensions, and deserialization of a raw true value with a
 * per-lookup dimension added.
 */
@Test
void testBoolean() {
    final boolean defaultValue = false;
    FlagSource source = mock(FlagSource.class);
    BooleanFlag booleanFlag = Flags.defineFeatureFlag("id", defaultValue, List.of("owner"), "1970-01-01", "2100-01-01", "description", "modification effect", Dimension.ZONE_ID, Dimension.HOSTNAME)
            .with(Dimension.ZONE_ID, "a-zone")
            .bindTo(source);
    assertThat(booleanFlag.id().toString(), equalTo("id"));

    when(source.fetch(eq(new FlagId("id")), any())).thenReturn(Optional.empty());
    // default value without raw flag
    assertThat(booleanFlag.value(), equalTo(defaultValue));

    ArgumentCaptor<FetchVector> vector = ArgumentCaptor.forClass(FetchVector.class);
    verify(source).fetch(any(), vector.capture());
    // hostname is set by default
    Optional<String> hostname = vector.getValue().getValue(Dimension.HOSTNAME);
    assertTrue(hostname.isPresent());
    assertFalse(hostname.get().isEmpty());
    // zone is set because it was set on the unbound flag above
    assertThat(vector.getValue().getValue(Dimension.ZONE_ID), is(Optional.of("a-zone")));
    // application and node type are not set
    assertThat(vector.getValue().getValue(Dimension.INSTANCE_ID), is(Optional.empty()));
    assertThat(vector.getValue().getValue(Dimension.NODE_TYPE), is(Optional.empty()));

    RawFlag rawFlag = mock(RawFlag.class);
    when(source.fetch(eq(new FlagId("id")), any())).thenReturn(Optional.of(rawFlag));
    when(rawFlag.asJsonNode()).thenReturn(BooleanNode.getTrue());
    // raw flag deserializes to true
    assertThat(booleanFlag.with(Dimension.INSTANCE_ID, "an-app").value(), equalTo(true));
    verify(source, times(2)).fetch(any(), vector.capture());
    // application was set on the (bound) flag.
    assertThat(vector.getValue().getValue(Dimension.INSTANCE_ID), is(Optional.of("an-app")));
}
/**
 * Routes touch events to the mini (popup) keyboard while it is showing, translating the event
 * coordinates and recycling the synthesized MotionEvent; otherwise falls back to the normal
 * keyboard handling.
 */
@Override public boolean onTouchEvent(@NonNull MotionEvent me) { if (getMiniKeyboard() != null && mMiniKeyboardPopup.isShowing()) { final int miniKeyboardX = (int) me.getX(); final int miniKeyboardY = (int) me.getY(); final int action = MotionEventCompat.getActionMasked(me); MotionEvent translated = generateMiniKeyboardMotionEvent(action, miniKeyboardX, miniKeyboardY, me.getEventTime()); getMiniKeyboard().onTouchEvent(translated); translated.recycle(); return true; } return super.onTouchEvent(me); }
/** Verifies that long-pressing a key with popup characters shows the pressed drawable state during the press and returns to normal after ACTION_UP. */
@Test public void testLongPressKeyPressStateWithPopupCharacters() { final AnyKeyboard.AnyKey key = findKey('w'); Assert.assertTrue(key.popupCharacters.length() > 0); KeyDrawableStateProvider provider = new KeyDrawableStateProvider( R.attr.key_type_function, R.attr.key_type_action, R.attr.action_done, R.attr.action_search, R.attr.action_go); Assert.assertArrayEquals(provider.KEY_STATE_NORMAL, key.getCurrentDrawableState(provider)); Point keyPoint = ViewTestUtils.getKeyCenterPoint(key); ViewTestUtils.navigateFromTo(mViewUnderTest, keyPoint, keyPoint, 400, true, false); Assert.assertArrayEquals(provider.KEY_STATE_PRESSED, key.getCurrentDrawableState(provider)); mViewUnderTest.onTouchEvent( MotionEvent.obtain( SystemClock.uptimeMillis(), SystemClock.uptimeMillis(), MotionEvent.ACTION_UP, keyPoint.x, keyPoint.y, 0)); Assert.assertArrayEquals(provider.KEY_STATE_NORMAL, key.getCurrentDrawableState(provider)); }
/**
 * Intentionally a no-op: this step adds no fields to and removes none from the row metadata,
 * so the incoming row structure passes through unchanged.
 */
public void getFields( RowMetaInterface r, String name, RowMetaInterface[] info, StepMeta nextStep, VariableSpace space, Repository repository, IMetaStore metaStore ) throws KettleStepException { }
/** Verifies that getFields leaves the row metadata untouched, both when empty and when it already holds a String field. */
@Test public void testGetFields() throws KettleStepException { SalesforceDeleteMeta meta = new SalesforceDeleteMeta(); meta.setDefault(); RowMetaInterface r = new RowMeta(); meta.getFields( r, "thisStep", null, null, new Variables(), null, null ); assertEquals( 0, r.size() ); r.clear(); r.addValueMeta( new ValueMetaString( "testString" ) ); meta.getFields( r, "thisStep", null, null, new Variables(), null, null ); assertEquals( 1, r.size() ); assertEquals( ValueMetaInterface.TYPE_STRING, r.getValueMeta( 0 ).getType() ); assertEquals( "testString", r.getValueMeta( 0 ).getName() ); }
/**
 * Coordinates a pull query across the hosts that own the queried partitions: locates all
 * partition owners, fails fast with a MaterializationException when any partition has no
 * selectable host, then runs the routing rounds asynchronously on the coordinator executor.
 *
 * @return a future completed when all rounds finish, or completed exceptionally on failure
 */
public CompletableFuture<Void> handlePullQuery(
    final ServiceContext serviceContext,
    final PullPhysicalPlan pullPhysicalPlan,
    final ConfiguredStatement<Query> statement,
    final RoutingOptions routingOptions,
    final PullQueryWriteStream pullQueryQueue,
    final CompletableFuture<Void> shouldCancelRequests
) {
    final List<KsqlPartitionLocation> allLocations = pullPhysicalPlan.getMaterialization().locator()
        .locate(
            pullPhysicalPlan.getKeys(),
            routingOptions,
            routingFilterFactory,
            pullPhysicalPlan.getPlanType() == PullPhysicalPlanType.RANGE_SCAN
        );
    // Partitions for which no located host passed the routing filters.
    final Map<Integer, List<Host>> emptyPartitions = allLocations.stream()
        .filter(loc -> loc.getNodes().stream().noneMatch(node -> node.getHost().isSelected()))
        .collect(Collectors.toMap(
            KsqlPartitionLocation::getPartition,
            loc -> loc.getNodes().stream().map(KsqlNode::getHost).collect(Collectors.toList())));
    if (!emptyPartitions.isEmpty()) {
        final MaterializationException materializationException = new MaterializationException(
            "Unable to execute pull query. " + emptyPartitions.entrySet()
                .stream()
                .map(kv -> String.format(
                    "Partition %s failed to find valid host. Hosts scanned: %s",
                    kv.getKey(), kv.getValue()))
                .collect(Collectors.joining(", ", "[", "]")));
        LOG.debug(materializationException.getMessage());
        throw materializationException;
    }
    // at this point we should filter out the hosts that we should not route to
    final List<KsqlPartitionLocation> locations = allLocations
        .stream()
        .map(KsqlPartitionLocation::removeFilteredHosts)
        .collect(Collectors.toList());
    final CompletableFuture<Void> completableFuture = new CompletableFuture<>();
    // Run the routing rounds off-thread so the caller is not blocked on remote fetches.
    coordinatorExecutorService.submit(() -> {
        try {
            executeRounds(serviceContext, pullPhysicalPlan, statement, routingOptions, locations, pullQueryQueue, shouldCancelRequests);
            completableFuture.complete(null);
        } catch (Throwable t) {
            completableFuture.completeExceptionally(t);
        }
    });
    return completableFuture;
}
/**
 * Verifies the happy path of handlePullQuery: local partitions are executed via the physical
 * plan, remote partitions are fetched from the other node with the expected partition list in
 * the request properties, and all rows arrive in the pull query queue.
 */
@Test
public void shouldCallRouteQuery_success() throws InterruptedException, ExecutionException {
    // Given:
    locate(location1, location2, location3, location4);
    doAnswer(i -> {
        final PullQueryWriteStream queue = i.getArgument(1);
        queue.write(ImmutableList.of(
            StreamedRow.header(queryId, logicalSchema),
            PQ_ROW1
        ));
        return null;
    }).when(pullPhysicalPlan).execute(eq(ImmutableList.of(location1, location3)), any(), any());
    when(ksqlClient.makeQueryRequest(eq(node2.location()), any(), any(), any(), any(), any(), any()))
        .thenAnswer(i -> {
            Map<String, ?> requestProperties = i.getArgument(3);
            PullQueryWriteStream rowConsumer = i.getArgument(4);
            assertThat(requestProperties.get(KsqlRequestConfig.KSQL_REQUEST_QUERY_PULL_PARTITIONS), is("2,4"));
            rowConsumer.write(
                ImmutableList.of(
                    StreamedRow.header(queryId, logicalSchema),
                    StreamedRow.pullRow(GenericRow.fromList(ROW2), Optional.empty())));
            return RestResponse.successful(200, 2);
        }
    );

    // When:
    CompletableFuture<Void> future = haRouting.handlePullQuery(
        serviceContext, pullPhysicalPlan, statement, routingOptions, pullQueryQueue, disconnect);
    future.get();

    // Then:
    verify(pullPhysicalPlan).execute(eq(ImmutableList.of(location1, location3)), any(), any());
    assertThat(pullQueryQueue.size(), is(2));
    assertThat(pullQueryQueue.pollRow(1, TimeUnit.SECONDS).getRow(), is(ROW1));
    assertThat(pullQueryQueue.pollRow(1, TimeUnit.SECONDS).getRow(), is(ROW2));
}
/**
 * Creates (or idempotently re-creates) a task by delegating to the task manager with the
 * request's id and spec. Any failure from the task manager is propagated unchanged.
 */
public void createTask(CreateTaskRequest request) throws Throwable { taskManager.createTask(request.id(), request.spec()); }
/**
 * Exercises task creation end-to-end on a mini Trogdor cluster: creation is idempotent for an
 * identical spec, conflicts for a different spec, and the task progresses from pending to
 * running to done as mock time advances.
 */
@Test
public void testCreateTask() throws Exception {
    MockTime time = new MockTime(0, 0, 0);
    Scheduler scheduler = new MockScheduler(time);
    try (MiniTrogdorCluster cluster = new MiniTrogdorCluster.Builder().
            addCoordinator("node01").
            addAgent("node02").
            scheduler(scheduler).
            build()) {
        new ExpectedTasks().waitFor(cluster.coordinatorClient());

        NoOpTaskSpec fooSpec = new NoOpTaskSpec(1, 2);
        cluster.coordinatorClient().createTask(
            new CreateTaskRequest("foo", fooSpec));
        new ExpectedTasks().
            addTask(new ExpectedTaskBuilder("foo").
                taskState(new TaskPending(fooSpec)).
                build()).
            waitFor(cluster.coordinatorClient());

        // Re-creating a task with the same arguments is not an error.
        cluster.coordinatorClient().createTask(
            new CreateTaskRequest("foo", fooSpec));

        // Re-creating a task with different arguments gives a RequestConflictException.
        NoOpTaskSpec barSpec = new NoOpTaskSpec(1000, 2000);
        assertThrows(RequestConflictException.class, () -> cluster.coordinatorClient().createTask(
            new CreateTaskRequest("foo", barSpec)),
            "Recreating task with different task spec is not allowed");

        time.sleep(2);
        new ExpectedTasks().
            addTask(new ExpectedTaskBuilder("foo").
                taskState(new TaskRunning(fooSpec, 2, new TextNode("active"))).
                workerState(new WorkerRunning("foo", fooSpec, 2, new TextNode("active"))).
                build()).
            waitFor(cluster.coordinatorClient()).
            waitFor(cluster.agentClient("node02"));

        time.sleep(3);
        new ExpectedTasks().
            addTask(new ExpectedTaskBuilder("foo").
                taskState(new TaskDone(fooSpec, 2, 5, "", false, new TextNode("done"))).
                build()).
            waitFor(cluster.coordinatorClient());
    }
}
/**
 * Returns the first cluster id found across the per-log-directory meta properties, in the
 * map's value iteration order, or empty when none of the directories carries one.
 */
public Optional<String> clusterId() {
    return logDirProps.values().stream()
        .map(MetaProperties::clusterId)
        .filter(Optional::isPresent)
        .map(Optional::get)
        .findFirst();
}
/** Verifies that clusterId() on the FOO fixture resolves to "fooClusterId". */
@Test public void testClusterIdForFoo() { assertEquals(Optional.of("fooClusterId"), FOO.clusterId()); }
/**
 * Null-safe equality check for two character sequences: two nulls are equal, a null and a
 * non-null are not, otherwise the first sequence's {@code equals} decides.
 * Note: relies on the concrete CharSequence's equals(), so two different CharSequence
 * implementations with identical content may still compare unequal.
 */
public static boolean equals(CharSequence s1, CharSequence s2) {
    if (s1 == null) {
        return s2 == null;
    }
    return s1.equals(s2);
}
/** Verifies StringUtils.equals over the null/null, null/empty, empty/empty, differing and identical string cases. */
@Test public void testEquals() { Assert.assertTrue(StringUtils.equals(null, null)); Assert.assertFalse(StringUtils.equals(null, "")); Assert.assertFalse(StringUtils.equals("", null)); Assert.assertTrue(StringUtils.equals("", "")); Assert.assertFalse(StringUtils.equals("1", "2")); Assert.assertTrue(StringUtils.equals("1", "1")); }
/**
 * Returns true when the event's logger name starts with one of the SAFE_LOGGERS prefixes,
 * i.e. its output is considered safe and needs no further sanitization.
 */
protected boolean isLoggerSafe(ILoggingEvent event) {
    final String loggerName = event.getLoggerName();
    for (final String safePrefix : SAFE_LOGGERS) {
        if (loggerName.startsWith(safePrefix)) {
            return true;
        }
    }
    return false;
}
/** Verifies that a logger name outside the safe-logger prefixes makes isLoggerSafe return false. */
@Test void isLoggerSafeShouldReturnFalseWhenLoggerNameDoesNotStartWithSafeLogger() { ILoggingEvent event = mock(ILoggingEvent.class); when(event.getLoggerName()).thenReturn("com.mycompany.myapp.example.Logger"); CRLFLogConverter converter = new CRLFLogConverter(); boolean result = converter.isLoggerSafe(event); assertFalse(result); }
/**
 * Enriches a device-originated message with the device's credentials (type plus value) in
 * either the message data or metadata, depending on the configured fetch target. Fails the
 * message when the originator is not a DEVICE or its credentials cannot be found. Token and
 * X.509 credentials are written as plain text; other types are JSON-serialized.
 */
@Override public void onMsg(TbContext ctx, TbMsg msg) throws ExecutionException, InterruptedException, TbNodeException { var originator = msg.getOriginator(); var msgDataAsObjectNode = TbMsgSource.DATA.equals(fetchTo) ? getMsgDataAsObjectNode(msg) : null; if (!EntityType.DEVICE.equals(originator.getEntityType())) { ctx.tellFailure(msg, new RuntimeException("Unsupported originator type: " + originator.getEntityType() + "!")); return; } var deviceId = new DeviceId(msg.getOriginator().getId()); var deviceCredentials = ctx.getDeviceCredentialsService().findDeviceCredentialsByDeviceId(ctx.getTenantId(), deviceId); if (deviceCredentials == null) { ctx.tellFailure(msg, new RuntimeException("Failed to get Device Credentials for device: " + deviceId + "!")); return; } var credentialsType = deviceCredentials.getCredentialsType(); var credentialsInfo = ctx.getDeviceCredentialsService().toCredentialsInfo(deviceCredentials); var metaData = msg.getMetaData().copy(); if (TbMsgSource.METADATA.equals(fetchTo)) { metaData.putValue(CREDENTIALS_TYPE, credentialsType.name()); if (credentialsType.equals(DeviceCredentialsType.ACCESS_TOKEN) || credentialsType.equals(DeviceCredentialsType.X509_CERTIFICATE)) { metaData.putValue(CREDENTIALS, credentialsInfo.asText()); } else { metaData.putValue(CREDENTIALS, JacksonUtil.toString(credentialsInfo)); } } else if (TbMsgSource.DATA.equals(fetchTo)) { msgDataAsObjectNode.put(CREDENTIALS_TYPE, credentialsType.name()); msgDataAsObjectNode.set(CREDENTIALS, credentialsInfo); } TbMsg transformedMsg = transformMessage(msg, msgDataAsObjectNode, metaData); ctx.tellSuccess(transformedMsg); }
/**
 * Verifies that a valid device-originated message is enriched with the device credentials
 * ("credentials" and "credentialsType" keys) in its metadata and forwarded via tellSuccess.
 */
@Test
void givenValidMsg_whenOnMsg_thenVerifyOutput() throws Exception {
    // GIVEN
    doReturn(deviceCredentialsServiceMock).when(ctxMock).getDeviceCredentialsService();
    doAnswer(invocation -> {
        DeviceCredentials deviceCredentials = new DeviceCredentials();
        deviceCredentials.setCredentialsType(DeviceCredentialsType.ACCESS_TOKEN);
        return deviceCredentials;
    }).when(deviceCredentialsServiceMock).findDeviceCredentialsByDeviceId(any(), any());
    doAnswer(invocation -> JacksonUtil.newObjectNode()).when(deviceCredentialsServiceMock).toCredentialsInfo(any());

    // WHEN
    node.onMsg(ctxMock, getTbMsg(deviceId));

    // THEN
    var newMsgCaptor = ArgumentCaptor.forClass(TbMsg.class);
    verify(ctxMock, times(1)).tellSuccess(newMsgCaptor.capture());
    verify(ctxMock, never()).tellFailure(any(), any());
    verify(deviceCredentialsServiceMock, times(1)).findDeviceCredentialsByDeviceId(any(), any());

    var newMsg = newMsgCaptor.getValue();
    assertThat(newMsg).isNotNull();
    assertThat(newMsg.getMetaData().getData().containsKey("credentials")).isEqualTo(true);
    assertThat(newMsg.getMetaData().getData().containsKey("credentialsType")).isEqualTo(true);
}
/**
 * Converts raw JSON bytes from Kafka into Connect data. Null input (a tombstone) maps to
 * SchemaAndValue.NULL. When schemas are enabled the payload must be an envelope object with
 * exactly the "schema" and "payload" fields; when disabled, a schema-less envelope is
 * synthesized around the raw value.
 *
 * @throws DataException on deserialization failure or a malformed envelope
 */
@Override
public SchemaAndValue toConnectData(String topic, byte[] value) {
    JsonNode jsonValue;

    // This handles a tombstone message
    if (value == null) {
        return SchemaAndValue.NULL;
    }

    try {
        jsonValue = deserializer.deserialize(topic, value);
    } catch (SerializationException e) {
        throw new DataException("Converting byte[] to Kafka Connect data failed due to serialization error: ", e);
    }

    if (config.schemasEnabled() && (!jsonValue.isObject() || jsonValue.size() != 2 || !jsonValue.has(JsonSchema.ENVELOPE_SCHEMA_FIELD_NAME) || !jsonValue.has(JsonSchema.ENVELOPE_PAYLOAD_FIELD_NAME)))
        throw new DataException("JsonConverter with schemas.enable requires \"schema\" and \"payload\" fields and may not contain additional fields." + " If you are trying to deserialize plain JSON data, set schemas.enable=false in your converter configuration.");

    // The deserialized data should either be an envelope object containing the schema and the payload or the schema
    // was stripped during serialization and we need to fill in an all-encompassing schema.
    if (!config.schemasEnabled()) {
        ObjectNode envelope = JSON_NODE_FACTORY.objectNode();
        envelope.set(JsonSchema.ENVELOPE_SCHEMA_FIELD_NAME, null);
        envelope.set(JsonSchema.ENVELOPE_PAYLOAD_FIELD_NAME, jsonValue);
        jsonValue = envelope;
    }

    Schema schema = asConnectSchema(jsonValue.get(JsonSchema.ENVELOPE_SCHEMA_FIELD_NAME));
    return new SchemaAndValue(
        schema,
        convertToConnect(schema, jsonValue.get(JsonSchema.ENVELOPE_PAYLOAD_FIELD_NAME), config)
    );
}
/** Verifies that a JSON numeric payload with a Decimal logical schema (scale 2) converts to the expected BigDecimal 1.56. */
@Test public void numericDecimalToConnect() { BigDecimal reference = new BigDecimal(new BigInteger("156"), 2); Schema schema = Decimal.schema(2); String msg = "{ \"schema\": { \"type\": \"bytes\", \"name\": \"org.apache.kafka.connect.data.Decimal\", \"version\": 1, \"parameters\": { \"scale\": \"2\" } }, \"payload\": 1.56 }"; SchemaAndValue schemaAndValue = converter.toConnectData(TOPIC, msg.getBytes()); assertEquals(schema, schemaAndValue.schema()); assertEquals(reference, schemaAndValue.value()); }
/**
 * Converts an ASCII char array to its byte representation, scrubbing the
 * source array (overwriting every position with '\0') as it copies so the
 * caller's sensitive characters do not linger in memory.
 *
 * @param chars ASCII characters to convert; cleared as a side effect
 * @return the byte values of the input characters
 */
public static byte[] asciiCharToBytes(char[] chars) {
    final byte[] result = new byte[chars.length];
    int index = 0;
    while (index < result.length) {
        result[index] = (byte) chars[index];
        chars[index] = '\0'; // scrub the source slot immediately after copying
        index++;
    }
    return result;
}
// Conversion must produce the US-ASCII byte encoding of the string and scrub
// the source char array (positions overwritten with '\0').
@Test
public void testAsciiCharToBytes() {
    char[] asciiChars = asciiString.toCharArray();
    byte[] asciiBytes = SecretStoreUtil.asciiCharToBytes(asciiChars);
    assertThat(asciiBytes).isEqualTo(asciiString.getBytes(StandardCharsets.US_ASCII));
    assertThat(asciiChars).contains('\0');
}
/**
 * Validates that the (tenant, namespace) pair may be referenced from the given
 * source tenant/namespace, failing fast when it is not.
 *
 * @throws IllegalArgumentException if the namespace is not allowed
 */
public void checkAllowedNamespace(String tenant, String namespace, String fromTenant, String fromNamespace) {
    final boolean allowed = isAllowedNamespace(tenant, namespace, fromTenant, fromNamespace);
    if (allowed) {
        return;
    }
    throw new IllegalArgumentException("Namespace " + namespace + " is not allowed.");
}
// Smoke test: an allowed namespace must pass without throwing.
// NOTE(review): there is no negative case — consider adding a test that
// expects IllegalArgumentException for a disallowed namespace.
@Test
void checkAllowedNamespace() {
    flowService.checkAllowedNamespace("tenant", "namespace", "fromTenant", "fromNamespace");
}
/**
 * Renames a snapshot on the underlying file system, first translating the
 * chroot-relative path into the full path of the wrapped file system.
 */
@Override
public void renameSnapshot(Path path, String snapshotOldName, String snapshotNewName) throws IOException {
    final Path resolved = fullPath(path);
    myFs.renameSnapshot(resolved, snapshotOldName, snapshotNewName);
}
// renameSnapshot must translate the chroot-relative path to the full path on
// the underlying file system before delegating.
@Test(timeout = 30000)
public void testRenameSnapshot() throws Exception {
    Path snapRootPath = new Path("/snapPath");
    Path chRootedSnapRootPath = new Path(
        Path.getPathWithoutSchemeAndAuthority(chrootedTo), "snapPath");
    AbstractFileSystem baseFs = Mockito.spy(fc.getDefaultFileSystem());
    ChRootedFs chRootedFs = new ChRootedFs(baseFs, chrootedTo);
    Mockito.doNothing().when(baseFs)
        .renameSnapshot(chRootedSnapRootPath, "snapOldName", "snapNewName");
    chRootedFs.renameSnapshot(snapRootPath, "snapOldName", "snapNewName");
    // Verify the delegation used the resolved (chrooted) path.
    Mockito.verify(baseFs).renameSnapshot(chRootedSnapRootPath, "snapOldName",
        "snapNewName");
}
/**
 * Binds the evaluation environment (request content, request context and
 * state store) into the script engine. Convenience overload of the
 * five-argument variant; the final argument is passed as null.
 */
public static void bindEnvironment(ScriptEngine engine, String requestContent, Map<String, Object> requestContext, StateStore stateStore) {
    // Build a map of header values.
    bindEnvironment(engine, requestContent, requestContext, stateStore, null);
}
// Verifies that the "store" binding exposed to Groovy scripts supports
// get/put/delete and that mutations are visible on the backing StateStore.
@Test
void testStateStoreIsBoundAndAccessed() {
    String script = """
        def foo = store.get("foo");
        def bar = store.put("bar", "barValue");
        store.delete("baz");
        return foo;
        """;
    // Minimal in-memory StateStore used as the script-facing store.
    StateStore store = new StateStore() {
        private final Map<String, String> map = new HashMap<>();
        @Override
        public void put(String key, String value) {
            map.put(key, value);
        }
        @Override
        public void put(String key, String value, int secondsTTL) {
            // TTL ignored in this in-memory fake.
            map.put(key, value);
        }
        @Nullable
        @Override
        public String get(String key) {
            return map.get(key);
        }
        @Override
        public void delete(String key) {
            map.remove(key);
        }
    };
    ScriptEngineManager sem = new ScriptEngineManager();
    Map<String, Object> context = new HashMap<>();
    store.put("foo", "fooValue");
    store.put("baz", "bazValue");
    try {
        // Evaluating request with script coming from operation dispatcher rules.
        ScriptEngine se = sem.getEngineByExtension("groovy");
        ScriptEngineBinder.bindEnvironment(se, "body", context, store);
        String result = (String) se.eval(script);
        assertEquals("fooValue", result);
        assertEquals("barValue", store.get("bar"));
        assertNull(store.get("baz"));
    } catch (Exception e) {
        fail("Exception should no be thrown");
    }
}
/**
 * Loads a data source from the given CSV file using a single response column.
 * Delegates to the multi-response overload with a singleton set.
 *
 * @param csvPath path of the CSV file to load
 * @param responseName name of the response (output) column
 * @return the loaded data source
 * @throws IOException if the file cannot be read
 */
public DataSource<T> loadDataSource(Path csvPath, String responseName) throws IOException {
    return loadDataSource(csvPath, Collections.singleton(responseName));
}
// A single-response CSV loaded through a multi-output factory must still give
// one label per row ("monkey", "monkey", "baboon", ...) and all four feature
// columns per example.
@Test
public void testLoadSingleOutputAsMultiOutput() throws IOException {
    URL path = CSVLoader.class.getResource("/org/tribuo/data/csv/test.csv");
    CSVLoader<MockMultiOutput> loader = new CSVLoader<>(new MockMultiOutputFactory());
    DataSource<MockMultiOutput> source = loader.loadDataSource(path, "RESPONSE");
    MutableDataset<MockMultiOutput> data = new MutableDataset<>(source);
    assertEquals(6, data.size());
    assertEquals("monkey", data.getExample(0).getOutput().getLabelString());
    assertEquals("monkey", data.getExample(1).getOutput().getLabelString());
    assertEquals("baboon", data.getExample(2).getOutput().getLabelString());
    for (Example<MockMultiOutput> x : data.getData()) {
        assertEquals(4, x.size());
    }
    //assertThrows(IllegalArgumentException.class, () -> loader.load(path, "RESPONSE"));
}
/**
 * Removes the given storage from this block's storage list.
 *
 * @param storage the storage to remove
 * @return true if the storage was found and removed, false if it was absent
 */
@Override
boolean removeStorage(DatanodeStorageInfo storage) {
    int dnIndex = findStorageInfoFromEnd(storage);
    if (dnIndex < 0) { // the node is not found
        return false;
    }
    // The storage must already be unlinked from the per-datanode block list.
    assert getPrevious(dnIndex) == null && getNext(dnIndex) == null :
        "Block is still in the list and must be removed first.";
    // set the triplet to null
    setStorageInfo(dnIndex, null);
    setNext(dnIndex, null);
    setPrevious(dnIndex, null);
    indices[dnIndex] = -1; // mark the slot as free for reuse
    return true;
}
// Exercises add/remove of storages on a striped block: removed slots must
// report index -1, re-reported blocks may grow capacity and reuse freed
// slots first, and trailing freed slots must be empty.
@Test
public void testRemoveStorage() {
    // first add TOTAL_NUM_BLOCKS into the BlockInfoStriped
    DatanodeStorageInfo[] storages = DFSTestUtil.createDatanodeStorageInfos(
        totalBlocks);
    Block[] blocks = createReportedBlocks(totalBlocks);
    for (int i = 0; i < storages.length; i++) {
        info.addStorage(storages[i], blocks[i]);
    }
    // remove two storages
    info.removeStorage(storages[0]);
    info.removeStorage(storages[2]);
    // check
    Assert.assertEquals(totalBlocks, info.getCapacity());
    Assert.assertEquals(totalBlocks - 2, info.numNodes());
    byte[] indices = (byte[]) Whitebox.getInternalState(info, "indices");
    for (int i = 0; i < storages.length; i++) {
        int index = info.findStorageInfo(storages[i]);
        if (i != 0 && i != 2) {
            Assert.assertEquals(i, index);
            Assert.assertEquals(index, indices[index]);
        } else {
            // removed slots are marked free
            Assert.assertEquals(-1, index);
            Assert.assertEquals(-1, indices[i]);
        }
    }
    // the same block is reported from another storage
    DatanodeStorageInfo[] storages2 = DFSTestUtil.createDatanodeStorageInfos(
        totalBlocks * 2);
    for (int i = totalBlocks; i < storages2.length; i++) {
        info.addStorage(storages2[i], blocks[i % totalBlocks]);
    }
    // now we should have 8 storages
    Assert.assertEquals(totalBlocks * 2 - 2, info.numNodes());
    Assert.assertEquals(totalBlocks * 2 - 2, info.getCapacity());
    indices = (byte[]) Whitebox.getInternalState(info, "indices");
    Assert.assertEquals(totalBlocks * 2 - 2, indices.length);
    int j = totalBlocks;
    for (int i = totalBlocks; i < storages2.length; i++) {
        int index = info.findStorageInfo(storages2[i]);
        if (i == totalBlocks || i == totalBlocks + 2) {
            // the two freed slots are reused before capacity grows
            Assert.assertEquals(i - totalBlocks, index);
        } else {
            Assert.assertEquals(j++, index);
        }
    }
    // remove the storages from storages2
    for (int i = 0; i < totalBlocks; i++) {
        info.removeStorage(storages2[i + totalBlocks]);
    }
    // now we should have 3 storages
    Assert.assertEquals(totalBlocks - 2, info.numNodes());
    Assert.assertEquals(totalBlocks * 2 - 2, info.getCapacity());
    indices = (byte[]) Whitebox.getInternalState(info, "indices");
    Assert.assertEquals(totalBlocks * 2 - 2, indices.length);
    for (int i = 0; i < totalBlocks; i++) {
        if (i == 0 || i == 2) {
            int index = info.findStorageInfo(storages2[i + totalBlocks]);
            Assert.assertEquals(-1, index);
        } else {
            int index = info.findStorageInfo(storages[i]);
            Assert.assertEquals(i, index);
        }
    }
    // trailing slots past the remaining storages must be empty
    for (int i = totalBlocks; i < totalBlocks * 2 - 2; i++) {
        Assert.assertEquals(-1, indices[i]);
        Assert.assertNull(info.getDatanode(i));
    }
}
/**
 * Returns the singleton {@link CpeMemoryIndex} instance.
 */
public static CpeMemoryIndex getInstance() {
    return INSTANCE;
}
// The singleton accessor must always return a non-null instance.
@Test
public void testGetInstance() {
    CpeMemoryIndex result = CpeMemoryIndex.getInstance();
    assertNotNull(result);
}
/**
 * Parses an XPath-subset expression into a {@link Matcher}.
 *
 * <p>Supported forms: "/text()", "/node()", "/descendant::node()" (plus the
 * malformed single-colon spelling, kept for compatibility), attribute
 * selectors ("/@*", "/@name"), wildcard child steps ("/*"), descendant steps
 * ("//..."), and named element steps ("/name/..."). Unknown namespace
 * prefixes or unsupported syntax yield {@link Matcher#FAIL}.
 */
public Matcher parse(String xpath) {
    if (xpath.equals("/text()")) {
        return TextMatcher.INSTANCE;
    } else if (xpath.equals("/node()")) {
        return NodeMatcher.INSTANCE;
    } else if (xpath.equals("/descendant::node()")
            || xpath.equals("/descendant:node()")) { // for compatibility
        return new CompositeMatcher(
                TextMatcher.INSTANCE,
                new ChildMatcher(new SubtreeMatcher(NodeMatcher.INSTANCE)));
    } else if (xpath.equals("/@*")) {
        return AttributeMatcher.INSTANCE;
    } else if (xpath.length() == 0) {
        // Empty remainder: match the current element itself.
        return ElementMatcher.INSTANCE;
    } else if (xpath.startsWith("/@")) {
        // Named attribute, optionally namespace-prefixed.
        String name = xpath.substring(2);
        String prefix = null;
        int colon = name.indexOf(':');
        if (colon != -1) {
            prefix = name.substring(0, colon);
            name = name.substring(colon + 1);
        }
        if (prefixes.containsKey(prefix)) {
            return new NamedAttributeMatcher(prefixes.get(prefix), name);
        } else {
            // Unknown namespace prefix: never matches.
            return Matcher.FAIL;
        }
    } else if (xpath.startsWith("/*")) {
        return new ChildMatcher(parse(xpath.substring(2)));
    } else if (xpath.startsWith("///")) {
        return Matcher.FAIL;
    } else if (xpath.startsWith("//")) {
        return new SubtreeMatcher(parse(xpath.substring(1)));
    } else if (xpath.startsWith("/")) {
        // Named element step, optionally namespace-prefixed; recurse on the rest.
        int slash = xpath.indexOf('/', 1);
        if (slash == -1) {
            slash = xpath.length();
        }
        String name = xpath.substring(1, slash);
        String prefix = null;
        int colon = name.indexOf(':');
        if (colon != -1) {
            prefix = name.substring(0, colon);
            name = name.substring(colon + 1);
        }
        if (prefixes.containsKey(prefix)) {
            return new NamedElementMatcher(
                    prefixes.get(prefix), name, parse(xpath.substring(slash)));
        } else {
            return Matcher.FAIL;
        }
    } else {
        return Matcher.FAIL;
    }
}
// "/@*" must match any attribute (any name, any namespace) but neither text
// nor elements, and descending from it must fail.
@Test
public void testAnyAttribute() {
    Matcher matcher = parser.parse("/@*");
    assertFalse(matcher.matchesText());
    assertFalse(matcher.matchesElement());
    assertTrue(matcher.matchesAttribute(null, "name"));
    assertTrue(matcher.matchesAttribute(NS, "name"));
    assertTrue(matcher.matchesAttribute(NS, "eman"));
    assertEquals(Matcher.FAIL, matcher.descend(NS, "name"));
}
@VisibleForTesting JobMeta filterPrivateDatabases( JobMeta jobMeta ) { Set<String> privateDatabases = jobMeta.getPrivateDatabases(); if ( privateDatabases != null ) { // keep only private transformation databases for ( Iterator<DatabaseMeta> it = jobMeta.getDatabases().iterator(); it.hasNext(); ) { DatabaseMeta databaseMeta = it.next(); String databaseName = databaseMeta.getName(); if ( !privateDatabases.contains( databaseName ) && !jobMeta.isDatabaseConnectionUsed( databaseMeta ) ) { it.remove(); } } } return jobMeta; }
// A null privateDatabases set means no filtering: the database count must be
// unchanged after filterPrivateDatabases.
@Test
public void filterPrivateDatabasesNullPrivateDatabaseTest() {
    IUnifiedRepository purMock = mock( IUnifiedRepository.class );
    JobMeta jobMeta = new JobMeta( );
    jobMeta.setDatabases( getDummyDatabases() );
    jobMeta.setPrivateDatabases( null );
    StreamToJobNodeConverter jobConverter = new StreamToJobNodeConverter( purMock );
    assertEquals( jobMeta.getDatabases().size(), jobConverter.filterPrivateDatabases( jobMeta ).getDatabases().size() );
}
/**
 * Decides whether the given service speaks plain (non-TLS) HTTP.
 *
 * <p>For service names present in the known-web-service table the table's
 * verdict is used; otherwise falls back to the generic web-service heuristic.
 * In both cases any advertised SSL/TLS support disqualifies the service.
 */
public static boolean isPlainHttp(NetworkService networkService) {
  checkNotNull(networkService);
  boolean noSslSupport = networkService.getSupportedSslVersionsCount() == 0;
  String serviceName = Ascii.toLowerCase(networkService.getServiceName());
  if (IS_PLAIN_HTTP_BY_KNOWN_WEB_SERVICE_NAME.containsKey(serviceName)) {
    // Known service: trust the lookup table, but TLS support always disqualifies.
    return IS_PLAIN_HTTP_BY_KNOWN_WEB_SERVICE_NAME.get(serviceName) && noSslSupport;
  }
  // Unknown service name: fall back to the generic web-service check.
  return isWebService(networkService) && noSslSupport;
}
// A service named "http" with no advertised SSL support must be classified as
// plain HTTP.
@Test
public void isPlainHttp_whenPlainHttpService_returnsTrue() {
    assertThat(
        NetworkServiceUtils.isPlainHttp(
            NetworkService.newBuilder().setServiceName("http").build()))
        .isTrue();
}
/**
 * Looks up a single timeline domain by id in the leveldb store.
 *
 * @param domainId id of the domain to fetch
 * @return the domain as decoded by getTimelineDomain
 * @throws IOException if the underlying DB access fails
 */
@Override
public TimelineDomain getDomain(String domainId) throws IOException {
    LeveldbIterator iterator = null;
    try {
        // Seek directly to the domain-entry prefix for this id.
        byte[] prefix = KeyBuilder.newInstance()
            .add(DOMAIN_ENTRY_PREFIX).add(domainId).getBytesForLookup();
        iterator = new LeveldbIterator(db);
        iterator.seek(prefix);
        return getTimelineDomain(iterator, domainId, prefix);
    } catch(DBException e) {
        // Surface DB errors as IOException per the interface contract.
        throw new IOException(e);
    } finally {
        // Always release the iterator, even on failure.
        IOUtils.cleanupWithLogger(LOG, iterator);
    }
}
// Re-runs the base-class getDomain test against this store implementation.
@Test
public void testGetDomain() throws IOException {
    super.testGetDomain();
}
/**
 * Returns a retriever that authenticates against Google registries
 * (*gcr.io / *docker.pkg.dev) using Google Application Default Credentials,
 * exchanging them for a short-lived OAuth2 access token presented with the
 * "oauth2accesstoken" username. Yields empty for non-Google registries or
 * when ADC is unavailable.
 */
public CredentialRetriever googleApplicationDefaultCredentials() {
    return () -> {
        try {
            // Only Google registries accept ADC-derived access tokens.
            if (imageReference.getRegistry().endsWith("gcr.io")
                || imageReference.getRegistry().endsWith("docker.pkg.dev")) {
                GoogleCredentials googleCredentials = googleCredentialsProvider.get();
                logger.accept(LogEvent.info("Google ADC found"));
                if (googleCredentials.createScopedRequired()) { // not scoped if service account
                    // The short-lived OAuth2 access token to be generated from the service account with
                    // refreshIfExpired() below will have one-hour expiry (as of Aug 2019). Instead of using
                    // an access token, it is technically possible to use the service account private key to
                    // auth with GCR, but it does not worth writing complex code to achieve that.
                    logger.accept(LogEvent.info("ADC is a service account. Setting GCS read-write scope"));
                    List<String> scope = Collections.singletonList(OAUTH_SCOPE_STORAGE_READ_WRITE);
                    googleCredentials = googleCredentials.createScoped(scope);
                }
                googleCredentials.refreshIfExpired();
                logGotCredentialsFrom("Google Application Default Credentials");
                AccessToken accessToken = googleCredentials.getAccessToken();
                // https://cloud.google.com/container-registry/docs/advanced-authentication#access_token
                return Optional.of(Credential.from("oauth2accesstoken", accessToken.getTokenValue()));
            }
        } catch (IOException ex) {
            // Includes the case where ADC is simply not available.
            logger.accept(
                LogEvent.info("ADC not present or error fetching access token: " + ex.getMessage()));
        }
        return Optional.empty();
    };
}
// With end-user (non-service-account) ADC, the retriever must return an
// "oauth2accesstoken" credential without re-scoping, and log the expected
// lifecycle messages.
@Test
public void testGoogleApplicationDefaultCredentials_endUserCredentials()
    throws CredentialRetrievalException {
    CredentialRetrieverFactory credentialRetrieverFactory =
        createCredentialRetrieverFactory("awesome.gcr.io", "repo");
    Credential credential =
        credentialRetrieverFactory.googleApplicationDefaultCredentials().retrieve().get();
    Assert.assertEquals("oauth2accesstoken", credential.getUsername());
    Assert.assertEquals("my-token", credential.getPassword());
    // End-user credentials must never be scoped.
    Mockito.verify(mockGoogleCredentials, Mockito.never()).createScoped(Mockito.anyString());
    Mockito.verify(mockLogger).accept(LogEvent.info("Google ADC found"));
    Mockito.verify(mockLogger)
        .accept(
            LogEvent.lifecycle(
                "Using Google Application Default Credentials for awesome.gcr.io/repo"));
    Mockito.verifyNoMoreInteractions(mockLogger);
}
/**
 * Builds a unified partition struct type covering every partition spec the
 * table has ever had (all field ids across all specs).
 */
public static StructType partitionType(Table table) {
    Collection<PartitionSpec> specs = table.specs().values();
    return buildPartitionProjectionType("table partition", specs, allFieldIds(specs));
}
// Evolving a v1 table to a conflicting spec must make partitionType fail with
// a "Conflicting partition fields" ValidationException.
@Test
public void testPartitionTypeWithIncompatibleSpecEvolution() {
    TestTables.TestTable table =
        TestTables.create(tableDir, "test", SCHEMA, BY_DATA_SPEC, V1_FORMAT_VERSION);
    PartitionSpec newSpec = PartitionSpec.builderFor(table.schema()).identity("category").build();
    TableOperations ops = ((HasTableOperations) table).operations();
    TableMetadata current = ops.current();
    ops.commit(current, current.updatePartitionSpec(newSpec));
    assertThat(table.specs()).hasSize(2);
    assertThatThrownBy(() -> Partitioning.partitionType(table))
        .isInstanceOf(ValidationException.class)
        .hasMessageStartingWith("Conflicting partition fields");
}
/**
 * Rebuilds the given value against this translator's target schema so its
 * shape matches what the Schema Registry format expects. Non-struct values
 * pass through unchanged.
 */
@Override
public Object toConnectRow(final Object ksqlData) {
    /*
     * Reconstruct ksqlData struct with given schema and try to put original data in it.
     * Schema may have more fields than ksqlData, don't put those field by default. If needed by
     * some format like Avro, create new subclass to handle
     */
    if (ksqlData instanceof Struct) {
        final Schema schema = getSchema();
        validate(((Struct) ksqlData).schema(), schema);
        final Struct struct = new Struct(schema);
        final Struct source = (Struct) ksqlData;
        // Copy only the fields present in the source struct; extra target
        // fields are deliberately left unset.
        for (final Field sourceField : source.schema().fields()) {
            final Object value = source.get(sourceField);
            struct.put(sourceField.name(), value);
        }
        return struct;
    }
    return ksqlData;
}
// Non-struct inputs must pass through toConnectRow unchanged (same instance).
@Test
public void shouldNotTransformOtherType() {
    // Given:
    final Schema schema = SchemaBuilder.struct()
        .field("f1", SchemaBuilder.OPTIONAL_STRING_SCHEMA)
        .field("f2", SchemaBuilder.OPTIONAL_INT32_SCHEMA)
        .field("f3", SchemaBuilder.OPTIONAL_INT64_SCHEMA)
        .build();
    final List<Integer> list = Collections.emptyList();
    // When:
    final Object object = new ConnectSRSchemaDataTranslator(schema).toConnectRow(list);
    // Then:
    assertThat(object, sameInstance(list));
}
public Map<String, String> getAllConfigPropsWithSecretsObfuscated() { final Map<String, String> allPropsCleaned = new HashMap<>(); // build a properties map with obfuscated values for sensitive configs. // Obfuscation is handled by ConfigDef.convertToString allPropsCleaned.putAll(getKsqlConfigPropsWithSecretsObfuscated()); allPropsCleaned.putAll( getKsqlStreamConfigPropsWithSecretsObfuscated().entrySet().stream().collect( Collectors.toMap( e -> KSQL_STREAMS_PREFIX + e.getKey(), Map.Entry::getValue ) ) ); return Collections.unmodifiableMap(allPropsCleaned); }
// Unknown/unresolved server properties must not leak into the obfuscated
// config map.
@Test
public void shouldNotListUnresolvedServerConfig() {
    // Given:
    final KsqlConfig config = new KsqlConfig(ImmutableMap.of(
        "some.random.property", "might be sensitive"
    ));
    // When:
    final Map<String, String> result = config.getAllConfigPropsWithSecretsObfuscated();
    // Then:
    assertThat(result.get("some.random.property"), is(nullValue()));
}
/**
 * Returns the configured SAML identity provider certificate.
 *
 * @throws IllegalArgumentException if the certificate property is not set
 */
String getCertificate() {
    return configuration.get(CERTIFICATE).orElseThrow(() -> new IllegalArgumentException("Identity provider certificate is missing"));
}
// getCertificate must return the value of the secured certificate property.
@Test
public void return_certificate() {
    settings.setProperty("sonar.auth.saml.certificate.secured", "ABCDEFG");
    assertThat(underTest.getCertificate()).isEqualTo("ABCDEFG");
}
/**
 * Authenticates by username/password, installs the authentication into the
 * security context and the HTTP session, and returns the principal.
 * NOTE(review): credentials arrive as plain request parameters — ensure this
 * endpoint is only served over HTTPS.
 */
@ApiOperation("账号登录")
@PostMapping("/usernameLogin")
public ApiResult usernameLogin(@RequestParam String username, @RequestParam String password){
    UsernamePasswordAuthenticationToken token = new UsernamePasswordAuthenticationToken(username, password);
    //Authentication authentication = authenticationManagerBuilder.getObject().authenticate(token);
    Authentication authentication = authenticationManager.authenticate(token);
    SecurityContextHolder.getContext().setAuthentication(authentication);
    // Persist the security context in the session so later requests stay authenticated.
    RequestContextHolderSupport.getRequest().getSession().setAttribute(HttpSessionSecurityContextRepository.SPRING_SECURITY_CONTEXT_KEY, SecurityContextHolder.getContext());
    return ApiResult.success().message("登陆成功").data(authentication.getPrincipal());
}
/**
 * Verifies that both password encoders produce hashes matching the raw
 * password. The previous version only printed the encoded values and asserted
 * nothing, so it could never fail.
 */
@Test
void usernameLogin() {
    final String rawPassword = "aa0000";
    // BCrypt output is salted, so verify via matches() rather than string equality.
    final String bcryptHash = new BCryptPasswordEncoder().encode(rawPassword);
    if (!new BCryptPasswordEncoder().matches(rawPassword, bcryptHash)) {
        throw new AssertionError("BCrypt hash does not match the raw password");
    }
    // The delegating encoder embeds its algorithm id ({bcrypt}...) in the hash,
    // so a freshly created encoder can still verify it.
    final String delegatingHash =
        PasswordEncoderFactories.createDelegatingPasswordEncoder().encode(rawPassword);
    if (!PasswordEncoderFactories.createDelegatingPasswordEncoder().matches(rawPassword, delegatingHash)) {
        throw new AssertionError("Delegating encoder hash does not match the raw password");
    }
}
/**
 * Executes a replication task and classifies the outcome.
 *
 * @return Success on a 2xx response, Congestion on 503 or read timeouts
 *         (retry later with backoff), TransientError on network connect
 *         failures, PermanentError for every other failure
 */
@Override
public ProcessingResult process(ReplicationTask task) {
    try {
        EurekaHttpResponse<?> httpResponse = task.execute();
        int statusCode = httpResponse.getStatusCode();
        Object entity = httpResponse.getEntity();
        if (logger.isDebugEnabled()) {
            logger.debug("Replication task {} completed with status {}, (includes entity {})", task.getTaskName(), statusCode, entity != null);
        }
        if (isSuccess(statusCode)) {
            task.handleSuccess();
        } else if (statusCode == 503) {
            // Peer overloaded: back off and retry rather than fail the task.
            logger.debug("Server busy (503) reply for task {}", task.getTaskName());
            return ProcessingResult.Congestion;
        } else {
            task.handleFailure(statusCode, entity);
            return ProcessingResult.PermanentError;
        }
    } catch (Throwable e) {
        if (maybeReadTimeOut(e)) {
            logger.error("It seems to be a socket read timeout exception, it will retry later. if it continues to happen and some eureka node occupied all the cpu time, you should set property 'eureka.server.peer-node-read-timeout-ms' to a bigger value", e);
            //read timeout exception is more Congestion then TransientError, return Congestion for longer delay
            return ProcessingResult.Congestion;
        } else if (isNetworkConnectException(e)) {
            logNetworkErrorSample(task, e);
            return ProcessingResult.TransientError;
        } else {
            logger.error("{}: {} Not re-trying this exception because it does not seem to be a network exception", peerId, task.getTaskName(), e);
            return ProcessingResult.PermanentError;
        }
    }
    return ProcessingResult.Success;
}
// A single network failure on a non-batchable heartbeat task must be
// classified as TransientError and leave the task Pending (retry-eligible).
@Test
public void testNonBatchableTaskNetworkFailureHandling() throws Exception {
    TestableInstanceReplicationTask task =
        aReplicationTask().withAction(Action.Heartbeat).withNetworkFailures(1).build();
    ProcessingResult status = replicationTaskProcessor.process(task);
    assertThat(status, is(ProcessingResult.TransientError));
    assertThat(task.getProcessingState(), is(ProcessingState.Pending));
}
/**
 * UDF: returns the values of the given map as a list.
 *
 * @param input the map whose values are collected; may be null
 * @return a list of the map's values, or null when the input map is null
 */
@Udf
public <T> List<T> mapValues(final Map<String, T> input) {
    // A null map maps to a null list rather than an empty one.
    return input == null ? null : Lists.newArrayList(input.values());
}
// An empty map must yield an empty list (not null).
@Test
public void shouldReturnEmptyListFromEmptyMap() {
    final Map<String, BigDecimal> input = Maps.newHashMap();
    assertThat(udf.mapValues(input), empty());
}
/**
 * Creates a thread-safe metric with the given name and the COUNT unit.
 */
public static Metric threadSafeMetric(String name) {
    return MetricsImpl.threadSafeMetric(name, Unit.COUNT);
}
// End-to-end check that thread-safe metrics incremented inside async service
// calls are aggregated correctly across the whole job.
@Test
public void usingServiceAsync() {
    int inputSize = 100_000;
    Integer[] inputs = new Integer[inputSize];
    Arrays.setAll(inputs, i -> i);
    pipeline.readFrom(TestSources.items(inputs))
        .addTimestamps(i -> i, 0L)
        .mapUsingServiceAsync(
            nonSharedService(pctx -> 0L),
            (ctx, l) -> {
                // Metrics must be the thread-safe variant: they are updated
                // from the async executor's threads.
                Metric dropped = Metrics.threadSafeMetric("dropped");
                Metric total = Metrics.threadSafeMetric("total");
                return CompletableFuture.supplyAsync(
                    () -> {
                        boolean pass = l % 2L == ctx;
                        if (!pass) {
                            dropped.increment();
                        }
                        total.increment();
                        return l;
                    }
                );
            }
        )
        .writeTo(Sinks.noop());
    Job job = instance.getJet().newJob(pipeline, JOB_CONFIG_WITH_METRICS);
    // "total" should reach the input size; "dropped" exactly half of it.
    assertTrueEventually(() -> assertEquals(inputSize,
        job.getMetrics().get("total").stream().mapToLong(Measurement::value).sum()));
    assertTrueEventually(() -> assertEquals(inputSize / 2,
        job.getMetrics().get("dropped").stream().mapToLong(Measurement::value).sum()));
    job.join();
}
/**
 * Retrieves the object's header-derived extended attributes, recording the
 * operation under the XATTR_GET_MAP invocation statistic.
 */
public Map<String, byte[]> getXAttrs(Path path) throws IOException {
    return retrieveHeaders(path, INVOCATION_XATTR_GET_MAP);
}
// Requesting a mix of known and unknown attribute names must return only the
// known ones, with correctly long-encoded values.
@Test
public void testGetFilteredXAttrs() throws Throwable {
    Map<String, byte[]> xAttrs = headerProcessing.getXAttrs(MAGIC_PATH,
        Lists.list(XA_MAGIC_MARKER, XA_CONTENT_LENGTH, "unknown"));
    Assertions.assertThat(xAttrs.keySet())
        .describedAs("Attribute keys")
        .containsExactlyInAnyOrder(XA_MAGIC_MARKER, XA_CONTENT_LENGTH);
    // and the values are good
    assertLongAttributeValue(
        XA_MAGIC_MARKER, xAttrs.get(XA_MAGIC_MARKER), MAGIC_LEN);
    assertLongAttributeValue(
        XA_CONTENT_LENGTH, xAttrs.get(XA_CONTENT_LENGTH), FILE_LENGTH);
}
/**
 * Creates a directory on the underlying file system, first resolving the
 * chroot-relative path to its full path.
 */
@Override
public void mkdir(final Path dir, final FsPermission permission, final boolean createParent)
    throws IOException, UnresolvedLinkException {
    final Path resolved = fullPath(dir);
    myFs.mkdir(resolved, permission, createParent);
}
// File and directory renames through the chrooted FileContext must be visible
// both in the chrooted view and on the underlying target file system.
@Test
public void testRename() throws IOException {
    // Rename a file
    fileContextTestHelper.createFile(fc, "/newDir/foo");
    fc.rename(new Path("/newDir/foo"), new Path("/newDir/fooBar"));
    Assert.assertFalse(exists(fc, new Path("/newDir/foo")));
    Assert.assertFalse(exists(fcTarget, new Path(chrootedTo,"newDir/foo")));
    Assert.assertTrue(isFile(fc, fileContextTestHelper.getTestRootPath(fc,"/newDir/fooBar")));
    Assert.assertTrue(isFile(fcTarget, new Path(chrootedTo,"newDir/fooBar")));
    // Rename a dir
    fc.mkdir(new Path("/newDir/dirFoo"), FileContext.DEFAULT_PERM, false);
    fc.rename(new Path("/newDir/dirFoo"), new Path("/newDir/dirFooBar"));
    Assert.assertFalse(exists(fc, new Path("/newDir/dirFoo")));
    Assert.assertFalse(exists(fcTarget, new Path(chrootedTo,"newDir/dirFoo")));
    Assert.assertTrue(isDir(fc, fileContextTestHelper.getTestRootPath(fc,"/newDir/dirFooBar")));
    Assert.assertTrue(isDir(fcTarget, new Path(chrootedTo,"newDir/dirFooBar")));
}
/**
 * Issues an asynchronous HTTP PUT with a JSON body.
 *
 * <p>Note: the caller-supplied header is mutated — its Content-Type is forced
 * to application/json before the request is sent.
 */
public <T> void putJson(String url, Header header, Query query, String body, Type responseType, Callback<T> callback) {
    final RequestHttpEntity entity =
        new RequestHttpEntity(header.setContentType(MediaType.APPLICATION_JSON), query, body);
    execute(url, HttpMethod.PUT, entity, responseType, callback);
}
// putJson must rewrite the Content-Type header to application/json (mutating
// the caller-supplied header) and issue the PUT through the request client.
@Test
void testPutJson() throws Exception {
    Header header = Header.newInstance().setContentType(MediaType.APPLICATION_XML);
    restTemplate.putJson(TEST_URL, header, "body", String.class, mockCallback);
    verify(requestClient).execute(any(), eq("PUT"), any(), any(), eq(mockCallback));
    assertEquals(MediaType.APPLICATION_JSON, header.getValue(HttpHeaderConsts.CONTENT_TYPE));
}