focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
@Override public void run() { try { // We kill containers until the kernel reports the OOM situation resolved // Note: If the kernel has a delay this may kill more than necessary while (true) { String status = cgroups.getCGroupParam( CGroupsHandler.CGroupController.MEMORY, "", CGROUP_PARAM_MEMORY_OOM_CONTROL); if (!status.contains(CGroupsHandler.UNDER_OOM)) { break; } boolean containerKilled = killContainer(); if (!containerKilled) { // This can happen, if SIGKILL did not clean up // non-PGID or containers or containers launched by other users // or if a process was put to the root YARN cgroup. throw new YarnRuntimeException( "Could not find any containers but CGroups " + "reserved for containers ran out of memory. " + "I am giving up"); } } } catch (ResourceHandlerException ex) { LOG.warn("Could not fetch OOM status. " + "This is expected at shutdown. Exiting.", ex); } }
@Test(expected = YarnRuntimeException.class) public void testExceptionThrownWithNoRunningContainersToKill() throws Exception { ConcurrentHashMap<ContainerId, Container> containers = new ConcurrentHashMap<>(); Container c1 = createContainer(1, true, 1L, false); containers.put(c1.getContainerId(), c1); Context context = mock(Context.class); when(context.getContainers()).thenReturn(containers); CGroupsHandler cGroupsHandler = mock(CGroupsHandler.class); when(cGroupsHandler.getCGroupParam( CGroupsHandler.CGroupController.MEMORY, "", CGROUP_PARAM_MEMORY_OOM_CONTROL)) .thenReturn("under_oom 1").thenReturn("under_oom 0"); DefaultOOMHandler handler = new DefaultOOMHandler(context, false) { @Override protected CGroupsHandler getCGroupsHandler() { return cGroupsHandler; } }; handler.run(); }
public static <T> List<LocalProperty<T>> grouped(Collection<T> columns) { return ImmutableList.of(new GroupingProperty<>(columns)); }
@Test public void testMoreRequiredGroupsThanActual() { List<LocalProperty<String>> actual = builder() .constant("b") .grouped("a") .grouped("d") .build(); assertMatch( actual, builder() .grouped("a") .grouped("b") .grouped("c") .grouped("d") .build(), Optional.empty(), Optional.empty(), Optional.of(grouped("c")), Optional.of(grouped("d"))); }
public static ParameterizedType parameterizedType(Class<?> rawType, Type... typeArguments) { var typeParamsCount = rawType.getTypeParameters().length; if (typeParamsCount == 0) { throw new IllegalArgumentException( String.format( "Cannot parameterize `%s` because it does not have any type parameters.", rawType.getTypeName())); } if (typeArguments.length != typeParamsCount) { throw new IllegalArgumentException( String.format( "Expected %d type arguments for `%s`, but got %d.", typeParamsCount, rawType.getTypeName(), typeArguments.length)); } for (Type arg : typeArguments) { if (arg instanceof Class<?> clazz) { if (clazz.isPrimitive()) { throw new IllegalArgumentException( String.format( "`%s.class` is not a valid type argument. Did you mean `%s.class`?", clazz, Reflection.toWrapperType(clazz).getSimpleName())); } } } try { return (ParameterizedType) TypeFactory.parameterizedClass(rawType, typeArguments); } catch (TypeArgumentNotInBoundException e) { throw new IllegalArgumentException( String.format( "Type argument `%s` for type parameter `%s` is not within bound `%s`.", e.getArgument().getTypeName(), e.getParameter().getTypeName(), e.getBound().getTypeName())); } }
@Test public void createParameterizedTypeForClassWithoutTypeParameters() { var t = catchThrowable(() -> Types.parameterizedType(String.class)); assertThat(t) .isInstanceOf(IllegalArgumentException.class) .hasMessage( "Cannot parameterize `java.lang.String` " + "because it does not have any type parameters."); }
public static SourceConfig validateUpdate(SourceConfig existingConfig, SourceConfig newConfig) { SourceConfig mergedConfig = clone(existingConfig); if (!existingConfig.getTenant().equals(newConfig.getTenant())) { throw new IllegalArgumentException("Tenants differ"); } if (!existingConfig.getNamespace().equals(newConfig.getNamespace())) { throw new IllegalArgumentException("Namespaces differ"); } if (!existingConfig.getName().equals(newConfig.getName())) { throw new IllegalArgumentException("Function Names differ"); } if (!StringUtils.isEmpty(newConfig.getClassName())) { mergedConfig.setClassName(newConfig.getClassName()); } if (!StringUtils.isEmpty(newConfig.getTopicName())) { mergedConfig.setTopicName(newConfig.getTopicName()); } if (!StringUtils.isEmpty(newConfig.getSerdeClassName())) { mergedConfig.setSerdeClassName(newConfig.getSerdeClassName()); } if (!StringUtils.isEmpty(newConfig.getSchemaType())) { mergedConfig.setSchemaType(newConfig.getSchemaType()); } if (newConfig.getConfigs() != null) { mergedConfig.setConfigs(newConfig.getConfigs()); } if (newConfig.getSecrets() != null) { mergedConfig.setSecrets(newConfig.getSecrets()); } if (!StringUtils.isEmpty(newConfig.getLogTopic())) { mergedConfig.setLogTopic(newConfig.getLogTopic()); } if (newConfig.getProcessingGuarantees() != null && !newConfig.getProcessingGuarantees() .equals(existingConfig.getProcessingGuarantees())) { throw new IllegalArgumentException("Processing Guarantees cannot be altered"); } if (newConfig.getParallelism() != null) { mergedConfig.setParallelism(newConfig.getParallelism()); } if (newConfig.getResources() != null) { mergedConfig .setResources(ResourceConfigUtils.merge(existingConfig.getResources(), newConfig.getResources())); } if (!StringUtils.isEmpty(newConfig.getArchive())) { mergedConfig.setArchive(newConfig.getArchive()); } if (!StringUtils.isEmpty(newConfig.getRuntimeFlags())) { mergedConfig.setRuntimeFlags(newConfig.getRuntimeFlags()); } if 
(!StringUtils.isEmpty(newConfig.getCustomRuntimeOptions())) { mergedConfig.setCustomRuntimeOptions(newConfig.getCustomRuntimeOptions()); } if (isBatchSource(existingConfig) != isBatchSource(newConfig)) { throw new IllegalArgumentException("Sources cannot be update between regular sources and batchsource"); } if (newConfig.getBatchSourceConfig() != null) { validateBatchSourceConfigUpdate(existingConfig.getBatchSourceConfig(), newConfig.getBatchSourceConfig()); mergedConfig.setBatchSourceConfig(newConfig.getBatchSourceConfig()); } if (newConfig.getProducerConfig() != null) { mergedConfig.setProducerConfig(newConfig.getProducerConfig()); } return mergedConfig; }
@Test(expectedExceptions = IllegalArgumentException.class, expectedExceptionsMessageRegExp = "Processing Guarantees cannot be altered") public void testMergeDifferentProcessingGuarantees() { SourceConfig sourceConfig = createSourceConfig(); SourceConfig newSourceConfig = createUpdatedSourceConfig("processingGuarantees", EFFECTIVELY_ONCE); SourceConfig mergedConfig = SourceConfigUtils.validateUpdate(sourceConfig, newSourceConfig); }
@CanIgnoreReturnValue public GsonBuilder setDateFormat(String pattern) { if (pattern != null) { try { new SimpleDateFormat(pattern); } catch (IllegalArgumentException e) { // Throw exception if it is an invalid date format throw new IllegalArgumentException("The date pattern '" + pattern + "' is not valid", e); } } this.datePattern = pattern; return this; }
@Test public void testSetDateFormatWithInvalidPattern() { GsonBuilder builder = new GsonBuilder(); String invalidPattern = "This is an invalid Pattern"; IllegalArgumentException e = assertThrows(IllegalArgumentException.class, () -> builder.setDateFormat(invalidPattern)); assertThat(e) .hasMessageThat() .isEqualTo("The date pattern '" + invalidPattern + "' is not valid"); }
@Override public TCreatePartitionResult createPartition(TCreatePartitionRequest request) throws TException { LOG.info("Receive create partition: {}", request); TCreatePartitionResult result; try { if (partitionRequestNum.incrementAndGet() >= Config.thrift_server_max_worker_threads / 4) { result = new TCreatePartitionResult(); TStatus errorStatus = new TStatus(SERVICE_UNAVAILABLE); errorStatus.setError_msgs(Lists.newArrayList( String.format("Too many create partition requests, please try again later txn_id=%d", request.getTxn_id()))); result.setStatus(errorStatus); return result; } result = createPartitionProcess(request); } catch (Exception t) { LOG.warn(DebugUtil.getStackTrace(t)); result = new TCreatePartitionResult(); TStatus errorStatus = new TStatus(RUNTIME_ERROR); errorStatus.setError_msgs(Lists.newArrayList(String.format("txn_id=%d failed. %s", request.getTxn_id(), t.getMessage()))); result.setStatus(errorStatus); } finally { partitionRequestNum.decrementAndGet(); } return result; }
@Test public void testCreatePartitionApiHour() throws TException { new MockUp<GlobalTransactionMgr>() { @Mock public TransactionState getTransactionState(long dbId, long transactionId) { return new TransactionState(); } }; Database db = GlobalStateMgr.getCurrentState().getDb("test"); Table table = db.getTable("site_access_hour"); List<List<String>> partitionValues = Lists.newArrayList(); List<String> values = Lists.newArrayList(); values.add("1990-04-24 12:34:56"); partitionValues.add(values); FrontendServiceImpl impl = new FrontendServiceImpl(exeEnv); TCreatePartitionRequest request = new TCreatePartitionRequest(); request.setDb_id(db.getId()); request.setTable_id(table.getId()); request.setPartition_values(partitionValues); TCreatePartitionResult partition = impl.createPartition(request); Assert.assertEquals(TStatusCode.OK, partition.getStatus().getStatus_code()); Partition p1990042412 = table.getPartition("p1990042412"); Assert.assertNotNull(p1990042412); partition = impl.createPartition(request); Assert.assertEquals(1, partition.partitions.size()); }
public Object evaluate(final ProcessingDTO processingDTO, final List<Object> paramValues) { final List<KiePMMLNameValue> kiePMMLNameValues = new ArrayList<>(); if (parameterFields != null) { if (paramValues == null || paramValues.size() < parameterFields.size()) { throw new IllegalArgumentException("Expected at least " + parameterFields.size() + " arguments for " + name + " DefineFunction"); } for (int i = 0; i < parameterFields.size(); i++) { kiePMMLNameValues.add(new KiePMMLNameValue(parameterFields.get(i).getName(), paramValues.get(i))); } } for (KiePMMLNameValue kiePMMLNameValue : kiePMMLNameValues) { processingDTO.addKiePMMLNameValue(kiePMMLNameValue); } return commonEvaluate(kiePMMLExpression.evaluate(processingDTO), dataType); }
@Test void evaluateNoParamValues() { assertThatExceptionOfType(IllegalArgumentException.class).isThrownBy(() -> { final KiePMMLParameterField parameterField1 = KiePMMLParameterField.builder(PARAM_1, Collections.emptyList()).build(); final KiePMMLParameterField parameterField2 = KiePMMLParameterField.builder(PARAM_2, Collections.emptyList()).build(); final KiePMMLDefineFunction defineFunction = new KiePMMLDefineFunction(CUSTOM_FUNCTION, Collections.emptyList(), null, OP_TYPE.CONTINUOUS, Arrays.asList(parameterField1, parameterField2), null); ProcessingDTO processingDTO = getProcessingDTO(Collections.emptyList()); defineFunction.evaluate(processingDTO, null); }); }
@Override public String pluginNamed() { return PluginEnum.RATE_LIMITER.getName(); }
@Test public void pluginNamedTest() { assertEquals(PluginEnum.RATE_LIMITER.getName(), new RateLimiterPluginDataHandler().pluginNamed()); }
@Override public KeyVersion rollNewVersion(String name, byte[] material) throws IOException { KeyVersion key = getKeyProvider().rollNewVersion(name, material); invalidateCache(name); return key; }
@Test public void testRollNewVersion() throws Exception { KeyProvider.KeyVersion mockKey = Mockito.mock(KeyProvider.KeyVersion.class); KeyProvider mockProv = Mockito.mock(KeyProvider.class); Mockito.when(mockProv.getCurrentKey(Mockito.eq("k1"))).thenReturn(mockKey); Mockito.when(mockProv.getConf()).thenReturn(new Configuration()); KeyProvider cache = new CachingKeyProvider(mockProv, 100, 100); Assert.assertEquals(mockKey, cache.getCurrentKey("k1")); Mockito.verify(mockProv, Mockito.times(1)).getCurrentKey(Mockito.eq("k1")); cache.rollNewVersion("k1"); // asserting the cache is purged Assert.assertEquals(mockKey, cache.getCurrentKey("k1")); Mockito.verify(mockProv, Mockito.times(2)).getCurrentKey(Mockito.eq("k1")); cache.rollNewVersion("k1", new byte[0]); Assert.assertEquals(mockKey, cache.getCurrentKey("k1")); Mockito.verify(mockProv, Mockito.times(3)).getCurrentKey(Mockito.eq("k1")); }
public static IcebergColumnHandle primitiveIcebergColumnHandle(int id, String name, Type type, Optional<String> comment) { return new IcebergColumnHandle(primitiveColumnIdentity(id, name), type, comment, REGULAR); }
@Test public void testRoundTrip() { testRoundTrip(primitiveIcebergColumnHandle(12, "blah", BIGINT, Optional.of("this is a comment"))); // Nested column ColumnIdentity foo1 = new ColumnIdentity(1, "foo1", PRIMITIVE, ImmutableList.of()); ColumnIdentity foo2 = new ColumnIdentity(2, "foo2", PRIMITIVE, ImmutableList.of()); ColumnIdentity foo3 = new ColumnIdentity(3, "foo3", ARRAY, ImmutableList.of(foo1)); IcebergColumnHandle nestedColumn = new IcebergColumnHandle( new ColumnIdentity( 5, "foo5", STRUCT, ImmutableList.of(foo2, foo3)), RowType.from(ImmutableList.of( RowType.field("foo2", BIGINT), RowType.field("foo3", new ArrayType(BIGINT)))), Optional.empty(), REGULAR); testRoundTrip(nestedColumn); }
@Path("config/{id}") @GET @Produces(MediaType.APPLICATION_JSON) public AppEntry getDetails(@PathParam("id") String id) { AppCatalogSolrClient sc = new AppCatalogSolrClient(); return sc.findAppEntry(id); }
@Test void testGetDetails() throws Exception { String id = "application 1"; AppDetailsController ac = Mockito.mock(AppDetailsController.class); AppEntry actual = new AppEntry(); actual.setName(id); when(ac.getDetails(id)).thenReturn(actual); final AppEntry result = ac.getDetails(id); assertEquals(result, actual); }
public PropertyType getFilePath() { return filePath; }
@Test @SuppressWarnings("squid:S2699") public void testGetFilePath() { //already tested, this is just left so the IDE doesn't recreate it. }
public static String executeDockerCommand(DockerCommand dockerCommand, String containerId, Map<String, String> env, PrivilegedOperationExecutor privilegedOperationExecutor, boolean disableFailureLogging, Context nmContext) throws ContainerExecutionException { PrivilegedOperation dockerOp = dockerCommand.preparePrivilegedOperation( dockerCommand, containerId, env, nmContext); if (disableFailureLogging) { dockerOp.disableFailureLogging(); } LOG.debug("Running docker command: {}", dockerCommand); try { String result = privilegedOperationExecutor .executePrivilegedOperation(null, dockerOp, null, env, true, false); if (result != null && !result.isEmpty()) { result = result.trim(); } return result; } catch (PrivilegedOperationException e) { throw new ContainerExecutionException("Docker operation failed", e.getExitCode(), e.getOutput(), e.getErrorOutput()); } }
@Test public void testExecuteDockerPull() throws Exception { DockerPullCommand dockerCommand = new DockerPullCommand(MOCK_IMAGE_NAME); DockerCommandExecutor.executeDockerCommand(dockerCommand, MOCK_CONTAINER_ID, env, mockExecutor, false, nmContext); List<PrivilegedOperation> ops = MockPrivilegedOperationCaptor .capturePrivilegedOperations(mockExecutor, 1, true); List<String> dockerCommands = getValidatedDockerCommands(ops); assertEquals(1, ops.size()); assertEquals(PrivilegedOperation.OperationType.RUN_DOCKER_CMD.name(), ops.get(0).getOperationType().name()); assertEquals(3, dockerCommands.size()); assertEquals("[docker-command-execution]", dockerCommands.get(0)); assertEquals(" docker-command=pull", dockerCommands.get(1)); assertEquals(" image=" + MOCK_IMAGE_NAME, dockerCommands.get(2)); }
@Override public ProcessingResult process(ReplicationTask task) { try { EurekaHttpResponse<?> httpResponse = task.execute(); int statusCode = httpResponse.getStatusCode(); Object entity = httpResponse.getEntity(); if (logger.isDebugEnabled()) { logger.debug("Replication task {} completed with status {}, (includes entity {})", task.getTaskName(), statusCode, entity != null); } if (isSuccess(statusCode)) { task.handleSuccess(); } else if (statusCode == 503) { logger.debug("Server busy (503) reply for task {}", task.getTaskName()); return ProcessingResult.Congestion; } else { task.handleFailure(statusCode, entity); return ProcessingResult.PermanentError; } } catch (Throwable e) { if (maybeReadTimeOut(e)) { logger.error("It seems to be a socket read timeout exception, it will retry later. if it continues to happen and some eureka node occupied all the cpu time, you should set property 'eureka.server.peer-node-read-timeout-ms' to a bigger value", e); //read timeout exception is more Congestion then TransientError, return Congestion for longer delay return ProcessingResult.Congestion; } else if (isNetworkConnectException(e)) { logNetworkErrorSample(task, e); return ProcessingResult.TransientError; } else { logger.error("{}: {} Not re-trying this exception because it does not seem to be a network exception", peerId, task.getTaskName(), e); return ProcessingResult.PermanentError; } } return ProcessingResult.Success; }
@Test public void testBatchableTaskCongestionFailureHandling() throws Exception { TestableInstanceReplicationTask task = aReplicationTask().build(); replicationClient.withNetworkStatusCode(503); ProcessingResult status = replicationTaskProcessor.process(Collections.<ReplicationTask>singletonList(task)); assertThat(status, is(ProcessingResult.Congestion)); assertThat(task.getProcessingState(), is(ProcessingState.Pending)); }
@Override public Object toConnectRow(final Object ksqlData) { /* * Reconstruct ksqlData struct with given schema and try to put original data in it. * Schema may have more fields than ksqlData, don't put those field by default. If needed by * some format like Avro, create new subclass to handle */ if (ksqlData instanceof Struct) { final Schema schema = getSchema(); validate(((Struct) ksqlData).schema(), schema); final Struct struct = new Struct(schema); final Struct source = (Struct) ksqlData; for (final Field sourceField : source.schema().fields()) { final Object value = source.get(sourceField); struct.put(sourceField.name(), value); } return struct; } return ksqlData; }
@Test public void shouldTransformStruct() { // Given: final Schema schema = SchemaBuilder.struct() .field("f1", SchemaBuilder.OPTIONAL_STRING_SCHEMA) .field("f2", SchemaBuilder.OPTIONAL_INT32_SCHEMA) .field("f3", SchemaBuilder.OPTIONAL_INT64_SCHEMA) .build(); final Struct struct = new Struct(ORIGINAL_SCHEMA) .put("f1", "abc") .put("f2", 12); // When: final Object object = new ConnectSRSchemaDataTranslator(schema).toConnectRow(struct); // Then: assertThat(object, instanceOf(Struct.class)); assertThat(((Struct) object).schema(), sameInstance(schema)); assertThat(((Struct) object).get("f3"), is(nullValue())); }
public Application<T> getApplication() { return application; }
@Test void hasAnApplication() throws Exception { assertThat(bootstrap.getApplication()) .isEqualTo(application); }
@Override public final short readShort() throws EOFException { short s = readShort(pos); pos += SHORT_SIZE_IN_BYTES; return s; }
@Test public void testReadShortPosition() throws Exception { short read = in.readShort(1); short val = Bits.readShort(INIT_DATA, 1, byteOrder == BIG_ENDIAN); assertEquals(val, read); }
public FEELFnResult<Boolean> invoke(@ParameterName( "range" ) Range range, @ParameterName( "point" ) Comparable point) { if ( point == null ) { return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "point", "cannot be null")); } if ( range == null ) { return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "range", "cannot be null")); } try { boolean result = ( range.getHighBoundary() == Range.RangeBoundary.CLOSED && point.compareTo( range.getHighEndPoint() ) == 0 ); return FEELFnResult.ofResult( result ); } catch( Exception e ) { // points are not comparable return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "point", "cannot be compared to range")); } }
@Test void invokeParamRangeAndSingle() { FunctionTestUtil.assertResult( finishedByFunction.invoke( new RangeImpl( Range.RangeBoundary.CLOSED, "a", "f", Range.RangeBoundary.CLOSED ), "f" ), Boolean.TRUE ); FunctionTestUtil.assertResult( finishedByFunction.invoke( new RangeImpl( Range.RangeBoundary.CLOSED, "a", "f", Range.RangeBoundary.CLOSED ), "a"), Boolean.FALSE ); FunctionTestUtil.assertResult( finishedByFunction.invoke( new RangeImpl( Range.RangeBoundary.CLOSED, "a", "f", Range.RangeBoundary.OPEN ), "f" ), Boolean.FALSE ); FunctionTestUtil.assertResult( finishedByFunction.invoke( new RangeImpl( Range.RangeBoundary.CLOSED, "a", "f", Range.RangeBoundary.CLOSED ), "g" ), Boolean.FALSE ); }
public static ArrayNode generateDataPointArrayNode(ChartModel cm) { ArrayNode array = MAPPER.createArrayNode(); for (ChartModel.DataPoint dp : cm.getDataPoints()) { array.add(toJsonNode(dp, cm)); } return array; }
@Test public void basic() { ChartModel cm = new ChartModel(FOO, BAR); cm.addDataPoint(1L).data(FOO, 1D).data(BAR, 2D); cm.addDataPoint(2L).data(FOO, 3D).data(BAR, 4D); ArrayNode array = ChartUtils.generateDataPointArrayNode(cm); Assert.assertEquals("wrong results", ARRAY_AS_STRING, array.toString()); }
public MailConfiguration getConfiguration() { if (configuration == null) { configuration = new MailConfiguration(getCamelContext()); } return configuration; }
@Test public void testMailEndpointsWithFetchSize() { MailEndpoint endpoint = checkEndpoint("pop3://james@myhost?fetchSize=5"); MailConfiguration config = endpoint.getConfiguration(); assertEquals("pop3", config.getProtocol(), "getProtocol()"); assertEquals("myhost", config.getHost(), "getHost()"); assertEquals(110, config.getPort(), "getPort()"); assertEquals("james", config.getUsername(), "getUsername()"); assertEquals("james@myhost", config.getRecipients().get(Message.RecipientType.TO), "getRecipients().get(Message.RecipientType.TO)"); assertEquals("INBOX", config.getFolderName(), "folder"); assertEquals(5, config.getFetchSize(), "fetchSize"); assertFalse(config.isDebugMode()); }
@Override public String getFileId(final Path file) throws BackgroundException { if(StringUtils.isNotBlank(file.attributes().getFileId())) { return file.attributes().getFileId(); } if(file.isRoot() || new SimplePathPredicate(file).test(DriveHomeFinderService.MYDRIVE_FOLDER) || new SimplePathPredicate(file).test(DriveHomeFinderService.SHARED_FOLDER_NAME) || new SimplePathPredicate(file).test(DriveHomeFinderService.SHARED_DRIVES_NAME)) { return DriveHomeFinderService.ROOT_FOLDER_ID; } final String cached = super.getFileId(file); if(cached != null) { if(log.isDebugEnabled()) { log.debug(String.format("Return cached fileid %s for file %s", cached, file)); } return cached; } if(new SimplePathPredicate(DriveHomeFinderService.SHARED_DRIVES_NAME).test(file.getParent())) { final Path found = new DriveTeamDrivesListService(session, this).list(file.getParent(), new DisabledListProgressListener()).find(new SimplePathPredicate(file) ); if(null == found) { throw new NotfoundException(file.getAbsolute()); } return this.cache(file, found.attributes().getFileId()); } final Path query; if(file.isPlaceholder()) { query = new Path(file.getParent(), FilenameUtils.removeExtension(file.getName()), file.getType(), file.attributes()); } else { query = file; } final AttributedList<Path> list = new FileidDriveListService(session, this, query).list(file.getParent(), new DisabledListProgressListener()); final Path found = list.filter(new IgnoreTrashedComparator()).find(new SimplePathPredicate(file)); if(null == found) { throw new NotfoundException(file.getAbsolute()); } return this.cache(file, found.attributes().getFileId()); }
@Test public void testFileIdCollision() throws Exception { final DriveFileIdProvider fileid = new DriveFileIdProvider(session); final Path directory = new DriveDirectoryFeature(session, fileid).mkdir( new Path(DriveHomeFinderService.MYDRIVE_FOLDER, UUID.randomUUID().toString(), EnumSet.of(Path.Type.directory)), new TransferStatus()); final Path path2R = new Path(directory, "2R", EnumSet.of(Path.Type.directory)); final Path path33 = new Path(directory, "33", EnumSet.of(Path.Type.directory)); final Directory directoryFeature = new DriveDirectoryFeature(session, fileid); final Path path2RWithId = directoryFeature.mkdir(path2R, new TransferStatus()); assertNotNull(path2RWithId.attributes().getFileId()); final Path path33WithId = directoryFeature.mkdir(path33, new TransferStatus()); assertNotNull(path33WithId.attributes().getFileId()); assertNotEquals(path2RWithId.attributes().getFileId(), path33WithId.attributes().getFileId()); final String fileId = fileid.getFileId(path33); assertEquals(fileId, path33WithId.attributes().getFileId()); assertNotEquals(fileId, path2RWithId.attributes().getFileId()); new DriveDeleteFeature(session, fileid).delete(Arrays.asList(path2RWithId, path33WithId, directory), new DisabledPasswordCallback(), new Delete.DisabledCallback()); }
public Map<Endpoint, CompletableFuture<Void>> futures() { return futures; }
@Test public void testFailedReadinessFuture() { CompletableFuture<Void> foo = new CompletableFuture<>(); CompletableFuture<Void> bar = new CompletableFuture<>(); EndpointReadyFutures readyFutures = new EndpointReadyFutures.Builder(). addReadinessFuture("foo", foo). addReadinessFuture("bar", bar). build(Optional.empty(), INFO); assertEquals(new HashSet<>(Arrays.asList(EXTERNAL, INTERNAL)), readyFutures.futures().keySet()); assertIncomplete(readyFutures, EXTERNAL, INTERNAL); foo.complete(null); assertIncomplete(readyFutures, EXTERNAL, INTERNAL); bar.completeExceptionally(new RuntimeException("Failed.")); assertException(readyFutures, new RuntimeException("Failed."), EXTERNAL, INTERNAL); }
public static Db use() { return use(DSFactory.get()); }
@Test public void findLikeTest() throws SQLException { // 方式1 List<Entity> find = Db.use().find(Entity.create("user").set("name", "like 王%")); assertEquals("王五", find.get(0).get("name")); // 方式2 find = Db.use().findLike("user", "name", "王", Condition.LikeType.StartWith); assertEquals("王五", find.get(0).get("name")); // 方式3 find = Db.use().query("select * from user where name like ?", "王%"); assertEquals("王五", find.get(0).get("name")); }
public static void validate(final String table, final String column, final Comparable<?> shadowValue) { for (Class<?> each : UNSUPPORTED_TYPES) { ShardingSpherePreconditions.checkState(!each.isAssignableFrom(shadowValue.getClass()), () -> new UnsupportedShadowColumnTypeException(table, column, each)); } }
@Test void assertValidateAcceptedType() { ShadowValueValidator.validate("tbl", "col", ""); }
public static List<String> shellSplit(CharSequence string) { List<String> tokens = new ArrayList<>(); if ( string == null ) { return tokens; } boolean escaping = false; char quoteChar = ' '; boolean quoting = false; StringBuilder current = new StringBuilder() ; for (int i = 0; i<string.length(); i++) { char c = string.charAt(i); if (escaping) { current.append(c); escaping = false; } else if (c == '\\' && !(quoting && quoteChar == '\'')) { escaping = true; } else if (quoting && c == quoteChar) { quoting = false; } else if (!quoting && (c == '\'' || c == '"')) { quoting = true; quoteChar = c; } else if (!quoting && Character.isWhitespace(c)) { if (current.length() > 0) { tokens.add(current.toString()); current = new StringBuilder(); } } else { current.append(c); } } if (current.length() > 0) { tokens.add(current.toString()); } return tokens; }
@Test public void noEscapeWithinSingleQuotes() { assertEquals(List.of("hello \\\" world"), StringUtils.shellSplit("'hello \\\" world'")); }
@Override public <T extends Statement> ConfiguredStatement<T> inject( final ConfiguredStatement<T> statement ) { return inject(statement, new TopicProperties.Builder()); }
@Test public void shouldPassThroughWithClauseToBuilderForCreate() { // Given: givenStatement("CREATE STREAM x (FOO VARCHAR) WITH(value_format='avro', kafka_topic='topic', partitions=2, retention_ms=5000);"); final CreateSourceProperties props = ((CreateSource) statement.getStatement()) .getProperties(); // When: injector.inject(statement, builder); // Then: verify(builder).withWithClause( Optional.of(props.getKafkaTopic()), props.getPartitions(), props.getReplicas(), props.getRetentionInMillis() ); }
public static void validatePositivePowerOfTwo(final int value) { if (!BitUtil.isPowerOfTwo(value)) { throw new IllegalArgumentException("value must be a positive power of two: " + value); } }
@Test void validatePositivePowerOfTwFailWith33() { assertThrows(IllegalArgumentException.class, () -> CollectionUtil.validatePositivePowerOfTwo(33)); }
/**
 * Generates merge/rollup task configs for the given tables.
 *
 * <p>For each valid table this: (1) selects merged-eligible, non-empty segments surviving the
 * current lineage snapshot; (2) sorts them and the configured merge levels; (3) walks merge
 * levels from lowest to highest bucket period, skipping levels with incomplete tasks, and
 * buckets unmerged segments (up to the configured parallelism) into task configs; (4) persists
 * the advanced per-level watermarks to ZooKeeper unless running in "processAll" mode.
 *
 * @param tableConfigs the candidate table configs
 * @return the generated task configs across all tables (possibly empty, never null)
 */
@Override
public List<PinotTaskConfig> generateTasks(List<TableConfig> tableConfigs) {
  String taskType = MergeRollupTask.TASK_TYPE;
  List<PinotTaskConfig> pinotTaskConfigs = new ArrayList<>();
  for (TableConfig tableConfig : tableConfigs) {
    if (!validate(tableConfig, taskType)) {
      continue;
    }
    String tableNameWithType = tableConfig.getTableName();
    LOGGER.info("Start generating task configs for table: {} for task: {}", tableNameWithType, taskType);

    // Get all segment metadata
    List<SegmentZKMetadata> allSegments = getSegmentsZKMetadataForTable(tableNameWithType);

    // Filter segments based on status
    List<SegmentZKMetadata> preSelectedSegmentsBasedOnStatus =
        filterSegmentsBasedOnStatus(tableConfig.getTableType(), allSegments);

    // Select current segment snapshot based on lineage, filter out empty segments
    SegmentLineage segmentLineage = _clusterInfoAccessor.getSegmentLineage(tableNameWithType);
    Set<String> preSelectedSegmentsBasedOnLineage = new HashSet<>();
    for (SegmentZKMetadata segment : preSelectedSegmentsBasedOnStatus) {
      preSelectedSegmentsBasedOnLineage.add(segment.getSegmentName());
    }
    SegmentLineageUtils.filterSegmentsBasedOnLineageInPlace(preSelectedSegmentsBasedOnLineage, segmentLineage);
    List<SegmentZKMetadata> preSelectedSegments = new ArrayList<>();
    for (SegmentZKMetadata segment : preSelectedSegmentsBasedOnStatus) {
      // Keep only non-empty segments that survived lineage filtering and are merge-eligible.
      if (preSelectedSegmentsBasedOnLineage.contains(segment.getSegmentName()) && segment.getTotalDocs() > 0
          && MergeTaskUtils.allowMerge(segment)) {
        preSelectedSegments.add(segment);
      }
    }

    if (preSelectedSegments.isEmpty()) {
      // Reset the watermark time if no segment found. This covers the case where the table is newly created or
      // all segments for the existing table got deleted.
      resetDelayMetrics(tableNameWithType);
      LOGGER.info("Skip generating task: {} for table: {}, no segment is found.", taskType, tableNameWithType);
      continue;
    }

    // Sort segments based on startTimeMs, endTimeMs and segmentName in ascending order
    preSelectedSegments.sort((a, b) -> {
      long aStartTime = a.getStartTimeMs();
      long bStartTime = b.getStartTimeMs();
      if (aStartTime != bStartTime) {
        return Long.compare(aStartTime, bStartTime);
      }
      long aEndTime = a.getEndTimeMs();
      long bEndTime = b.getEndTimeMs();
      return aEndTime != bEndTime ? Long.compare(aEndTime, bEndTime)
          : a.getSegmentName().compareTo(b.getSegmentName());
    });

    // Sort merge levels based on bucket time period
    Map<String, String> taskConfigs = tableConfig.getTaskConfig().getConfigsForTaskType(taskType);
    Map<String, Map<String, String>> mergeLevelToConfigs = MergeRollupTaskUtils.getLevelToConfigMap(taskConfigs);
    List<Map.Entry<String, Map<String, String>>> sortedMergeLevelConfigs =
        new ArrayList<>(mergeLevelToConfigs.entrySet());
    sortedMergeLevelConfigs.sort(Comparator.comparingLong(
        e -> TimeUtils.convertPeriodToMillis(e.getValue().get(MinionConstants.MergeTask.BUCKET_TIME_PERIOD_KEY))));

    // Get incomplete merge levels
    Set<String> inCompleteMergeLevels = new HashSet<>();
    for (Map.Entry<String, TaskState> entry : TaskGeneratorUtils.getIncompleteTasks(taskType, tableNameWithType,
        _clusterInfoAccessor).entrySet()) {
      for (PinotTaskConfig taskConfig : _clusterInfoAccessor.getTaskConfigs(entry.getKey())) {
        inCompleteMergeLevels.add(taskConfig.getConfigs().get(MergeRollupTask.MERGE_LEVEL_KEY));
      }
    }

    // Get scheduling mode which is "processFromWatermark" by default. If "processAll" mode is enabled, there will be
    // no watermark, and each round we pick the buckets in chronological order which have unmerged segments.
    boolean processAll = MergeTask.PROCESS_ALL_MODE.equalsIgnoreCase(taskConfigs.get(MergeTask.MODE));

    // expectedVersion guards the compare-and-set write of the metadata ZNode at the end of this loop.
    ZNRecord mergeRollupTaskZNRecord = _clusterInfoAccessor
        .getMinionTaskMetadataZNRecord(MinionConstants.MergeRollupTask.TASK_TYPE, tableNameWithType);
    int expectedVersion = mergeRollupTaskZNRecord != null ? mergeRollupTaskZNRecord.getVersion() : -1;
    MergeRollupTaskMetadata mergeRollupTaskMetadata = mergeRollupTaskZNRecord != null
        ? MergeRollupTaskMetadata.fromZNRecord(mergeRollupTaskZNRecord)
        : new MergeRollupTaskMetadata(tableNameWithType, new TreeMap<>());
    List<PinotTaskConfig> pinotTaskConfigsForTable = new ArrayList<>();

    // Schedule tasks from lowest to highest merge level (e.g. Hourly -> Daily -> Monthly -> Yearly)
    String mergeLevel = null;
    for (Map.Entry<String, Map<String, String>> mergeLevelConfig : sortedMergeLevelConfigs) {
      String lowerMergeLevel = mergeLevel;
      mergeLevel = mergeLevelConfig.getKey();
      Map<String, String> mergeConfigs = mergeLevelConfig.getValue();

      // Skip scheduling if there's incomplete task for current mergeLevel
      if (inCompleteMergeLevels.contains(mergeLevel)) {
        LOGGER.info("Found incomplete task of merge level: {} for the same table: {}, Skipping task generation: {}",
            mergeLevel, tableNameWithType, taskType);
        continue;
      }

      // Get the bucket size, buffer size and maximum number of parallel buckets (by default 1)
      String bucketPeriod = mergeConfigs.get(MergeTask.BUCKET_TIME_PERIOD_KEY);
      long bucketMs = TimeUtils.convertPeriodToMillis(bucketPeriod);
      if (bucketMs <= 0) {
        LOGGER.error("Bucket time period: {} (table : {}, mergeLevel : {}) must be larger than 0", bucketPeriod,
            tableNameWithType, mergeLevel);
        continue;
      }
      String bufferPeriod = mergeConfigs.get(MergeTask.BUFFER_TIME_PERIOD_KEY);
      long bufferMs = TimeUtils.convertPeriodToMillis(bufferPeriod);
      if (bufferMs < 0) {
        LOGGER.error("Buffer time period: {} (table : {}, mergeLevel : {}) must be larger or equal to 0",
            bufferPeriod, tableNameWithType, mergeLevel);
        continue;
      }
      String maxNumParallelBucketsStr = mergeConfigs.get(MergeTask.MAX_NUM_PARALLEL_BUCKETS);
      int maxNumParallelBuckets = maxNumParallelBucketsStr != null ? Integer.parseInt(maxNumParallelBucketsStr)
          : DEFAULT_NUM_PARALLEL_BUCKETS;
      if (maxNumParallelBuckets <= 0) {
        LOGGER.error("Maximum number of parallel buckets: {} (table : {}, mergeLevel : {}) must be larger than 0",
            maxNumParallelBuckets, tableNameWithType, mergeLevel);
        continue;
      }

      // Get bucket start/end time
      long preSelectedSegStartTimeMs = preSelectedSegments.get(0).getStartTimeMs();
      long bucketStartMs = preSelectedSegStartTimeMs / bucketMs * bucketMs;
      long watermarkMs = 0;
      if (!processAll) {
        // Get watermark from MergeRollupTaskMetadata ZNode
        // bucketStartMs = watermarkMs
        // bucketEndMs = bucketStartMs + bucketMs
        watermarkMs = getWatermarkMs(preSelectedSegStartTimeMs, bucketMs, mergeLevel, mergeRollupTaskMetadata);
        bucketStartMs = watermarkMs;
      }
      long bucketEndMs = bucketStartMs + bucketMs;
      if (lowerMergeLevel == null) {
        // Only the lowest merge level computes this table-wide bound.
        long lowestLevelMaxValidBucketEndTimeMs = Long.MIN_VALUE;
        for (SegmentZKMetadata preSelectedSegment : preSelectedSegments) {
          // Compute lowestLevelMaxValidBucketEndTimeMs among segments that are ready for merge
          long currentValidBucketEndTimeMs =
              getValidBucketEndTimeMsForSegment(preSelectedSegment, bucketMs, bufferMs);
          lowestLevelMaxValidBucketEndTimeMs =
              Math.max(lowestLevelMaxValidBucketEndTimeMs, currentValidBucketEndTimeMs);
        }
        _tableLowestLevelMaxValidBucketEndTimeMs.put(tableNameWithType, lowestLevelMaxValidBucketEndTimeMs);
      }
      // Create metrics even if there's no task scheduled, this helps the case that the controller is restarted
      // but the metrics are not available until the controller schedules a valid task
      List<String> sortedMergeLevels =
          sortedMergeLevelConfigs.stream().map(e -> e.getKey()).collect(Collectors.toList());
      if (processAll) {
        createOrUpdateNumBucketsToProcessMetrics(tableNameWithType, mergeLevel, lowerMergeLevel, bufferMs, bucketMs,
            preSelectedSegments, sortedMergeLevels);
      } else {
        createOrUpdateDelayMetrics(tableNameWithType, mergeLevel, null, watermarkMs, bufferMs, bucketMs);
      }

      if (!isValidBucketEndTime(bucketEndMs, bufferMs, lowerMergeLevel, mergeRollupTaskMetadata, processAll)) {
        LOGGER.info("Bucket with start: {} and end: {} (table : {}, mergeLevel : {}, mode : {}) cannot be merged yet",
            bucketStartMs, bucketEndMs, tableNameWithType, mergeLevel,
            processAll ? MergeTask.PROCESS_ALL_MODE : MergeTask.PROCESS_FROM_WATERMARK_MODE);
        continue;
      }

      // Find overlapping segments for each bucket, skip the buckets that has all segments merged
      List<List<SegmentZKMetadata>> selectedSegmentsForAllBuckets = new ArrayList<>(maxNumParallelBuckets);
      List<SegmentZKMetadata> selectedSegmentsForBucket = new ArrayList<>();
      boolean hasUnmergedSegments = false;
      boolean hasSpilledOverData = false;
      boolean areAllSegmentsReadyToMerge = true;

      // The for loop terminates in following cases:
      // 1. Found buckets with unmerged segments:
      //    For each bucket find all segments overlapping with the target bucket, skip the bucket if all overlapping
      //    segments are merged. Schedule k (numParallelBuckets) buckets at most, and stops at the first bucket that
      //    contains spilled over data.
      //    One may wonder how a segment with records spanning different buckets is handled. The short answer is that
      //    it will be cut into multiple segments, each for a separate bucket. This is achieved by setting bucket time
      //    period as PARTITION_BUCKET_TIME_PERIOD when generating PinotTaskConfigs
      // 2. There's no bucket with unmerged segments, skip scheduling
      for (SegmentZKMetadata preSelectedSegment : preSelectedSegments) {
        long startTimeMs = preSelectedSegment.getStartTimeMs();
        if (startTimeMs < bucketEndMs) {
          long endTimeMs = preSelectedSegment.getEndTimeMs();
          if (endTimeMs >= bucketStartMs) {
            // For segments overlapping with current bucket, add to the result list
            if (!isMergedSegment(preSelectedSegment, mergeLevel, sortedMergeLevels)) {
              hasUnmergedSegments = true;
            }
            if (!isMergedSegment(preSelectedSegment, lowerMergeLevel, sortedMergeLevels)) {
              areAllSegmentsReadyToMerge = false;
            }
            if (hasSpilledOverData(preSelectedSegment, bucketMs)) {
              hasSpilledOverData = true;
            }
            selectedSegmentsForBucket.add(preSelectedSegment);
          }
          // endTimeMs < bucketStartMs
          // Haven't find the first overlapping segment, continue to the next segment
        } else {
          // Has gone through all overlapping segments for current bucket
          if (hasUnmergedSegments && areAllSegmentsReadyToMerge) {
            // Add the bucket if there are unmerged segments
            selectedSegmentsForAllBuckets.add(selectedSegmentsForBucket);
          }
          if (selectedSegmentsForAllBuckets.size() == maxNumParallelBuckets || hasSpilledOverData) {
            // If there are enough buckets or found spilled over data, schedule merge tasks
            break;
          } else {
            // Start with a new bucket
            // TODO: If there are many small merged segments, we should merge them again
            selectedSegmentsForBucket = new ArrayList<>();
            hasUnmergedSegments = false;
            areAllSegmentsReadyToMerge = true;
            bucketStartMs = (startTimeMs / bucketMs) * bucketMs;
            bucketEndMs = bucketStartMs + bucketMs;
            if (!isValidBucketEndTime(bucketEndMs, bufferMs, lowerMergeLevel, mergeRollupTaskMetadata, processAll)) {
              break;
            }
            // Seed the new bucket with the segment that triggered the bucket roll-over.
            if (!isMergedSegment(preSelectedSegment, mergeLevel, sortedMergeLevels)) {
              hasUnmergedSegments = true;
            }
            if (!isMergedSegment(preSelectedSegment, lowerMergeLevel, sortedMergeLevels)) {
              areAllSegmentsReadyToMerge = false;
            }
            if (hasSpilledOverData(preSelectedSegment, bucketMs)) {
              hasSpilledOverData = true;
            }
            selectedSegmentsForBucket.add(preSelectedSegment);
          }
        }
      }

      // Add the last bucket if it contains unmerged segments and is not added before
      if (hasUnmergedSegments && areAllSegmentsReadyToMerge && (selectedSegmentsForAllBuckets.isEmpty() || (
          selectedSegmentsForAllBuckets.get(selectedSegmentsForAllBuckets.size() - 1) != selectedSegmentsForBucket))) {
        selectedSegmentsForAllBuckets.add(selectedSegmentsForBucket);
      }

      if (selectedSegmentsForAllBuckets.isEmpty()) {
        LOGGER.info("No unmerged segment found for table: {}, mergeLevel: {}", tableNameWithType, mergeLevel);
        continue;
      }

      // Bump up watermark to the earliest start time of selected segments truncated to the closest bucket boundary
      long newWatermarkMs = selectedSegmentsForAllBuckets.get(0).get(0).getStartTimeMs() / bucketMs * bucketMs;
      mergeRollupTaskMetadata.getWatermarkMap().put(mergeLevel, newWatermarkMs);
      LOGGER.info("Update watermark for table: {}, mergeLevel: {} from: {} to: {}", tableNameWithType, mergeLevel,
          watermarkMs, newWatermarkMs);

      // Update the delay metrics
      if (!processAll) {
        createOrUpdateDelayMetrics(tableNameWithType, mergeLevel, lowerMergeLevel, newWatermarkMs, bufferMs,
            bucketMs);
      }

      // Create task configs
      int maxNumRecordsPerTask =
          mergeConfigs.get(MergeRollupTask.MAX_NUM_RECORDS_PER_TASK_KEY) != null ? Integer.parseInt(
              mergeConfigs.get(MergeRollupTask.MAX_NUM_RECORDS_PER_TASK_KEY)) : DEFAULT_MAX_NUM_RECORDS_PER_TASK;
      SegmentPartitionConfig segmentPartitionConfig = tableConfig.getIndexingConfig().getSegmentPartitionConfig();
      if (segmentPartitionConfig == null) {
        for (List<SegmentZKMetadata> selectedSegmentsPerBucket : selectedSegmentsForAllBuckets) {
          pinotTaskConfigsForTable.addAll(
              createPinotTaskConfigs(selectedSegmentsPerBucket, tableConfig, maxNumRecordsPerTask, mergeLevel, null,
                  mergeConfigs, taskConfigs));
        }
      } else {
        // For partitioned table, schedule separate tasks for each partitionId (partitionId is constructed from
        // partitions of all partition columns. There should be exact match between partition columns of segment and
        // partition columns of table configuration, and there is only partition per column in segment metadata).
        // Other segments which do not meet these conditions are considered as outlier segments, and additional tasks
        // are generated for them.
        Map<String, ColumnPartitionConfig> columnPartitionMap = segmentPartitionConfig.getColumnPartitionMap();
        List<String> partitionColumns = new ArrayList<>(columnPartitionMap.keySet());
        for (List<SegmentZKMetadata> selectedSegmentsPerBucket : selectedSegmentsForAllBuckets) {
          Map<List<Integer>, List<SegmentZKMetadata>> partitionToSegments = new HashMap<>();
          List<SegmentZKMetadata> outlierSegments = new ArrayList<>();
          for (SegmentZKMetadata selectedSegment : selectedSegmentsPerBucket) {
            SegmentPartitionMetadata segmentPartitionMetadata = selectedSegment.getPartitionMetadata();
            List<Integer> partitions = new ArrayList<>();
            if (segmentPartitionMetadata != null && columnPartitionMap.keySet()
                .equals(segmentPartitionMetadata.getColumnPartitionMap().keySet())) {
              for (String partitionColumn : partitionColumns) {
                if (segmentPartitionMetadata.getPartitions(partitionColumn).size() == 1) {
                  partitions.add(segmentPartitionMetadata.getPartitions(partitionColumn).iterator().next());
                } else {
                  // A segment with multiple partitions for any column is treated as an outlier.
                  partitions.clear();
                  break;
                }
              }
            }
            if (partitions.isEmpty()) {
              outlierSegments.add(selectedSegment);
            } else {
              partitionToSegments.computeIfAbsent(partitions, k -> new ArrayList<>()).add(selectedSegment);
            }
          }
          for (Map.Entry<List<Integer>, List<SegmentZKMetadata>> entry : partitionToSegments.entrySet()) {
            List<Integer> partition = entry.getKey();
            List<SegmentZKMetadata> partitionedSegments = entry.getValue();
            pinotTaskConfigsForTable.addAll(
                createPinotTaskConfigs(partitionedSegments, tableConfig, maxNumRecordsPerTask, mergeLevel, partition,
                    mergeConfigs, taskConfigs));
          }
          if (!outlierSegments.isEmpty()) {
            pinotTaskConfigsForTable.addAll(
                createPinotTaskConfigs(outlierSegments, tableConfig, maxNumRecordsPerTask, mergeLevel, null,
                    mergeConfigs, taskConfigs));
          }
        }
      }
    }

    // Write updated watermark map to zookeeper
    if (!processAll) {
      try {
        _clusterInfoAccessor
            .setMinionTaskMetadata(mergeRollupTaskMetadata, MinionConstants.MergeRollupTask.TASK_TYPE,
                expectedVersion);
      } catch (ZkException e) {
        // Compare-and-set failed: another scheduler updated the metadata concurrently.
        LOGGER.error(
            "Version changed while updating merge/rollup task metadata for table: {}, skip scheduling. There are "
                + "multiple task schedulers for the same table, need to investigate!", tableNameWithType);
        continue;
      }
    }
    pinotTaskConfigs.addAll(pinotTaskConfigsForTable);
    LOGGER.info("Finished generating task configs for table: {} for task: {}, numTasks: {}", tableNameWithType,
        taskType, pinotTaskConfigsForTable.size());
  }

  // Clean up metrics
  cleanUpDelayMetrics(tableConfigs);

  return pinotTaskConfigs;
}
/**
 * Verifies how the generator reacts to pre-existing tasks for the same table:
 * an IN_PROGRESS task blocks generation, an IN_PROGRESS task older than the
 * incompleteness window does not, and a COMPLETED task does not.
 */
@Test
public void testIncompleteTask() {
  // Daily merge level configuration for the offline table under test.
  Map<String, Map<String, String>> taskConfigsMap = new HashMap<>();
  Map<String, String> tableTaskConfigs = new HashMap<>();
  tableTaskConfigs.put("daily.mergeType", "concat");
  tableTaskConfigs.put("daily.bufferTimePeriod", "2d");
  tableTaskConfigs.put("daily.bucketTimePeriod", "1d");
  tableTaskConfigs.put("daily.maxNumRecordsPerSegment", "1000000");
  tableTaskConfigs.put("daily.maxNumRecordsPerTask", "5000000");
  taskConfigsMap.put(MinionConstants.MergeRollupTask.TASK_TYPE, tableTaskConfigs);
  TableConfig offlineTableConfig = getTableConfig(TableType.OFFLINE, taskConfigsMap);

  // Two raw segments plus one "merged" segment sharing segment1's time range.
  String segmentName1 = "testTable__1";
  String segmentName2 = "testTable__2";
  String mergedSegmentName1 = "merged_testTable__1";
  SegmentZKMetadata metadata1 =
      getSegmentZKMetadata(segmentName1, 90_000_000L, 100_000_000L, TimeUnit.MILLISECONDS, null);
  SegmentZKMetadata metadata2 =
      getSegmentZKMetadata(segmentName2, 345_600_000L, 400_000_000L, TimeUnit.MILLISECONDS, null);
  SegmentZKMetadata mergedMetadata1 =
      getSegmentZKMetadata(mergedSegmentName1, 90_000_000L, 100_000_000L, TimeUnit.MILLISECONDS, null);

  // Cluster accessor mock seeded with a daily watermark of one day.
  ClusterInfoAccessor mockClusterInfoProvide = mock(ClusterInfoAccessor.class);
  Map<String, Long> waterMarkMap = new TreeMap<>();
  waterMarkMap.put(DAILY, 86_400_000L);
  when(mockClusterInfoProvide.getMinionTaskMetadataZNRecord(MinionConstants.MergeRollupTask.TASK_TYPE,
      OFFLINE_TABLE_NAME)).thenReturn(new MergeRollupTaskMetadata(OFFLINE_TABLE_NAME, waterMarkMap).toZNRecord());

  // A pre-existing daily merge task covering segment1.
  Map<String, TaskState> taskStatesMap = new HashMap<>();
  String taskName = "Task_MergeRollupTask_" + System.currentTimeMillis();
  Map<String, String> taskConfigs = new HashMap<>();
  taskConfigs.put(MinionConstants.TABLE_NAME_KEY, OFFLINE_TABLE_NAME);
  taskConfigs.put(MinionConstants.MergeRollupTask.MERGE_LEVEL_KEY, DAILY);
  taskConfigs.put(MinionConstants.SEGMENT_NAME_KEY, segmentName1);
  when(mockClusterInfoProvide.getTaskStates(MinionConstants.MergeRollupTask.TASK_TYPE)).thenReturn(taskStatesMap);
  when(mockClusterInfoProvide.getTaskConfigs(taskName)).thenReturn(
      Lists.newArrayList(new PinotTaskConfig(MinionConstants.MergeRollupTask.TASK_TYPE, taskConfigs)));

  // If same task and table, IN_PROGRESS, then don't generate again
  when(mockClusterInfoProvide.getSegmentsZKMetadata(OFFLINE_TABLE_NAME)).thenReturn(
      Lists.newArrayList(metadata1, metadata2));
  when(mockClusterInfoProvide.getIdealState(OFFLINE_TABLE_NAME)).thenReturn(
      getIdealState(OFFLINE_TABLE_NAME, Lists.newArrayList(segmentName1, segmentName2)));
  taskStatesMap.put(taskName, TaskState.IN_PROGRESS);
  MergeRollupTaskGenerator generator = new MergeRollupTaskGenerator();
  generator.init(mockClusterInfoProvide);
  List<PinotTaskConfig> pinotTaskConfigs = generator.generateTasks(Lists.newArrayList(offlineTableConfig));
  assertTrue(pinotTaskConfigs.isEmpty());

  // If same task and table, IN_PROGRESS, but older than 1 day, generate
  when(mockClusterInfoProvide.getSegmentsZKMetadata(OFFLINE_TABLE_NAME)).thenReturn(
      Lists.newArrayList(metadata1, metadata2));
  when(mockClusterInfoProvide.getIdealState(OFFLINE_TABLE_NAME)).thenReturn(
      getIdealState(OFFLINE_TABLE_NAME, Lists.newArrayList(segmentName1, segmentName2)));
  // Stale task name encodes a timestamp 3 days in the past.
  String oldTaskName =
      "Task_MergeRollupTask_" + (System.currentTimeMillis() - TimeUnit.DAYS.toMillis(3));
  taskStatesMap.remove(taskName);
  taskStatesMap.put(oldTaskName, TaskState.IN_PROGRESS);
  pinotTaskConfigs = generator.generateTasks(Lists.newArrayList(offlineTableConfig));
  assertEquals(pinotTaskConfigs.size(), 1);
  checkPinotTaskConfig(pinotTaskConfigs.get(0).getConfigs(), segmentName1, DAILY, "concat", "1d", null, "1000000");

  // If same task and table, but COMPLETED, generate
  mergedMetadata1.setCustomMap(
      ImmutableMap.of(MinionConstants.MergeRollupTask.SEGMENT_ZK_METADATA_MERGE_LEVEL_KEY, DAILY));
  when(mockClusterInfoProvide.getSegmentsZKMetadata(OFFLINE_TABLE_NAME)).thenReturn(
      Lists.newArrayList(metadata1, metadata2, mergedMetadata1));
  when(mockClusterInfoProvide.getIdealState(OFFLINE_TABLE_NAME)).thenReturn(
      getIdealState(OFFLINE_TABLE_NAME, Lists.newArrayList(segmentName1, segmentName2, mergedSegmentName1)));
  // Lineage marks segment1 as replaced by the merged segment, so segment2 is the next candidate.
  SegmentLineage segmentLineage = new SegmentLineage(OFFLINE_TABLE_NAME);
  segmentLineage.addLineageEntry(SegmentLineageUtils.generateLineageEntryId(),
      new LineageEntry(Collections.singletonList(segmentName1), Collections.singletonList(mergedSegmentName1),
          LineageEntryState.COMPLETED, 11111L));
  when(mockClusterInfoProvide.getSegmentLineage(OFFLINE_TABLE_NAME)).thenReturn(segmentLineage);
  taskStatesMap.put(taskName, TaskState.COMPLETED);
  pinotTaskConfigs = generator.generateTasks(Lists.newArrayList(offlineTableConfig));
  assertEquals(pinotTaskConfigs.size(), 1);
  checkPinotTaskConfig(pinotTaskConfigs.get(0).getConfigs(), segmentName2, DAILY, "concat", "1d", null, "1000000");
}
/**
 * Parses an ES event/message URN into its origin context.
 *
 * <p>Only URNs starting with the {@code ES_EVENT} or {@code ES_MESSAGE} prefix and consisting
 * of exactly six colon-separated segments are accepted; the fifth and sixth segments become
 * the context fields.
 *
 * @param url the URN to parse
 * @return the parsed context, or {@link Optional#empty()} if the URN does not match
 */
public static Optional<ESEventOriginContext> parseESContext(String url) {
    if (!url.startsWith(ES_EVENT) && !url.startsWith(ES_MESSAGE)) {
        return Optional.empty();
    }
    final String[] parts = url.split(":");
    if (parts.length != 6) {
        return Optional.empty();
    }
    return Optional.of(ESEventOriginContext.create(parts[4], parts[5]));
}
@Test
public void parseWrongESContext() {
    // "urn:moo" does not carry the expected ES event/message prefix (and has too few
    // colon-separated segments), so parsing must yield an empty Optional.
    assertThat(EventOriginContext.parseESContext("urn:moo")).isEmpty();
}
public static <T> T[] prependOptions(T[] oldOpts, T... newOpts) { // copy the new options to the front of the array T[] result = Arrays.copyOf(newOpts, newOpts.length+oldOpts.length); // now copy the old options System.arraycopy(oldOpts, 0, result, newOpts.length, oldOpts.length); return result; }
@Test
public void testAppend() throws Exception {
    // New options must come first, followed by the existing ones.
    final String[] firstExpected = {"Dr.", "Who", "hi", "there"};
    assertArrayEquals("first append", firstExpected,
        Options.prependOptions(new String[]{"hi", "there"}, "Dr.", "Who"));

    final String[] secondExpected = {"aa", "bb", "cc", "dd", "ee", "ff"};
    assertArrayEquals("second append", secondExpected,
        Options.prependOptions(new String[]{"dd", "ee", "ff"}, "aa", "bb", "cc"));
}
/**
 * Computes the new target assignment for the group and the records needed to persist it.
 *
 * <p>Builds per-member subscription/assignment specs (applying member updates and deletions,
 * and carrying over a departed static member's assignment to its replacement), invokes the
 * assignor, then emits one target-assignment record per member whose assignment changed,
 * followed by a single epoch record.
 *
 * @return the records to write plus the new per-member target assignment
 * @throws PartitionAssignorException if the assignor fails
 */
public TargetAssignmentResult build() throws PartitionAssignorException {
    Map<String, MemberSubscriptionAndAssignmentImpl> memberSpecs = new HashMap<>();

    // Prepare the member spec for all members.
    members.forEach((memberId, member) -> memberSpecs.put(memberId, createMemberSubscriptionAndAssignment(
        member,
        targetAssignment.getOrDefault(memberId, Assignment.EMPTY),
        topicsImage
    )));

    // Update the member spec if updated or deleted members.
    updatedMembers.forEach((memberId, updatedMemberOrNull) -> {
        if (updatedMemberOrNull == null) {
            memberSpecs.remove(memberId);
        } else {
            Assignment assignment = targetAssignment.getOrDefault(memberId, Assignment.EMPTY);

            // A new static member joins and needs to replace an existing departed one.
            if (updatedMemberOrNull.instanceId() != null) {
                String previousMemberId = staticMembers.get(updatedMemberOrNull.instanceId());
                if (previousMemberId != null && !previousMemberId.equals(memberId)) {
                    // Seed the new member with the departed member's assignment for stickiness.
                    assignment = targetAssignment.getOrDefault(previousMemberId, Assignment.EMPTY);
                }
            }

            memberSpecs.put(memberId, createMemberSubscriptionAndAssignment(
                updatedMemberOrNull,
                assignment,
                topicsImage
            ));
        }
    });

    // Prepare the topic metadata.
    Map<Uuid, TopicMetadata> topicMetadataMap = new HashMap<>();
    subscriptionMetadata.forEach((topicName, topicMetadata) ->
        topicMetadataMap.put(
            topicMetadata.id(),
            topicMetadata
        )
    );

    // Compute the assignment.
    GroupAssignment newGroupAssignment = assignor.assign(
        new GroupSpecImpl(
            Collections.unmodifiableMap(memberSpecs),
            subscriptionType,
            invertedTargetAssignment
        ),
        new SubscribedTopicDescriberImpl(topicMetadataMap)
    );

    // Compute delta from previous to new target assignment and create the
    // relevant records.
    List<CoordinatorRecord> records = new ArrayList<>();

    for (String memberId : memberSpecs.keySet()) {
        Assignment oldMemberAssignment = targetAssignment.get(memberId);
        Assignment newMemberAssignment = newMemberAssignment(newGroupAssignment, memberId);

        if (!newMemberAssignment.equals(oldMemberAssignment)) {
            // If the member had no assignment or had a different assignment, we
            // create a record for the new assignment.
            records.add(targetAssignmentRecordBuilder.build(
                groupId,
                memberId,
                newMemberAssignment.partitions()
            ));
        }
    }

    // Bump the target assignment epoch.
    records.add(targetAssignmentEpochRecordBuilder.build(groupId, groupEpoch));

    return new TargetAssignmentResult(records, newGroupAssignment.members());
}
/**
 * Verifies that only members whose target assignment actually changed get an
 * assignment record: member-1's assignment is unchanged, so exactly two member
 * records plus the epoch record are produced.
 */
@Test
public void testPartialAssignmentUpdate() {
    TargetAssignmentBuilderTestContext context = new TargetAssignmentBuilderTestContext(
        "my-group",
        20
    );

    Uuid fooTopicId = context.addTopicMetadata("foo", 6, mkMapOfPartitionRacks(6));
    Uuid barTopicId = context.addTopicMetadata("bar", 6, mkMapOfPartitionRacks(6));

    // Current (old) target assignment for the three members.
    context.addGroupMember("member-1", Arrays.asList("foo", "bar", "zar"), mkAssignment(
        mkTopicAssignment(fooTopicId, 1, 2),
        mkTopicAssignment(barTopicId, 1, 2)
    ));

    context.addGroupMember("member-2", Arrays.asList("foo", "bar", "zar"), mkAssignment(
        mkTopicAssignment(fooTopicId, 3, 4),
        mkTopicAssignment(barTopicId, 3, 4)
    ));

    context.addGroupMember("member-3", Arrays.asList("bar", "zar"), mkAssignment(
        mkTopicAssignment(fooTopicId, 5, 6),
        mkTopicAssignment(barTopicId, 5, 6)
    ));

    // New assignment the assignor will return: member-1 unchanged, member-2 and
    // member-3 swapped partitions 5/6.
    context.prepareMemberAssignment("member-1", mkAssignment(
        mkTopicAssignment(fooTopicId, 1, 2),
        mkTopicAssignment(barTopicId, 1, 2)
    ));

    context.prepareMemberAssignment("member-2", mkAssignment(
        mkTopicAssignment(fooTopicId, 3, 4, 5),
        mkTopicAssignment(barTopicId, 3, 4, 5)
    ));

    context.prepareMemberAssignment("member-3", mkAssignment(
        mkTopicAssignment(fooTopicId, 6),
        mkTopicAssignment(barTopicId, 6)
    ));

    TargetAssignmentBuilder.TargetAssignmentResult result = context.build();

    // Two member records + one epoch record.
    assertEquals(3, result.records().size());

    // Member 1 has no record because its assignment did not change.
    assertUnorderedListEquals(Arrays.asList(
        newConsumerGroupTargetAssignmentRecord("my-group", "member-2", mkAssignment(
            mkTopicAssignment(fooTopicId, 3, 4, 5),
            mkTopicAssignment(barTopicId, 3, 4, 5)
        )),
        newConsumerGroupTargetAssignmentRecord("my-group", "member-3", mkAssignment(
            mkTopicAssignment(fooTopicId, 6),
            mkTopicAssignment(barTopicId, 6)
        ))
    ), result.records().subList(0, 2));

    // The epoch record is always last and carries the group epoch.
    assertEquals(newConsumerGroupTargetAssignmentEpochRecord(
        "my-group",
        20
    ), result.records().get(2));

    // The returned target assignment still covers ALL members, changed or not.
    Map<String, MemberAssignment> expectedAssignment = new HashMap<>();
    expectedAssignment.put("member-1", new MemberAssignmentImpl(mkAssignment(
        mkTopicAssignment(fooTopicId, 1, 2),
        mkTopicAssignment(barTopicId, 1, 2)
    )));
    expectedAssignment.put("member-2", new MemberAssignmentImpl(mkAssignment(
        mkTopicAssignment(fooTopicId, 3, 4, 5),
        mkTopicAssignment(barTopicId, 3, 4, 5)
    )));
    expectedAssignment.put("member-3", new MemberAssignmentImpl(mkAssignment(
        mkTopicAssignment(fooTopicId, 6),
        mkTopicAssignment(barTopicId, 6)
    )));

    assertEquals(expectedAssignment, result.targetAssignment());
}
public Set<Long> register(final StorageTierAssoc globalStorageTierAssoc, final List<String> storageTierAliases, final Map<String, Long> totalBytesOnTiers, final Map<String, Long> usedBytesOnTiers, final Set<Long> blocks) { mUsage.updateUsage(globalStorageTierAssoc, storageTierAliases, totalBytesOnTiers, usedBytesOnTiers); Set<Long> removedBlocks; if (mIsRegistered) { // This is a re-register of an existing worker. Assume the new block ownership data is more // up-to-date and update the existing block information. LOG.info("re-registering an existing workerId: {}", mMeta.mId); // Compute the difference between the existing block data, and the new data. removedBlocks = Sets.difference(mBlocks, blocks); } else { removedBlocks = Collections.emptySet(); } // Set the new block information. mBlocks = blocks; mIsRegistered = true; return removedBlocks; }
@Test
public void register() {
  // NOTE(review): the register(...) call itself appears to happen in the test fixture
  // (presumably @Before) — confirm against the setup; this test only asserts the
  // resulting worker state.
  assertEquals(NEW_BLOCKS, mInfo.getBlocks());
  assertEquals(TOTAL_BYTES_ON_TIERS, mInfo.getTotalBytesOnTiers());
  // Capacity is the sum across tiers; the fixture totals to 6 KB.
  assertEquals(Constants.KB * 6L, mInfo.getCapacityBytes());
  assertEquals(USED_BYTES_ON_TIERS, mInfo.getUsedBytesOnTiers());
  // Used bytes likewise sum to 2 KB across tiers.
  assertEquals(Constants.KB * 2L, mInfo.getUsedBytes());
}
/**
 * Runs the given migration set under a per-set distributed lock.
 *
 * <p>Acquires an inter-process mutex at {@code lockPath/<set id>} (waiting up to the configured
 * maximum), then executes the migrations while holding the lock.
 *
 * @param set the migration set to apply
 * @return a stage that completes when the migrations have run (or fails on lock/migration error)
 */
public CompletionStage<Void> migrate(MigrationSet set) {
    final String setLockPath = ZKPaths.makePath(lockPath, set.id());
    final InterProcessLock lock = new InterProcessSemaphoreMutex(client.unwrap(), setLockPath);
    return lockAsync(lock, lockMax.toMillis(), TimeUnit.MILLISECONDS, executor)
        .thenCompose(ignored -> runMigrationInLock(lock, set));
}
/**
 * Verifies that re-running a migration set with the same id only applies the
 * newly appended migrations: each stage adds one migration and only its
 * operations take effect on top of the previous state.
 */
@Test
public void testStaged() {
    // Stage 1: initial schema/value via m1.
    Migration m1 = () -> Arrays.asList(v1opA, v1opB);
    MigrationSet migrationSet = MigrationSet.build("1", Collections.singletonList(m1));
    complete(manager.migrate(migrationSet));
    ModeledFramework<ModelV1> v1Client = ModeledFramework.wrap(client, v1Spec);
    complete(v1Client.read(), (m, e) -> assertEquals(m.getName(), "Test"));

    // Stage 2: same set id with m2 appended — only m2 should run.
    Migration m2 = () -> Collections.singletonList(v2op);
    migrationSet = MigrationSet.build("1", Arrays.asList(m1, m2));
    complete(manager.migrate(migrationSet));
    ModeledFramework<ModelV2> v2Client = ModeledFramework.wrap(client, v2Spec);
    complete(v2Client.read(), (m, e) -> {
        assertEquals(m.getName(), "Test 2");
        assertEquals(m.getAge(), 10);
    });

    // Stage 3: m3 appended — only m3 should run, yielding the V3 model.
    Migration m3 = () -> Collections.singletonList(v3op);
    migrationSet = MigrationSet.build("1", Arrays.asList(m1, m2, m3));
    complete(manager.migrate(migrationSet));
    ModeledFramework<ModelV3> v3Client = ModeledFramework.wrap(client, v3Spec);
    complete(v3Client.read(), (m, e) -> {
        assertEquals(m.getAge(), 30);
        assertEquals(m.getFirstName(), "One");
        assertEquals(m.getLastName(), "Two");
    });
}
@Override public DataflowPipelineJob run(Pipeline pipeline) { // Multi-language pipelines and pipelines that include upgrades should automatically be upgraded // to Runner v2. if (DataflowRunner.isMultiLanguagePipeline(pipeline) || includesTransformUpgrades(pipeline)) { List<String> experiments = firstNonNull(options.getExperiments(), Collections.emptyList()); if (!experiments.contains("use_runner_v2")) { LOG.info( "Automatically enabling Dataflow Runner v2 since the pipeline used cross-language" + " transforms or pipeline needed a transform upgrade."); options.setExperiments( ImmutableList.<String>builder().addAll(experiments).add("use_runner_v2").build()); } } if (useUnifiedWorker(options)) { if (hasExperiment(options, "disable_runner_v2") || hasExperiment(options, "disable_runner_v2_until_2023") || hasExperiment(options, "disable_prime_runner_v2")) { throw new IllegalArgumentException( "Runner V2 both disabled and enabled: at least one of ['beam_fn_api', 'use_unified_worker', 'use_runner_v2', 'use_portable_job_submission'] is set and also one of ['disable_runner_v2', 'disable_runner_v2_until_2023', 'disable_prime_runner_v2'] is set."); } List<String> experiments = new ArrayList<>(options.getExperiments()); // non-null if useUnifiedWorker is true if (!experiments.contains("use_runner_v2")) { experiments.add("use_runner_v2"); } if (!experiments.contains("use_unified_worker")) { experiments.add("use_unified_worker"); } if (!experiments.contains("beam_fn_api")) { experiments.add("beam_fn_api"); } if (!experiments.contains("use_portable_job_submission")) { experiments.add("use_portable_job_submission"); } options.setExperiments(ImmutableList.copyOf(experiments)); } logWarningIfPCollectionViewHasNonDeterministicKeyCoder(pipeline); logWarningIfBigqueryDLQUnused(pipeline); if (shouldActAsStreaming(pipeline)) { options.setStreaming(true); if (useUnifiedWorker(options)) { options.setEnableStreamingEngine(true); List<String> experiments = new 
ArrayList<>(options.getExperiments()); // non-null if useUnifiedWorker is true if (!experiments.contains("enable_streaming_engine")) { experiments.add("enable_streaming_engine"); } if (!experiments.contains("enable_windmill_service")) { experiments.add("enable_windmill_service"); } } } if (!ExperimentalOptions.hasExperiment(options, "disable_projection_pushdown")) { ProjectionPushdownOptimizer.optimize(pipeline); } LOG.info( "Executing pipeline on the Dataflow Service, which will have billing implications " + "related to Google Compute Engine usage and other Google Cloud Services."); DataflowPipelineOptions dataflowOptions = options.as(DataflowPipelineOptions.class); String workerHarnessContainerImageURL = DataflowRunner.getContainerImageForJob(dataflowOptions); // This incorrectly puns the worker harness container image (which implements v1beta3 API) // with the SDK harness image (which implements Fn API). // // The same Environment is used in different and contradictory ways, depending on whether // it is a v1 or v2 job submission. RunnerApi.Environment defaultEnvironmentForDataflow = Environments.createDockerEnvironment(workerHarnessContainerImageURL); // The SdkComponents for portable an non-portable job submission must be kept distinct. Both // need the default environment. SdkComponents portableComponents = SdkComponents.create(); portableComponents.registerEnvironment( defaultEnvironmentForDataflow .toBuilder() .addAllDependencies(getDefaultArtifacts()) .addAllCapabilities(Environments.getJavaCapabilities()) .build()); RunnerApi.Pipeline portablePipelineProto = PipelineTranslation.toProto(pipeline, portableComponents, false); // Note that `stageArtifacts` has to be called before `resolveArtifact` because // `resolveArtifact` updates local paths to staged paths in pipeline proto. 
portablePipelineProto = resolveAnyOfEnvironments(portablePipelineProto); List<DataflowPackage> packages = stageArtifacts(portablePipelineProto); portablePipelineProto = resolveArtifacts(portablePipelineProto); portablePipelineProto = applySdkEnvironmentOverrides(portablePipelineProto, options); if (LOG.isDebugEnabled()) { LOG.debug( "Portable pipeline proto:\n{}", TextFormat.printer().printToString(portablePipelineProto)); } // Stage the portable pipeline proto, retrieving the staged pipeline path, then update // the options on the new job // TODO: add an explicit `pipeline` parameter to the submission instead of pipeline options LOG.info("Staging portable pipeline proto to {}", options.getStagingLocation()); byte[] serializedProtoPipeline = portablePipelineProto.toByteArray(); DataflowPackage stagedPipeline = options.getStager().stageToFile(serializedProtoPipeline, PIPELINE_FILE_NAME); dataflowOptions.setPipelineUrl(stagedPipeline.getLocation()); if (useUnifiedWorker(options)) { LOG.info("Skipping v1 transform replacements since job will run on v2."); } else { // Now rewrite things to be as needed for v1 (mutates the pipeline) // This way the job submitted is valid for v1 and v2, simultaneously replaceV1Transforms(pipeline); } // Capture the SdkComponents for look up during step translations SdkComponents dataflowV1Components = SdkComponents.create(); dataflowV1Components.registerEnvironment( defaultEnvironmentForDataflow .toBuilder() .addAllDependencies(getDefaultArtifacts()) .addAllCapabilities(Environments.getJavaCapabilities()) .build()); // No need to perform transform upgrading for the Runner v1 proto. RunnerApi.Pipeline dataflowV1PipelineProto = PipelineTranslation.toProto(pipeline, dataflowV1Components, true, false); if (LOG.isDebugEnabled()) { LOG.debug( "Dataflow v1 pipeline proto:\n{}", TextFormat.printer().printToString(dataflowV1PipelineProto)); } // Set a unique client_request_id in the CreateJob request. 
// This is used to ensure idempotence of job creation across retried // attempts to create a job. Specifically, if the service returns a job with // a different client_request_id, it means the returned one is a different // job previously created with the same job name, and that the job creation // has been effectively rejected. The SDK should return // Error::Already_Exists to user in that case. int randomNum = new Random().nextInt(9000) + 1000; String requestId = DateTimeFormat.forPattern("YYYYMMddHHmmssmmm") .withZone(DateTimeZone.UTC) .print(DateTimeUtils.currentTimeMillis()) + "_" + randomNum; JobSpecification jobSpecification = translator.translate( pipeline, dataflowV1PipelineProto, dataflowV1Components, this, packages); if (!isNullOrEmpty(dataflowOptions.getDataflowWorkerJar()) && !useUnifiedWorker(options)) { List<String> experiments = firstNonNull(dataflowOptions.getExperiments(), Collections.emptyList()); if (!experiments.contains("use_staged_dataflow_worker_jar")) { dataflowOptions.setExperiments( ImmutableList.<String>builder() .addAll(experiments) .add("use_staged_dataflow_worker_jar") .build()); } } Job newJob = jobSpecification.getJob(); try { newJob .getEnvironment() .setSdkPipelineOptions( MAPPER.readValue(MAPPER_WITH_MODULES.writeValueAsBytes(options), Map.class)); } catch (IOException e) { throw new IllegalArgumentException( "PipelineOptions specified failed to serialize to JSON.", e); } newJob.setClientRequestId(requestId); DataflowRunnerInfo dataflowRunnerInfo = DataflowRunnerInfo.getDataflowRunnerInfo(); String version = dataflowRunnerInfo.getVersion(); checkState( !"${pom.version}".equals(version), "Unable to submit a job to the Dataflow service with unset version ${pom.version}"); LOG.info("Dataflow SDK version: {}", version); newJob.getEnvironment().setUserAgent((Map) dataflowRunnerInfo.getProperties()); // The Dataflow Service may write to the temporary directory directly, so // must be verified. 
if (!isNullOrEmpty(options.getGcpTempLocation())) { newJob .getEnvironment() .setTempStoragePrefix( dataflowOptions.getPathValidator().verifyPath(options.getGcpTempLocation())); } newJob.getEnvironment().setDataset(options.getTempDatasetId()); if (options.getWorkerRegion() != null) { newJob.getEnvironment().setWorkerRegion(options.getWorkerRegion()); } if (options.getWorkerZone() != null) { newJob.getEnvironment().setWorkerZone(options.getWorkerZone()); } if (options.getFlexRSGoal() == DataflowPipelineOptions.FlexResourceSchedulingGoal.COST_OPTIMIZED) { newJob.getEnvironment().setFlexResourceSchedulingGoal("FLEXRS_COST_OPTIMIZED"); } else if (options.getFlexRSGoal() == DataflowPipelineOptions.FlexResourceSchedulingGoal.SPEED_OPTIMIZED) { newJob.getEnvironment().setFlexResourceSchedulingGoal("FLEXRS_SPEED_OPTIMIZED"); } // Represent the minCpuPlatform pipeline option as an experiment, if not already present. if (!isNullOrEmpty(dataflowOptions.getMinCpuPlatform())) { List<String> experiments = firstNonNull(dataflowOptions.getExperiments(), Collections.emptyList()); List<String> minCpuFlags = experiments.stream() .filter(p -> p.startsWith("min_cpu_platform")) .collect(Collectors.toList()); if (minCpuFlags.isEmpty()) { dataflowOptions.setExperiments( ImmutableList.<String>builder() .addAll(experiments) .add("min_cpu_platform=" + dataflowOptions.getMinCpuPlatform()) .build()); } else { LOG.warn( "Flag min_cpu_platform is defined in both top level PipelineOption, " + "as well as under experiments. Proceed using {}.", minCpuFlags.get(0)); } } newJob .getEnvironment() .setExperiments( ImmutableList.copyOf( firstNonNull(dataflowOptions.getExperiments(), Collections.emptyList()))); // Set the Docker container image that executes Dataflow worker harness, residing in Google // Container Registry. Translator is guaranteed to create a worker pool prior to this point. // For runner_v1, only worker_harness_container is set. 
// For runner_v2, both worker_harness_container and sdk_harness_container are set to the same // value. String containerImage = getContainerImageForJob(options); for (WorkerPool workerPool : newJob.getEnvironment().getWorkerPools()) { workerPool.setWorkerHarnessContainerImage(containerImage); } configureSdkHarnessContainerImages(options, portablePipelineProto, newJob); newJob.getEnvironment().setVersion(getEnvironmentVersion(options)); if (hooks != null) { hooks.modifyEnvironmentBeforeSubmission(newJob.getEnvironment()); } // enable upload_graph when the graph is too large byte[] jobGraphBytes = DataflowPipelineTranslator.jobToString(newJob).getBytes(UTF_8); int jobGraphByteSize = jobGraphBytes.length; if (jobGraphByteSize >= CREATE_JOB_REQUEST_LIMIT_BYTES && !hasExperiment(options, "upload_graph") && !useUnifiedWorker(options)) { List<String> experiments = firstNonNull(options.getExperiments(), Collections.emptyList()); options.setExperiments( ImmutableList.<String>builder().addAll(experiments).add("upload_graph").build()); LOG.info( "The job graph size ({} in bytes) is larger than {}. Automatically add " + "the upload_graph option to experiments.", jobGraphByteSize, CREATE_JOB_REQUEST_LIMIT_BYTES); } if (hasExperiment(options, "upload_graph") && useUnifiedWorker(options)) { ArrayList<String> experiments = new ArrayList<>(options.getExperiments()); while (experiments.remove("upload_graph")) {} options.setExperiments(experiments); LOG.warn( "The upload_graph experiment was specified, but it does not apply " + "to runner v2 jobs. Option has been automatically removed."); } // Upload the job to GCS and remove the graph object from the API call. The graph // will be downloaded from GCS by the service. 
if (hasExperiment(options, "upload_graph")) { DataflowPackage stagedGraph = options.getStager().stageToFile(jobGraphBytes, DATAFLOW_GRAPH_FILE_NAME); newJob.getSteps().clear(); newJob.setStepsLocation(stagedGraph.getLocation()); } if (!isNullOrEmpty(options.getDataflowJobFile()) || !isNullOrEmpty(options.getTemplateLocation())) { boolean isTemplate = !isNullOrEmpty(options.getTemplateLocation()); if (isTemplate) { checkArgument( isNullOrEmpty(options.getDataflowJobFile()), "--dataflowJobFile and --templateLocation are mutually exclusive."); } String fileLocation = firstNonNull(options.getTemplateLocation(), options.getDataflowJobFile()); checkArgument( fileLocation.startsWith("/") || fileLocation.startsWith("gs://"), "Location must be local or on Cloud Storage, got %s.", fileLocation); ResourceId fileResource = FileSystems.matchNewResource(fileLocation, false /* isDirectory */); String workSpecJson = DataflowPipelineTranslator.jobToString(newJob); try (PrintWriter printWriter = new PrintWriter( new BufferedWriter( new OutputStreamWriter( Channels.newOutputStream(FileSystems.create(fileResource, MimeTypes.TEXT)), UTF_8)))) { printWriter.print(workSpecJson); LOG.info("Printed job specification to {}", fileLocation); } catch (IOException ex) { String error = String.format("Cannot create output file at %s", fileLocation); if (isTemplate) { throw new RuntimeException(error, ex); } else { LOG.warn(error, ex); } } if (isTemplate) { LOG.info("Template successfully created."); return new DataflowTemplateJob(); } } String jobIdToUpdate = null; if (options.isUpdate()) { jobIdToUpdate = getJobIdFromName(options.getJobName()); newJob.setTransformNameMapping(options.getTransformNameMapping()); newJob.setReplaceJobId(jobIdToUpdate); } if (options.getCreateFromSnapshot() != null && !options.getCreateFromSnapshot().isEmpty()) { newJob.setTransformNameMapping(options.getTransformNameMapping()); newJob.setCreatedFromSnapshotId(options.getCreateFromSnapshot()); } Job jobResult; try { 
jobResult = dataflowClient.createJob(newJob); } catch (GoogleJsonResponseException e) { String errorMessages = "Unexpected errors"; if (e.getDetails() != null) { if (jobGraphByteSize >= CREATE_JOB_REQUEST_LIMIT_BYTES) { errorMessages = "The size of the serialized JSON representation of the pipeline " + "exceeds the allowable limit. " + "For more information, please see the documentation on job submission:\n" + "https://cloud.google.com/dataflow/docs/guides/deploying-a-pipeline#jobs"; } else { errorMessages = e.getDetails().getMessage(); } } throw new RuntimeException("Failed to create a workflow job: " + errorMessages, e); } catch (IOException e) { throw new RuntimeException("Failed to create a workflow job", e); } // Use a raw client for post-launch monitoring, as status calls may fail // regularly and need not be retried automatically. DataflowPipelineJob dataflowPipelineJob = new DataflowPipelineJob( DataflowClient.create(options), jobResult.getId(), options, jobSpecification != null ? jobSpecification.getStepNames() : Collections.emptyMap(), portablePipelineProto); // If the service returned client request id, the SDK needs to compare it // with the original id generated in the request, if they are not the same // (i.e., the returned job is not created by this request), throw // DataflowJobAlreadyExistsException or DataflowJobAlreadyUpdatedException // depending on whether this is a reload or not. if (jobResult.getClientRequestId() != null && !jobResult.getClientRequestId().isEmpty() && !jobResult.getClientRequestId().equals(requestId)) { // If updating a job. 
if (options.isUpdate()) { throw new DataflowJobAlreadyUpdatedException( dataflowPipelineJob, String.format( "The job named %s with id: %s has already been updated into job id: %s " + "and cannot be updated again.", newJob.getName(), jobIdToUpdate, jobResult.getId())); } else { throw new DataflowJobAlreadyExistsException( dataflowPipelineJob, String.format( "There is already an active job named %s with id: %s. If you want to submit a" + " second job, try again by setting a different name using --jobName.", newJob.getName(), jobResult.getId())); } } LOG.info( "To access the Dataflow monitoring console, please navigate to {}", MonitoringUtil.getJobMonitoringPageURL( options.getProject(), options.getRegion(), jobResult.getId())); LOG.info("Submitted job: {}", jobResult.getId()); LOG.info( "To cancel the job using the 'gcloud' tool, run:\n> {}", MonitoringUtil.getGcloudCancelCommand(options, jobResult.getId())); return dataflowPipelineJob; }
@Test
public void testGcsUploadBufferSizeIsSetForStreamingWhenDefault() throws IOException {
  // Build streaming options with the Dataflow runner selected but no explicit
  // GCS upload buffer size configured.
  DataflowPipelineOptions streamingOptions = buildPipelineOptions();
  streamingOptions.setStreaming(true);
  streamingOptions.setRunner(DataflowRunner.class);
  Pipeline p = Pipeline.create(streamingOptions);

  // Instantiation of a runner prior to run() currently has a side effect of mutating the options.
  // This could be tested by DataflowRunner.fromOptions(streamingOptions) but would not ensure
  // that the pipeline itself had the expected options set.
  p.run();

  // The streaming-mode default buffer size must have been applied to the options.
  assertEquals(
      DataflowRunner.GCS_UPLOAD_BUFFER_SIZE_BYTES_DEFAULT,
      streamingOptions.getGcsUploadBufferSizeBytes().intValue());
}
@Deprecated public static String getJwt(JwtClaims claims) throws JoseException { String jwt; RSAPrivateKey privateKey = (RSAPrivateKey) getPrivateKey( jwtConfig.getKey().getFilename(),jwtConfig.getKey().getPassword(), jwtConfig.getKey().getKeyName()); // A JWT is a JWS and/or a JWE with JSON claims as the payload. // In this example it is a JWS nested inside a JWE // So we first create a JsonWebSignature object. JsonWebSignature jws = new JsonWebSignature(); // The payload of the JWS is JSON content of the JWT Claims jws.setPayload(claims.toJson()); // The JWT is signed using the sender's private key jws.setKey(privateKey); // Get provider from security config file, it should be two digit // And the provider id will set as prefix for keyid in the token header, for example: 05100 // if there is no provider id, we use "00" for the default value String provider_id = ""; if (jwtConfig.getProviderId() != null) { provider_id = jwtConfig.getProviderId(); if (provider_id.length() == 1) { provider_id = "0" + provider_id; } else if (provider_id.length() > 2) { logger.error("provider_id defined in the security.yml file is invalid; the length should be 2"); provider_id = provider_id.substring(0, 2); } } jws.setKeyIdHeaderValue(provider_id + jwtConfig.getKey().getKid()); // Set the signature algorithm on the JWT/JWS that will integrity protect the claims jws.setAlgorithmHeaderValue(AlgorithmIdentifiers.RSA_USING_SHA256); // Sign the JWS and produce the compact serialization, which will be the inner JWT/JWS // representation, which is a string consisting of three dot ('.') separated // base64url-encoded parts in the form Header.Payload.Signature jwt = jws.getCompactSerialization(); return jwt; }
@Test
public void longlivedCcLocalPortalScope() throws Exception {
    // Client-credentials claims carrying both portal read and write scopes.
    JwtClaims claims = ClaimsUtil.getTestCcClaimsScope("f7d42348-c647-4efb-a52d-4c5787421e73", "portal.r portal.w");
    // 5,256,000 minutes (~10 years): a deliberately long-lived token for local testing.
    claims.setExpirationTimeMinutesInTheFuture(5256000);
    String jwt = JwtIssuer.getJwt(claims, long_kid, KeyUtil.deserializePrivateKey(long_key, KeyUtil.RSA));
    // The token is printed so it can be copied into local test configuration.
    System.out.println("***Long lived token for portal lightapi***: " + jwt);
}
// Pure delegation to the superclass implementation. NOTE(review): this override
// adds no behavior — presumably it exists only so this executor type participates
// in the template method explicitly; confirm before removing.
@Override
protected TableRecords getUndoRows() {
    return super.getUndoRows();
}
@Test
public void getUndoRows() {
    // For this executor the undo rows must be the before image recorded in the SQL undo log.
    Assertions.assertEquals(executor.getUndoRows(), executor.getSqlUndoLog().getBeforeImage());
}
void precheckMaxResultLimitOnLocalPartitions(String mapName) { // check if feature is enabled if (!isPreCheckEnabled) { return; } // limit number of local partitions to check to keep runtime constant PartitionIdSet localPartitions = mapServiceContext.getCachedOwnedPartitions(); int partitionsToCheck = min(localPartitions.size(), maxLocalPartitionsLimitForPreCheck); if (partitionsToCheck == 0) { return; } // calculate size of local partitions int localPartitionSize = getLocalPartitionSize(mapName, localPartitions, partitionsToCheck); if (localPartitionSize == 0) { return; } // check local result size long localResultLimit = getNodeResultLimit(partitionsToCheck); if (localPartitionSize > localResultLimit * MAX_RESULT_LIMIT_FACTOR_FOR_PRECHECK) { var localMapStatsProvider = mapServiceContext.getLocalMapStatsProvider(); if (localMapStatsProvider != null && localMapStatsProvider.hasLocalMapStatsImpl(mapName)) { localMapStatsProvider.getLocalMapStatsImpl(mapName).incrementQueryResultSizeExceededCount(); } throw new QueryResultSizeExceededException(maxResultLimit, " Result size exceeded in local pre-check."); } }
@Test
public void testLocalPreCheckDisabled() {
    // With the pre-check limit set to DISABLED the call must be a no-op and must not
    // throw, regardless of the configured max result limit.
    initMocksWithConfiguration(200000, QueryResultSizeLimiter.DISABLED);
    limiter.precheckMaxResultLimitOnLocalPartitions(ANY_MAP_NAME);
}
/**
 * Persists a new in-app notification message rendered from the given template and
 * returns its generated id. New messages always start in the unread state.
 */
@Override
public Long createNotifyMessage(Long userId, Integer userType, NotifyTemplateDO template,
                                String templateContent, Map<String, Object> templateParams) {
    // Assemble the message record from the recipient, the template snapshot and the
    // rendered content.
    NotifyMessageDO message = new NotifyMessageDO();
    message.setUserId(userId);
    message.setUserType(userType);
    message.setTemplateId(template.getId());
    message.setTemplateCode(template.getCode());
    message.setTemplateType(template.getType());
    message.setTemplateNickname(template.getNickname());
    message.setTemplateContent(templateContent);
    message.setTemplateParams(templateParams);
    // A freshly created message has not been read yet.
    message.setReadStatus(false);

    notifyMessageMapper.insert(message);
    return message.getId();
}
@Test
public void testCreateNotifyMessage_success() {
    // Prepare parameters
    Long userId = randomLongId();
    Integer userType = randomEle(UserTypeEnum.values()).getValue();
    NotifyTemplateDO template = randomPojo(NotifyTemplateDO.class);
    String templateContent = randomString();
    Map<String, Object> templateParams = randomTemplateParams();
    // Mock methods (none required)

    // Invoke
    Long messageId = notifyMessageService.createNotifyMessage(userId, userType,
            template, templateContent, templateParams);
    // Assert: the persisted record mirrors every input and starts unread with no read time
    NotifyMessageDO message = notifyMessageMapper.selectById(messageId);
    assertNotNull(message);
    assertEquals(userId, message.getUserId());
    assertEquals(userType, message.getUserType());
    assertEquals(template.getId(), message.getTemplateId());
    assertEquals(template.getCode(), message.getTemplateCode());
    assertEquals(template.getType(), message.getTemplateType());
    assertEquals(template.getNickname(), message.getTemplateNickname());
    assertEquals(templateContent, message.getTemplateContent());
    assertEquals(templateParams, message.getTemplateParams());
    assertEquals(false, message.getReadStatus());
    assertNull(message.getReadTime());
}
/**
 * Returns the cached list of least-loaded nodes, refreshing the cache from the node
 * monitor when no cache exists yet or the cached snapshot is older than
 * {@code cacheRefreshInterval} milliseconds.
 *
 * <p>Synchronized so concurrent callers see a consistent cache and timestamp.
 *
 * @return the (possibly cached) list of least-loaded remote nodes
 */
@VisibleForTesting
synchronized List<RemoteNode> getLeastLoadedNodes() {
    long currTime = System.currentTimeMillis();
    // Check for a missing cache first, then for staleness.
    if (cachedNodes == null
        || currTime - lastCacheUpdateTime > cacheRefreshInterval) {
        cachedNodes = convertToRemoteNodes(
            this.nodeMonitor.selectLeastLoadedNodes(this.numNodes));
        // Only advance the timestamp for a non-empty result, so an empty answer is
        // retried on the next call instead of being cached for the full interval.
        if (!cachedNodes.isEmpty()) {
            lastCacheUpdateTime = currTime;
        }
    }
    return cachedNodes;
}
// Verifies that requesting promotion (OPPORTUNISTIC -> GUARANTEED) of a container
// that has already completed is rejected with an INVALID_CONTAINER_ID update error,
// and that queue metrics are unaffected throughout.
@Test(timeout = 600000)
public void testContainerPromoteAfterContainerComplete() throws Exception {
  // Two NMs, 4 GB each; the AM is launched on nm2.
  HashMap<NodeId, MockNM> nodes = new HashMap<>();
  MockNM nm1 = new MockNM("h1:1234", 4096, rm.getResourceTrackerService());
  nodes.put(nm1.getNodeId(), nm1);
  MockNM nm2 = new MockNM("h2:1234", 4096, rm.getResourceTrackerService());
  nodes.put(nm2.getNodeId(), nm2);
  nm1.registerNode();
  nm2.registerNode();
  nm1.nodeHeartbeat(oppContainersStatus, true);
  nm2.nodeHeartbeat(oppContainersStatus, true);

  OpportunisticContainerAllocatorAMService amservice =
      (OpportunisticContainerAllocatorAMService) rm
          .getApplicationMasterService();
  MockRMAppSubmissionData data =
      MockRMAppSubmissionData.Builder.createWithMemory(1 * GB, rm)
          .withAppName("app")
          .withUser("user")
          .withAcls(null)
          .withQueue("default")
          .withUnmanagedAM(false)
          .build();
  RMApp app1 = MockRMAppSubmitter.submit(rm, data);
  MockAM am1 = MockRM.launchAndRegisterAM(app1, rm, nm2);
  ResourceScheduler scheduler = rm.getResourceScheduler();

  // All nodes 1 to 2 will be applicable for scheduling.
  nm1.nodeHeartbeat(oppContainersStatus, true);
  nm2.nodeHeartbeat(oppContainersStatus, true);

  // Wait until the opportunistic allocator sees both nodes.
  GenericTestUtils.waitFor(()
      -> amservice.getLeastLoadedNodes().size() == 2, 10, 10 * 100);

  QueueMetrics metrics = ((CapacityScheduler) scheduler).getRootQueue()
      .getMetrics();

  // Verify Metrics
  verifyMetrics(metrics, 7168, 7, 1024, 1, 1);

  // Ask for two opportunistic containers of 1 GB each.
  AllocateResponse allocateResponse = am1.allocate(
      Arrays.asList(ResourceRequest.newInstance(Priority.newInstance(1),
          "*", Resources.createResource(1 * GB), 2, true, null,
          ExecutionTypeRequest.newInstance(
              ExecutionType.OPPORTUNISTIC, true))),
      null);
  List<Container> allocatedContainers = allocateResponse
      .getAllocatedContainers();
  Assert.assertEquals(2, allocatedContainers.size());
  Container container = allocatedContainers.get(0);
  MockNM allocNode = nodes.get(container.getNodeId());

  // Start Container in NM
  allocNode.nodeHeartbeat(Arrays.asList(
      ContainerStatus.newInstance(container.getId(),
          ExecutionType.OPPORTUNISTIC, ContainerState.RUNNING, "", 0)),
      true);
  rm.drainEvents();

  // Verify that container is actually running wrt the RM..
  RMContainer rmContainer = ((CapacityScheduler) scheduler)
      .getApplicationAttempt(
          container.getId().getApplicationAttemptId()).getRMContainer(
          container.getId());
  Assert.assertEquals(RMContainerState.RUNNING, rmContainer.getState());

  // Container Completed in the NM
  allocNode.nodeHeartbeat(Arrays.asList(
      ContainerStatus.newInstance(container.getId(),
          ExecutionType.OPPORTUNISTIC, ContainerState.COMPLETE, "", 0)),
      true);
  rm.drainEvents();

  // Verify that container has been removed..
  rmContainer = ((CapacityScheduler) scheduler)
      .getApplicationAttempt(
          container.getId().getApplicationAttemptId()).getRMContainer(
          container.getId());
  Assert.assertNull(rmContainer);

  // Verify Metrics After OPP allocation (Nothing should change)
  verifyMetrics(metrics, 7168, 7, 1024, 1, 1);

  // Send Promotion req... this should result in update error
  // Since the container doesn't exist anymore..
  allocateResponse = am1.sendContainerUpdateRequest(
      Arrays.asList(UpdateContainerRequest.newInstance(0,
          container.getId(), ContainerUpdateType.PROMOTE_EXECUTION_TYPE,
          null, ExecutionType.GUARANTEED)));

  // The completed container is reported back, the promotion is rejected with
  // INVALID_CONTAINER_ID, and no container is actually updated.
  Assert.assertEquals(1,
      allocateResponse.getCompletedContainersStatuses().size());
  Assert.assertEquals(container.getId(),
      allocateResponse.getCompletedContainersStatuses().get(0)
          .getContainerId());
  Assert.assertEquals(0, allocateResponse.getUpdatedContainers().size());
  Assert.assertEquals(1, allocateResponse.getUpdateErrors().size());
  Assert.assertEquals("INVALID_CONTAINER_ID",
      allocateResponse.getUpdateErrors().get(0).getReason());
  Assert.assertEquals(container.getId(),
      allocateResponse.getUpdateErrors().get(0)
          .getUpdateContainerRequest().getContainerId());

  // Verify Metrics After OPP allocation (Nothing should change again)
  verifyMetrics(metrics, 7168, 7, 1024, 1, 1);
}
// Maximum number of retries when pushing a configuration change to clients.
// NOTE(review): presumably bound from the "nacos.config.push.maxRetryTime"
// property elsewhere in this class — confirm against the field's initializer.
public int getMaxPushRetryTimes() {
    return maxPushRetryTimes;
}
@Test
void getMaxPushRetryTimes() {
    // The getter must reflect the configured property value, defaulting to 50.
    Integer property = EnvUtil.getProperty("nacos.config.push.maxRetryTime", Integer.class, 50);
    assertEquals(property.intValue(), commonConfig.getMaxPushRetryTimes());
}
/**
 * Resolves variable references such as {@code ${VAR}} in the given string using the
 * context's secret source resolver.
 *
 * @param context       the configuration context providing the resolver
 * @param toInterpolate the string possibly containing variable references
 * @return the string with all resolvable references substituted
 * @deprecated call {@code context.getSecretSourceResolver().resolve(...)} directly.
 */
@Deprecated
@Restricted(DoNotUse.class)
public static String resolve(ConfigurationContext context, String toInterpolate) {
    return context.getSecretSourceResolver().resolve(toInterpolate);
}
@Test
public void resolve_mixedMultipleEntries() {
    // Two variables inside one string must both be substituted while the surrounding
    // literal text is preserved.
    environment.set("FOO", "www.foo.io");
    environment.set("BAR", "8080");
    assertThat(resolve("http://${FOO}:${BAR}"), equalTo("http://www.foo.io:8080"));
}
// Read-only vector: positional insertion is unsupported. Always throws the shared
// exception instance `e` (presumably a ReadOnlyException field declared on this
// class — TODO confirm); the element is never inserted.
public void add(int index, NODE element) {
    throw e;
}
@Test
void require_that_addindex_throws_exception() {
    // Positional insertion into the read-only vector must be rejected.
    assertThrows(NodeVector.ReadOnlyException.class, () -> new TestNodeVector("foo").add(0, barNode()));
}
/**
 * Copies the source object's properties into a new instance of {@code clazz},
 * silently skipping any property that cannot be converted or copied
 * ({@code CopyOptions.ignoreError = true}).
 *
 * @param source the source object (bean or map) to copy from
 * @param clazz  the target bean type to instantiate and populate
 * @param <T>    the target bean type
 * @return a new populated instance of {@code clazz}
 */
public static <T> T toBeanIgnoreError(Object source, Class<T> clazz) {
    return toBean(source, clazz, CopyOptions.create().setIgnoreError(true));
}
@Test
public void toBeanIgnoreErrorTest() {
    final HashMap<String, Object> map = MapUtil.newHashMap();
    map.put("name", "Joe");
    // Wrong type; ignored here because conversion errors are suppressed
    map.put("age", "aaaaaa");

    final Person person = BeanUtil.toBeanIgnoreError(map, Person.class);
    assertEquals("Joe", person.getName());
    // Wrong type: the field is not copied and keeps the object's default value
    assertEquals(0, person.getAge());
}
/**
 * Converts a set of scope value strings into their {@link SystemScope} equivalents,
 * dropping any values the converter cannot resolve and preserving iteration order.
 * A {@code null} input yields a {@code null} result.
 */
@Override
public Set<SystemScope> fromStrings(Set<String> scope) {
    // Preserve the null-in/null-out contract.
    if (scope == null) {
        return null;
    }
    // Transform each string, discard unresolved (null) entries, and keep order.
    Set<SystemScope> scopes = new LinkedHashSet<>(
            Collections2.filter(Collections2.transform(scope, stringToSystemScope), Predicates.notNull()));
    return scopes;
}
@Test
public void fromStrings() {
    // check null condition
    assertThat(service.fromStrings(null), is(nullValue()));

    // every known scope string maps back to its SystemScope, with and without values
    assertThat(service.fromStrings(allScopeStrings), equalTo(allScopes));
    assertThat(service.fromStrings(allScopeStringsWithValue), equalTo(allScopesWithValue));
}
/**
 * Factory for a truncate transform of the given width.
 *
 * @param width the truncation width; must be strictly positive
 * @throws IllegalArgumentException if {@code width <= 0}
 */
static <T> Truncate<T> get(int width) {
  // Reject non-positive widths up front with the documented message.
  if (width <= 0) {
    throw new IllegalArgumentException("Invalid truncate width: " + width + " (must be > 0)");
  }
  return new Truncate<>(width);
}
@Test
public void testVerifiedIllegalWidth() {
  // Width 0 is invalid; the factory must reject it with the documented message.
  assertThatThrownBy(() -> Truncate.get(0))
      .isInstanceOf(IllegalArgumentException.class)
      .hasMessage("Invalid truncate width: 0 (must be > 0)");
}
/**
 * Rewrites the given URL against the configured site URL. Delegates to the
 * two-argument overload with a {@code false} second argument — presumably
 * "do not force the secure site URL"; confirm against that overload.
 *
 * @param givenUrl the original URL to rewrite
 * @return the URL rebased onto the configured site URL
 * @throws URISyntaxException if the given URL cannot be parsed
 */
public String siteUrlFor(String givenUrl) throws URISyntaxException {
    return siteUrlFor(givenUrl, false);
}
@Test
public void shouldGenerateSiteUrlUsingConfiguredSiteUrlForFragment() throws URISyntaxException {
    // Path, query and fragment of the original URL must survive the rebase onto
    // the configured site URL.
    ServerSiteUrlConfig url = new SiteUrl("http://someurl.com");
    assertThat(url.siteUrlFor("http://test.host/foo/bar?foo=bar#quux"), is("http://someurl.com/foo/bar?foo=bar#quux"));

    // A trailing slash on the configured site URL must not produce a double slash.
    url = new SiteUrl("http://someurl.com/");
    assertThat(url.siteUrlFor("http://test.host/foo/bar#something"), is("http://someurl.com/foo/bar#something"));
}
// Moves/renames a file on Google Drive. A rename (different name) and a move
// (different parent folder) are two separate Drive API update calls; either or
// both may run. An existing target is trashed first.
@Override
public Path move(final Path file, final Path renamed, final TransferStatus status,
                 final Delete.Callback callback, final ConnectionCallback connectionCallback) throws BackgroundException {
    try {
        if(status.isExists()) {
            if(log.isWarnEnabled()) {
                log.warn(String.format("Trash file %s to be replaced with %s", renamed, file));
            }
            // Trash the existing target before moving into its place.
            delete.delete(Collections.singletonMap(renamed, status), connectionCallback, callback);
        }
        final String id = fileid.getFileId(file);
        // NOTE(review): if neither the name nor the parent changes, result stays
        // null and is passed to toAttributes(result) below — confirm that handles null.
        File result = null;
        if(!StringUtils.equals(file.getName(), renamed.getName())) {
            // Rename title
            final File properties = new File();
            properties.setName(renamed.getName());
            properties.setMimeType(status.getMime());
            result = session.getClient().files().update(id, properties)
                    .setFields(DriveAttributesFinderFeature.DEFAULT_FIELDS)
                    .setSupportsAllDrives(new HostPreferences(session.getHost()).getBoolean("googledrive.teamdrive.enable"))
                    .execute();
        }
        if(!new SimplePathPredicate(file.getParent()).test(renamed.getParent())) {
            // Retrieve the existing parents to remove
            final StringBuilder previousParents = new StringBuilder();
            final File reference = session.getClient().files().get(id)
                    .setFields("parents")
                    .setSupportsAllDrives(new HostPreferences(session.getHost()).getBoolean("googledrive.teamdrive.enable"))
                    .execute();
            for(String parent : reference.getParents()) {
                previousParents.append(parent).append(',');
            }
            // Move the file to the new folder
            result = session.getClient().files().update(id, null)
                    .setAddParents(fileid.getFileId(renamed.getParent()))
                    .setRemoveParents(previousParents.toString())
                    .setFields(DriveAttributesFinderFeature.DEFAULT_FIELDS)
                    .setSupportsAllDrives(new HostPreferences(session.getHost()).getBoolean("googledrive.teamdrive.enable"))
                    .execute();
        }
        // Invalidate the source path's cached id and bind it to the renamed path.
        fileid.cache(file, null);
        fileid.cache(renamed, id);
        return renamed.withAttributes(new DriveAttributesFinderFeature(session, fileid).toAttributes(result));
    }
    catch(IOException e) {
        throw new DriveExceptionMappingService(fileid).map("Cannot rename {0}", e, file);
    }
}
@Test(expected = NotfoundException.class)
public void testMoveNotFound() throws Exception {
    // Moving a file that does not exist on the server must surface a NotfoundException.
    final DriveFileIdProvider fileid = new DriveFileIdProvider(session);
    final Path workdir = new DefaultHomeFinderService(session).find();
    final Path test = new Path(workdir, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
    final Path target = new Path(workdir, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
    new DriveMoveFeature(session, fileid).move(test, target, new TransferStatus(), new Delete.DisabledCallback(), new DisabledConnectionCallback());
}
/**
 * Parses the given string into a {@link Number}.
 *
 * <p>Strings with a {@code 0x}/{@code 0X} prefix are parsed as hexadecimal longs.
 * A leading {@code '+'} is stripped before parsing. Everything else goes through the
 * default-locale {@link NumberFormat}, switched to BigDecimal parsing so values
 * beyond double precision are not truncated.
 *
 * @param numberStr the numeric string to parse
 * @return the parsed number
 * @throws NumberFormatException if the string cannot be parsed (the underlying
 *         ParseException is attached as the cause)
 */
public static Number parseNumber(String numberStr) throws NumberFormatException {
    if (StrUtil.startWithIgnoreCase(numberStr, "0x")) {
        // "0x04"-style strings are hexadecimal numbers
        return Long.parseLong(numberStr.substring(2), 16);
    } else if (StrUtil.startWith(numberStr, '+')) {
        // issue#I79VS7: NumberFormat does not accept a leading '+', strip it
        numberStr = StrUtil.subSuf(numberStr, 1);
    }
    try {
        final NumberFormat format = NumberFormat.getInstance();
        if (format instanceof DecimalFormat) {
            // issue#1818@Github
            // When the numeric string exceeds double's precision the value would be
            // truncated; receive it as a BigDecimal instead
            ((DecimalFormat) format).setParseBigDecimal(true);
        }
        return format.parse(numberStr);
    } catch (ParseException e) {
        // Re-throw as NumberFormatException but keep the original cause
        final NumberFormatException nfe = new NumberFormatException(e.getMessage());
        nfe.initCause(e);
        throw nfe;
    }
}
@Test
public void parseNumberTest3(){
    // -------------------------- Parse failed -----------------------
    // Unparseable input falls back to the supplied default value.
    assertNull(NumberUtil.parseNumber("abc", (Number) null));
    assertNull(NumberUtil.parseNumber(StrUtil.EMPTY, (Number) null));
    assertNull(NumberUtil.parseNumber(StrUtil.repeat(StrUtil.SPACE, 10), (Number) null));
    assertEquals(456, NumberUtil.parseNumber("abc", 456).intValue());

    // -------------------------- Parse success -----------------------
    // NumberFormat parses the longest valid prefix, so trailing garbage is ignored.
    assertEquals(123, NumberUtil.parseNumber("123.abc", 789).intValue());
    assertEquals(123.3D, NumberUtil.parseNumber("123.3", (Number) null).doubleValue());
    assertEquals(0.123D, NumberUtil.parseNumber("0.123.3", (Number) null).doubleValue());
}
/**
 * Generalizes every element of the given array into its POJO-free representation,
 * returning a fresh array of the same length in the same order.
 */
public static Object[] generalize(Object[] objs) {
    Object[] results = new Object[objs.length];
    int index = 0;
    // Generalize element-wise; element order is preserved.
    for (Object obj : objs) {
        results[index++] = generalize(obj);
    }
    return results;
}
@Test
void testGeneralizeEnumArray() throws Exception {
    // An enum array generalizes to a String[] holding the enum constant names.
    Object days = new Enum[] {Day.FRIDAY, Day.SATURDAY};
    Object o = PojoUtils.generalize(days);
    assertTrue(o instanceof String[]);
    assertEquals(((String[]) o)[0], "FRIDAY");
    assertEquals(((String[]) o)[1], "SATURDAY");
}
/**
 * Converts an array of Spark filters into a single Iceberg expression by AND-ing
 * the individual translations together. Since alwaysTrue() is the identity for
 * AND, an empty array yields alwaysTrue().
 *
 * @throws IllegalArgumentException if any filter has no Iceberg equivalent
 */
public static Expression convert(Filter[] filters) {
  Expression result = Expressions.alwaysTrue();
  for (Filter filter : filters) {
    Expression converted = convert(filter);
    if (converted == null) {
      throw new IllegalArgumentException("Cannot convert filter to Iceberg: " + filter);
    }
    result = Expressions.and(result, converted);
  }
  return result;
}
// Verifies that back-quoted Spark attribute names (including embedded dots and
// escaped backticks) are unquoted identically across every supported predicate type.
@Test
public void testQuotedAttributes() {
  // quoted Spark name -> expected unquoted Iceberg name
  Map<String, String> attrMap = Maps.newHashMap();
  attrMap.put("id", "id");
  attrMap.put("`i.d`", "i.d");
  attrMap.put("`i``d`", "i`d");
  attrMap.put("`d`.b.`dd```", "d.b.dd`");
  attrMap.put("a.`aa```.c", "a.aa`.c");

  attrMap.forEach(
      (quoted, unquoted) -> {
        IsNull isNull = IsNull.apply(quoted);
        Expression expectedIsNull = Expressions.isNull(unquoted);
        Expression actualIsNull = SparkFilters.convert(isNull);
        Assert.assertEquals(
            "IsNull must match", expectedIsNull.toString(), actualIsNull.toString());

        IsNotNull isNotNull = IsNotNull.apply(quoted);
        Expression expectedIsNotNull = Expressions.notNull(unquoted);
        Expression actualIsNotNull = SparkFilters.convert(isNotNull);
        Assert.assertEquals(
            "IsNotNull must match", expectedIsNotNull.toString(), actualIsNotNull.toString());

        LessThan lt = LessThan.apply(quoted, 1);
        Expression expectedLt = Expressions.lessThan(unquoted, 1);
        Expression actualLt = SparkFilters.convert(lt);
        Assert.assertEquals("LessThan must match", expectedLt.toString(), actualLt.toString());

        LessThanOrEqual ltEq = LessThanOrEqual.apply(quoted, 1);
        Expression expectedLtEq = Expressions.lessThanOrEqual(unquoted, 1);
        Expression actualLtEq = SparkFilters.convert(ltEq);
        Assert.assertEquals(
            "LessThanOrEqual must match", expectedLtEq.toString(), actualLtEq.toString());

        GreaterThan gt = GreaterThan.apply(quoted, 1);
        Expression expectedGt = Expressions.greaterThan(unquoted, 1);
        Expression actualGt = SparkFilters.convert(gt);
        Assert.assertEquals("GreaterThan must match", expectedGt.toString(), actualGt.toString());

        GreaterThanOrEqual gtEq = GreaterThanOrEqual.apply(quoted, 1);
        Expression expectedGtEq = Expressions.greaterThanOrEqual(unquoted, 1);
        Expression actualGtEq = SparkFilters.convert(gtEq);
        Assert.assertEquals(
            "GreaterThanOrEqual must match", expectedGtEq.toString(), actualGtEq.toString());

        EqualTo eq = EqualTo.apply(quoted, 1);
        Expression expectedEq = Expressions.equal(unquoted, 1);
        Expression actualEq = SparkFilters.convert(eq);
        Assert.assertEquals("EqualTo must match", expectedEq.toString(), actualEq.toString());

        // Null-safe equality on a non-null literal converts the same as plain equality.
        EqualNullSafe eqNullSafe = EqualNullSafe.apply(quoted, 1);
        Expression expectedEqNullSafe = Expressions.equal(unquoted, 1);
        Expression actualEqNullSafe = SparkFilters.convert(eqNullSafe);
        Assert.assertEquals(
            "EqualNullSafe must match", expectedEqNullSafe.toString(), actualEqNullSafe.toString());

        In in = In.apply(quoted, new Integer[] {1});
        Expression expectedIn = Expressions.in(unquoted, 1);
        Expression actualIn = SparkFilters.convert(in);
        Assert.assertEquals("In must match", expectedIn.toString(), actualIn.toString());
      });
}
@Override
public int getInt(int index) {
    // Bounds-check that 4 readable bytes exist at index before the unchecked read.
    checkIndex(index, 4);
    return _getInt(index);
}
@Test
public void getIntBoundaryCheck2() {
    // Reading 4 bytes starting 3 bytes before the end must fail the bounds check.
    assertThrows(IndexOutOfBoundsException.class, () -> buffer.getInt(buffer.capacity() - 3));
}
@VisibleForTesting public static boolean updateMapInternal(BiMap<Integer, String> map, String mapName, String command, String regex, Map<Integer, Integer> staticMapping) throws IOException { boolean updated = false; BufferedReader br = null; try { Process process = Runtime.getRuntime().exec( new String[] { "bash", "-c", command }); br = new BufferedReader( new InputStreamReader(process.getInputStream(), StandardCharsets.UTF_8)); String line = null; while ((line = br.readLine()) != null) { String[] nameId = line.split(regex); if ((nameId == null) || (nameId.length != 2)) { throw new IOException("Can't parse " + mapName + " list entry:" + line); } LOG.debug("add to " + mapName + "map:" + nameId[0] + " id:" + nameId[1]); // HDFS can't differentiate duplicate names with simple authentication final Integer key = staticMapping.get(parseId(nameId[1])); final String value = nameId[0]; if (map.containsKey(key)) { final String prevValue = map.get(key); if (value.equals(prevValue)) { // silently ignore equivalent entries continue; } reportDuplicateEntry( "Got multiple names associated with the same id: ", key, value, key, prevValue); continue; } if (map.containsValue(value)) { final Integer prevKey = map.inverse().get(value); reportDuplicateEntry( "Got multiple ids associated with the same name: ", key, value, prevKey, value); continue; } map.put(key, value); updated = true; } LOG.debug("Updated " + mapName + " map size: " + map.size()); } catch (IOException e) { LOG.error("Can't update " + mapName + " map"); throw e; } finally { if (br != null) { try { br.close(); } catch (IOException e1) { LOG.error("Can't close BufferedReader of command result", e1); } } } return updated; }
@Test
public void testDuplicates() throws IOException {
  assumeNotWindows();
  // Synthetic /etc/passwd-style output containing duplicate uids (11501/11502)
  // and duplicate names (bin, daemon); first occurrence of each must win.
  String GET_ALL_USERS_CMD = "echo \"root:x:0:0:root:/root:/bin/bash\n"
      + "hdfs:x:11501:10787:Grid Distributed File System:/home/hdfs:/bin/bash\n"
      + "hdfs:x:11502:10788:Grid Distributed File System:/home/hdfs:/bin/bash\n"
      + "hdfs1:x:11501:10787:Grid Distributed File System:/home/hdfs:/bin/bash\n"
      + "hdfs2:x:11502:10787:Grid Distributed File System:/home/hdfs:/bin/bash\n"
      + "bin:x:2:2:bin:/bin:/bin/sh\n"
      + "bin:x:1:1:bin:/bin:/sbin/nologin\n"
      + "daemon:x:1:1:daemon:/usr/sbin:/bin/sh\n"
      + "daemon:x:2:2:daemon:/sbin:/sbin/nologin\""
      + " | cut -d: -f1,3";
  // Group fixture with duplicate gids; again the first name per id must win.
  String GET_ALL_GROUPS_CMD = "echo \"hdfs:*:11501:hrt_hdfs\n"
      + "mapred:x:497\n"
      + "mapred2:x:497\n"
      + "mapred:x:498\n"
      + "mapred3:x:498\""
      + " | cut -d: -f1,3";
  // Maps for id to name map
  BiMap<Integer, String> uMap = HashBiMap.create();
  BiMap<Integer, String> gMap = HashBiMap.create();

  ShellBasedIdMapping.updateMapInternal(uMap, "user", GET_ALL_USERS_CMD, ":",
      EMPTY_PASS_THROUGH_MAP);
  assertEquals(5, uMap.size());
  assertEquals("root", uMap.get(0));
  assertEquals("hdfs", uMap.get(11501));
  assertEquals("hdfs2", uMap.get(11502));
  assertEquals("bin", uMap.get(2));
  assertEquals("daemon", uMap.get(1));

  ShellBasedIdMapping.updateMapInternal(gMap, "group", GET_ALL_GROUPS_CMD, ":",
      EMPTY_PASS_THROUGH_MAP);
  assertTrue(gMap.size() == 3);
  assertEquals("hdfs", gMap.get(11501));
  assertEquals("mapred", gMap.get(497));
  assertEquals("mapred3", gMap.get(498));
}
public void isNotEmpty() {
    // A null subject cannot be a non-empty string; report the actual value.
    if (actual == null) {
        failWithActual(simpleFact("expected a non-empty string"));
        return;
    }
    // Non-null but empty: the actual value adds nothing to the message.
    if (actual.isEmpty()) {
        failWithoutActual(simpleFact("expected not to be empty"));
    }
}
@Test
public void stringIsNotEmptyFailNull() {
    // A null subject must fail with the "non-empty string" fact plus the actual value.
    expectFailureWhenTestingThat(null).isNotEmpty();
    assertFailureKeys("expected a non-empty string", "but was");
}
/**
 * Builds an OffsetDelete request for the handler's partitions, grouped by topic so
 * each topic appears once in the request body.
 */
@Override
public OffsetDeleteRequest.Builder buildBatchedRequest(int coordinatorId, Set<CoordinatorKey> groupIds) {
    validateKeys(groupIds);

    final OffsetDeleteRequestTopicCollection topics = new OffsetDeleteRequestTopicCollection();
    // Group the requested partitions by topic name before building topic entries.
    partitions.stream().collect(Collectors.groupingBy(TopicPartition::topic)).forEach((topic, topicPartitions) -> topics.add(
        new OffsetDeleteRequestTopic()
            .setName(topic)
            .setPartitions(topicPartitions.stream()
                .map(tp -> new OffsetDeleteRequestPartition().setPartitionIndex(tp.partition()))
                .collect(Collectors.toList())
            )
    ));

    return new OffsetDeleteRequest.Builder(
        new OffsetDeleteRequestData()
            .setGroupId(groupId.idValue)
            .setTopics(topics)
    );
}
@Test
public void testBuildRequest() {
    DeleteConsumerGroupOffsetsHandler handler = new DeleteConsumerGroupOffsetsHandler(groupId, tps, logContext);
    OffsetDeleteRequest request = handler.buildBatchedRequest(1, singleton(CoordinatorKey.byGroupId(groupId))).build();
    assertEquals(groupId, request.data().groupId());
    // Partitions must be grouped per topic: two topics, with 2 and 1 partitions.
    assertEquals(2, request.data().topics().size());
    assertEquals(2, request.data().topics().find("t0").partitions().size());
    assertEquals(1, request.data().topics().find("t1").partitions().size());
}
@Override
public int readUnsignedShort() throws EOFException {
    // Widen the signed 16-bit value to int and mask off the sign extension.
    final int unsigned = readShort() & 0xFFFF;
    return unsigned;
}
@Test
void testReadUnsignedShort() throws EOFException {
    int read = _dataBufferPinotInputStream.readUnsignedShort();
    // The unsigned value must match the raw short with sign bits masked off.
    assertEquals(read, _byteBuffer.getShort(0) & 0xFFFF);
    // The cursor must have advanced by exactly one short.
    assertEquals(_dataBufferPinotInputStream.getCurrentOffset(), Short.BYTES);
}
/**
 * Splits an identifier into its lower-cased terms; a name consisting solely of
 * underscores is returned unchanged as a single term.
 */
public static ImmutableList<String> splitToLowercaseTerms(String identifierName) {
    if (ONLY_UNDERSCORES.matcher(identifierName).matches()) {
        // Degenerate case of names which contain only underscore
        return ImmutableList.of(identifierName);
    }
    return TERM_SPLITTER
        .splitToStream(identifierName)
        .map(String::toLowerCase)
        .collect(toImmutableList());
}
@Test
public void splitToLowercaseTerms_noEmptyTerm_withTrailingUnderscoreDigit() {
    String identifierName = "test_1";
    ImmutableList<String> terms = NamingConventions.splitToLowercaseTerms(identifierName);
    // The underscore separator itself must not produce an empty term.
    assertThat(terms).containsExactly("test", "1");
}
/**
 * Returns a Callable that first invokes {@code callable} and then transforms its
 * result with {@code resultHandler}. Exceptions from the callable propagate unchanged.
 */
public static <T, R> Callable<R> andThen(Callable<T> callable, Function<T, R> resultHandler) {
    return () -> {
        T intermediate = callable.call();
        return resultHandler.apply(intermediate);
    };
}
@Test
public void shouldChainCallableAndRecoverWithErrorHandler() throws Exception {
    Callable<String> callable = () -> {
        throw new IOException("BAM!");
    };
    // Exercises the three-arg overload: the error handler supplies the fallback value.
    Callable<String> callableWithRecovery = CallableUtils
        .andThen(callable, (result) -> result, ex -> "Bla");

    String result = callableWithRecovery.call();

    assertThat(result).isEqualTo("Bla");
}
/** Creates a cipher wrapper preconfigured with the DES symmetric algorithm. */
public DES() {
    super(SymmetricAlgorithm.DES);
}
@Test
public void encryptDecryptTest() {
    // Mixed CJK/ASCII content to exercise encoding handling.
    String content = "我是一个测试的test字符串123";
    final DES des = SecureUtil.des();
    // Round-trip: hex ciphertext must decrypt back to the original plaintext.
    final String encryptHex = des.encryptHex(content);
    final String result = des.decryptStr(encryptHex);
    assertEquals(content, result);
}
/**
 * Handles a classic-group LeaveGroup request. Rejects requests while the coordinator
 * is inactive or when the group id is empty; otherwise schedules the leave as a write
 * operation on the group's coordinator partition and maps failures to response errors.
 */
@Override
public CompletableFuture<LeaveGroupResponseData> leaveGroup(
    RequestContext context,
    LeaveGroupRequestData request
) {
    if (!isActive.get()) {
        return CompletableFuture.completedFuture(new LeaveGroupResponseData()
            .setErrorCode(Errors.COORDINATOR_NOT_AVAILABLE.code())
        );
    }

    if (!isGroupIdNotEmpty(request.groupId())) {
        return CompletableFuture.completedFuture(new LeaveGroupResponseData()
            .setErrorCode(Errors.INVALID_GROUP_ID.code())
        );
    }

    return runtime.scheduleWriteOperation(
        "classic-group-leave",
        topicPartitionFor(request.groupId()),
        Duration.ofMillis(config.offsetCommitTimeoutMs()),
        coordinator -> coordinator.classicGroupLeave(context, request)
    ).exceptionally(exception -> handleOperationException(
        "classic-group-leave",
        request,
        exception,
        (error, __) -> {
            if (error == Errors.UNKNOWN_MEMBER_ID) {
                // Group was not found.
                // Report UNKNOWN_MEMBER_ID per member instead of a top-level error.
                List<LeaveGroupResponseData.MemberResponse> memberResponses =
                    request.members().stream()
                        .map(member -> new LeaveGroupResponseData.MemberResponse()
                            .setMemberId(member.memberId())
                            .setGroupInstanceId(member.groupInstanceId())
                            .setErrorCode(Errors.UNKNOWN_MEMBER_ID.code()))
                        .collect(Collectors.toList());
                return new LeaveGroupResponseData()
                    .setMembers(memberResponses);
            } else {
                return new LeaveGroupResponseData()
                    .setErrorCode(error.code());
            }
        }
    ));
}
@Test
public void testLeaveGroup() throws Exception {
    CoordinatorRuntime<GroupCoordinatorShard, CoordinatorRecord> runtime = mockRuntime();
    GroupCoordinatorService service = new GroupCoordinatorService(
        new LogContext(),
        createConfig(),
        runtime,
        new GroupCoordinatorMetrics(),
        createConfigManager()
    );

    LeaveGroupRequestData request = new LeaveGroupRequestData()
        .setGroupId("foo");

    service.startup(() -> 1);

    // The service must schedule the leave as a write operation on partition 0
    // with the configured offset-commit timeout.
    when(runtime.scheduleWriteOperation(
        ArgumentMatchers.eq("classic-group-leave"),
        ArgumentMatchers.eq(new TopicPartition("__consumer_offsets", 0)),
        ArgumentMatchers.eq(Duration.ofMillis(5000)),
        ArgumentMatchers.any()
    )).thenReturn(CompletableFuture.completedFuture(
        new LeaveGroupResponseData()
    ));

    CompletableFuture<LeaveGroupResponseData> future = service.leaveGroup(
        requestContext(ApiKeys.LEAVE_GROUP),
        request
    );

    assertTrue(future.isDone());
    assertEquals(new LeaveGroupResponseData(), future.get());
}
@Override
public int size() {
    // Lazily count entries produced by the iterator on first call and memoize
    // the result in cachedSize for subsequent calls.
    if (cachedSize == SIZE_UNINITIALIZED) {
        int calculatedSize = 0;
        for (Iterator<QueryableEntry> it = iterator(); it.hasNext(); it.next()) {
            calculatedSize++;
        }
        cachedSize = calculatedSize;
    }
    return cachedSize;
}
@Test // https://github.com/hazelcast/hazelcast/issues/9614 public void size_nonMatchingPredicate() { Set<QueryableEntry> entries = generateEntries(100000); AndResultSet resultSet = new AndResultSet(entries, null, asList(Predicates.alwaysFalse())); int size = resultSet.size(); int countedSize = 0; for (QueryableEntry queryableEntry : resultSet) { countedSize++; } assertEquals(0, countedSize); assertEquals(size, countedSize); }
/** Returns the configured limit value. */
public T getLimit() {
    return limit;
}
@Test
public void testGetLimit() {
    // Each limit constant must expose the value it was constructed with.
    assertEquals(QUERY_LIMIT_DURATION.getLimit(), new Duration(1, HOURS));
    assertEquals(QUERY_LIMIT_DATA_SIZE.getLimit(), new DataSize(1, MEGABYTE));
}
@Override
public final short readShort() throws EOFException {
    // Read at the current cursor, then advance it past the consumed bytes.
    short s = readShort(pos);
    pos += SHORT_SIZE_IN_BYTES;
    return s;
}
@Test
public void testReadShortForPositionByteOrder() throws Exception {
    // Explicit byte-order reads must match Bits' reference decoding of the same bytes
    // (false = little-endian, true = big-endian in Bits.readShort).
    short read1 = in.readShort(1, LITTLE_ENDIAN);
    short read2 = in.readShort(3, BIG_ENDIAN);
    short val1 = Bits.readShort(INIT_DATA, 1, false);
    short val2 = Bits.readShort(INIT_DATA, 3, true);
    assertEquals(val1, read1);
    assertEquals(val2, read2);
}
/**
 * Returns whether the given IP address is covered by this machine list.
 * A wildcard list includes everything; addresses that cannot be resolved
 * are treated as not included.
 *
 * @throws IllegalArgumentException if ipAddress is null
 */
public boolean includes(String ipAddress) {
    if (all) {
        return true;
    }
    if (ipAddress == null) {
        throw new IllegalArgumentException("ipAddress is null.");
    }
    try {
        return includes(addressFactory.getByName(ipAddress));
    } catch (UnknownHostException e) {
        // An unresolvable address cannot match any configured entry.
        return false;
    }
}
@Test
public void testStaticIPHostNameList() throws UnknownHostException {
    // create MachineList with a list of of Hostnames
    TestAddressFactory addressFactory = new TestAddressFactory();
    addressFactory.put("1.2.3.1", "host1");
    addressFactory.put("1.2.3.4", "host4");

    MachineList ml = new MachineList(
        StringUtils.getTrimmedStringCollection(HOST_LIST), addressFactory);

    // test for inclusion with an known IP
    assertTrue(ml.includes("1.2.3.4"));

    // test for exclusion with an unknown IP
    assertFalse(ml.includes("1.2.3.5"));
}
/** Returns the parsed {@link DoFnSignature} for the given DoFn class, cached per class. */
public static <FnT extends DoFn<?, ?>> DoFnSignature getSignature(Class<FnT> fn) {
    return signatureCache.computeIfAbsent(fn, DoFnSignatures::parseSignature);
}
@Test
public void testDeclOfStateUsedInSuperclass() throws Exception {
    // A @StateId declared in a subclass but referenced by the superclass's
    // process method must be rejected with a descriptive error.
    thrown.expect(IllegalArgumentException.class);
    thrown.expectMessage("process");
    thrown.expectMessage("declared in a different class");
    thrown.expectMessage(DoFnUsingState.STATE_ID);

    DoFnSignatures.getSignature(
        new DoFnUsingState() {
            @StateId(DoFnUsingState.STATE_ID)
            private final StateSpec<ValueState<Integer>> spec = StateSpecs.value(VarIntCoder.of());
        }.getClass());
}
/**
 * Creates a deferred transfer of {@code value} (in the given unit) from the supplied
 * credentials to {@code toAddress}, using a raw transaction manager. The transfer is
 * executed when the returned {@link RemoteCall} is invoked.
 */
public static RemoteCall<TransactionReceipt> sendFunds(
        Web3j web3j,
        Credentials credentials,
        String toAddress,
        BigDecimal value,
        Convert.Unit unit)
        throws InterruptedException, IOException, TransactionException {
    TransactionManager transactionManager = new RawTransactionManager(web3j, credentials);
    return new RemoteCall<>(
            () -> new Transfer(web3j, transactionManager).send(toAddress, value, unit));
}
@Test
public void testTransferInvalidValue() {
    // A fractional wei amount cannot be represented on-chain and must be rejected.
    assertThrows(
            UnsupportedOperationException.class,
            () ->
                    sendFunds(
                            SampleKeys.CREDENTIALS,
                            ADDRESS,
                            new BigDecimal(0.1),
                            Convert.Unit.WEI));
}
/**
 * Looks up the committed offsets for the named connector and delivers the result
 * (or an error) through the callback. Unknown connectors complete the callback
 * with a NotFoundException.
 */
@Override
public void connectorOffsets(String connName, Callback<ConnectorOffsets> cb) {
    ClusterConfigState configSnapshot = configBackingStore.snapshot();
    try {
        if (!configSnapshot.contains(connName)) {
            cb.onCompletion(new NotFoundException("Connector " + connName + " not found"), null);
            return;
        }
        // The worker asynchronously processes the request and completes the passed callback when done
        worker.connectorOffsets(connName, configSnapshot.connectorConfig(connName), cb);
    } catch (Throwable t) {
        // Any synchronous failure is routed to the callback instead of thrown.
        cb.onCompletion(t, null);
    }
}
@Test
public void testConnectorOffsets() throws Exception {
    // Fixture offsets the stubbed worker will deliver through the callback.
    ConnectorOffsets offsets = new ConnectorOffsets(Arrays.asList(
        new ConnectorOffset(Collections.singletonMap("partitionKey", "partitionValue"),
            Collections.singletonMap("offsetKey", "offsetValue")),
        new ConnectorOffset(Collections.singletonMap("partitionKey", "partitionValue2"),
            Collections.singletonMap("offsetKey", "offsetValue"))
    ));
    @SuppressWarnings("unchecked")
    ArgumentCaptor<Callback<ConnectorOffsets>> workerCallback = ArgumentCaptor.forClass(Callback.class);
    // Complete the captured callback immediately, simulating the worker's async reply.
    doAnswer(invocation -> {
        workerCallback.getValue().onCompletion(null, offsets);
        return null;
    }).when(worker).connectorOffsets(eq(CONN1), eq(CONN1_CONFIG), workerCallback.capture());
    AbstractHerder herder = testHerder();
    when(configStore.snapshot()).thenReturn(SNAPSHOT);

    FutureCallback<ConnectorOffsets> cb = new FutureCallback<>();
    herder.connectorOffsets(CONN1, cb);
    assertEquals(offsets, cb.get(1000, TimeUnit.MILLISECONDS));
}
/**
 * Resolves the message for the given key, expands any placeholder references it
 * contains, and formats it with the supplied parameters via {@link MessageFormat}.
 *
 * @param key the message key; blank keys resolve to null
 * @param params optional MessageFormat arguments; skipped when absent
 * @return the resolved (and possibly formatted) message, or null for a blank key
 */
public String getMessage(String key, String... params) {
    if (StringUtils.isBlank(key)) {
        return null;
    }
    // The StringBuilder round-trip added nothing; pass the resolved message directly.
    String msg = parseStringValue(getFormattedMessage(key), new HashSet<String>());
    if (params == null || params.length == 0) {
        return msg;
    }
    if (StringUtils.isBlank(msg)) {
        return msg;
    }
    return MessageFormat.format(msg, (Object[]) params);
}
@Test
void getMessage() {
    ResourceBundleUtil resourceBundleUtil = ResourceBundleUtil.getInstance();
    // A blank key resolves to null rather than throwing.
    String emptyKeyMsg = resourceBundleUtil.getMessage("", "param1");
    Assertions.assertNull(emptyKeyMsg);
    // An unknown key surfaces the underlying MissingResourceException.
    Assertions.assertThrows(MissingResourceException.class, new Executable() {
        @Override
        public void execute() throws Throwable {
            resourceBundleUtil.getMessage("NotExist");
        }
    });
    // Without params the raw pattern is returned; with params it is formatted.
    String configErrorMsgWithoutParams = resourceBundleUtil.getMessage("ERR_CONFIG");
    Assertions.assertEquals("config error, {0}", configErrorMsgWithoutParams);
    String configErrorMsgWithParams = resourceBundleUtil.getMessage("ERR_CONFIG", "vgroup_mapping_test");
    Assertions.assertEquals("config error, vgroup_mapping_test", configErrorMsgWithParams);
}
/**
 * Reads the persisted flag recording whether install tracking completed correctly.
 * Any storage failure is logged and treated as {@code false}.
 */
public static boolean isCorrectTrackInstallation() {
    try {
        return SAStoreManager.getInstance().getBool(SHARED_PREF_CORRECT_TRACK_INSTALLATION, false);
    } catch (Exception e) {
        SALog.printStackTrace(e);
    }
    return false;
}
@Test
public void isCorrectTrackInstallation() {
    // TODO(review): empty placeholder test — add assertions covering the stored
    // flag (true/false/storage failure) or remove this method.
}
/**
 * Builds description sections from the rule's HTML description; returns an empty
 * set when the rule has no HTML description.
 */
@Override
public Set<RuleDescriptionSectionDto> generateSections(RulesDefinition.Rule rule) {
    return getDescriptionInHtml(rule)
        .map(this::generateSections)
        .orElse(emptySet());
}
@Test
public void parse_return_null_fixIt_when_desc_has_no_Recommended_Secure_Coding_Practices_title() {
    when(rule.htmlDescription()).thenReturn(DESCRIPTION + ASKATRISK);

    Set<RuleDescriptionSectionDto> results = generator.generateSections(rule);

    // Without the "Recommended Secure Coding Practices" title, only the default,
    // root-cause and assess-the-problem sections are produced.
    Map<String, String> sectionKeyToContent = results.stream()
        .collect(toMap(RuleDescriptionSectionDto::getKey, RuleDescriptionSectionDto::getContent));
    assertThat(sectionKeyToContent).hasSize(3)
        .containsEntry(DEFAULT_SECTION_KEY, rule.htmlDescription())
        .containsEntry(ROOT_CAUSE_SECTION_KEY, DESCRIPTION)
        .containsEntry(ASSESS_THE_PROBLEM_SECTION_KEY, ASKATRISK);
}
/**
 * Reads a zig-zag encoded variable-length long from the stream, least-significant
 * 7-bit group first, and throws once more than 10 bytes (>63 payload bits) are seen.
 *
 * NOTE(review): an EOF read (-1) has bit 0x80 set, so a truncated stream loops
 * until the length check throws rather than reporting EOF — confirm intended.
 */
public static long readVarlong(InputStream in) throws IOException {
    long value = 0L;
    int i = 0;
    long b;
    // Continuation bit (0x80) set means more groups follow.
    while (((b = in.read()) & 0x80) != 0) {
        value |= (b & 0x7f) << i;
        i += 7;
        if (i > 63)
            throw illegalVarlongException(value);
    }
    value |= b << i;
    // Zig-zag decode back to a signed value.
    return (value >>> 1) ^ -(value & 1);
}
@Test
public void testInvalidVarlong() {
    // varlong encoding has one overflow byte
    ByteBuffer buf = ByteBuffer.wrap(new byte[] {xFF, xFF, xFF, xFF, xFF, xFF, xFF, xFF, xFF, xFF, x01});
    assertThrows(IllegalArgumentException.class, () -> ByteUtils.readVarlong(buf));
}
@Override
public boolean match(Message msg, StreamRule rule) {
    // A message without a source-input field can only match an inverted rule.
    if (msg.getField(Message.FIELD_GL2_SOURCE_INPUT) == null) {
        return rule.getInverted();
    }
    final String value = msg.getField(Message.FIELD_GL2_SOURCE_INPUT).toString();
    // XOR flips the (trimmed, case-insensitive) comparison when the rule is inverted.
    return rule.getInverted() ^ value.trim().equalsIgnoreCase(rule.getValue());
}
@Test
public void testSuccessfulMatchInverted() {
    StreamRule rule = getSampleRule();
    rule.setValue("input-id-beef");
    rule.setInverted(true);

    Message msg = getSampleMessage();
    msg.addField(Message.FIELD_GL2_SOURCE_INPUT, "input-id-beef");

    StreamRuleMatcher matcher = getMatcher(rule);
    // An exact match combined with inversion must be reported as no match.
    assertFalse(matcher.match(msg, rule));
}
/** Builds an {@link Application} from the given builder. */
@Beta
public static Application fromBuilder(Builder builder) throws Exception {
    return builder.build();
}
@Test
void server() throws Exception {
    // A server registered via the builder must be retrievable by id and started
    // as part of application construction.
    try (ApplicationFacade app = new ApplicationFacade(Application.fromBuilder(new Application.Builder().container("default", new Application.Builder.Container()
        .server("foo", MockServer.class)))
    )) {
        MockServer server = (MockServer) app.getServerById("foo");
        assertNotNull(server);
        assertTrue(server.isStarted());
    }
}
/** Covariant override so chained calls keep the {@code MapSettings} type. */
@Override
public MapSettings setProperty(String key, String value) {
    return (MapSettings) super.setProperty(key, value);
}
@Test
public void shouldKeepEmptyValuesWhenSplitting() {
    Settings settings = new MapSettings();
    settings.setProperty("foo", " one, , two");

    // The blank middle element must survive as an empty string, and values are trimmed.
    String[] array = settings.getStringArray("foo");
    assertThat(array).isEqualTo(new String[]{"one", "", "two"});
}
/**
 * Builds the Kafka Streams stream-stream join for the given physical plan node.
 * RIGHT joins are executed as a LEFT join with the operands swapped, so the
 * left/right formats and schemas are flipped up front for that case.
 */
@SuppressWarnings("deprecation")
public static <K> KStreamHolder<K> build(
    final KStreamHolder<K> left,
    final KStreamHolder<K> right,
    final StreamStreamJoin<K> join,
    final RuntimeBuildContext buildContext,
    final StreamJoinedFactory streamJoinedFactory) {
  final QueryContext queryContext = join.getProperties().getQueryContext();
  final QueryContext.Stacker stacker = QueryContext.Stacker.of(queryContext);
  final LogicalSchema leftSchema;
  final LogicalSchema rightSchema;
  final Formats rightFormats;
  final Formats leftFormats;
  // For RIGHT joins the operands are swapped below, so swap formats/schemas too.
  if (join.getJoinType().equals(RIGHT)) {
    leftFormats = join.getRightInternalFormats();
    rightFormats = join.getLeftInternalFormats();
    leftSchema = right.getSchema();
    rightSchema = left.getSchema();
  } else {
    leftFormats = join.getLeftInternalFormats();
    rightFormats = join.getRightInternalFormats();
    leftSchema = left.getSchema();
    rightSchema = right.getSchema();
  }
  final PhysicalSchema leftPhysicalSchema = PhysicalSchema.from(
      leftSchema,
      leftFormats.getKeyFeatures(),
      leftFormats.getValueFeatures()
  );
  final Serde<GenericRow> leftSerde = buildContext.buildValueSerde(
      leftFormats.getValueFormat(),
      leftPhysicalSchema,
      stacker.push(LEFT_SERDE_CTX).getQueryContext()
  );
  final PhysicalSchema rightPhysicalSchema = PhysicalSchema.from(
      rightSchema,
      rightFormats.getKeyFeatures(),
      rightFormats.getValueFeatures()
  );
  final Serde<GenericRow> rightSerde = buildContext.buildValueSerde(
      rightFormats.getValueFormat(),
      rightPhysicalSchema,
      stacker.push(RIGHT_SERDE_CTX).getQueryContext()
  );
  final Serde<K> keySerde = left.getExecutionKeyFactory().buildKeySerde(
      leftFormats.getKeyFormat(),
      leftPhysicalSchema,
      queryContext
  );
  final StreamJoined<K, GenericRow, GenericRow> joined = streamJoinedFactory.create(
      keySerde,
      leftSerde,
      rightSerde,
      StreamsUtil.buildOpName(queryContext),
      StreamsUtil.buildOpName(queryContext)
  );
  final JoinParams joinParams = JoinParamsFactory
      .create(join.getKeyColName(), leftSchema, rightSchema);
  JoinWindows joinWindows;
  // Grace, as optional, helps to identify if a user specified the GRACE PERIOD syntax in the
  // join window. If specified, then we'll call the new KStreams API ofTimeDifferenceAndGrace()
  // which enables the "spurious" results bugfix with left/outer joins (see KAFKA-10847).
  if (join.getGraceMillis().isPresent()) {
    joinWindows = JoinWindows.ofTimeDifferenceAndGrace(
        join.getBeforeMillis(),
        join.getGraceMillis().get());
  } else {
    joinWindows = JoinWindows.of(join.getBeforeMillis());
  }
  joinWindows = joinWindows.after(join.getAfterMillis());
  final KStream<K, GenericRow> result;
  switch (join.getJoinType()) {
    case LEFT:
      result = left.getStream().leftJoin(
          right.getStream(), joinParams.getJoiner(), joinWindows, joined);
      break;
    case RIGHT:
      // RIGHT join = LEFT join with operands reversed.
      result = right.getStream().leftJoin(
          left.getStream(), joinParams.getJoiner(), joinWindows, joined);
      break;
    case OUTER:
      result = left.getStream().outerJoin(
          right.getStream(), joinParams.getJoiner(), joinWindows, joined);
      break;
    case INNER:
      result = left.getStream().join(
          right.getStream(), joinParams.getJoiner(), joinWindows, joined);
      break;
    default:
      throw new IllegalStateException("invalid join type");
  }
  return left.withStream(result, joinParams.getSchema());
}
@Test
public void shouldDoLeftJoinWithSyntheticKey() {
    // Given:
    givenLeftJoin(SYNTH_KEY);

    // When:
    final KStreamHolder<Struct> result = join.build(planBuilder, planInfo);

    // Then: the left stream is left-joined against the right with no grace window.
    verify(leftKStream).leftJoin(
        same(rightKStream),
        eq(new KsqlValueJoiner(LEFT_SCHEMA.value().size(), RIGHT_SCHEMA.value().size(), 1)),
        eq(WINDOWS_NO_GRACE),
        same(joined)
    );
    verifyNoMoreInteractions(leftKStream, rightKStream, resultKStream);
    assertThat(result.getStream(), is(resultKStream));
    assertThat(result.getExecutionKeyFactory(), is(executionKeyFactory));
}
/** Returns the current read cursor position. */
@Override
public final int position() {
    return pos;
}
@Test
public void testPositionNewPos() {
    // Seeking to the last valid index must be reflected by position().
    in.position(INIT_DATA.length - 1);
    assertEquals(INIT_DATA.length - 1, in.position());
}
/** Returns every classpath file resource whose name ends with the given extension. */
public static Collection<File> getFileResourcesByExtension(String extension) {
    return Arrays.stream(getClassPathElements())
        // Match names ending in ".<extension>" within each classpath element.
        .flatMap(elem -> internalGetFileResources(elem, Pattern.compile(".*\\." + extension + "$"))
            .stream())
        .collect(Collectors.toSet());
}
@Test
public void getResourcesByExtensionNotExisting() {
    // An extension matching no resources must yield a collection without the expected file.
    final Collection<File> retrieved = getFileResourcesByExtension("arg");
    commonVerifyCollectionWithoutExpectedFile(retrieved);
}
/**
 * Parses a logical expression of fully-qualified Predicate class names combined with
 * the operators in OPERATORS (including parentheses) into a single Predicate tree,
 * using two stacks in an operator-precedence (shunting-yard style) scheme.
 *
 * @throws RuntimeException if a class does not implement Predicate, cannot be
 *         instantiated, or the expression is malformed
 */
public static Predicate parse(String expression) {
    final Stack<Predicate> predicateStack = new Stack<>();
    final Stack<Character> operatorStack = new Stack<>();
    final String trimmedExpression = TRIMMER_PATTERN.matcher(expression).replaceAll("");
    // Operators are returned as their own tokens so they can be dispatched below.
    final StringTokenizer tokenizer = new StringTokenizer(trimmedExpression, OPERATORS, true);
    boolean isTokenMode = true;
    while (true) {
        final Character operator;
        final String token;
        if (isTokenMode) {
            if (tokenizer.hasMoreTokens()) {
                token = tokenizer.nextToken();
            } else {
                break;
            }
            if (OPERATORS.contains(token)) {
                operator = token.charAt(0);
            } else {
                operator = null;
            }
        } else {
            // Re-process the operator popped during the precedence check below.
            operator = operatorStack.pop();
            token = null;
        }
        isTokenMode = true;
        if (operator == null) {
            // Plain token: instantiate the named Predicate class reflectively.
            try {
                predicateStack.push(Class.forName(token).asSubclass(Predicate.class).getDeclaredConstructor().newInstance());
            } catch (ClassCastException e) {
                throw new RuntimeException(token + " must implement " + Predicate.class.getName(), e);
            } catch (Exception e) {
                throw new RuntimeException(e);
            }
        } else {
            if (operatorStack.empty() || operator == '(') {
                operatorStack.push(operator);
            } else if (operator == ')') {
                // Reduce until the matching open parenthesis, then discard it.
                while (operatorStack.peek() != '(') {
                    evaluate(predicateStack, operatorStack);
                }
                operatorStack.pop();
            } else {
                // Lower-precedence operator: reduce the stack top first, then retry.
                if (OPERATOR_PRECEDENCE.get(operator) < OPERATOR_PRECEDENCE.get(operatorStack.peek())) {
                    evaluate(predicateStack, operatorStack);
                    isTokenMode = false;
                }
                operatorStack.push(operator);
            }
        }
    }
    // Reduce any remaining operators.
    while (!operatorStack.empty()) {
        evaluate(predicateStack, operatorStack);
    }
    if (predicateStack.size() > 1) {
        throw new RuntimeException("Invalid logical expression");
    }
    return predicateStack.pop();
}
@Test
public void testAndNot() {
    // "A & !B" must parse into And(True, Not(False)).
    final Predicate parsed = PredicateExpressionParser.parse("com.linkedin.data.it.AlwaysTruePredicate & !com.linkedin.data.it.AlwaysFalsePredicate");
    Assert.assertEquals(parsed.getClass(), AndPredicate.class);

    final List<Predicate> andChildren = ((AndPredicate) parsed).getChildPredicates();
    Assert.assertEquals(andChildren.get(0).getClass(), AlwaysTruePredicate.class);
    Assert.assertEquals(andChildren.get(1).getClass(), NotPredicate.class);

    final Predicate notChild = ((NotPredicate) andChildren.get(1)).getChildPredicate();
    Assert.assertEquals(notChild.getClass(), AlwaysFalsePredicate.class);
}
/** Returns the schema, resolved lazily through the supplier. */
public Schema getSchema() {
    return schemaSupplier.get();
}
@Test
public void testAvroProhibitsShadowing() {
    // This test verifies that Avro won't serialize a class with two fields of
    // the same name. This is important for our error reporting, and also how
    // we lookup a field.
    try {
        ReflectData.get().getSchema(SubclassHidingParent.class);
        fail("Expected AvroTypeException");
    } catch (AvroRuntimeException e) {
        // The error must name the shadowed field and the duplicate-field condition.
        assertThat(e.getMessage(), containsString("mapField"));
        assertThat(e.getMessage(), containsString("two fields named"));
    }
}
/** Registers the SQL and JDBI logging exception mappers with the Jersey environment. */
@Override
public void run(Configuration configuration, Environment environment) {
    environment.jersey().register(new LoggingSQLExceptionMapper());
    environment.jersey().register(new LoggingJdbiExceptionMapper());
}
@Test
void test() {
    Environment environment = mock(Environment.class);
    JerseyEnvironment jerseyEnvironment = mock(JerseyEnvironment.class);
    when(environment.jersey()).thenReturn(jerseyEnvironment);

    new JdbiExceptionsBundle().run(new Configuration(), environment);

    // Both logging exception mappers must be registered with Jersey.
    verify(jerseyEnvironment).register(isA(LoggingSQLExceptionMapper.class));
    verify(jerseyEnvironment).register(isA(LoggingJdbiExceptionMapper.class));
}
/** Delegates to the shared schema lookup with {@code isKey = true}. */
@Override
public SchemaResult getKeySchema(
    final Optional<String> topicName,
    final Optional<Integer> schemaId,
    final FormatInfo expectedFormat,
    final SerdeFeatures serdeFeatures
) {
    return getSchema(topicName, schemaId, expectedFormat, serdeFeatures, true);
}
@Test
public void shouldReturnErrorFromGetKeySchemaOnMultipleColumns() {
    // Given: the schema translator yields two key columns.
    when(schemaTranslator.toColumns(parsedSchema, SerdeFeatures.of(SerdeFeature.UNWRAP_SINGLES), true))
        .thenReturn(ImmutableList.of(column1, column2));

    // When:
    final SchemaResult result = supplier.getKeySchema(Optional.of(TOPIC_NAME), Optional.empty(), expectedFormat, SerdeFeatures.of(SerdeFeature.UNWRAP_SINGLES));

    // Then: multi-column keys are rejected with a failure naming the topic and schema.
    assertThat(result.schemaAndId, is(Optional.empty()));
    assertThat(result.failureReason.get().getMessage(), containsString(
        "The key schema for topic: some-topic contains multiple columns, "
            + "which is not supported by ksqlDB at this time."));
    assertThat(result.failureReason.get().getMessage(), containsString(AVRO_SCHEMA));
}
/**
 * Applies a properties change to an existing workflow inside a retryable transaction,
 * producing and persisting a new properties snapshot. If the workflow has an active
 * version, downstream workflow info and triggers are refreshed, and a timeline job
 * event is published.
 *
 * @throws NullPointerException-style check failure if props is null or the workflow
 *         does not exist
 */
public PropertiesSnapshot updateWorkflowProperties(
    String workflowId, User author, Properties props, PropertiesUpdate update) {
  LOG.debug("Updating workflow properties for workflow id [{}]", workflowId);
  Checks.notNull(
      props, "properties changes to apply cannot be null for workflow [%s]", workflowId);
  return withMetricLogError(
      () ->
          withRetryableTransaction(
              conn -> {
                // Lock/load current workflow state for update.
                WorkflowInfo workflowInfo = getWorkflowInfoForUpdate(conn, workflowId);
                Checks.notNull(
                    workflowInfo.getPrevPropertiesSnapshot(),
                    "Cannot update workflow properties while the workflow [%s] does not exist",
                    workflowId);
                // Derive the new snapshot from the previous one plus the change set.
                PropertiesSnapshot snapshot =
                    updateWorkflowProps(
                        conn, workflowId, author, System.currentTimeMillis(),
                        workflowInfo.getPrevPropertiesSnapshot(), props, update);
                List<StatementPreparer> preparers = new ArrayList<>();
                StringBuilder fields = prepareProperties(preparers, workflowId, snapshot);
                long[] updateRes = executeTemplateUpdate(conn, fields, preparers);
                if (updateRes != null) {
                  // Only an active workflow needs its info/triggers refreshed.
                  if (workflowInfo.getPrevActiveVersionId() != Constants.INACTIVE_VERSION_ID) {
                    updateWorkflowInfoForNextActiveWorkflow(
                        conn,
                        workflowId,
                        workflowInfo.getPrevActiveVersionId(),
                        workflowInfo,
                        snapshot);
                    addWorkflowTriggersIfNeeded(conn, workflowInfo);
                  }
                  MaestroJobEvent jobEvent = logToTimeline(conn, workflowId, snapshot);
                  publisher.publishOrThrow(
                      jobEvent, "Failed to publish maestro properties change job event.");
                }
                return snapshot;
              }),
      "updateWorkflowProperties",
      "Failed updating the properties for workflow [{}]",
      workflowId);
}
@Test
public void testUpdateWorkflowProperties() throws Exception {
    WorkflowDefinition wfd = loadWorkflow(TEST_WORKFLOW_ID2);
    workflowDao.addWorkflowDefinition(wfd, wfd.getPropertiesSnapshot().extractProperties());
    assertNotNull(wfd.getInternalId());
    verify(publisher, times(1)).publishOrThrow(any(), any());

    MaestroWorkflow maestroWorkflow = workflowDao.getMaestroWorkflow(TEST_WORKFLOW_ID2);
    assertEquals("tester", maestroWorkflow.getPropertiesSnapshot().getOwner().getName());
    assertEquals(1L, maestroWorkflow.getLatestVersionId().longValue());

    // Empty change set: snapshot keeps prior values but records the new author/time.
    PropertiesSnapshot newSnapshot =
        workflowDao.updateWorkflowProperties(
            TEST_WORKFLOW_ID2, User.create("test"), new Properties(), PROPERTIES_UPDATE);
    assertEquals(
        wfd.getPropertiesSnapshot().toBuilder()
            .createTime(newSnapshot.getCreateTime())
            .author(User.create("test"))
            .build(),
        newSnapshot);
    verify(publisher, times(2)).publishOrThrow(any(), any());

    maestroWorkflow = workflowDao.getMaestroWorkflow(TEST_WORKFLOW_ID2);
    assertEquals("tester", maestroWorkflow.getPropertiesSnapshot().getOwner().getName());

    // Non-empty change set: the owner change must be persisted.
    Properties props = new Properties();
    props.setOwner(User.create("another-owner"));
    workflowDao.updateWorkflowProperties(
        TEST_WORKFLOW_ID2, User.create("test"), props, PROPERTIES_UPDATE);
    verify(publisher, times(3)).publishOrThrow(any(), any());

    maestroWorkflow = workflowDao.getMaestroWorkflow(TEST_WORKFLOW_ID2);
    assertEquals("another-owner", maestroWorkflow.getPropertiesSnapshot().getOwner().getName());

    assertEquals(wfd, workflowDao.addWorkflowDefinition(wfd, null));
    verify(triggerClient, times(0)).upsertTriggerSubscription(any(), any(), any());
}
@SqlNullable
@Description("Returns the interior ring element at the specified index (indices start at 1)")
@ScalarFunction("ST_InteriorRingN")
@SqlType(GEOMETRY_TYPE_NAME)
public static Slice stInteriorRingN(@SqlType(GEOMETRY_TYPE_NAME) Slice input, @SqlType(INTEGER) long index)
{
    Geometry geometry = deserialize(input);
    // Only polygons have interior rings; anything else is a type error.
    validateType("ST_InteriorRingN", geometry, EnumSet.of(POLYGON));
    org.locationtech.jts.geom.Polygon polygon = (org.locationtech.jts.geom.Polygon) geometry;
    // SQL indices are 1-based; out-of-range indices yield SQL NULL rather than an error.
    if (index < 1 || index > polygon.getNumInteriorRing()) {
        return null;
    }
    return serialize(polygon.getInteriorRingN(toIntExact(index) - 1));
}
@Test
public void testSTInteriorRingN()
{
    // Non-polygon geometries must be rejected with a type error.
    assertInvalidInteriorRingN("POINT EMPTY", 0, "POINT");
    assertInvalidInteriorRingN("LINESTRING (1 2, 2 3, 3 4)", 1, "LINE_STRING");
    assertInvalidInteriorRingN("MULTIPOINT (1 1, 2 3, 5 8)", -1, "MULTI_POINT");
    assertInvalidInteriorRingN("MULTILINESTRING ((2 4, 4 2), (3 5, 5 3))", 0, "MULTI_LINE_STRING");
    assertInvalidInteriorRingN("MULTIPOLYGON (((1 1, 1 3, 3 3, 3 1, 1 1)), ((2 4, 2 6, 6 6, 6 4, 2 4)))", 2, "MULTI_POLYGON");
    assertInvalidInteriorRingN("GEOMETRYCOLLECTION (POINT (2 2), POINT (10 20))", 1, "GEOMETRY_COLLECTION");

    // Out-of-range or non-positive indices on a hole-free polygon yield NULL.
    assertInteriorRingN("POLYGON ((0 0, 0 1, 1 1, 1 0, 0 0))", 1, null);
    assertInteriorRingN("POLYGON ((0 0, 0 1, 1 1, 1 0, 0 0))", 2, null);
    assertInteriorRingN("POLYGON ((0 0, 0 1, 1 1, 1 0, 0 0))", -1, null);
    assertInteriorRingN("POLYGON ((0 0, 0 1, 1 1, 1 0, 0 0))", 0, null);

    // Valid 1-based indices return the corresponding interior ring.
    assertInteriorRingN("POLYGON ((0 0, 0 3, 3 3, 3 0, 0 0), (1 1, 2 1, 2 2, 1 2, 1 1))", 1, "LINESTRING (1 1, 2 1, 2 2, 1 2, 1 1)");
    assertInteriorRingN("POLYGON ((0 0, 0 5, 5 5, 5 0, 0 0), (1 1, 2 1, 2 2, 1 2, 1 1), (3 3, 4 3, 4 4, 3 4, 3 3))", 2, "LINESTRING (3 3, 4 3, 4 4, 3 4, 3 3)");
}
/** Creates a number formatter for the given format pattern (e.g. {@code "FM9.99EEEE"}). */
public static Formatter forNumbers(@Nonnull String format) {
    return new NumberFormat(format);
}
@Test
public void testExponentialForm() {
    // EEEE patterns render scientific notation; FM suppresses the leading fill.
    check(0, " 0E+00", "-0E+00", "9EEEE");
    check(0, " .0E+00", "-.0E+00", ".9EEEE");
    Formatter f = forNumbers("FM9.99EEEE");
    check(0, f, "0E+00");
    check(5e4, f, "5E+04");
    check(0.00045, f, "4.5E-04");
    check(0.0004859, f, "4.86E-04");
}
/**
 * Resolves the public IP for each given private address via the AWS
 * DescribeNetworkInterfaces API. On any failure the lookup degrades gracefully:
 * every private address maps to null and a warning is logged once.
 */
Map<String, String> describeNetworkInterfaces(List<String> privateAddresses, AwsCredentials credentials) {
    if (privateAddresses.isEmpty()) {
        return Collections.emptyMap();
    }
    try {
        Map<String, String> attributes = createAttributesDescribeNetworkInterfaces(privateAddresses);
        Map<String, String> headers = createHeaders(attributes, credentials);
        String response = callAwsService(attributes, headers);
        return parseDescribeNetworkInterfaces(response);
    } catch (Exception e) {
        LOGGER.finest(e);
        // Log warning only once.
        if (!isNoPublicIpAlreadyLogged) {
            LOGGER.warning("Cannot fetch the public IPs of ECS Tasks. You won't be able to use "
                + "Hazelcast Smart Client from outside of this VPC.");
            isNoPublicIpAlreadyLogged = true;
        }
        // Fall back to "no public IP known" for every requested address.
        Map<String, String> map = new HashMap<>();
        privateAddresses.forEach(k -> map.put(k, null));
        return map;
    }
}
@Test
public void describeNetworkInterfacesException() {
    // given
    List<String> privateAddresses = asList("10.0.1.207", "10.0.1.82");
    String requestUrl = "/?Action=DescribeNetworkInterfaces"
        + "&Filter.1.Name=addresses.private-ip-address"
        + "&Filter.1.Value.1=10.0.1.207"
        + "&Filter.1.Value.2=10.0.1.82"
        + "&Version=2016-11-15";
    // This response body is never served: the stub below returns HTTP 500 instead.
    //language=XML
    String response = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n"
        + "<DescribeNetworkInterfacesResponse xmlns=\"http://ec2.amazonaws.com/doc/2016-11-15/\">\n"
        + "    <requestId>21bc9f93-2196-4107-87a3-9e5b2b3f29d9</requestId>\n"
        + "    <networkInterfaceSet>\n"
        + "        <item>\n"
        + "            <availabilityZone>eu-central-1a</availabilityZone>\n"
        + "            <privateIpAddress>10.0.1.207</privateIpAddress>\n"
        + "            <association>\n"
        + "                <publicIp>54.93.217.194</publicIp>\n"
        + "            </association>\n"
        + "        </item>\n"
        + "        <item>\n"
        + "            <availabilityZone>eu-central-1a</availabilityZone>\n"
        + "            <privateIpAddress>10.0.1.82</privateIpAddress>\n"
        + "            <association>\n"
        + "                <publicIp>35.156.192.128</publicIp>\n"
        + "            </association>\n"
        + "        </item>\n"
        + "    </networkInterfaceSet>\n"
        + "</DescribeNetworkInterfacesResponse>";
    stubFor(get(urlEqualTo(requestUrl))
        .withHeader("X-Amz-Date", equalTo("20200403T102518Z"))
        .withHeader("Authorization", equalTo(AUTHORIZATION_HEADER))
        .withHeader("X-Amz-Security-Token", equalTo(TOKEN))
        .willReturn(aResponse().withStatus(HttpURLConnection.HTTP_INTERNAL_ERROR)));

    // when
    Map<String, String> result = awsEc2Api.describeNetworkInterfaces(privateAddresses, CREDENTIALS);

    // then: the service error degrades to null public IPs for every address.
    assertEquals(2, result.size());
    assertNull(result.get("10.0.1.207"));
    assertNull(result.get("10.0.1.82"));
}
/**
 * Scans every {@code .class} entry in the given jar bytes with ASM and collects
 * the names of referenced types: super class, interfaces, field types, and
 * method return/argument types.
 *
 * @param jarBytes raw bytes of the plugin jar
 * @return set of dependency type names (as produced by addDependencies)
 * @throws ShenyuException on any failure while reading the archive
 */
public static Set<String> getDependencyTree(final byte[] jarBytes) {
    Set<String> dependencies = new HashSet<>();
    try (InputStream inputStream = new ByteArrayInputStream(jarBytes);
         ZipInputStream zipInputStream = new ZipInputStream(inputStream)) {
        ZipEntry entry;
        while ((entry = zipInputStream.getNextEntry()) != null) {
            if (entry.getName().endsWith(".class")) {
                ClassNode classNode = new ClassNode(Opcodes.ASM7);
                // ClassReader consumes the bytes of the current zip entry.
                ClassReader classReader = new ClassReader(zipInputStream);
                classReader.accept(classNode, 0);
                addDependencies(classNode.superName, dependencies);
                for (String interfaceName : classNode.interfaces) {
                    addDependencies(interfaceName, dependencies);
                }
                for (FieldNode fieldNode : classNode.fields) {
                    addDependencies(Type.getType(fieldNode.desc).getClassName(), dependencies);
                }
                for (MethodNode methodNode : classNode.methods) {
                    addDependencies(Type.getReturnType(methodNode.desc).getClassName(), dependencies);
                    for (Type argumentType : Type.getArgumentTypes(methodNode.desc)) {
                        addDependencies(argumentType.getClassName(), dependencies);
                    }
                }
            }
        }
        return dependencies;
    } catch (Exception e) {
        LOG.error("get dependency tree error", e);
        throw new ShenyuException(AdminConstants.THE_PLUGIN_JAR_FILE_IS_NOT_CORRECT_OR_EXCEEDS_16_MB);
    }
}
@Test
public void test() {
    // Mocks every constructor/static call used by getDependencyTree so the jar
    // scan runs end-to-end against one fake "abc.class" entry.
    try (MockedConstruction<ByteArrayInputStream> byteStream = mockConstruction(ByteArrayInputStream.class);
         MockedConstruction<ZipInputStream> zipStream = mockConstruction(ZipInputStream.class,
             (mock, context) -> when(mock.getNextEntry()).thenReturn(new ZipEntry("abc.class")).thenReturn(null));
         MockedConstruction<ClassNode> classNode = mockConstruction(ClassNode.class, (mock, context) -> {
             // ClassNode exposes plain public fields (no getters to stub), so
             // they are populated reflectively on the mock.
             Field superName = ClassNode.class.getDeclaredField("superName");
             superName.set(mock, "superName");
             Field interfaces = ClassNode.class.getDeclaredField("interfaces");
             interfaces.set(mock, Lists.newArrayList("interface"));
             FieldNode fieldNode = mock(FieldNode.class);
             Field fieldDesc = FieldNode.class.getDeclaredField("desc");
             fieldDesc.set(fieldNode, "desc");
             Field fields = ClassNode.class.getDeclaredField("fields");
             fields.set(mock, Lists.newArrayList(fieldNode));
             MethodNode methodNode = mock(MethodNode.class);
             Field methodDesc = MethodNode.class.getDeclaredField("desc");
             methodDesc.set(methodNode, "desc");
             Field methods = ClassNode.class.getDeclaredField("methods");
             methods.set(mock, Lists.newArrayList(methodNode));
         });
         MockedConstruction<ClassReader> classReader = mockConstruction(ClassReader.class);
         MockedStatic type = mockStatic(Type.class)) {
        when(Type.getType(anyString())).thenReturn(Type.BOOLEAN_TYPE);
        when(Type.getReturnType(anyString())).thenReturn(Type.CHAR_TYPE);
        when(Type.getArgumentTypes(anyString())).thenReturn(new Type[] {Type.INT_TYPE});
        // null jarBytes is acceptable here because the stream constructors are mocked.
        assertNotNull(JarDependencyUtils.getDependencyTree(null));
    }
}
/**
 * Serializes the given update as compact (non-pretty-printed) JSON.
 */
public static String toJson(MetadataUpdate metadataUpdate) {
    return toJson(metadataUpdate, false);
}
@Test
public void testSetDefaultSortOrderToJson() {
    // SetDefaultSortOrder must serialize to {"action":...,"sort-order-id":...}.
    final int orderId = 2;
    final String expectedJson = String.format(
        "{\"action\":\"%s\",\"sort-order-id\":%d}",
        MetadataUpdateParser.SET_DEFAULT_SORT_ORDER, orderId);
    final MetadataUpdate update = new MetadataUpdate.SetDefaultSortOrder(orderId);
    assertThat(MetadataUpdateParser.toJson(update))
        .as("Set default sort order should serialize to the correct JSON value")
        .isEqualTo(expectedJson);
}
static int readDirectBuffer(InputStream f, ByteBuffer buf, byte[] temp) throws IOException { // copy all the bytes that return immediately, stopping at the first // read that doesn't return a full buffer. int nextReadLength = Math.min(buf.remaining(), temp.length); int totalBytesRead = 0; int bytesRead; while ((bytesRead = f.read(temp, 0, nextReadLength)) == temp.length) { buf.put(temp); totalBytesRead += bytesRead; nextReadLength = Math.min(buf.remaining(), temp.length); } if (bytesRead < 0) { // return -1 if nothing was read return totalBytesRead == 0 ? -1 : totalBytesRead; } else { // copy the last partial buffer buf.put(temp, 0, bytesRead); totalBytesRead += bytesRead; return totalBytesRead; } }
@Test
public void testDirectSmallBuffer() throws Exception {
    // A 5-byte direct buffer against the mock stream: the first call fills the
    // buffer completely, the second copies nothing because no space remains.
    ByteBuffer readBuffer = ByteBuffer.allocateDirect(5);
    MockInputStream stream = new MockInputStream();
    int len = DelegatingSeekableInputStream.readDirectBuffer(stream, readBuffer, TEMP.get());
    Assert.assertEquals(5, len);
    Assert.assertEquals(5, readBuffer.position());
    Assert.assertEquals(5, readBuffer.limit());
    len = DelegatingSeekableInputStream.readDirectBuffer(stream, readBuffer, TEMP.get());
    Assert.assertEquals(0, len);
    readBuffer.flip();
    Assert.assertEquals("Buffer contents should match",
        ByteBuffer.wrap(TEST_ARRAY, 0, 5), readBuffer);
}
/**
 * Returns a matcher that matches requests whose HTTP method equals {@code method}.
 *
 * NOTE(review): the empty-string guard throws NullPointerException rather than
 * IllegalArgumentException; kept as-is since callers may catch on the type.
 */
public static Matcher<HttpRequest> methodEquals(String method) {
    if (method == null) throw new NullPointerException("method == null");
    if (method.isEmpty()) throw new NullPointerException("method is empty");
    return new MethodEquals(method);
}
@Test
void methodEquals_unmatched_null() {
    // A "GET" matcher must not match the stub request fixture.
    boolean matched = methodEquals("GET").matches(httpRequest);
    assertThat(matched).isFalse();
}
/**
 * Reads a length-encoded binary value as raw bytes.
 * The {@code unsigned} flag is irrelevant because the bytes are returned verbatim.
 */
@Override
public Object read(final MySQLPacketPayload payload, final boolean unsigned) {
    return payload.readStringLenencByBytes();
}
@Test
void assertRead() {
    // First byte (0x0d = 13) is the length-encoded prefix; the remaining
    // 13 bytes are the payload that read() must return verbatim.
    byte[] raw = {0x0d, 0x0a, 0x33, 0x18, 0x01, 0x4a, 0x08, 0x0a, (byte) 0x9a, 0x01, 0x18, 0x01, 0x4a, 0x6f};
    byte[] body = {0x0a, 0x33, 0x18, 0x01, 0x4a, 0x08, 0x0a, (byte) 0x9a, 0x01, 0x18, 0x01, 0x4a, 0x6f};
    MySQLPacketPayload packetPayload = new MySQLPacketPayload(Unpooled.wrappedBuffer(raw), StandardCharsets.UTF_8);
    byte[] actualBytes = (byte[]) new MySQLByteLenencBinaryProtocolValue().read(packetPayload, false);
    assertThat(actualBytes, is(body));
}
/**
 * Looks up the application entity in the timeline service and distills it
 * to owner + state. Returns null when no entity exists for the app.
 */
@Override
public BasicAppInfo getApp(HttpServletRequest req, String appId, String clusterId) {
    UserGroupInformation callerUGI = LogWebServiceUtils.getUser(req);
    // Fall back to the configured default cluster when none is given.
    String cId = clusterId != null ? clusterId : defaultClusterid;
    MultivaluedMap<String, String> params = new MultivaluedMapImpl();
    params.add("fields", "INFO");
    String path = JOINER.join("clusters/", cId, "/apps/", appId);
    TimelineEntity appEntity = null;
    try {
        if (callerUGI == null) {
            // Anonymous caller: query directly.
            appEntity = getEntity(path, params);
        } else {
            // Authenticated caller: query as that user.
            setUserName(params, callerUGI.getShortUserName());
            appEntity = callerUGI.doAs(new PrivilegedExceptionAction<TimelineEntity>() {
                @Override
                public TimelineEntity run() throws Exception {
                    return getEntity(path, params);
                }
            });
        }
    } catch (Exception e) {
        LogWebServiceUtils.rewrapAndThrowException(e);
    }
    if (appEntity == null) {
        return null;
    }
    String appOwner = (String) appEntity.getInfo()
        .get(ApplicationMetricsConstants.USER_ENTITY_INFO);
    String state = (String) appEntity.getInfo()
        .get(ApplicationMetricsConstants.STATE_EVENT_INFO);
    // NOTE(review): valueOf throws NPE if the entity carries no state info —
    // confirm the timeline service always populates it.
    YarnApplicationState appState = YarnApplicationState.valueOf(state);
    return new BasicAppInfo(appState, appOwner);
}
@Test
public void testGetApp() {
    // Passing a null cluster id exercises the default-cluster fallback.
    BasicAppInfo appInfo = logWebService.getApp(request, appId.toString(), null);
    Assert.assertEquals("RUNNING", appInfo.getAppState().toString());
    Assert.assertEquals(user, appInfo.getUser());
}
@Override
public void clearGPSLocation() {
    // Intentional no-op: this implementation keeps no GPS location state,
    // so there is nothing to clear.
}
@Test
public void clearGPSLocation() {
    // Smoke test: calling the no-op implementation must not throw.
    mSensorsAPI.clearGPSLocation();
}
/**
 * Returns a reader scoped to {@code newContainedViews}; rejects any request
 * that mentions a view this container does not hold.
 */
public ReadyCheckingSideInputReader createReaderForViews(
    Collection<PCollectionView<?>> newContainedViews) {
  if (!containedViews.containsAll(newContainedViews)) {
    Set<PCollectionView<?>> known = ImmutableSet.copyOf(containedViews);
    Set<PCollectionView<?>> requested = ImmutableSet.copyOf(newContainedViews);
    Set<PCollectionView<?>> unknown = Sets.difference(requested, known);
    throw new IllegalArgumentException(
        "Can't create a SideInputReader with unknown views " + unknown);
  }
  return new SideInputContainerSideInputReader(newContainedViews);
}
@Test
public void getOnReaderForViewNotInReaderFails() {
    // A reader built for mapView only must reject a get() for iterableView.
    thrown.expect(IllegalArgumentException.class);
    thrown.expectMessage("unknown view: " + iterableView.toString());
    container
        .createReaderForViews(ImmutableList.of(mapView))
        .get(iterableView, GlobalWindow.INSTANCE);
}
/**
 * Reads a binlog YEAR value: a single byte holding the offset from 1900,
 * where raw 0 denotes the special "zero year".
 */
@Override
public Serializable read(final MySQLBinlogColumnDef columnDef, final MySQLPacketPayload payload) {
    int yearOffset = payload.readInt1();
    if (0 == yearOffset) {
        return MySQLTimeValueUtils.YEAR_OF_ZERO;
    }
    return Integer.toString(yearOffset + 1900);
}
@Test
void assertReadNullYear() {
    // Raw byte 0 is MySQL's "zero year" and must map to the sentinel constant.
    when(payload.readInt1()).thenReturn(0);
    Object actual = new MySQLYearBinlogProtocolValue().read(columnDef, payload);
    assertThat(actual, is(MySQLTimeValueUtils.YEAR_OF_ZERO));
}
/**
 * Loads the migrations config and delegates to the testable overload.
 * Returns 1 (failure) when the config file is missing or unparseable.
 */
@Override
protected int command() {
    if (!validateConfigFilePresent()) {
        return 1;
    }
    final MigrationConfig config;
    try {
        config = MigrationConfig.load(getConfigFile());
    } catch (KsqlException | MigrationException e) {
        LOGGER.error(e.getMessage());
        return 1;
    }
    // Real client factory, directory, and wall clock; tests inject their own.
    return command(
        config,
        MigrationsUtil::getKsqlClient,
        getMigrationsDir(getConfigFile(), config),
        Clock.systemDefaultZone()
    );
}
@Test
public void shouldNotApplyOlderVersion() throws Exception {
    // Given: version 1 is already applied and is the current version; asking
    // to migrate to version 1 again must be rejected.
    command = PARSER.parse("-v", "1");
    createMigrationFile(1, NAME, migrationsDir, COMMAND);
    givenAppliedMigration(1, NAME, MigrationState.MIGRATED);
    givenCurrentMigrationVersion("1");
    // When: run with a fixed clock for deterministic timestamps.
    final int result = command.command(config, (cfg, headers) -> ksqlClient, migrationsDir,
        Clock.fixed(Instant.ofEpochMilli(1000), ZoneId.systemDefault()));
    // Then: non-zero exit code signals the rejected migration.
    assertThat(result, is(1));
}
/**
 * Returns the set of property names this class supports.
 * The EI_EXPOSE_REP warning is suppressed because ImmutableSet cannot be
 * mutated by callers.
 */
@Override
@SuppressFBWarnings(value = "EI_EXPOSE_REP")
public ImmutableSet<String> getSupportedProperties() {
    return SUPPORTED_PROPERTIES;
}
@Test public void shouldGetSupportedProperties() { // Given: final JsonSchemaProperties properties = new JsonSchemaProperties(ImmutableMap.of()); // When: final ImmutableSet<String> supportedProperties = properties.getSupportedProperties(); // Then: assertThat(supportedProperties, is(JsonSchemaProperties.SUPPORTED_PROPERTIES)); }
/**
 * Narrows the supertype's statement to the PrepareStatement this context
 * is always constructed with.
 */
@Override
public PrepareStatement getSqlStatement() {
    return (PrepareStatement) super.getSqlStatement();
}
@Test
void assertNewInstance() {
    // A prepare statement carrying SELECT/INSERT/UPDATE/DELETE must expose the
    // wrapped statement and all four referenced tables (each named tbl_1).
    PostgreSQLPrepareStatement sqlStatement = new PostgreSQLPrepareStatement();
    sqlStatement.setSelect(getSelect());
    sqlStatement.setInsert(getInsert());
    sqlStatement.setUpdate(getUpdate());
    sqlStatement.setDelete(getDelete());
    PrepareStatementContext actual = new PrepareStatementContext(sqlStatement, DefaultDatabase.LOGIC_NAME);
    assertThat(actual, instanceOf(CommonSQLStatementContext.class));
    assertThat(actual.getSqlStatement(), is(sqlStatement));
    assertThat(actual.getTablesContext().getSimpleTables().stream()
        .map(each -> each.getTableName().getIdentifier().getValue()).collect(Collectors.toList()),
        is(Arrays.asList("tbl_1", "tbl_1", "tbl_1", "tbl_1")));
}
/**
 * Returns an unmodifiable view of the configured request headers:
 * callers can read but never mutate the underlying set.
 */
public Set<String> allowedRequestHeaders() {
    return Collections.unmodifiableSet(allowedRequestHeaders);
}
@Test
public void requestHeaders() {
    // Headers handed to the builder must be reported back by the config.
    final CorsConfig corsConfig =
        forAnyOrigin().allowedRequestHeaders("preflight-header1", "preflight-header2").build();
    assertThat(corsConfig.allowedRequestHeaders(), hasItems("preflight-header1", "preflight-header2"));
}
/**
 * Decides whether the given controller node can be restarted without harming
 * quorum health. The returned future fails if the metadata quorum cannot be
 * described.
 */
Future<Boolean> canRollController(int nodeId) {
    LOGGER.debugCr(reconciliation, "Determining whether controller pod {} can be rolled", nodeId);
    return describeMetadataQuorum().map(info -> {
        boolean canRoll = isQuorumHealthyWithoutNode(nodeId, info);
        if (!canRoll) {
            LOGGER.debugCr(reconciliation, "Not restarting controller pod {}. Restart would affect the quorum health", nodeId);
        }
        return canRoll;
    }).recover(error -> {
        // recover() is used purely to log the failure; the original error is
        // propagated unchanged via a re-failed future.
        LOGGER.warnCr(reconciliation, "Error determining whether it is safe to restart controller pod {}", nodeId, error);
        return Future.failedFuture(error);
    });
}
@Test
public void shouldHandleNoLeaderQuorumScenario(VertxTestContext context) {
    // Simulate a valid controller list
    Map<Integer, OptionalLong> controllers = new HashMap<>();
    controllers.put(1, OptionalLong.of(10000L));
    controllers.put(2, OptionalLong.of(12000L));
    controllers.put(3, OptionalLong.of(11000L));
    // Simulate no leader by passing a negative leader ID
    Admin admin = setUpMocks(-1, controllers);
    KafkaQuorumCheck quorumCheck = new KafkaQuorumCheck(Reconciliation.DUMMY_RECONCILIATION, admin, vertx, CONTROLLER_QUORUM_FETCH_TIMEOUT_MS);
    // Without a leader the quorum cannot be proven healthy, so rolling must be refused.
    quorumCheck.canRollController(1).onComplete(context.succeeding(result -> {
        context.verify(() -> assertFalse(result));
        context.completeNow();
    }));
}
/**
 * Updates an existing dict entry. The {@code @Existed} constraint rejects
 * unknown ids before the service is invoked.
 */
@PutMapping("/{id}")
@RequiresPermissions("system:dict:edit")
public ShenyuAdminResult updateShenyuDict(@PathVariable("id") @Valid @Existed(provider = ShenyuDictMapper.class,
        message = "dict is not existed") final String id,
        @Valid @NotNull @RequestBody final ShenyuDictDTO shenyuDictDTO) {
    // The path id wins over any id present in the request body.
    shenyuDictDTO.setId(id);
    return ShenyuAdminResult.success(ShenyuResultMessage.UPDATE_SUCCESS, shenyuDictService.createOrUpdate(shenyuDictDTO));
}
@Test
public void testUpdateShenyuDict() throws Exception {
    // PUT /shenyu-dict/{id} must pass @Existed validation (mapper mocked to
    // report the id exists) and answer UPDATE_SUCCESS when one row is saved.
    ShenyuDictDTO shenyuDictDTO = buildTestDict();
    SpringBeanUtils.getInstance().setApplicationContext(mock(ConfigurableApplicationContext.class));
    when(SpringBeanUtils.getInstance().getBean(ShenyuDictMapper.class)).thenReturn(shenyuDictMapper);
    when(shenyuDictMapper.existed(shenyuDictDTO.getId())).thenReturn(true);
    given(this.shenyuDictService.createOrUpdate(shenyuDictDTO)).willReturn(1);
    this.mockMvc.perform(MockMvcRequestBuilders.put("/shenyu-dict/{id}", "123")
            .contentType(MediaType.APPLICATION_JSON)
            .content(GsonUtils.getInstance().toJson(shenyuDictDTO)))
        .andExpect(status().isOk())
        .andExpect(jsonPath("$.message", is(ShenyuResultMessage.UPDATE_SUCCESS)))
        .andReturn();
}
/**
 * Natural ordering of FileStatus follows the ordering of the underlying paths.
 */
public int compareTo(FileStatus o) {
    Path mine = this.getPath();
    Path theirs = o.getPath();
    return mine.compareTo(theirs);
}
@Test
public void testCompareTo() throws IOException {
    // Ordering must follow path ordering, and compareTo must be usable by
    // Collections.binarySearch over a path-sorted list.
    Path path1 = new Path("path1");
    Path path2 = new Path("path2");
    FileStatus fileStatus1 = new FileStatus(1, true, 1, 1, 1, 1,
        FsPermission.valueOf("-rw-rw-rw-"), "one", "one", null, path1);
    FileStatus fileStatus2 = new FileStatus(1, true, 1, 1, 1, 1,
        FsPermission.valueOf("-rw-rw-rw-"), "one", "one", null, path2);
    assertTrue(fileStatus1.compareTo(fileStatus2) < 0);
    assertTrue(fileStatus2.compareTo(fileStatus1) > 0);
    List<FileStatus> statList = new ArrayList<>();
    statList.add(fileStatus1);
    statList.add(fileStatus2);
    assertTrue(Collections.binarySearch(statList, fileStatus1) > -1);
}