focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
/**
 * Parses a "<number><unit>" duration string (e.g. "10s", "7657623 min") into a
 * java.time.Duration. A missing unit label defaults to milliseconds.
 * Throws NumberFormatException when no leading digits exist, and
 * IllegalArgumentException for an unparseable number, an unknown unit label,
 * or a value that overflows Duration.
 */
public static Duration parseDuration(String text) {
    checkNotNull(text);
    final String trimmed = text.trim();
    checkArgument(!trimmed.isEmpty(), "argument is an empty- or whitespace-only string");
    final int len = trimmed.length();
    int pos = 0;
    char current;
    // Scan the leading run of ASCII digits; everything after it is the unit label.
    while (pos < len && (current = trimmed.charAt(pos)) >= '0' && current <= '9') {
        pos++;
    }
    final String number = trimmed.substring(0, pos);
    // Unit labels match case-insensitively; Locale.US avoids locale-dependent lowercasing.
    final String unitLabel = trimmed.substring(pos).trim().toLowerCase(Locale.US);
    if (number.isEmpty()) {
        throw new NumberFormatException("text does not start with a number");
    }
    final BigInteger value;
    try {
        value = new BigInteger(number); // this throws a NumberFormatException
    } catch (NumberFormatException e) {
        throw new IllegalArgumentException(
                "The value '" + number + "' cannot be represented as an integer number.", e);
    }
    final ChronoUnit unit;
    if (unitLabel.isEmpty()) {
        // No unit given: interpret the number as milliseconds.
        unit = ChronoUnit.MILLIS;
    } else {
        unit = LABEL_TO_UNIT_MAP.get(unitLabel);
    }
    if (unit == null) {
        throw new IllegalArgumentException(
                "Time interval unit label '" + unitLabel
                        + "' does not match any of the recognized units: "
                        + TimeUnit.getAllUnits());
    }
    try {
        return convertBigIntToDuration(value, unit);
    } catch (ArithmeticException e) {
        throw new IllegalArgumentException(
                "The value '" + number
                        + "' cannot be represented as java.time.Duration (numeric overflow).", e);
    }
}
// Verifies parseDuration accepts all minute unit aliases ("m", "min", "minute",
// "minutes") and tolerates whitespace between the number and the unit label.
@Test
void testParseDurationMinutes() {
    assertThat(TimeUtils.parseDuration("7657623m").toMinutes()).isEqualTo(7657623);
    assertThat(TimeUtils.parseDuration("7657623min").toMinutes()).isEqualTo(7657623);
    assertThat(TimeUtils.parseDuration("7657623minute").toMinutes()).isEqualTo(7657623);
    assertThat(TimeUtils.parseDuration("7657623minutes").toMinutes()).isEqualTo(7657623);
    assertThat(TimeUtils.parseDuration("7657623 min").toMinutes()).isEqualTo(7657623);
}
/** Returns the timezone of the first configured time trigger, or null when there is none. */
String getFirstTimeTriggerTimeZone() {
    if (timeTriggers == null || timeTriggers.isEmpty()) {
        return null;
    }
    return timeTriggers.get(0).getTimezone();
}
// Builds an InstanceWrapper for a fresh-run request and checks derived fields;
// in particular, getFirstTimeTriggerTimeZone() must return the timezone of the
// FIRST configured trigger ("US/Pacific"), not the second ("UTC").
@Test
public void testGetFirstTimeTriggerTimeZone() throws Exception {
    WorkflowInstance instance = loadObject(
        "fixtures/instances/sample-workflow-instance-created.json", WorkflowInstance.class);
    CronTimeTrigger cronTrigger1 = new CronTimeTrigger();
    cronTrigger1.setTimezone("US/Pacific");
    CronTimeTrigger cronTrigger2 = new CronTimeTrigger();
    cronTrigger2.setTimezone("UTC");
    instance.setRuntimeWorkflow(
        instance.getRuntimeWorkflow().toBuilder()
            .timeTriggers(Arrays.asList(cronTrigger1, cronTrigger2))
            .build());
    RunRequest runRequest = RunRequest.builder()
        .currentPolicy(RunPolicy.START_FRESH_NEW_RUN)
        .requester(User.create("tester"))
        .build();
    InstanceWrapper instanceWrapper = InstanceWrapper.from(instance, runRequest);
    assertEquals(runRequest.getInitiator(), instanceWrapper.getInitiator());
    assertTrue(instanceWrapper.isWorkflowParam());
    assertNull(instanceWrapper.getInitiatorTimeZone());
    assertEquals(runRequest.getCurrentPolicy().name(), instanceWrapper.getRunPolicy());
    assertEquals(
        instance.getRunProperties().getOwner().getName(), instanceWrapper.getWorkflowOwner());
    assertEquals("US/Pacific", instanceWrapper.getFirstTimeTriggerTimeZone());
}
/**
 * Returns true when the subject has at least one schema version registered.
 * Delegates to getLatestSchema; any registry error handling happens there.
 */
public static boolean subjectExists(
    final SchemaRegistryClient srClient,
    final String subject
) {
    return getLatestSchema(srClient, subject).isPresent();
}
// A 403 from the Schema Registry must surface as KsqlSchemaAuthorizationException
// (raised inside getLatestSchema) rather than subjectExists returning false.
@Test
public void shouldThrowAuthorizationExceptionOnUnauthorizedSubjectAccess() throws Exception {
    // Given:
    when(schemaRegistryClient.getLatestSchemaMetadata("bar-value")).thenThrow(
        new RestClientException(
            "User is denied operation Write on Subject: bar-value; error code: 40301", 403, 40301)
    );
    // When:
    final Exception e = assertThrows(KsqlSchemaAuthorizationException.class,
        () -> SchemaRegistryUtil.subjectExists(schemaRegistryClient, "bar-value"));
    // Then:
    assertThat(e.getMessage(), equalTo(
        "Authorization denied to Write on Schema Registry subject: [bar-value]"));
}
/**
 * Decodes a base64-encoded string and interprets the bytes as UTF-8.
 *
 * Uses the JDK's RFC 4648 decoder instead of Guava's BaseEncoding — one fewer
 * library dependency, identical results for valid canonical base64 input.
 *
 * @param what canonical base64 text (throws IllegalArgumentException if invalid)
 * @return the decoded UTF-8 string
 */
public static String decodeBase64(final String what) {
    return new String(java.util.Base64.getDecoder().decode(what), StandardCharsets.UTF_8);
}
// Decoding a known base64 payload yields the original string.
@Test
public void testDecodeBase64() {
    assertEquals("lolwat.encoded", Tools.decodeBase64("bG9sd2F0LmVuY29kZWQ="));
}
// Resolves the TypeRef to its raw Class and delegates to the Class-based
// overload to get the primitive's size in bytes.
public static int getSizeOfPrimitiveType(TypeRef<?> numericType) {
    return getSizeOfPrimitiveType(getRawType(numericType));
}
// Checks the byte sizes reported for every primitive type, void (0) to double (8).
@Test
public void testGetSizeOfPrimitiveType() {
    List<Integer> sizes = ImmutableList.of(
        void.class, boolean.class, byte.class, char.class, short.class, int.class,
        float.class, long.class, double.class)
        .stream()
        .map(TypeUtils::getSizeOfPrimitiveType)
        .collect(Collectors.toList());
    // JUnit convention: expected first, actual second — the original had them swapped,
    // which garbles the failure message.
    assertEquals(ImmutableList.of(0, 1, 1, 2, 2, 4, 4, 8, 8), sizes);
}
// Redirects logged-in users who are flagged for a password reset to the reset page.
@Override
public void doFilter(HttpRequest request, HttpResponse response, FilterChain chain) throws IOException {
    if (userSession.hasSession() && userSession.isLoggedIn() && userSession.shouldResetPassword()) {
        redirectTo(response, request.getContextPath() + RESET_PASSWORD_PATH);
    }
    // NOTE(review): the chain continues even after the redirect above is issued —
    // confirm this is intentional; most redirect filters return immediately there.
    chain.doFilter(request, response);
}
// A user who is not logged in must pass through the filter without a redirect.
@Test
public void do_not_redirect_if_not_logged_in() throws Exception {
    when(session.isLoggedIn()).thenReturn(false);
    underTest.doFilter(request, response, chain);
    verify(response, never()).sendRedirect(any());
    // Also pin that the request continues down the filter chain.
    verify(chain).doFilter(request, response);
}
// REST root resource: reports this worker's info — currently just the id of the
// Kafka cluster the herder is connected to.
@GET
@Operation(summary = "Get details about this Connect worker and the ID of the Kafka cluster it is connected to")
public ServerInfo serverInfo() {
    return new ServerInfo(herder.kafkaClusterId());
}
// The root resource reports build version/commit and the cluster id from the herder.
@Test
public void testRootGet() {
    when(herder.kafkaClusterId()).thenReturn(MockAdminClient.DEFAULT_CLUSTER_ID);
    ServerInfo info = rootResource.serverInfo();
    assertEquals(AppInfoParser.getVersion(), info.version());
    assertEquals(AppInfoParser.getCommitId(), info.commit());
    assertEquals(MockAdminClient.DEFAULT_CLUSTER_ID, info.clusterId());
    verify(herder).kafkaClusterId();
}
/**
 * Maps a DB2 column definition onto a SeaTunnel Column: resolves the SeaTunnel
 * data type from the DB2 data type name and normalizes the recorded source type
 * (re-appending length/precision/scale where DB2 tracks them).
 * Unknown DB2 types raise a convert error.
 */
@Override
public Column convert(BasicTypeDefine typeDefine) {
    PhysicalColumn.PhysicalColumnBuilder builder =
            PhysicalColumn.builder()
                    .name(typeDefine.getName())
                    .sourceType(typeDefine.getColumnType())
                    .nullable(typeDefine.isNullable())
                    .defaultValue(typeDefine.getDefaultValue())
                    .comment(typeDefine.getComment());
    String db2Type = typeDefine.getDataType().toUpperCase();
    switch (db2Type) {
        case DB2_BOOLEAN:
            builder.sourceType(DB2_BOOLEAN);
            builder.dataType(BasicType.BOOLEAN_TYPE);
            break;
        case DB2_SMALLINT:
            builder.sourceType(DB2_SMALLINT);
            builder.dataType(BasicType.SHORT_TYPE);
            break;
        case DB2_INT:
        case DB2_INTEGER:
            // INTEGER is normalized to the canonical INT source type.
            builder.sourceType(DB2_INT);
            builder.dataType(BasicType.INT_TYPE);
            break;
        case DB2_BIGINT:
            builder.sourceType(DB2_BIGINT);
            builder.dataType(BasicType.LONG_TYPE);
            break;
        case DB2_REAL:
            builder.sourceType(DB2_REAL);
            builder.dataType(BasicType.FLOAT_TYPE);
            break;
        case DB2_DOUBLE:
            builder.sourceType(DB2_DOUBLE);
            builder.dataType(BasicType.DOUBLE_TYPE);
            break;
        case DB2_DECFLOAT:
            builder.sourceType(DB2_DECFLOAT);
            builder.dataType(BasicType.DOUBLE_TYPE);
            break;
        case DB2_DECIMAL:
            builder.sourceType(
                    String.format(
                            "%s(%s,%s)",
                            DB2_DECIMAL, typeDefine.getPrecision(), typeDefine.getScale()));
            builder.dataType(
                    new DecimalType(
                            Math.toIntExact(typeDefine.getPrecision()), typeDefine.getScale()));
            builder.columnLength(typeDefine.getPrecision());
            builder.scale(typeDefine.getScale());
            break;
        case DB2_CHARACTER:
        case DB2_CHAR:
            builder.sourceType(String.format("%s(%d)", DB2_CHAR, typeDefine.getLength()));
            // For char/varchar this length is in bytes
            builder.columnLength(typeDefine.getLength());
            builder.dataType(BasicType.STRING_TYPE);
            break;
        case DB2_VARCHAR:
            builder.sourceType(String.format("%s(%d)", DB2_VARCHAR, typeDefine.getLength()));
            builder.columnLength(typeDefine.getLength());
            builder.dataType(BasicType.STRING_TYPE);
            break;
        case DB2_LONG_VARCHAR:
            builder.sourceType(DB2_LONG_VARCHAR);
            // default length is 32700
            builder.columnLength(typeDefine.getLength());
            builder.dataType(BasicType.STRING_TYPE);
            break;
        case DB2_CLOB:
            builder.sourceType(String.format("%s(%d)", DB2_CLOB, typeDefine.getLength()));
            builder.columnLength(typeDefine.getLength());
            builder.dataType(BasicType.STRING_TYPE);
            break;
        case DB2_GRAPHIC:
            builder.sourceType(String.format("%s(%d)", DB2_GRAPHIC, typeDefine.getLength()));
            // Graphic types store a character count; charTo4ByteLength converts it
            // to a byte-length estimate (presumably a worst-case UTF-8 bound — confirm
            // against TypeDefineUtils).
            builder.columnLength(TypeDefineUtils.charTo4ByteLength(typeDefine.getLength()));
            builder.dataType(BasicType.STRING_TYPE);
            break;
        case DB2_VARGRAPHIC:
            builder.sourceType(String.format("%s(%d)", DB2_VARGRAPHIC, typeDefine.getLength()));
            builder.columnLength(TypeDefineUtils.charTo4ByteLength(typeDefine.getLength()));
            builder.dataType(BasicType.STRING_TYPE);
            break;
        case DB2_DBCLOB:
            builder.sourceType(String.format("%s(%d)", DB2_DBCLOB, typeDefine.getLength()));
            builder.columnLength(TypeDefineUtils.charTo4ByteLength(typeDefine.getLength()));
            builder.dataType(BasicType.STRING_TYPE);
            break;
        case DB2_XML:
            builder.sourceType(DB2_XML);
            // No declared length for XML; advertise the maximum addressable size.
            builder.columnLength((long) Integer.MAX_VALUE);
            builder.dataType(BasicType.STRING_TYPE);
            break;
        case DB2_BINARY:
            builder.sourceType(String.format("%s(%d)", DB2_BINARY, typeDefine.getLength()));
            builder.columnLength(typeDefine.getLength());
            builder.dataType(PrimitiveByteArrayType.INSTANCE);
            break;
        case DB2_VARBINARY:
            builder.sourceType(String.format("%s(%d)", DB2_VARBINARY, typeDefine.getLength()));
            builder.columnLength(typeDefine.getLength());
            builder.dataType(PrimitiveByteArrayType.INSTANCE);
            break;
        case DB2_BLOB:
            builder.sourceType(String.format("%s(%d)", DB2_BLOB, typeDefine.getLength()));
            builder.columnLength(typeDefine.getLength());
            builder.dataType(PrimitiveByteArrayType.INSTANCE);
            break;
        case DB2_DATE:
            builder.sourceType(DB2_DATE);
            builder.dataType(LocalTimeType.LOCAL_DATE_TYPE);
            break;
        case DB2_TIME:
            builder.sourceType(DB2_TIME);
            builder.dataType(LocalTimeType.LOCAL_TIME_TYPE);
            break;
        case DB2_TIMESTAMP:
            // Timestamp precision is tracked in the scale field.
            builder.sourceType(String.format("%s(%d)", DB2_TIMESTAMP, typeDefine.getScale()));
            builder.dataType(LocalTimeType.LOCAL_DATE_TIME_TYPE);
            builder.scale(typeDefine.getScale());
            break;
        default:
            throw CommonError.convertToSeaTunnelTypeError(
                    DatabaseIdentifier.DB_2, db2Type, typeDefine.getName());
    }
    return builder.build();
}
// DB2 SMALLINT maps to SeaTunnel SHORT_TYPE while the source type text is kept.
@Test
public void testConvertSmallint() {
    BasicTypeDefine<Object> typeDefine =
        BasicTypeDefine.builder()
            .name("test")
            .columnType("SMALLINT")
            .dataType("SMALLINT")
            .build();
    Column column = DB2TypeConverter.INSTANCE.convert(typeDefine);
    Assertions.assertEquals(typeDefine.getName(), column.getName());
    Assertions.assertEquals(BasicType.SHORT_TYPE, column.getDataType());
    Assertions.assertEquals(typeDefine.getColumnType(), column.getSourceType());
}
// Covariant override: narrows the channel's local address to the transport's
// LocalAddress type.
@Override
public LocalAddress localAddress() {
    return (LocalAddress) super.localAddress();
}
// Regression test for a registration race in LocalChannel: the client event loop
// is artificially stalled (via closeLatch) exactly when it runs the task created
// inside LocalChannel#doRegister(), while the server closes the child channel
// immediately on init. The connect attempt must still complete within 2s.
@Test
public void localChannelRaceCondition() throws Exception {
    final CountDownLatch closeLatch = new CountDownLatch(1);
    final EventLoopGroup clientGroup = new DefaultEventLoopGroup(1) {
        @Override
        protected EventLoop newChild(Executor threadFactory, Object... args) throws Exception {
            return new SingleThreadEventLoop(this, threadFactory, true) {
                @Override
                protected void run() {
                    for (;;) {
                        Runnable task = takeTask();
                        if (task != null) {
                            /* Only slow down the anonymous class in LocalChannel#doRegister() */
                            if (task.getClass().getEnclosingClass() == LocalChannel.class) {
                                try {
                                    closeLatch.await();
                                } catch (InterruptedException e) {
                                    throw new Error(e);
                                }
                            }
                            task.run();
                            updateLastExecutionTime();
                        }
                        if (confirmShutdown()) {
                            break;
                        }
                    }
                }
            };
        }
    };
    Channel sc = null;
    Channel cc = null;
    try {
        ServerBootstrap sb = new ServerBootstrap();
        sc = sb.group(group2).
                channel(LocalServerChannel.class).
                childHandler(new ChannelInitializer<Channel>() {
                    @Override
                    protected void initChannel(Channel ch) throws Exception {
                        // Server closes the accepted child right away and releases the client loop.
                        ch.close();
                        closeLatch.countDown();
                    }
                }).
                bind(TEST_ADDRESS).
                sync().channel();
        Bootstrap bootstrap = new Bootstrap();
        bootstrap.group(clientGroup).
                channel(LocalChannel.class).
                handler(new ChannelInitializer<Channel>() {
                    @Override
                    protected void initChannel(Channel ch) throws Exception {
                        /* Do nothing */
                    }
                });
        ChannelFuture future = bootstrap.connect(sc.localAddress());
        assertTrue(future.await(2000), "Connection should finish, not time out");
        cc = future.channel();
    } finally {
        closeChannel(cc);
        closeChannel(sc);
        clientGroup.shutdownGracefully(0, 0, SECONDS).await();
    }
}
/**
 * Loads the named class and returns it typed as a UuidGenerator subclass.
 * Throws IllegalArgumentException when the class cannot be loaded or does not
 * implement UuidGenerator.
 */
@SuppressWarnings("unchecked")
public static Class<? extends UuidGenerator> parseUuidGenerator(String cucumberUuidGenerator) {
    final Class<?> candidate;
    try {
        candidate = Class.forName(cucumberUuidGenerator);
    } catch (ClassNotFoundException e) {
        throw new IllegalArgumentException(
            String.format("Could not load UUID generator class for '%s'", cucumberUuidGenerator), e);
    }
    // Cast is safe: the assignability check above guarantees the subtype relation.
    if (UuidGenerator.class.isAssignableFrom(candidate)) {
        return (Class<? extends UuidGenerator>) candidate;
    }
    throw new IllegalArgumentException(String.format("UUID generator class '%s' was not a subclass of '%s'",
        candidate, UuidGenerator.class));
}
// A valid UuidGenerator implementation's class name parses to its Class object.
@Test
void parseUuidGenerator_IncrementingUuidGenerator() {
    // When
    Class<? extends UuidGenerator> uuidGeneratorClass = UuidGeneratorParser
        .parseUuidGenerator(IncrementingUuidGenerator.class.getName());
    // Then
    assertEquals(IncrementingUuidGenerator.class, uuidGeneratorClass);
}
@Override @SuppressWarnings("ReferenceEquality") protected boolean evaluateDependencies(final Dependency dependency, final Dependency nextDependency, final Set<Dependency> dependenciesToRemove) { Dependency main; //CSOFF: InnerAssignment if ((main = getMainGemspecDependency(dependency, nextDependency)) != null) { if (main == dependency) { mergeDependencies(dependency, nextDependency, dependenciesToRemove); } else { mergeDependencies(nextDependency, dependency, dependenciesToRemove); return true; //since we merged into the next dependency - skip forward to the next in mainIterator } } else if ((main = getMainSwiftDependency(dependency, nextDependency)) != null) { if (main == dependency) { mergeDependencies(dependency, nextDependency, dependenciesToRemove); } else { mergeDependencies(nextDependency, dependency, dependenciesToRemove); return true; //since we merged into the next dependency - skip forward to the next in mainIterator } } else if ((main = getMainAndroidDependency(dependency, nextDependency)) != null) { if (main == dependency) { mergeDependencies(dependency, nextDependency, dependenciesToRemove); } else { mergeDependencies(nextDependency, dependency, dependenciesToRemove); return true; //since we merged into the next dependency - skip forward to the next in mainIterator } } else if ((main = getMainDotnetDependency(dependency, nextDependency)) != null) { if (main == dependency) { mergeDependencies(dependency, nextDependency, dependenciesToRemove); } else { mergeDependencies(nextDependency, dependency, dependenciesToRemove); return true; //since we merged into the next dependency - skip forward to the next in mainIterator } } else if ((main = getMainVirtualDependency(dependency, nextDependency)) != null) { if (main == dependency) { mergeDependencies(dependency, nextDependency, dependenciesToRemove); } else { mergeDependencies(nextDependency, dependency, dependenciesToRemove); return true; //since we merged into the next dependency - skip forward to the next 
in mainIterator } } //CSON: InnerAssignment return false; }
// TODO(review): placeholder test — the scaffolded body is fully commented out and
// asserts nothing, so this @Test silently passes and inflates coverage confidence.
// Either implement it with real Dependency fixtures or remove it.
@Test
public void testEvaluateDependencies() {
    // Dependency dependency = null;
    // Dependency nextDependency = null;
    // Set<Dependency> dependenciesToRemove = null;
    // DependencyMergingAnalyzer instance = new DependencyMergingAnalyzer();
    // boolean expResult = false;
    // boolean result = instance.evaluateDependencies(dependency, nextDependency, dependenciesToRemove);
    // assertEquals(expResult, result);
}
/**
 * Renders the rows as a table followed by a row-count footer; an exhausted
 * iterator is rendered via the empty-result path instead.
 */
@Override
public void print(Iterator<RowData> it, PrintWriter printWriter) {
    if (it.hasNext()) {
        final long numRows = printTable(it, printWriter);
        printFooter(printWriter, numRows);
    } else {
        printEmptyResult(it, printWriter);
    }
}
// Tableau-style print with data-inferred column widths over mixed rows
// (nulls, numeric extremes, truncated long strings, Chinese and Japanese text);
// asserts the exact rendered table including the "8 rows in set" footer.
@Test
void testPrintWithMultipleRows() {
    PrintStyle.tableauWithDataInferredColumnWidths(getSchema(), getConverter())
        .print(getData().iterator(), new PrintWriter(outContent));
    // note: the expected result may look irregular because every CJK(Chinese/Japanese/Korean)
    // character's
    // width < 2 in IDE by default, every CJK character usually's width is 2, you can open this
    // source file
    // by vim or just cat the file to check the regular result.
    // The last row of `varchar` value will pad with two ' ' before the column.
    // Because the length of `これは日本語をテストするた` plus the length of `...` is 29,
    // no more Japanese character can be added to the line.
    assertThat(outContent.toString())
        .isEqualTo(
            "+---------+-------------+----------------------+--------------------------------+----------------+----------------------------+"
                + System.lineSeparator()
                + "| boolean | int | bigint | varchar | decimal(10, 5) | timestamp |"
                + System.lineSeparator()
                + "+---------+-------------+----------------------+--------------------------------+----------------+----------------------------+"
                + System.lineSeparator()
                + "| <NULL> | 1 | 2 | abc | 1.23000 | 2020-03-01 18:39:14.000000 |"
                + System.lineSeparator()
                + "| false | <NULL> | 0 | | 1.00000 | 2020-03-01 18:39:14.100000 |"
                + System.lineSeparator()
                + "| true | 2147483647 | <NULL> | abcdefg | 12345.00000 | 2020-03-01 18:39:14.120000 |"
                + System.lineSeparator()
                + "| false | -2147483648 | 9223372036854775807 | <NULL> | 12345.06789 | 2020-03-01 18:39:14.123000 |"
                + System.lineSeparator()
                + "| true | 100 | -9223372036854775808 | abcdefg111 | <NULL> | 2020-03-01 18:39:14.123456 |"
                + System.lineSeparator()
                + "| <NULL> | -1 | -1 | abcdefghijklmnopqrstuvwxyza... | -12345.06789 | <NULL> |"
                + System.lineSeparator()
                + "| <NULL> | -1 | -1 | 这是一段中文 | -12345.06789 | 2020-03-04 18:39:14.000000 |"
                + System.lineSeparator()
                + "| <NULL> | -1 | -1 | これは日本語をテストするた... | -12345.06789 | 2020-03-04 18:39:14.000000 |"
                + System.lineSeparator()
                + "+---------+-------------+----------------------+--------------------------------+----------------+----------------------------+"
                + System.lineSeparator()
                + "8 rows in set"
                + System.lineSeparator());
}
/**
 * Derives a database name from {@code baseString}: names not starting with a
 * letter get a "d_" prefix, then illegal characters are replaced and a
 * timestamp suffix is appended by generateResourceId.
 */
static String generateDatabaseName(String baseString) {
    // Guard charAt(0): an empty base string now also gets the "d_" prefix instead
    // of throwing StringIndexOutOfBoundsException.
    baseString = (!baseString.isEmpty() && Character.isLetter(baseString.charAt(0)))
        ? baseString
        : "d_" + baseString;
    return generateResourceId(
        baseString,
        ILLEGAL_DATABASE_NAME_CHARS,
        REPLACE_DATABASE_NAME_CHAR,
        MAX_DATABASE_NAME_LENGTH,
        TIME_FORMAT);
}
// A base string starting with a non-letter gets the "d_" prefix; illegal
// characters are replaced with '_' before the timestamp suffix is appended.
// NOTE(review): the expected pattern keeps '#' — presumably '#' is not in
// ILLEGAL_DATABASE_NAME_CHARS; confirm against that constant.
@Test
public void testGenerateDatabaseNameShouldReplaceIllegalCharacters() {
    String testBaseString = "!@#_()";
    String actual = generateDatabaseName(testBaseString);
    assertThat(actual).matches("d___#___\\d{8}_\\d{6}_\\d{6}");
}
/**
 * CompletionStage#whenComplete on top of a ParSeq task.
 *
 * Failure path: the action receives (null, originalError); an exception thrown
 * by the action is deliberately swallowed and the ORIGINAL failure is kept —
 * matching the CompletionStage contract, where the prior exception takes
 * precedence over one thrown by the action.
 * Success path: the action receives the result plus getError() (presumably null
 * on success — confirm against the Task API); if the action throws, the
 * resulting stage fails with that exception, otherwise the original result
 * passes through unchanged.
 */
@Override
public ParSeqBasedCompletionStage<T> whenComplete(BiConsumer<? super T, ? super Throwable> action) {
    return nextStageByComposingTask(_task.transform("whenComplete", prevTaskResult -> {
        if (prevTaskResult.isFailed()) {
            try {
                action.accept(null, prevTaskResult.getError());
            } catch (Throwable e) {
                // no ops
            }
            return Failure.of(prevTaskResult.getError());
        } else {
            try {
                action.accept(prevTaskResult.get(), prevTaskResult.getError());
            } catch (Throwable e) {
                return Failure.of(e);
            }
            return Success.of(prevTaskResult.get());
        }
    }));
}
// On a failed stage, whenComplete's action must receive the original (unwrapped)
// exception, not a CompletionException wrapper.
@Test
public void testWhenComplete_useUnwrappedException() throws Exception {
    BiConsumer<String, Throwable> biConsumer = mock(BiConsumer.class);
    CompletionStage<String> completionStage = createTestFailedStage(EXCEPTION);
    finish(completionStage.whenComplete(biConsumer));
    verify(biConsumer, times(1)).accept(null, EXCEPTION);
}
public static SqlType fromValue(final BigDecimal value) { // SqlDecimal does not support negative scale: final BigDecimal decimal = value.scale() < 0 ? value.setScale(0, BigDecimal.ROUND_UNNECESSARY) : value; /* We can't use BigDecimal.precision() directly for all cases, since it defines * precision differently from SQL Decimal. * In particular, if the decimal is between -0.1 and 0.1, BigDecimal precision can be * lower than scale, which is disallowed in SQL Decimal. For example, 0.005 in * BigDecimal has a precision,scale of 1,3; whereas we expect 4,3. * If the decimal is in (-1,1) but outside (-0.1,0.1), the code doesn't throw, but * gives lower precision than expected (e.g., 0.8 has precision 1 instead of 2). * To account for this edge case, we just take the scale and add one and use that * for the precision instead. This works since BigDecimal defines scale as the * number of digits to the right of the period; which is one lower than the precision for * anything in the range (-1, 1). * This covers the case where BigDecimal has a value of 0. * Note: This solution differs from the SQL definition in that it returns (4, 3) for * both "0.005" and ".005", whereas SQL expects (3, 3) for the latter. This is unavoidable * if we use BigDecimal as an intermediate representation, since the two strings are parsed * identically by it to have precision 1. */ if (decimal.compareTo(BigDecimal.ONE) < 0 && decimal.compareTo(BigDecimal.ONE.negate()) > 0) { return SqlTypes.decimal(decimal.scale() + 1, decimal.scale()); } return SqlTypes.decimal(decimal.precision(), Math.max(decimal.scale(), 0)); }
// "0.005" has BigDecimal precision 1 / scale 3; fromValue must widen precision
// to scale + 1 = 4 for values in (-1, 1).
@Test
public void shouldGetSchemaFromDecimal4_3() {
    // When:
    final SqlType schema = DecimalUtil.fromValue(new BigDecimal("0.005"));
    // Then:
    assertThat(schema, is(SqlTypes.decimal(4, 3)));
}
/**
 * Reads path attributes from Azure blob storage. Containers get ETag and
 * last-modified from container properties; files/placeholders are looked up as
 * blobs, with a 404 on a placeholder falling through to a common-prefix probe.
 * A path that exists only as a common prefix yields PathAttributes.EMPTY.
 */
@Override
public PathAttributes find(final Path file, final ListProgressListener listener) throws BackgroundException {
    if(file.isRoot()) {
        return PathAttributes.EMPTY;
    }
    try {
        if(containerService.isContainer(file)) {
            final PathAttributes attributes = new PathAttributes();
            final CloudBlobContainer container = session.getClient().getContainerReference(containerService.getContainer(file).getName());
            container.downloadAttributes(null, null, context);
            final BlobContainerProperties properties = container.getProperties();
            attributes.setETag(properties.getEtag());
            attributes.setModificationDate(properties.getLastModified().getTime());
            return attributes;
        }
        if(file.isFile() || file.isPlaceholder()) {
            try {
                final CloudBlob blob = session.getClient().getContainerReference(containerService.getContainer(file).getName())
                        .getBlobReferenceFromServer(containerService.getKey(file));
                final BlobRequestOptions options = new BlobRequestOptions();
                blob.downloadAttributes(AccessCondition.generateEmptyCondition(), options, context);
                return this.toAttributes(blob);
            }
            catch(StorageException e) {
                switch(e.getHttpStatusCode()) {
                    case HttpStatus.SC_NOT_FOUND:
                        if(file.isPlaceholder()) {
                            // Ignore failure and look for common prefix
                            break;
                        }
                        // Non-placeholder 404 deliberately falls through to the
                        // default case and rethrows.
                    default:
                        throw e;
                }
            }
        }
        // Check for common prefix
        try {
            // CancellingListProgressListener aborts the listing; reaching the
            // return below means the listing produced no cancellation signal.
            new AzureObjectListService(session, context).list(file, new CancellingListProgressListener());
            return PathAttributes.EMPTY;
        }
        catch(ListCanceledException l) {
            // Found common prefix
            return PathAttributes.EMPTY;
        }
    }
    catch(StorageException e) {
        throw new AzureExceptionMappingService().map("Failure to read attributes of {0}", e, file);
    }
    catch(URISyntaxException e) {
        throw new NotfoundException(e.getMessage(), e);
    }
}
// Integration test: AzureAttributesFinderFeature must tolerate missing directory
// placeholders — after deleting the placeholder, find() on the directory and the
// intermediate prefix returns PathAttributes.EMPTY (found as common prefix)
// instead of failing with 404.
@Test
public void testMissingPlaceholder() throws Exception {
    final Path container = new AzureDirectoryFeature(session, null).mkdir(
        new Path(new AlphanumericRandomStringService().random().toLowerCase(Locale.ROOT),
            EnumSet.of(Path.Type.directory, Path.Type.volume)), new TransferStatus());
    final String prefix = new AlphanumericRandomStringService().random();
    final Path intermediate = new Path(container, prefix, EnumSet.of(Path.Type.directory));
    final Path directory = new AzureDirectoryFeature(session, null).mkdir(new Path(intermediate,
        new AsciiRandomStringService().random(), EnumSet.of(Path.Type.directory)), new TransferStatus());
    assertTrue(new AzureFindFeature(session, null).find(directory));
    final Path test = new AzureTouchFeature(session, null).touch(
        new Path(directory, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file)),
        new TransferStatus());
    final AzureAttributesFinderFeature f = new AzureAttributesFinderFeature(session, null);
    final PathAttributes attributes = f.find(container);
    assertNotEquals(PathAttributes.EMPTY, attributes);
    assertNotNull(attributes.getETag());
    assertNotNull(new AzureObjectListService(session, null).list(directory,
        new DisabledListProgressListener()).find(new DefaultPathPredicate(test)));
    new AzureDeleteFeature(session, null).delete(Collections.singletonList(directory),
        new DisabledLoginCallback(), new Delete.DisabledCallback());
    assertNotNull(new AzureObjectListService(session, null).list(directory,
        new DisabledListProgressListener()).find(new DefaultPathPredicate(test)));
    // Still found as prefix
    assertNotNull(new AzureObjectListService(session, null).list(container,
        new DisabledListProgressListener()).find(new DefaultPathPredicate(intermediate)));
    assertNotNull(new AzureObjectListService(session, null).list(intermediate,
        new DisabledListProgressListener()).find(new DefaultPathPredicate(directory)));
    // Ignore 404 failures
    assertSame(PathAttributes.EMPTY, new AzureAttributesFinderFeature(session, null).find(directory));
    assertSame(PathAttributes.EMPTY, new AzureAttributesFinderFeature(session, null).find(intermediate));
    new AzureDeleteFeature(session, null).delete(Collections.singletonList(directory),
        new DisabledLoginCallback(), new Delete.DisabledCallback());
    new AzureDeleteFeature(session, null).delete(Collections.singletonList(container),
        new DisabledLoginCallback(), new Delete.DisabledCallback());
}
/**
 * Returns true when the input parses as an absolute URI with both a scheme and
 * a host (e.g. "http://example.com"); false for null, blank, or malformed input.
 *
 * Uses a plain null/trim check instead of commons-lang StringUtils.isBlank —
 * same outcome for null/empty/whitespace input, one fewer dependency.
 */
public static boolean isUri(String potentialUri) {
    // Blank input can never be a valid URI.
    if (potentialUri == null || potentialUri.trim().isEmpty()) {
        return false;
    }
    try {
        URI uri = new URI(potentialUri);
        // Scheme-only strings like "http://" parse fine but have no host.
        return uri.getScheme() != null && uri.getHost() != null;
    } catch (URISyntaxException e) {
        return false;
    }
}
// A URI with a scheme but no host (e.g. "http://") is not considered valid.
@Test
public void returns_false_when_uri_doesnt_contain_host() {
    assertThat(UriValidator.isUri("http://"), is(false));
}
/**
 * Returns true when any of the configured matchers accepts the input.
 */
public boolean isMatch(String input) {
    return oneof.stream().anyMatch(candidate -> candidate.isMatch(input));
}
// Matches when any configured exact matcher accepts the input; false otherwise.
@Test
void isMatch() {
    ListStringMatch listStringMatch = new ListStringMatch();
    List<StringMatch> oneof = new ArrayList<>();
    StringMatch stringMatch1 = new StringMatch();
    stringMatch1.setExact("1");
    StringMatch stringMatch2 = new StringMatch();
    stringMatch2.setExact("2");
    oneof.add(stringMatch1);
    oneof.add(stringMatch2);
    listStringMatch.setOneof(oneof);
    assertTrue(listStringMatch.isMatch("1"));
    assertTrue(listStringMatch.isMatch("2"));
    assertFalse(listStringMatch.isMatch("3"));
}
// Returns this character's type; may be null when no type was ever set.
public Type type() {
    return type;
}
// type() reflects only what was explicitly set: a prototype without a type
// returns null; the (Type, prototype) constructor sets it; the (name, prototype)
// copy leaves it null.
@Test
void testType() {
    final var prototype = new Character();
    prototype.set(Stats.ARMOR, 1);
    prototype.set(Stats.INTELLECT, 2);
    assertNull(prototype.type());
    final var stupid = new Character(Type.ROGUE, prototype);
    stupid.remove(Stats.INTELLECT);
    assertEquals(Type.ROGUE, stupid.type());
    final var weak = new Character("weak", prototype);
    weak.remove(Stats.ARMOR);
    assertNull(weak.type());
}
/** Creates an ExitStatus backed by the given runtime options. */
public ExitStatus(Options options) {
    this.options = options;
}
// With no feature files found, the runtime should exit with status 0 (success).
@Test
void should_pass_if_no_features_are_found() {
    createRuntime();
    assertThat(exitStatus.exitStatus(), is(equalTo((byte) 0x0)));
}
// SQL LENGTH(varchar): returns the number of Unicode code points (not bytes or
// UTF-16 units) by delegating to countCodePoints on the raw slice.
@Description("count of code points of the given string")
@ScalarFunction
@LiteralParameters("x")
@SqlType(StandardTypes.BIGINT)
public static long length(@SqlType("varchar(x)") Slice slice) {
    return countCodePoints(slice);
}
// LENGTH counts code points: the surrogate-pair case counts as one character,
// and CJK characters count one each.
@Test
public void testLength() {
    assertFunction("LENGTH('')", BIGINT, 0L);
    assertFunction("LENGTH('hello')", BIGINT, 5L);
    assertFunction("LENGTH('Quadratically')", BIGINT, 13L);
    //
    // Test length for non-ASCII
    assertFunction("LENGTH('hello na\u00EFve world')", BIGINT, 17L);
    assertFunction("LENGTH('\uD801\uDC2Dend')", BIGINT, 4L);
    assertFunction("LENGTH('\u4FE1\u5FF5,\u7231,\u5E0C\u671B')", BIGINT, 7L);
}
// Convenience overload: fetches all instances of the service with an empty
// cluster filter (delegates to the two-argument overload).
@Override
public List<Instance> getAllInstances(String serviceName) throws NacosException {
    return getAllInstances(serviceName, new ArrayList<>());
}
// With the failover switch on, instances come from the failover cache and the
// naming proxy must not be queried.
@Test
void testGetAllInstanceFromFailover() throws NacosException {
    when(serviceInfoHolder.isFailoverSwitch()).thenReturn(true);
    ServiceInfo serviceInfo = new ServiceInfo("group1@@service1");
    serviceInfo.setHosts(Collections.singletonList(new Instance()));
    when(serviceInfoHolder.getFailoverServiceInfo(anyString(), anyString(), anyString())).thenReturn(serviceInfo);
    List<Instance> actual = client.getAllInstances("service1", "group1", false);
    verify(proxy, never()).queryInstancesOfService(anyString(), anyString(), anyString(), anyBoolean());
    assertEquals(1, actual.size());
    assertEquals(new Instance(), actual.get(0));
}
/**
 * Partitions the JVM-supported protocol/cipher names into supported (true) and
 * unsupported (false) buckets, sorted alphabetically. A name is supported when
 * it is not excluded by config, is enabled by the JVM, and — if an include list
 * is present — matches at least one include pattern.
 */
static Map<Boolean, List<String>> partitionSupport(
        String[] supportedByJVM,
        String[] enabledByJVM,
        String[] excludedByConfig,
        String[] includedByConfig) {
    final List<Pattern> enabledPatterns = compileAll(enabledByJVM);
    final List<Pattern> excludedPatterns = compileAll(excludedByConfig);
    final List<Pattern> includedPatterns = compileAll(includedByConfig);
    return Arrays.stream(supportedByJVM)
        .sorted(Comparator.naturalOrder())
        .collect(Collectors.partitioningBy(name ->
            !matchesAny(excludedPatterns, name)
                && matchesAny(enabledPatterns, name)
                && (includedPatterns.isEmpty() || matchesAny(includedPatterns, name))));
}

/** Compiles every regex in the array into a Pattern, preserving order. */
private static List<Pattern> compileAll(String[] regexes) {
    return Arrays.stream(regexes).map(Pattern::compile).collect(Collectors.toList());
}

/** True when any pattern fully matches the value. */
private static boolean matchesAny(List<Pattern> patterns, String value) {
    return patterns.stream().anyMatch(pattern -> pattern.matcher(value).matches());
}
// With an include list, a protocol must be enabled, not excluded, AND included:
// only TLSv1.2 survives (SSLv2Hello fails the enabled check; the rest fail the
// include filter).
@Test
void partitionSupportInclude() {
    final String[] supported = {"SSLv2Hello", "SSLv3", "TLSv1", "TLSv1.1", "TLSv1.2"};
    final String[] enabled = {"SSLv3", "TLSv1", "TLSv1.1", "TLSv1.2"};
    final String[] exclude = {"SSL*"};
    final String[] include = {"TLSv1.2|SSLv2Hello"};
    final Map<Boolean, List<String>> partition =
        HttpsConnectorFactory.partitionSupport(supported, enabled, exclude, include);
    assertThat(partition)
        .containsOnly(
            entry(true, Collections.singletonList("TLSv1.2")),
            entry(false, Arrays.asList("SSLv2Hello", "SSLv3", "TLSv1", "TLSv1.1"))
        );
}
/**
 * Returns true when the line ends inside an unterminated quote, treating a
 * doubled quote as an escaped quote. A quote "opened" inside a comment does not
 * count; a comment marker inside an open quote does not terminate it.
 */
public static boolean isUnclosedQuote(final String line) {
    // CHECKSTYLE_RULES.ON: CyclomaticComplexity
    int quoteStart = -1;
    for (int i = 0; i < line.length(); ++i) {
        if (quoteStart < 0 && isQuoteChar(line, i)) {
            quoteStart = i;
        } else if (quoteStart >= 0 && isTwoQuoteStart(line, i) && !isEscaped(line, i)) {
            // Together, two quotes are effectively an escaped quote and don't act as a quote character.
            // Skip the next quote char, since it's coupled with the first.
            i++;
        } else if (quoteStart >= 0 && isQuoteChar(line, i) && !isEscaped(line, i)) {
            quoteStart = -1;
        }
    }
    final int commentInd = line.indexOf(COMMENT);
    if (commentInd < 0) {
        return quoteStart >= 0;
    } else if (quoteStart < 0) {
        return false;
    } else {
        // Comment marker before the open quote means the quote sits inside a
        // comment — not an unclosed quote. Marker after the open quote is quoted
        // text, so the quote really is unclosed.
        return commentInd > quoteStart;
    }
}
// A quote opened and closed on the same line is not "unclosed".
@Test
public void shouldNotFindUnclosedQuote_endsQuote() {
    // Given:
    final String line = "some line 'this is in a quote'";
    // Then:
    assertThat(UnclosedQuoteChecker.isUnclosedQuote(line), is(false));
}
/**
 * Creates a Read transform with defaults: no queue/exchange declaration, no max
 * read time, an unbounded record count, and correlation-id de-duplication off.
 */
public static Read read() {
    return new AutoValue_RabbitMqIO_Read.Builder()
        .setQueueDeclare(false)
        .setExchangeDeclare(false)
        .setMaxReadTime(null)
        .setMaxNumRecords(Long.MAX_VALUE)
        .setUseCorrelationId(false)
        .build();
}
// Reading with correlation-id de-duplication enabled succeeds when the
// publisher sets a correlation id on every message.
@Test
public void testUseCorrelationIdSucceedsWhenIdsPresent() throws Exception {
    int messageCount = 1;
    AMQP.BasicProperties publishProps =
        new AMQP.BasicProperties().builder().correlationId("123").build();
    doExchangeTest(
        new ExchangeTestPlan(
            RabbitMqIO.read()
                .withExchange("CorrelationIdSuccess", "fanout")
                .withUseCorrelationId(true),
            messageCount,
            messageCount,
            publishProps));
}
// Decodes a complete ASN.1 OBJECT IDENTIFIER value by delegating to the
// offset/length overload over the whole array.
public static String decodeObjectIdentifier(byte[] data) {
    return decodeObjectIdentifier(data, 0, data.length);
}
// Sub-identifier 131 needs a multi-byte base-128 encoding (0x81 0x03); the
// leading 0x2a encodes the "1.2" prefix.
@Test
public void decodeObjectIdentifierWithDoubleBytes() {
    assertEquals("1.2.131", Asn1Utils.decodeObjectIdentifier(new byte[] { 0x2a, (byte) 0x81, 0x03 }));
}
// Component activation: registers the driver listener and the pipeconf event
// sink, then checks for merged drivers whose creation events may have been
// missed and, if any, schedules a one-shot watchdog task to create them.
@Activate
public void activate() {
    driverAdminService.addListener(driverListener);
    eventDispatcher.addSink(PiPipeconfEvent.class, listenerRegistry);
    checkMissingMergedDrivers();
    if (!missingMergedDrivers.isEmpty()) {
        // Missing drivers should be created upon detecting registration
        // events of a new pipeconf or a base driver. If, for any reason, we
        // miss such event, here's a watchdog task.
        SharedExecutors.getPoolThreadExecutor()
                .execute(this::missingDriversWatchdogTask);
    }
    log.info("Started");
}
// Verifies that activation leaves the expected services wired into the manager.
// (The original asserted driverAdminService twice with different messages; the
// duplicate has been removed.)
@Test
public void activate() {
    assertEquals("Incorrect driver admin service", driverAdminService, mgr.driverAdminService);
    assertEquals("Incorrect configuration service", cfgService, mgr.cfgService);
}
/**
 * Selects the protocol whose magic header matches the given bytes.
 *
 * @param magicHeadBytes the leading bytes read from the wire
 * @return the first registered protocol recognizing the magic bytes, or {@code null} if none match
 */
public static Protocol adaptiveProtocol(byte[] magicHeadBytes) {
    return TYPE_PROTOCOL_MAP.values().stream()
            .filter(candidate -> candidate.protocolInfo().isMatchMagic(magicHeadBytes))
            .findFirst()
            .orElse(null);
}
// NOTE(review): this test body is empty — it currently verifies nothing.
// TODO: exercise adaptiveProtocol() with matching and non-matching magic bytes.
@Test
public void adaptiveProtocol() throws Exception {
}
/**
 * Returns the hint rules collected so far.
 * Note: exposes the internal list directly (not a defensive copy).
 *
 * @return the list of parsed hint rules
 */
public List<HintRule> getHintRules() {
    return hintRules;
}
/**
 * Parses the sample hints.xml against the 1.1 schema with a validating SAX parser
 * and checks that exactly two hint rules are produced.
 * Fix: the InputStream/Reader were never closed — now managed by try-with-resources.
 */
@Test
public void testHandler() throws ParserConfigurationException, SAXException, IOException {
    File file = BaseTest.getResourceAsFile(this, "hints.xml");
    File schema = BaseTest.getResourceAsFile(this, "schema/dependency-hint.1.1.xsd");
    HintHandler handler = new HintHandler();
    SAXParserFactory factory = SAXParserFactory.newInstance();
    factory.setNamespaceAware(true);
    factory.setValidating(true);
    SAXParser saxParser = factory.newSAXParser();
    saxParser.setProperty(HintParser.JAXP_SCHEMA_LANGUAGE, HintParser.W3C_XML_SCHEMA);
    saxParser.setProperty(HintParser.JAXP_SCHEMA_SOURCE, schema);
    XMLReader xmlReader = saxParser.getXMLReader();
    xmlReader.setErrorHandler(new HintErrorHandler());
    xmlReader.setContentHandler(handler);
    // Close the stream/reader even if parsing throws.
    try (InputStream inputStream = new FileInputStream(file);
            Reader reader = new InputStreamReader(inputStream, StandardCharsets.UTF_8)) {
        InputSource in = new InputSource(reader);
        xmlReader.parse(in);
    }
    List<HintRule> result = handler.getHintRules();
    assertEquals("two hint rules should have been loaded", 2, result.size());
}
/**
 * Visits one GraphQL field and emits the corresponding JSON-schema fragment into the
 * schema tree being built. Mutates {@code parentNode}/{@code currentNode} to descend
 * into objects and list item objects, so visit order matters.
 */
@Override
public void visitField(QueryVisitorFieldEnvironment queryVisitorFieldEnvironment) {
    // Each new property should put as required and we should not allow additional properties.
    ArrayNode required = getRequiredArrayNode();
    required.add(queryVisitorFieldEnvironment.getFieldDefinition().getName());
    // Even if type is marked as optional in the GraphQL Schema, it must be present and
    // serialized as null into the Json response. We have to unwrap it first.
    GraphQLOutputType outputType = queryVisitorFieldEnvironment.getFieldDefinition().getType();
    Type definitionType = queryVisitorFieldEnvironment.getFieldDefinition().getDefinition().getType();
    if (TypeUtil.isNonNull(definitionType)) {
        definitionType = TypeUtil.unwrapOne(definitionType);
    }
    // Add this field to current node.
    ObjectNode fieldNode = currentNode.putObject(queryVisitorFieldEnvironment.getFieldDefinition().getName());
    TypeInfo definitionTypeInfo = TypeInfo.typeInfo(definitionType);
    // Treat most common case first: we've got a scalar property.
    if (ScalarInfo.isGraphqlSpecifiedScalar(definitionTypeInfo.getName())) {
        fieldNode.put(JSON_SCHEMA_TYPE, getJsonScalarType(definitionTypeInfo.getName()));
    } else if (outputType instanceof GraphQLObjectType) {
        // Then we deal with objects.
        fieldNode.put(JSON_SCHEMA_TYPE, JSON_SCHEMA_OBJECT_TYPE);
        ObjectNode properties = fieldNode.putObject(JSON_SCHEMA_PROPERTIES);
        parentNode.put(JSON_SCHEMA_ADDITIONAL_PROPERTIES, false);
        fieldNode.put(JSON_SCHEMA_ADDITIONAL_PROPERTIES, false);
        // Descend: subsequent fields are written into this object's properties.
        parentNode = fieldNode;
        currentNode = properties;
    } else if (TypeUtil.isList(definitionType)) {
        // Then we deal with lists.
        fieldNode.put(JSON_SCHEMA_TYPE, JSON_SCHEMA_ARRAY_TYPE);
        ObjectNode items = fieldNode.putObject(JSON_SCHEMA_ITEMS);
        // Depending on item type, we should initialize an object structure.
        TypeName itemTypeInfo = TypeUtil.unwrapAll(definitionType);
        if (!ScalarInfo.isGraphqlSpecifiedScalar(itemTypeInfo.getName())) {
            // Non-scalar items: the item schema is an object we descend into.
            items.put(JSON_SCHEMA_TYPE, JSON_SCHEMA_OBJECT_TYPE);
            ObjectNode properties = items.putObject(JSON_SCHEMA_PROPERTIES);
            items.put(JSON_SCHEMA_ADDITIONAL_PROPERTIES, false);
            parentNode = items;
            currentNode = properties;
        }
    } else if (outputType instanceof GraphQLEnumType enumType) {
        // Then we deal with enumerations.
        fieldNode.put(JSON_SCHEMA_TYPE, JSON_SCHEMA_STRING_TYPE);
        ArrayNode enumNode = fieldNode.putArray(JSON_SCHEMA_ENUM);
        for (GraphQLEnumValueDefinition valDef : enumType.getValues()) {
            enumNode.add(valDef.getName());
        }
    }
}
// Verifies that visiting an enum-typed field produces a "string"-typed JSON-schema
// property whose "enum" array lists the enum's value names in order.
@Test
public void testVisitFieldWithEnumType() {
    QueryVisitorFieldEnvironment environment = mock(QueryVisitorFieldEnvironment.class);
    GraphQLEnumType enumType = mock(GraphQLEnumType.class);
    TypeName definitionType = TypeName.newTypeName().name("EnumType").build();
    when(environment.getFieldDefinition()).thenReturn(mock(GraphQLFieldDefinition.class));
    when(environment.getFieldDefinition().getDefinition()).thenReturn(mock(FieldDefinition.class));
    when(environment.getFieldDefinition().getType()).thenReturn(enumType);
    when(environment.getFieldDefinition().getDefinition().getType()).thenReturn(definitionType);
    when(environment.getFieldDefinition().getName()).thenReturn("enumField");
    EnumValueDefinition valueDef1 = EnumValueDefinition.newEnumValueDefinition().name("VALUE1").build();
    EnumValueDefinition valueDef2 = EnumValueDefinition.newEnumValueDefinition().name("VALUE2").build();
    GraphQLEnumValueDefinition enumValueDef1 = new GraphQLEnumValueDefinition.Builder().name("VALUE1")
            .value("Description").build();
    GraphQLEnumValueDefinition enumValueDef2 = new GraphQLEnumValueDefinition.Builder().name("VALUE2")
            .value("Description").build();
    when(enumType.getValues()).thenReturn(List.of(enumValueDef1, enumValueDef2));
    visitor.visitField(environment);
    JsonNode fieldNode = jsonSchemaData.get(JsonSchemaBuilderQueryVisitor.JSON_SCHEMA_PROPERTIES).get("enumField");
    assertEquals("string", fieldNode.get(JsonSchemaBuilderQueryVisitor.JSON_SCHEMA_TYPE).asText());
    assertEquals(2, fieldNode.get(JsonSchemaBuilderQueryVisitor.JSON_SCHEMA_ENUM).size());
    assertEquals("VALUE1", fieldNode.get(JsonSchemaBuilderQueryVisitor.JSON_SCHEMA_ENUM).get(0).asText());
    assertEquals("VALUE2", fieldNode.get(JsonSchemaBuilderQueryVisitor.JSON_SCHEMA_ENUM).get(1).asText());
}
/**
 * Materializes this stream to the given topic using the stream's own key/value
 * serdes and no explicit partitioner (the third Produced argument is null).
 */
@Override
public void to(final String topic) {
    to(topic, Produced.with(keySerde, valueSerde, null));
}
// Verifies that to(topic, Produced) rejects a null topic with a descriptive NPE.
@Test
public void shouldNotAllowNullTopicOnToWithProduced() {
    final NullPointerException exception = assertThrows(
        NullPointerException.class,
        () -> testStream.to((String) null, Produced.as("to")));
    assertThat(exception.getMessage(), equalTo("topic can't be null"));
}
/**
 * Starts the task asynchronously with no completion callback.
 * Delegates to the two-argument overload with a null callback.
 */
@Override
public <T> AsyncResult<T> startProcess(Callable<T> task) {
    return startProcess(task, null);
}
// Verifies the lifecycle of a long-running task submitted without a callback:
// the result is initially incomplete, getValue() fails before completion,
// and after await() the original result object is returned exactly once.
@Test
void testLongRunningTaskWithoutCallback() {
    assertTimeout(ofMillis(5000), () -> {
        // Instantiate a new executor and start a new 'null' task ...
        final var executor = new ThreadAsyncExecutor();
        final var result = new Object();
        when(task.call()).thenAnswer(i -> {
            Thread.sleep(1500);
            return result;
        });
        final var asyncResult = executor.startProcess(task);
        assertNotNull(asyncResult);
        assertFalse(asyncResult.isCompleted());
        // Reading the value before completion must fail fast.
        try {
            asyncResult.getValue();
            fail("Expected IllegalStateException when calling AsyncResult#getValue on a non-completed task");
        } catch (IllegalStateException e) {
            assertNotNull(e.getMessage());
        }
        // Our task should only execute once, but it can take a while ...
        verify(task, timeout(3000).times(1)).call();
        // Prevent timing issues, and wait until the result is available
        asyncResult.await();
        assertTrue(asyncResult.isCompleted());
        verifyNoMoreInteractions(task);
        // ... and the result should be exactly the same object
        assertSame(result, asyncResult.getValue());
    });
}
/**
 * Returns the request URL with any query string stripped off.
 *
 * @return the URL up to (but not including) the first '?', or the full URL if none
 */
@Override
public String getRequestURL() {
    final String fullUrl = request.getRequestURL().toString();
    final int queryStart = fullUrl.indexOf('?');
    return queryStart < 0 ? fullUrl : fullUrl.substring(0, queryStart);
}
// Verifies that getRequestURL() strips the query string from the underlying request URL.
@Test
public void testGetRequestUrl() throws Exception {
    when(request.getRequestURL()).thenReturn(new StringBuffer("https://pac4j.org?name=value&name2=value2"));
    WebContext context = new JEEContext(request, response);
    assertEquals("https://pac4j.org", context.getRequestURL());
}
/**
 * Waits up to the deadline for all futures and returns their values, using the
 * default exception handler that logs and ignores everything except member-left
 * conditions. Delegates to the handler-accepting overload.
 */
@PrivateApi
public static <V> Collection<V> returnWithDeadline(Collection<Future<V>> futures, long timeout, TimeUnit timeUnit) {
    return returnWithDeadline(futures, timeout, timeUnit, IGNORE_ALL_EXCEPT_LOG_MEMBER_LEFT);
}
// Verifies that when futures fail, the supplied exception handler receives an
// ExecutionException wrapping the task's SpecialRuntimeException.
// NOTE(review): two failing futures are submitted but only one collected throwable
// is asserted — presumably the collector de-duplicates or only one future fails
// while waitLock is held; confirm against FailingCallable/ExceptionCollector.
@Test
public void test_returnWithDeadline_failing_second() {
    AtomicBoolean waitLock = new AtomicBoolean(true);
    List<Future<Integer>> futures = new ArrayList<>();
    for (int i = 0; i < 2; i++) {
        futures.add(executorService.submit(new FailingCallable(waitLock)));
    }
    ExceptionCollector exceptionHandler = new ExceptionCollector();
    returnWithDeadline(futures, 5, TimeUnit.SECONDS, exceptionHandler);
    assertEquals(1, exceptionHandler.throwables.size());
    Throwable throwable = exceptionHandler.throwables.iterator().next();
    assertTrue(throwable instanceof ExecutionException);
    assertTrue(throwable.getCause() instanceof SpecialRuntimeException);
}
/**
 * Enriches the incoming message with the originator device's credentials,
 * writing them either into message metadata or into the message data object
 * depending on {@code fetchTo}. Fails the message if the originator is not a
 * device or its credentials cannot be found.
 */
@Override
public void onMsg(TbContext ctx, TbMsg msg) throws ExecutionException, InterruptedException, TbNodeException {
    var originator = msg.getOriginator();
    // Only parse the message body when we will write into it.
    var msgDataAsObjectNode = TbMsgSource.DATA.equals(fetchTo) ? getMsgDataAsObjectNode(msg) : null;
    if (!EntityType.DEVICE.equals(originator.getEntityType())) {
        ctx.tellFailure(msg, new RuntimeException("Unsupported originator type: " + originator.getEntityType() + "!"));
        return;
    }
    var deviceId = new DeviceId(msg.getOriginator().getId());
    var deviceCredentials = ctx.getDeviceCredentialsService().findDeviceCredentialsByDeviceId(ctx.getTenantId(), deviceId);
    if (deviceCredentials == null) {
        ctx.tellFailure(msg, new RuntimeException("Failed to get Device Credentials for device: " + deviceId + "!"));
        return;
    }
    var credentialsType = deviceCredentials.getCredentialsType();
    var credentialsInfo = ctx.getDeviceCredentialsService().toCredentialsInfo(deviceCredentials);
    var metaData = msg.getMetaData().copy();
    if (TbMsgSource.METADATA.equals(fetchTo)) {
        metaData.putValue(CREDENTIALS_TYPE, credentialsType.name());
        // Token/certificate credentials are plain strings; others serialize as JSON.
        if (credentialsType.equals(DeviceCredentialsType.ACCESS_TOKEN) || credentialsType.equals(DeviceCredentialsType.X509_CERTIFICATE)) {
            metaData.putValue(CREDENTIALS, credentialsInfo.asText());
        } else {
            metaData.putValue(CREDENTIALS, JacksonUtil.toString(credentialsInfo));
        }
    } else if (TbMsgSource.DATA.equals(fetchTo)) {
        msgDataAsObjectNode.put(CREDENTIALS_TYPE, credentialsType.name());
        msgDataAsObjectNode.set(CREDENTIALS, credentialsInfo);
    }
    TbMsg transformedMsg = transformMessage(msg, msgDataAsObjectNode, metaData);
    ctx.tellSuccess(transformedMsg);
}
// Verifies that when the credentials service returns null for the device,
// the node routes the message to failure (never to success) with a RuntimeException.
@Test
void givenGetDeviceCredentials_whenOnMsg_thenShouldTellFailure() throws Exception {
    // GIVEN
    doReturn(deviceCredentialsServiceMock).when(ctxMock).getDeviceCredentialsService();
    willAnswer(invocation -> null).given(deviceCredentialsServiceMock).findDeviceCredentialsByDeviceId(any(), any());

    // WHEN
    node.onMsg(ctxMock, getTbMsg(deviceId));

    // THEN
    var newMsgCaptor = ArgumentCaptor.forClass(TbMsg.class);
    var exceptionCaptor = ArgumentCaptor.forClass(Exception.class);
    verify(ctxMock, never()).tellSuccess(any());
    verify(ctxMock, times(1)).tellFailure(newMsgCaptor.capture(), exceptionCaptor.capture());
    assertThat(exceptionCaptor.getValue()).isInstanceOf(RuntimeException.class);
}
/**
 * Returns (creating if absent) the cache-level sensor identified by thread, task,
 * store and ratio name. Access to the sensor registry is synchronized so concurrent
 * callers observe a single sensor per key.
 *
 * @param ratioName used as the sensor name suffix
 * @param parents   optional parent sensors the new sensor reports into
 */
public Sensor cacheLevelSensor(final String threadId,
                               final String taskName,
                               final String storeName,
                               final String ratioName,
                               final Sensor.RecordingLevel recordingLevel,
                               final Sensor... parents) {
    // use ratio name as sensor suffix
    final String sensorPrefix = cacheSensorPrefix(threadId, taskName, storeName);
    synchronized (cacheLevelSensors) {
        return getSensors(cacheLevelSensors, ratioName, sensorPrefix, recordingLevel, parents);
    }
}
// Verifies that requesting a cache-level sensor that already exists returns
// the previously registered sensor instead of creating a new one.
@Test
public void shouldGetExistingCacheLevelSensor() {
    final Metrics metrics = mock(Metrics.class);
    final RecordingLevel recordingLevel = RecordingLevel.INFO;
    final String processorCacheName = "processorNodeName";
    setupGetExistingSensorTest(metrics);
    final StreamsMetricsImpl streamsMetrics = new StreamsMetricsImpl(metrics, CLIENT_ID, VERSION, time);

    final Sensor actualSensor = streamsMetrics.cacheLevelSensor(
        THREAD_ID1,
        TASK_ID1,
        processorCacheName,
        SENSOR_NAME_1,
        recordingLevel
    );

    assertThat(actualSensor, is(equalToObject(sensor)));
}
/**
 * One maintenance cycle: snapshots the node list (under the unallocated lock, so
 * children of freshly provisioned hosts are included), provisions preprovisioned
 * capacity, and marks excess hosts for removal. Any provisioning failure aborts
 * the cycle before removal so hosts are never deprovisioned on stale data.
 *
 * @return the result of markForRemoval, or 0 if provisioning failed
 */
@Override
protected double maintain() {
    List<Node> provisionedSnapshot;
    try {
        NodeList nodes;
        // Host and child nodes are written in separate transactions, but both are written while holding the
        // unallocated lock. Hold the unallocated lock while reading nodes to ensure we get all the children
        // of newly provisioned hosts.
        try (Mutex ignored = nodeRepository().nodes().lockUnallocated()) {
            nodes = nodeRepository().nodes().list();
        }

        provisionedSnapshot = provision(nodes);
    } catch (NodeAllocationException | IllegalStateException e) {
        // Expected/recoverable failures: log the message only, no stack trace.
        log.log(Level.WARNING, "Failed to allocate preprovisioned capacity and/or find excess hosts: " + e.getMessage());
        return 0;  // avoid removing excess hosts
    } catch (RuntimeException e) {
        // Unexpected failures: log with full stack trace.
        log.log(Level.WARNING, "Failed to allocate preprovisioned capacity and/or find excess hosts", e);
        return 0;  // avoid removing excess hosts
    }

    return markForRemoval(provisionedSnapshot);
}
// NOTE(review): the test name says "does_not_deprovision" but the assertions check
// that the failed host IS deprovisioned and counted — the name or the assertions
// appear inconsistent; confirm the intended behavior before relying on the name.
@Test
public void does_not_deprovision_when_preprovisioning_enabled() {
    tester = new DynamicProvisioningTester().addInitialNodes();
    setPreprovisionCapacityFlag(tester, new ClusterCapacity(1, 1.0, 3.0, 2.0, 1.0, "fast", "remote", "x86_64", null));
    Optional<Node> failedHost = node("host2");
    assertTrue(failedHost.isPresent());

    tester.maintain();
    assertSame("Failed host is deprovisioned", State.deprovisioned, node(failedHost.get().hostname()).get().state());
    assertEquals(1, tester.hostProvisioner.deprovisionedHosts());
}
/**
 * Copies {@code file} to {@code target} on Dropbox. If the target already exists
 * it is deleted first, since the Dropbox copy API does not overwrite. Copying a
 * folder copies its entire contents.
 *
 * @return the target path with attributes refreshed from the copy result
 * @throws BackgroundException if the Dropbox API call fails
 */
@Override
public Path copy(final Path file, final Path target, final TransferStatus status, final ConnectionCallback callback, final StreamListener listener) throws BackgroundException {
    try {
        if(status.isExists()) {
            if(log.isWarnEnabled()) {
                log.warn(String.format("Delete file %s to be replaced with %s", target, file));
            }
            new DropboxDeleteFeature(session).delete(Collections.singletonMap(target, status), callback, new Delete.DisabledCallback());
        }
        // If the source path is a folder all its contents will be copied.
        final RelocationResult result = new DbxUserFilesRequests(session.getClient(file)).copyV2(containerService.getKey(file), containerService.getKey(target));
        // Report the transferred byte count to the listener.
        listener.sent(status.getLength());
        return target.withAttributes(new DropboxAttributesFinderFeature(session).toAttributes(result.getMetadata()));
    }
    catch(DbxException e) {
        throw new DropboxExceptionMappingService().map("Cannot copy {0}", e, file);
    }
}
// Integration test: copies a freshly created file to a new path and verifies that
// both source and target exist afterwards and the copy carries real attributes.
@Test
public void testCopyFile() throws Exception {
    final Path file = new Path(new DefaultHomeFinderService(session).find(), new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
    final Path target = new Path(new DefaultHomeFinderService(session).find(), new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
    new DropboxTouchFeature(session).touch(file, new TransferStatus());
    assertTrue(new DropboxFindFeature(session).find(file));
    final Path copy = new DropboxCopyFeature(session).copy(file, target, new TransferStatus(), new DisabledConnectionCallback(), new DisabledStreamListener());
    assertNotEquals(PathAttributes.EMPTY, copy.attributes());
    assertTrue(new DropboxFindFeature(session).find(file));
    assertTrue(new DropboxFindFeature(session).find(target));
    // Clean up the copied file.
    new DropboxDeleteFeature(session).delete(Collections.singletonList(target), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
/**
 * Polls the underlying queue with a timeout and releases the dequeued element's
 * memory budget from the limiter.
 * NOTE(review): when the poll times out, {@code e} is null and is still passed to
 * releaseInterruptibly — presumably the limiter treats null as a no-op; confirm.
 */
@Override
public E poll(final long timeout, final TimeUnit unit) throws InterruptedException {
    final E e = super.poll(timeout, unit);
    memoryLimiter.releaseInterruptibly(e, timeout, unit);
    return e;
}
// Verifies that polling returns the offered element and that the tracked
// memory footprint drops back to zero afterwards.
@Test
public void testPoll() {
    MemoryLimitedLinkedBlockingQueue<Integer> queue = new MemoryLimitedLinkedBlockingQueue<>(instrumentation);
    Integer testObject = 0;
    queue.offer(testObject);
    assertEquals(testObject, queue.poll());
    assertEquals(0, queue.getCurrentMemory());
}
/**
 * Returns the value adjacent to {@code value} in the given orderable type's domain:
 * the previous value when {@code isPrevious} is true, otherwise the next one.
 * Returns {@link Optional#empty()} for types without adjacent-value support
 * (and, per the per-type helpers, at domain boundaries).
 *
 * @throws IllegalStateException if the type is not orderable
 * @throws NullPointerException  if value is null
 */
public static Optional<Object> getAdjacentValue(Type type, Object value, boolean isPrevious) {
    if (!type.isOrderable()) {
        throw new IllegalStateException("Type is not orderable: " + type);
    }
    requireNonNull(value, "value is null");

    // Dispatch on the concrete type; BIGINT and timestamps share 64-bit stepping.
    if (type.equals(BIGINT) || type instanceof TimestampType) {
        return getBigintAdjacentValue(value, isPrevious);
    }

    if (type.equals(INTEGER) || type.equals(DATE)) {
        return getIntegerAdjacentValue(value, isPrevious);
    }

    if (type.equals(SMALLINT)) {
        return getSmallIntAdjacentValue(value, isPrevious);
    }

    if (type.equals(TINYINT)) {
        return getTinyIntAdjacentValue(value, isPrevious);
    }

    if (type.equals(DOUBLE)) {
        return getDoubleAdjacentValue(value, isPrevious);
    }

    if (type.equals(REAL)) {
        return getRealAdjacentValue(value, isPrevious);
    }

    // Unsupported type: no adjacent value can be computed.
    return Optional.empty();
}
// Verifies previous-value computation for TINYINT across the whole domain:
// empty at the minimum, and value-1 everywhere else up to the maximum.
@Test
public void testPreviousValueForTinyInt() {
    long minValue = Byte.MIN_VALUE;
    long maxValue = Byte.MAX_VALUE;

    assertThat(getAdjacentValue(TINYINT, minValue, true))
            .isEqualTo(Optional.empty());
    assertThat(getAdjacentValue(TINYINT, minValue + 1, true))
            .isEqualTo(Optional.of(minValue));

    assertThat(getAdjacentValue(TINYINT, 123L, true))
            .isEqualTo(Optional.of(122L));

    assertThat(getAdjacentValue(TINYINT, maxValue - 1, true))
            .isEqualTo(Optional.of(maxValue - 2));
    assertThat(getAdjacentValue(TINYINT, maxValue, true))
            .isEqualTo(Optional.of(maxValue - 1));
}
/**
 * Removes the given gender modifier from any emoji in {@code text}.
 * Thin delegation to {@link JavaEmojiUtils#removeGender}.
 */
public static CharSequence removeGender(
    @NonNull CharSequence text, @NonNull JavaEmojiUtils.Gender gender) {
  return JavaEmojiUtils.removeGender(text, gender);
}
// Verifies gender-modifier stripping: a genderless emoji is unchanged for every
// gender, and both minimally- and fully-qualified gender sequences (with and
// without skin tone) reduce to the base emoji.
@Test
public void testRemoveGender() {
    // A plain thumbs-up has no gender modifier, so it must pass through untouched.
    for (JavaEmojiUtils.Gender gender : JavaEmojiUtils.Gender.values()) {
        Assert.assertEquals(
            "\uD83D\uDC4D", EmojiUtils.removeGender("\uD83D\uDC4D", gender).toString());
    }
    // woman-minimal-qualified
    Assert.assertEquals(
        "\uD83E\uDDD4",
        EmojiUtils.removeGender("\uD83E\uDDD4\u200D♀", JavaEmojiUtils.Gender.Woman).toString());
    // woman-fully-qualified
    Assert.assertEquals(
        "\uD83E\uDDD4",
        EmojiUtils.removeGender("\uD83E\uDDD4\u200D♀️", JavaEmojiUtils.Gender.Woman).toString());
    // man-minimal-qualified-dark-skin
    Assert.assertEquals(
        "\uD83E\uDDD4\uD83C\uDFFF",
        EmojiUtils.removeGender("\uD83E\uDDD4\uD83C\uDFFF\u200D♂", JavaEmojiUtils.Gender.Man)
            .toString());
    // man-fully-qualified-dark-skin
    Assert.assertEquals(
        "\uD83E\uDDD4\uD83C\uDFFF",
        EmojiUtils.removeGender("\uD83E\uDDD4\uD83C\uDFFF\u200D♂️", JavaEmojiUtils.Gender.Man)
            .toString());
}
/**
 * Performs one heartbeat with the resource manager; implemented by subclasses.
 *
 * @throws Exception if the heartbeat fails; callers are expected to handle it
 */
protected abstract void heartbeat() throws Exception;
// Verifies that a YarnRuntimeException thrown by heartbeat() does not kill the
// allocator loop: the loop survives the first failing heartbeat and exits cleanly
// after stop() is invoked from the mocked clock (getTime called exactly twice).
@Test(timeout = 2000)
public void testRMContainerAllocatorYarnRuntimeExceptionIsHandled() throws Exception {
    ClientService mockClientService = mock(ClientService.class);
    AppContext mockContext = mock(AppContext.class);
    MockRMCommunicator mockRMCommunicator = new MockRMCommunicator(mockClientService, mockContext);
    final RMCommunicator communicator = spy(mockRMCommunicator);
    Clock mockClock = mock(Clock.class);
    when(mockContext.getClock()).thenReturn(mockClock);

    // First heartbeat throws; subsequent heartbeats succeed.
    doThrow(new YarnRuntimeException("Test")).doNothing()
        .when(communicator).heartbeat();

    // Second getTime() call stops the communicator; a third call would fail the test.
    when(mockClock.getTime()).thenReturn(1L).thenAnswer(
        (Answer<Long>) invocation -> {
            communicator.stop();
            return 2L;
        }).thenThrow(new AssertionError(
            "GetClock called second time, when it should not " +
                "have since the thread should have quit"));

    AllocatorRunnable testRunnable = communicator.new AllocatorRunnable();
    testRunnable.run();

    verify(mockClock, times(2)).getTime();
}
/**
 * Instantiates the compression provider registered under the given name
 * (case-insensitive). Returns {@code null} when the name is null, no plugin
 * matches, or every matching plugin fails to load.
 * Fix: removed the dead {@code provider = null} reassignment (it was already null)
 * and documented the deliberately swallowed load failure.
 *
 * @param name the provider name to look up, may be null
 * @return a new provider instance, or {@code null} if none could be created
 */
@Override
public CompressionProvider createCompressionProviderInstance( String name ) {
  List<PluginInterface> providers = getPlugins();
  if ( providers == null ) {
    return null;
  }
  for ( PluginInterface plugin : providers ) {
    if ( name != null && name.equalsIgnoreCase( plugin.getName() ) ) {
      try {
        return PluginRegistry.getInstance().loadClass( plugin, CompressionProvider.class );
      } catch ( Exception ignored ) {
        // Deliberate best-effort: if this plugin fails to load, keep scanning the
        // remaining plugins for another with the same name instead of failing.
      }
    }
  }
  return null;
}
// Verifies that each core compression provider can be instantiated by name and
// reports the expected type, name and description.
// Fix: the five copy-pasted assertion groups are extracted into one helper.
@Test
public void testCreateCoreProviders() {
  assertProviderCreated( "None", NoneCompressionProvider.class, "No compression" );
  assertProviderCreated( "Zip", ZIPCompressionProvider.class, "ZIP compression" );
  assertProviderCreated( "GZip", GZIPCompressionProvider.class, "GZIP compression" );
  assertProviderCreated( "Snappy", SnappyCompressionProvider.class, "Snappy compression" );
  assertProviderCreated( "Hadoop-snappy", HadoopSnappyCompressionProvider.class, "Hadoop Snappy compression" );
}

/**
 * Creates the provider registered under {@code name} and asserts its type,
 * reported name and description.
 */
private void assertProviderCreated( String name, Class<?> expectedType, String expectedDescription ) {
  CompressionProvider provider = factory.createCompressionProviderInstance( name );
  assertNotNull( provider );
  assertTrue( provider.getClass().isAssignableFrom( expectedType ) );
  assertEquals( name, provider.getName() );
  assertEquals( expectedDescription, provider.getDescription() );
}
/**
 * Returns an aggregate operation that concatenates input character sequences
 * into a single string, accumulating into a StringBuilder and combining
 * partial results by appending.
 */
public static AggregateOperation1<CharSequence, StringBuilder, String> concatenating() {
    return AggregateOperation
            .withCreate(StringBuilder::new)
            .<CharSequence>andAccumulate(StringBuilder::append)
            .andCombine(StringBuilder::append)
            .andExportFinish(StringBuilder::toString);
}
// Verifies delimiter/prefix/suffix concatenation: the prefix appears after the first
// accumulation, the delimiter joins subsequent items, and the suffix only on finish.
@Test
public void when_concatenating_withDelimiterPrefixSuffix() {
    validateOpWithoutDeduct(
        concatenating(",", "(", ")"), StringBuilder::toString, "A", "B", "(A", "(A,B", "(A,B)"
    );
}
/**
 * Creates an unconfigured {@link ReadAll} transform for executing a PCollection
 * of Cassandra reads; callers configure it (e.g. coder) via its builder methods.
 */
public static <T> ReadAll<T> readAll() {
    return new AutoValue_CassandraIO_ReadAll.Builder<T>().build();
}
// Verifies ReadAll over two per-partition queries: the union of both reads yields
// exactly the two expected scientists and a total of two rows.
@Test
public void testReadAllQuery() {
    String physQuery =
        String.format(
            "SELECT * From %s.%s WHERE person_department='phys' AND person_id=0;",
            CASSANDRA_KEYSPACE, CASSANDRA_TABLE);

    String mathQuery =
        String.format(
            "SELECT * From %s.%s WHERE person_department='math' AND person_id=6;",
            CASSANDRA_KEYSPACE, CASSANDRA_TABLE);

    PCollection<Scientist> output =
        pipeline
            .apply(Create.of(getReadWithQuery(physQuery), getReadWithQuery(mathQuery)))
            .apply(
                CassandraIO.<Scientist>readAll().withCoder(SerializableCoder.of(Scientist.class)));

    // Project each row to the scientist's name for easy containment assertions.
    PCollection<String> mapped =
        output.apply(
            MapElements.via(
                new SimpleFunction<Scientist, String>() {
                    @Override
                    public String apply(Scientist scientist) {
                        return scientist.name;
                    }
                }));
    PAssert.that(mapped).containsInAnyOrder("Einstein", "Newton");
    PAssert.thatSingleton(output.apply("count", Count.globally())).isEqualTo(2L);
    pipeline.run();
}
/**
 * Decodes an HPACK header block from {@code in} into {@code headers}, enforcing
 * the max header list size and optional validation via the sink.
 *
 * @throws Http2Exception on malformed input or validation failure
 */
void decode(int streamId, ByteBuf in, Http2Headers headers, boolean validateHeaders) throws Http2Exception {
    Http2HeadersSink sink = new Http2HeadersSink(
            streamId, headers, maxHeaderListSize, validateHeaders);
    // Check for dynamic table size updates, which must occur at the beginning:
    // https://www.rfc-editor.org/rfc/rfc7541.html#section-4.2
    decodeDynamicTableSizeUpdates(in);
    decode(in, sink);

    // Now that we've read all of our headers we can perform the validation steps. We must
    // delay throwing until this point to prevent dynamic table corruption.
    sink.finish();
}
@Test public void testIllegalIndex() throws Http2Exception { // Index larger than the header table assertThrows(Http2Exception.class, new Executable() { @Override public void execute() throws Throwable { decode("FF00"); } }); }
/**
 * Checks whether the request can pass the given flow rule without prioritization.
 * Delegates to the five-argument overload with {@code prioritized = false}.
 */
public boolean canPassCheck(/*@NonNull*/ FlowRule rule, Context context, DefaultNode node,
                            int acquireCount) {
    return canPassCheck(rule, context, node, acquireCount, false);
}
// Verifies that a rule limited to origin "abc" lets a request from a different
// origin ("def") pass, since no matching node is selected for the check.
@Test
public void testPassCheckSelectEmptyNodeSuccess() {
    FlowRule rule = new FlowRule("abc").setCount(1);
    rule.setLimitApp("abc");
    DefaultNode node = mock(DefaultNode.class);
    Context context = mock(Context.class);
    when(context.getOrigin()).thenReturn("def");
    FlowRuleChecker checker = new FlowRuleChecker();
    assertTrue(checker.canPassCheck(rule, context, node, 1));
}
/**
 * Instantiates the user-specified InjectorSource via its public zero-arg
 * constructor, wrapping any reflective failure in a descriptive
 * InjectorSourceInstantiationFailed that preserves the cause.
 */
static InjectorSource instantiateUserSpecifiedInjectorSource(Class<?> injectorSourceClass) {
    try {
        return (InjectorSource) injectorSourceClass.getConstructor().newInstance();
    } catch (Exception e) {
        // Broad catch is intentional: reflection can fail in many ways
        // (NoSuchMethod, IllegalAccess, Instantiation, InvocationTarget, ...).
        String message = format("Instantiation of '%s' failed. Check the caused by exception and ensure your "
                + "InjectorSource implementation is accessible and has a public zero args constructor.",
            injectorSourceClass.getName());
        throw new InjectorSourceInstantiationFailed(message, e);
    }
}
// Verifies that a class lacking a public zero-arg constructor fails with a
// descriptive message and a NoSuchMethodException cause.
@Test
void failsToInstantiateClassWithNoDefaultConstructor() {
    Executable testMethod = () -> instantiateUserSpecifiedInjectorSource(NoDefaultConstructor.class);
    InjectorSourceInstantiationFailed actualThrown = assertThrows(InjectorSourceInstantiationFailed.class,
        testMethod);
    assertAll(
        () -> assertThat("Unexpected exception message", actualThrown.getMessage(),
            is(equalTo(
                "Instantiation of 'io.cucumber.guice.InjectorSourceFactoryTest$NoDefaultConstructor' failed. Check the caused by exception and ensure your InjectorSource implementation is accessible and has a public zero args constructor."))),
        () -> assertThat("Unexpected exception cause class", actualThrown.getCause(),
            isA(NoSuchMethodException.class)));
}
/**
 * Returns true unless this index set's template type is one of the types with
 * immutable field mappings. An absent template type means mappings are allowed.
 */
@JsonIgnore
public boolean canHaveCustomFieldMappings() {
    return !this.indexTemplateType().map(TEMPLATE_TYPES_FOR_INDEX_SETS_WITH_IMMUTABLE_FIELD_TYPES::contains).orElse(false);
}
// Verifies that an event-template index set rejects custom field mappings,
// regardless of the mappings provided.
@Test
public void testEventIndexWithChangedFieldMappingsIsIllegal() {
    assertFalse(testIndexSetConfig(EVENT_TEMPLATE_TYPE,
            new CustomFieldMappings(List.of(new CustomFieldMapping("john", "long"))),
            null).canHaveCustomFieldMappings());
}
/**
 * Returns the data table type built from the annotated method.
 */
@Override
public DataTableType dataTableType() {
    return dataTableType;
}
// Verifies that a method taking a single String is registered as a table *cell*
// transformer: every cell of the table is transformed independently.
@Test
void can_define_table_cell_transformer() throws NoSuchMethodException {
    Method method = JavaDataTableTypeDefinitionTest.class.getMethod("converts_table_cell_to_string", String.class);
    JavaDataTableTypeDefinition definition = new JavaDataTableTypeDefinition(method, lookup, new String[0]);
    assertThat(definition.dataTableType().transform(dataTable.cells()), is(asList(
        asList("converts_table_cell_to_string=a", "converts_table_cell_to_string=b"),
        asList("converts_table_cell_to_string=c", "converts_table_cell_to_string=d"))));
}
/**
 * Updates an application's priority after verifying caller access. Priority is
 * only changed while the app is tracked by the scheduler; for completed apps the
 * request is a no-op that returns the current priority, and for other inactive
 * states it fails. Every outcome is recorded in the RM audit log.
 */
@Override
public UpdateApplicationPriorityResponse updateApplicationPriority(
    UpdateApplicationPriorityRequest request) throws YarnException, IOException {
    ApplicationId applicationId = request.getApplicationId();
    Priority newAppPriority = request.getApplicationPriority();

    UserGroupInformation callerUGI =
        getCallerUgi(applicationId, AuditConstants.UPDATE_APP_PRIORITY);
    RMApp application = verifyUserAccessForRMApp(applicationId, callerUGI,
        AuditConstants.UPDATE_APP_PRIORITY, ApplicationAccessType.MODIFY_APP,
        true);

    UpdateApplicationPriorityResponse response = recordFactory
        .newRecordInstance(UpdateApplicationPriorityResponse.class);
    // Update priority only when app is tracked by the scheduler
    if (!ACTIVE_APP_STATES.contains(application.getState())) {
        if (application.isAppInCompletedStates()) {
            // If Application is in any of the final states, change priority
            // can be skipped rather throwing exception.
            RMAuditLogger.logSuccess(callerUGI.getShortUserName(),
                AuditConstants.UPDATE_APP_PRIORITY, "ClientRMService",
                applicationId);
            response.setApplicationPriority(application
                .getApplicationPriority());
            return response;
        }
        String msg = "Application in " + application.getState()
            + " state cannot update priority.";
        RMAuditLogger
            .logFailure(callerUGI.getShortUserName(),
                AuditConstants.UPDATE_APP_PRIORITY, "UNKNOWN",
                "ClientRMService", msg);
        throw new YarnException(msg);
    }

    try {
        rmAppManager.updateApplicationPriority(callerUGI,
            application.getApplicationId(), newAppPriority);
    } catch (YarnException ex) {
        // Log the failure before propagating so the audit trail is complete.
        RMAuditLogger.logFailure(callerUGI.getShortUserName(),
            AuditConstants.UPDATE_APP_PRIORITY, "UNKNOWN", "ClientRMService",
            ex.getMessage());
        throw ex;
    }

    RMAuditLogger.logSuccess(callerUGI.getShortUserName(),
        AuditConstants.UPDATE_APP_PRIORITY, "ClientRMService", applicationId);
    // Return the priority actually in effect (may differ from the requested one).
    response.setApplicationPriority(application.getApplicationPriority());
    return response;
}
// End-to-end test of priority updates: a request above the cluster maximum is
// clamped to the maximum, a lower request is applied as-is, updates on a KILLED
// app return the last effective priority, and an unknown application id yields
// ApplicationNotFoundException.
@Test(timeout = 120000)
public void testUpdateApplicationPriorityRequest() throws Exception {
    int maxPriority = 10;
    int appPriority = 5;
    conf = new YarnConfiguration();
    Assume.assumeFalse("FairScheduler does not support Application Priorities",
        conf.get(YarnConfiguration.RM_SCHEDULER)
            .equals(FairScheduler.class.getName()));
    conf.setInt(YarnConfiguration.MAX_CLUSTER_LEVEL_APPLICATION_PRIORITY,
        maxPriority);
    MockRM rm = new MockRM(conf);
    resourceManager = rm;
    rm.init(conf);
    rm.start();
    rm.registerNode("host1:1234", 1024);
    // Start app1 with appPriority 5
    MockRMAppSubmissionData data = MockRMAppSubmissionData.Builder
        .createWithMemory(1024, rm)
        .withAppPriority(Priority.newInstance(appPriority))
        .build();
    RMApp app1 = MockRMAppSubmitter.submit(rm, data);

    Assert.assertEquals("Incorrect priority has been set to application",
        appPriority, app1.getApplicationPriority().getPriority());

    // Requesting 11 (> maxPriority 10) must be clamped to the cluster maximum.
    appPriority = 11;
    ClientRMService rmService = rm.getClientRMService();
    testApplicationPriorityUpdation(rmService, app1, appPriority, maxPriority);

    appPriority = 9;
    testApplicationPriorityUpdation(rmService, app1, appPriority, appPriority);

    rm.killApp(app1.getApplicationId());
    rm.waitForState(app1.getApplicationId(), RMAppState.KILLED);

    // Update priority request for invalid application id.
    ApplicationId invalidAppId = ApplicationId.newInstance(123456789L, 3);
    UpdateApplicationPriorityRequest updateRequest =
        UpdateApplicationPriorityRequest.newInstance(invalidAppId,
            Priority.newInstance(appPriority));
    try {
        rmService.updateApplicationPriority(updateRequest);
        Assert.fail("ApplicationNotFoundException should be thrown "
            + "for invalid application id");
    } catch (ApplicationNotFoundException e) {
        // Expected
    }

    // Updating a completed (KILLED) app is a no-op that echoes the last priority.
    updateRequest =
        UpdateApplicationPriorityRequest.newInstance(app1.getApplicationId(),
            Priority.newInstance(11));
    Assert.assertEquals("Incorrect priority has been set to application",
        appPriority, rmService.updateApplicationPriority(updateRequest)
            .getApplicationPriority().getPriority());
}
/**
 * Creates a Filter transform keeping only elements for which {@code predicate}
 * returns true.
 */
public static <T, PredicateT extends ProcessFunction<T, Boolean>> Filter<T> by(
    PredicateT predicate) {
  return new Filter<>(predicate);
}
// Verifies that an always-false lambda predicate filters out every element,
// producing an empty output collection.
@Test
@Category(NeedsRunner.class)
public void testNoFilterByPredicateWithLambda() {
    PCollection<Integer> output = p.apply(Create.of(1, 2, 4, 5)).apply(Filter.by(i -> false));

    PAssert.that(output).empty();
    p.run();
}
/**
 * Looks up the composite meter registry registered under the given name.
 *
 * @return the registry, or {@code null} if the name is not registered
 */
public static CompositeMeterRegistry getMeterRegistry(String registry) {
    return METER_REGISTRIES.get(registry);
}
// Verifies that every well-known registry constant resolves to a non-null registry.
@Test
void testGetMeterRegistry() {
    assertNotNull(NacosMeterRegistryCenter.getMeterRegistry(NacosMeterRegistryCenter.CORE_STABLE_REGISTRY));
    assertNotNull(NacosMeterRegistryCenter.getMeterRegistry(NacosMeterRegistryCenter.CONFIG_STABLE_REGISTRY));
    assertNotNull(NacosMeterRegistryCenter.getMeterRegistry(NacosMeterRegistryCenter.NAMING_STABLE_REGISTRY));
    assertNotNull(NacosMeterRegistryCenter.getMeterRegistry(NacosMeterRegistryCenter.TOPN_CONFIG_CHANGE_REGISTRY));
    assertNotNull(NacosMeterRegistryCenter.getMeterRegistry(NacosMeterRegistryCenter.TOPN_SERVICE_CHANGE_REGISTRY));
}
// Dissects an archive control-protocol request for event logging: writes the
// common log header, wraps the SBE message header, then dispatches on the
// event code to the matching flyweight decoder and append* formatter.
// Each case re-wraps its dedicated decoder at the payload offset using the
// block length and version taken from the just-decoded message header.
// Unknown codes are reported rather than thrown.
@SuppressWarnings("MethodLength") static void dissectControlRequest( final ArchiveEventCode eventCode, final MutableDirectBuffer buffer, final int offset, final StringBuilder builder) { int encodedLength = dissectLogHeader(CONTEXT, eventCode, buffer, offset, builder); HEADER_DECODER.wrap(buffer, offset + encodedLength); encodedLength += MessageHeaderDecoder.ENCODED_LENGTH; switch (eventCode) { case CMD_IN_CONNECT: CONNECT_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendConnect(builder); break; case CMD_IN_CLOSE_SESSION: CLOSE_SESSION_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendCloseSession(builder); break; case CMD_IN_START_RECORDING: START_RECORDING_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendStartRecording(builder); break; case CMD_IN_STOP_RECORDING: STOP_RECORDING_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendStopRecording(builder); break; case CMD_IN_REPLAY: REPLAY_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendReplay(builder); break; case CMD_IN_STOP_REPLAY: STOP_REPLAY_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendStopReplay(builder); break; case CMD_IN_LIST_RECORDINGS: LIST_RECORDINGS_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendListRecordings(builder); break; case CMD_IN_LIST_RECORDINGS_FOR_URI: LIST_RECORDINGS_FOR_URI_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendListRecordingsForUri(builder); break; case CMD_IN_LIST_RECORDING: LIST_RECORDING_REQUEST_DECODER.wrap( buffer, offset + encodedLength, 
HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendListRecording(builder); break; case CMD_IN_EXTEND_RECORDING: EXTEND_RECORDING_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendExtendRecording(builder); break; case CMD_IN_RECORDING_POSITION: RECORDING_POSITION_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendRecordingPosition(builder); break; case CMD_IN_TRUNCATE_RECORDING: TRUNCATE_RECORDING_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendTruncateRecording(builder); break; case CMD_IN_STOP_RECORDING_SUBSCRIPTION: STOP_RECORDING_SUBSCRIPTION_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendStopRecordingSubscription(builder); break; case CMD_IN_STOP_POSITION: STOP_POSITION_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendStopPosition(builder); break; case CMD_IN_FIND_LAST_MATCHING_RECORD: FIND_LAST_MATCHING_RECORDING_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendFindLastMatchingRecord(builder); break; case CMD_IN_LIST_RECORDING_SUBSCRIPTIONS: LIST_RECORDING_SUBSCRIPTIONS_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendListRecordingSubscriptions(builder); break; case CMD_IN_START_BOUNDED_REPLAY: BOUNDED_REPLAY_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendStartBoundedReplay(builder); break; case CMD_IN_STOP_ALL_REPLAYS: STOP_ALL_REPLAYS_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendStopAllReplays(builder); break; case CMD_IN_REPLICATE: 
REPLICATE_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendReplicate(builder); break; case CMD_IN_STOP_REPLICATION: STOP_REPLICATION_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendStopReplication(builder); break; case CMD_IN_START_POSITION: START_POSITION_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendStartPosition(builder); break; case CMD_IN_DETACH_SEGMENTS: DETACH_SEGMENTS_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendDetachSegments(builder); break; case CMD_IN_DELETE_DETACHED_SEGMENTS: DELETE_DETACHED_SEGMENTS_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendDeleteDetachedSegments(builder); break; case CMD_IN_PURGE_SEGMENTS: PURGE_SEGMENTS_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendPurgeSegments(builder); break; case CMD_IN_ATTACH_SEGMENTS: ATTACH_SEGMENTS_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendAttachSegments(builder); break; case CMD_IN_MIGRATE_SEGMENTS: MIGRATE_SEGMENTS_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendMigrateSegments(builder); break; case CMD_IN_AUTH_CONNECT: AUTH_CONNECT_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendAuthConnect(builder); break; case CMD_IN_KEEP_ALIVE: KEEP_ALIVE_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendKeepAlive(builder); break; case CMD_IN_TAGGED_REPLICATE: TAGGED_REPLICATE_REQUEST_DECODER.wrap( buffer, offset + 
encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendTaggedReplicate(builder); break; case CMD_IN_START_RECORDING2: START_RECORDING_REQUEST2_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendStartRecording2(builder); break; case CMD_IN_EXTEND_RECORDING2: EXTEND_RECORDING_REQUEST2_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendExtendRecording2(builder); break; case CMD_IN_STOP_RECORDING_BY_IDENTITY: STOP_RECORDING_BY_IDENTITY_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendStopRecordingByIdentity(builder); break; case CMD_IN_PURGE_RECORDING: PURGE_RECORDING_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendPurgeRecording(builder); break; case CMD_IN_REPLICATE2: REPLICATE_REQUEST2_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendReplicate2(builder); break; case CMD_IN_REQUEST_REPLAY_TOKEN: REPLAY_TOKEN_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendReplayToken(builder); break; default: builder.append(": unknown command"); } }
// Encodes a StopReplication request at a known timestamp and verifies the
// dissector renders the header plus all three fields exactly.
@Test void controlRequestStopReplication() { internalEncodeLogHeader(buffer, 0, 1000, 1000, () -> 500_000_000L); final StopReplicationRequestEncoder requestEncoder = new StopReplicationRequestEncoder(); requestEncoder.wrapAndApplyHeader(buffer, LOG_HEADER_LENGTH, headerEncoder) .controlSessionId(-2) .correlationId(-5) .replicationId(-999); dissectControlRequest(CMD_IN_STOP_REPLICATION, buffer, 0, builder); assertEquals("[0.500000000] " + CONTEXT + ": " + CMD_IN_STOP_REPLICATION.name() + " [1000/1000]:" + " controlSessionId=-2" + " correlationId=-5" + " replicationId=-999", builder.toString()); }
/**
 * Returns an immutable snapshot of the devices currently bound to the given
 * pipeconf id, or an empty set when the pipeconf has no bindings.
 */
@Override
public Set<DeviceId> getDevices(PiPipeconfId pipeconfId) {
    // Bug fix: Map.get returns null for an unknown pipeconf id and
    // ImmutableSet.copyOf(null) throws NPE. Treat "no binding" as empty.
    // NOTE(review): a tiny containsKey/get race is possible if the map is
    // mutated concurrently — acceptable for a read-only snapshot accessor.
    if (!pipeconfToDevices.containsKey(pipeconfId)) {
        return ImmutableSet.of();
    }
    return ImmutableSet.copyOf(pipeconfToDevices.get(pipeconfId));
}
// After creating a pipeconf-to-device binding, the store must report exactly
// that device for the pipeconf id.
@Test public void getDevices() { clear(); createOrUpdatePipeconfToDeviceBinding(); assertEquals("Wrong set of DeviceIds", store.getDevices(PIPECONF_ID), ImmutableSet.of(DEVICE_ID)); }
// Delegates to the internal cache lookup. thenApply(identity) returns a new
// dependent future so callers cannot complete/cancel the cached one directly.
@Override public CompletableFuture<ExecutionGraphInfo> getExecutionGraphInfo( JobID jobId, RestfulGateway restfulGateway) { return getExecutionGraphInternal(jobId, restfulGateway).thenApply(Function.identity()); }
// With a long TTL, a second lookup for the same job must be served from the
// cache: the gateway is invoked exactly once.
@Test void testExecutionGraphCaching() throws Exception { final Time timeout = Time.milliseconds(100L); final Time timeToLive = Time.hours(1L); final CountingRestfulGateway restfulGateway = createCountingRestfulGateway( expectedJobId, CompletableFuture.completedFuture(expectedExecutionGraphInfo)); try (ExecutionGraphCache executionGraphCache = new DefaultExecutionGraphCache(timeout, timeToLive)) { CompletableFuture<ExecutionGraphInfo> executionGraphInfoFuture = executionGraphCache.getExecutionGraphInfo(expectedJobId, restfulGateway); assertThatFuture(executionGraphInfoFuture) .eventuallySucceeds() .isEqualTo(expectedExecutionGraphInfo); executionGraphInfoFuture = executionGraphCache.getExecutionGraphInfo(expectedJobId, restfulGateway); assertThatFuture(executionGraphInfoFuture) .eventuallySucceeds() .isEqualTo(expectedExecutionGraphInfo); assertThat(restfulGateway.getNumRequestJobCalls()).isOne(); } }
// Identifies this handler as belonging to the SOFA plugin.
@Override public String pluginNamed() { return PluginEnum.SOFA.getName(); }
/** The handler must report the SOFA plugin name. */
@Test
public void testPluginNamed() {
    // Fix: JUnit's assertEquals takes (expected, actual); the original call
    // had the arguments swapped, which yields misleading failure messages.
    assertEquals(PluginEnum.SOFA.getName(), sofaPluginDataHandler.pluginNamed());
}
// Asserts the subject string contains the given sequence. A null expected
// value is rejected up front; a null subject fails with a dedicated message
// distinct from a plain containment failure.
public void contains(@Nullable CharSequence string) { checkNotNull(string); if (actual == null) { failWithActual("expected a string that contains", string); } else if (!actual.contains(string)) { failWithActual("expected to contain", string); } }
// ignoringCase().isNotEqualTo must fail for case-only differences, and the
// failure must report the expected value and the case-is-ignored fact.
@Test public void stringInequalityIgnoringCaseFail() { expectFailureWhenTestingThat("café").ignoringCase().isNotEqualTo("CAFÉ"); assertFailureValue("expected not to be", "CAFÉ"); assertThat(expectFailure.getFailure()).factKeys().contains("(case is ignored)"); }
// Round-robin partition assignment: members are sorted (for determinism) and
// visited circularly; each partition goes to the next member subscribed to its
// topic. Members not subscribed to a topic are skipped via the inner while,
// which assumes at least one member subscribes to every listed topic.
@Override public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<TopicPartition>> assignment = new HashMap<>(); List<MemberInfo> memberInfoList = new ArrayList<>(); for (Map.Entry<String, Subscription> memberSubscription : subscriptions.entrySet()) { assignment.put(memberSubscription.getKey(), new ArrayList<>()); memberInfoList.add(new MemberInfo(memberSubscription.getKey(), memberSubscription.getValue().groupInstanceId())); } CircularIterator<MemberInfo> assigner = new CircularIterator<>(Utils.sorted(memberInfoList)); for (TopicPartition partition : allPartitionsSorted(partitionsPerTopic, subscriptions)) { final String topic = partition.topic(); while (!subscriptions.get(assigner.peek().memberId).topics().contains(topic)) assigner.next(); assignment.get(assigner.next().memberId).add(partition); } return assignment; }
// With one partition and two subscribed consumers, round-robin gives the
// single partition to the first (sorted) consumer and nothing to the second.
@Test public void testTwoConsumersOneTopicOnePartition() { String consumer1 = "consumer1"; String consumer2 = "consumer2"; Map<String, Integer> partitionsPerTopic = new HashMap<>(); partitionsPerTopic.put(topic, 1); Map<String, Subscription> consumers = new HashMap<>(); consumers.put(consumer1, new Subscription(topics(topic))); consumers.put(consumer2, new Subscription(topics(topic))); Map<String, List<TopicPartition>> assignment = assignor.assign(partitionsPerTopic, consumers); assertEquals(partitions(tp(topic, 0)), assignment.get(consumer1)); assertEquals(Collections.emptyList(), assignment.get(consumer2)); }
// Loads the given cloud objects as a Spark Dataset: applies the provider
// schema when present, merges JSON-encoded datasource options (falling back
// to the legacy S3 config key), loads the paths (comma-joined or as an
// array depending on config), derives path-based partition columns of the
// form "key=value" when configured, and repartitions to numPartitions.
// Returns Option.empty() when there is nothing to load.
public Option<Dataset<Row>> loadAsDataset(SparkSession spark, List<CloudObjectMetadata> cloudObjectMetadata, String fileFormat, Option<SchemaProvider> schemaProviderOption, int numPartitions) { if (LOG.isDebugEnabled()) { LOG.debug("Extracted distinct files " + cloudObjectMetadata.size() + " and some samples " + cloudObjectMetadata.stream().map(CloudObjectMetadata::getPath).limit(10).collect(Collectors.toList())); } if (isNullOrEmpty(cloudObjectMetadata)) { return Option.empty(); } DataFrameReader reader = spark.read().format(fileFormat); String datasourceOpts = getStringWithAltKeys(properties, CloudSourceConfig.SPARK_DATASOURCE_OPTIONS, true); if (schemaProviderOption.isPresent()) { Schema sourceSchema = schemaProviderOption.get().getSourceSchema(); if (sourceSchema != null && !sourceSchema.equals(InputBatch.NULL_SCHEMA)) { reader = reader.schema(AvroConversionUtils.convertAvroSchemaToStructType(sourceSchema)); } } if (StringUtils.isNullOrEmpty(datasourceOpts)) { // fall back to legacy config for BWC. TODO consolidate in HUDI-6020
datasourceOpts = getStringWithAltKeys(properties, S3EventsHoodieIncrSourceConfig.SPARK_DATASOURCE_OPTIONS, true); } if (StringUtils.nonEmpty(datasourceOpts)) { final ObjectMapper mapper = new ObjectMapper(); Map<String, String> sparkOptionsMap = null; try { sparkOptionsMap = mapper.readValue(datasourceOpts, Map.class); } catch (IOException e) { throw new HoodieException(String.format("Failed to parse sparkOptions: %s", datasourceOpts), e); } LOG.info(String.format("sparkOptions loaded: %s", sparkOptionsMap)); reader = reader.options(sparkOptionsMap); } List<String> paths = new ArrayList<>(); for (CloudObjectMetadata o : cloudObjectMetadata) { paths.add(o.getPath()); } boolean isCommaSeparatedPathFormat = properties.getBoolean(SPARK_DATASOURCE_READER_COMMA_SEPARATED_PATH_FORMAT.key(), false); Dataset<Row> dataset; if (isCommaSeparatedPathFormat) { dataset = reader.load(String.join(",", paths)); } else { dataset = reader.load(paths.toArray(new String[cloudObjectMetadata.size()])); } // add partition column from source path if configured if (containsConfigProperty(properties, PATH_BASED_PARTITION_FIELDS)) { String[] partitionKeysToAdd = getStringWithAltKeys(properties, PATH_BASED_PARTITION_FIELDS).split(","); // Add partition column for all path-based partition keys. If key is not present in path, the value will be null. for (String partitionKey : partitionKeysToAdd) { String partitionPathPattern = String.format("%s=", partitionKey); LOG.info(String.format("Adding column %s to dataset", partitionKey)); dataset = dataset.withColumn(partitionKey, split(split(input_file_name(), partitionPathPattern).getItem(1), "/").getItem(0)); } } dataset = coalesceOrRepartition(dataset, numPartitions); return Option.of(dataset); }
// End-to-end check: loading partitioned JSON files with a file-based schema
// provider must yield one row per file with path-derived country/state
// columns, while also exercising the repartition path inside loadAsDataset.
@Test public void loadDatasetWithSchemaAndRepartition() { TypedProperties props = new TypedProperties(); TestCloudObjectsSelectorCommon.class.getClassLoader().getResource("schema/sample_data_schema.avsc"); String schemaFilePath = TestCloudObjectsSelectorCommon.class.getClassLoader().getResource("schema/sample_data_schema.avsc").getPath(); props.put("hoodie.streamer.schemaprovider.source.schema.file", schemaFilePath); props.put("hoodie.streamer.schema.provider.class.name", FilebasedSchemaProvider.class.getName()); props.put("hoodie.streamer.source.cloud.data.partition.fields.from.path", "country,state"); // Setting this config so that dataset repartition happens inside `loadAsDataset` props.put("hoodie.streamer.source.cloud.data.partition.max.size", "1"); List<CloudObjectMetadata> input = Arrays.asList( new CloudObjectMetadata("src/test/resources/data/partitioned/country=US/state=CA/data.json", 1000), new CloudObjectMetadata("src/test/resources/data/partitioned/country=US/state=TX/data.json", 1000), new CloudObjectMetadata("src/test/resources/data/partitioned/country=IND/state=TS/data.json", 1000) ); CloudObjectsSelectorCommon cloudObjectsSelectorCommon = new CloudObjectsSelectorCommon(props); Option<Dataset<Row>> result = cloudObjectsSelectorCommon.loadAsDataset(sparkSession, input, "json", Option.of(new FilebasedSchemaProvider(props, jsc)), 30); Assertions.assertTrue(result.isPresent()); List<Row> expected = Arrays.asList(RowFactory.create("some data", "US", "CA"), RowFactory.create("some data", "US", "TX"), RowFactory.create("some data", "IND", "TS")); List<Row> actual = result.get().collectAsList(); Assertions.assertEquals(new HashSet<>(expected), new HashSet<>(actual)); }
// Returns the path element at index `count` by walking the name iterator.
// Per the Path contract, an out-of-range index is rejected via checkArgument
// (the iterator running dry before reaching count triggers the same check).
@Override public GcsPath getName(int count) { checkArgument(count >= 0); Iterator<Path> iterator = iterator(); for (int i = 0; i < count; ++i) { checkArgument(iterator.hasNext()); iterator.next(); } checkArgument(iterator.hasNext()); return (GcsPath) iterator.next(); }
// For gs://bucket/a/b/c/d the name count is 5: the bucket root plus the four
// object-path components, each retrievable by index.
@Test public void testGetName() { GcsPath a = GcsPath.fromComponents("bucket", "a/b/c/d"); assertEquals(5, a.getNameCount()); assertThat(a.getName(0).toString(), Matchers.equalTo("gs://bucket/")); assertThat(a.getName(1).toString(), Matchers.equalTo("a")); assertThat(a.getName(2).toString(), Matchers.equalTo("b")); assertThat(a.getName(3).toString(), Matchers.equalTo("c")); assertThat(a.getName(4).toString(), Matchers.equalTo("d")); }
static <T> T getWildcardMappedObject(final Map<String, T> mapping, final String query) { T value = mapping.get(query); if (value == null) { for (String key : mapping.keySet()) { // Turn the search key into a regex, using all characters but the * as a literal. String regex = Arrays.stream(key.split("\\*")) // split in parts that do not have a wildcard in them .map(Pattern::quote) // each part should be used as a literal (not as a regex or partial regex) .collect(Collectors.joining(".*")); // join all literal parts with a regex representation on the wildcard. if (key.endsWith("*")) { // the 'split' will have removed any trailing wildcard characters. Correct for that. regex += ".*"; } if (query.matches(regex)) { value = mapping.get(key); break; } } } return value; }
// A query under a wildcard-suffixed key must resolve to that key's value.
@Test public void testWildcard() throws Exception { // Setup test fixture. final Map<String, Object> haystack = Map.of("myplugin/*", new Object()); // Execute system under test. final Object result = PluginServlet.getWildcardMappedObject(haystack, "myplugin/foo.jsp"); // Verify results. assertNotNull(result); }
// Writes the in-memory buffer's pending bytes to the backing temp file,
// flushes it, and resets the buffer cursor so subsequent writes refill it.
@Override public void flush() throws IOException { currentTmpFile.write(buffer, 0, positionInBuffer); currentTmpFile.flush(); positionInBuffer = 0; }
/**
 * flush() must reset the in-buffer cursor without moving the logical
 * position, and the flushed bytes must be readable back from the file.
 */
@Test
void testFlush() throws IOException {
    RefCountedBufferingFileStream stream = getStreamToTest();
    final byte[] contentToWrite = bytesOf("hello");
    stream.write(contentToWrite);
    assertThat(stream.getPositionInBuffer()).isEqualTo(contentToWrite.length);
    assertThat(stream.getPos()).isEqualTo(contentToWrite.length);
    stream.flush();
    assertThat(stream.getPositionInBuffer()).isZero();
    assertThat(stream.getPos()).isEqualTo(contentToWrite.length);
    final byte[] contentRead = new byte[contentToWrite.length];
    // Fix: the original leaked the FileInputStream and ignored read()'s
    // return value; close via try-with-resources and assert a full read.
    try (FileInputStream in = new FileInputStream(stream.getInputFile())) {
        assertThat(in.read(contentRead, 0, contentRead.length)).isEqualTo(contentToWrite.length);
    }
    assertThat(contentRead).isEqualTo(contentToWrite);
    stream.release();
}
// Intentionally a no-op: this is the top-level command; real work happens in
// its subcommands.
@Override public void run() { // top-level command, do nothing }
// Cancelling a job by an unknown name/id must fail with a clear message.
@Test public void test_cancelJob_invalidNameOrId() { // When // Then exception.expectMessage("No job with name or id 'invalid' was found"); run("cancel", "invalid"); }
// Runs the feature: if a custom processor is installed it takes over entirely;
// otherwise the before-feature hook gates scenario execution (a false return
// aborts), and afterFeature() runs unconditionally on the hook path.
@Override public void run() { if (processor != null) { processor.execute(); } else { if (!beforeHook()) { logger.info("before-feature hook returned [false], aborting: {}", this); } else { scenarios.forEachRemaining(this::processScenario); } afterFeature(); } }
// Even when the feature's API call fails, config-sourced variables and
// karate-base functions must still be populated, and the before hook ran.
@Test void testFailApi() { fail = true; run("fail-api.feature"); match(fr.result.getVariables(), "{ configSource: 'normal', functionFromKarateBase: '#notnull', before: true }"); }
// Exposes the single BigQuery connector factory this plugin provides.
@Override public Iterable<ConnectorFactory> getConnectorFactories() { return ImmutableList.of(new BigQueryConnectorFactory()); }
// The plugin must expose exactly one factory, of the BigQuery type.
@Test public void testStartup() { BigQueryPlugin plugin = new BigQueryPlugin(); ConnectorFactory factory = getOnlyElement(plugin.getConnectorFactories()); assertInstanceOf(factory, BigQueryConnectorFactory.class); }
// Map-style put on top of the cache: triggers the periodic refresh check,
// then stores the value and returns the previous mapping (or null).
// NOTE(review): getIfPresent + put is not atomic — under concurrent puts the
// returned "previous" value may be stale; confirm callers tolerate this.
@Override public V put(final K key, final V value) { checkAndScheduleRefresh(this); final V previous = cache.getIfPresent(key); cache.put(key, value); return previous; }
// size() must track the number of distinct keys inserted.
@Test public void testPut() { MemorySafeWindowTinyLFUMap<String, String> lru = new MemorySafeWindowTinyLFUMap<>(1 << 10, 16); lru.put("1", "1"); Assert.assertEquals(1, lru.size()); lru.put("2", "2"); lru.put("3", "3"); Assert.assertEquals(3, lru.size()); }
// Decodes a ".onion" hostname to its raw address bytes. The base32 payload is
// distinguished by length: 10 bytes is a TORv2 address returned as-is;
// 35 bytes (32-byte pubkey + 2-byte checksum + 1 version byte) is TORv3,
// which must carry version 0x03 and a checksum matching onionChecksum() —
// the pubkey alone is returned. Anything else is rejected.
public static byte[] decodeOnionUrl(String onionUrl) { if (!onionUrl.toLowerCase(Locale.ROOT).endsWith(".onion")) throw new IllegalArgumentException("not an onion URL: " + onionUrl); byte[] onionAddress = BASE32.decode(onionUrl.substring(0, onionUrl.length() - 6)); if (onionAddress.length == 10) { // TORv2 return onionAddress; } else if (onionAddress.length == 32 + 2 + 1) { // TORv3 byte[] pubkey = Arrays.copyOfRange(onionAddress, 0, 32); byte[] checksum = Arrays.copyOfRange(onionAddress, 32, 34); byte torVersion = onionAddress[34]; if (torVersion != 0x03) throw new IllegalArgumentException("unknown version: " + onionUrl); if (!Arrays.equals(checksum, onionChecksum(pubkey, torVersion))) throw new IllegalArgumentException("bad checksum: " + onionUrl); return pubkey; } else { throw new IllegalArgumentException("unrecognizable length: " + onionUrl); } }
// A payload that decodes to neither 10 (v2) nor 35 (v3) bytes must be rejected.
@Test(expected = IllegalArgumentException.class) public void decodeOnionUrl_badLength() { TorUtils.decodeOnionUrl("aaa.onion"); }
// Looks up an account by phone-number identifier: first via the Redis
// secondary-key map (timed separately), falling back to the backing store.
public Optional<Account> getByPhoneNumberIdentifier(final UUID pni) { return checkRedisThenAccounts( getByNumberTimer, () -> redisGetBySecondaryKey(getAccountMapKey(pni.toString()), redisPniGetTimer), () -> accounts.getByPhoneNumberIdentifier(pni) ); }
// When Redis throws, the lookup must fall through to the backing store,
// return that account, and re-prime both cache entries afterwards.
@Test void testGetAccountByPniBrokenCache() { UUID uuid = UUID.randomUUID(); UUID pni = UUID.randomUUID(); Account account = AccountsHelper.generateTestAccount("+14152222222", uuid, pni, new ArrayList<>(), new byte[UnidentifiedAccessUtil.UNIDENTIFIED_ACCESS_KEY_LENGTH]); when(commands.get(eq("AccountMap::" + pni))).thenThrow(new RedisException("OH NO")); when(accounts.getByPhoneNumberIdentifier(pni)).thenReturn(Optional.of(account)); Optional<Account> retrieved = accountsManager.getByPhoneNumberIdentifier(pni); assertTrue(retrieved.isPresent()); assertSame(retrieved.get(), account); verify(commands).get(eq("AccountMap::" + pni)); verify(commands).setex(eq("AccountMap::" + pni), anyLong(), eq(uuid.toString())); verify(commands).setex(eq("Account3::" + uuid), anyLong(), anyString()); verifyNoMoreInteractions(commands); verify(accounts).getByPhoneNumberIdentifier(pni); verifyNoMoreInteractions(accounts); }
// Removes the first occurrence of the element; returns whether it was present.
@CanIgnoreReturnValue public boolean remove(JsonElement element) { return elements.remove(element); }
// Exercises both remove overloads: by index on an empty array (throws), by
// element (returns true and removes), and by index (returns the removed item).
@Test public void testRemove() { JsonArray array = new JsonArray(); assertThrows(IndexOutOfBoundsException.class, () -> array.remove(0)); JsonPrimitive a = new JsonPrimitive("a"); array.add(a); assertThat(array.remove(a)).isTrue(); assertThat(array).doesNotContain(a); array.add(a); array.add(new JsonPrimitive("b")); assertThat(array.remove(1).getAsString()).isEqualTo("b"); assertThat(array).hasSize(1); assertThat(array).contains(a); }
// Injector for DROP ... DELETE TOPIC: passes through non-drop statements and
// drops without DELETE TOPIC. For a matching drop of an existing source it
// refuses read-only sources, checks for other references, deletes the Kafka
// topic, then deletes key and value schema subjects via a Closer so both are
// attempted even if one fails (KsqlExceptions are rethrown as-is, anything
// else is wrapped). A missing source is only an error without IF EXISTS.
// Finally the statement is rewritten without the DELETE TOPIC clause so the
// downstream executor does not try to delete the topic again.
@SuppressWarnings({"unchecked", "UnstableApiUsage"}) @Override public <T extends Statement> ConfiguredStatement<T> inject( final ConfiguredStatement<T> statement) { if (!(statement.getStatement() instanceof DropStatement)) { return statement; } final DropStatement dropStatement = (DropStatement) statement.getStatement(); if (!dropStatement.isDeleteTopic()) { return statement; } final SourceName sourceName = dropStatement.getName(); final DataSource source = metastore.getSource(sourceName); if (source != null) { if (source.isSource()) { throw new KsqlException("Cannot delete topic for read-only source: " + sourceName.text()); } checkTopicRefs(source); deleteTopic(source); final Closer closer = Closer.create(); closer.register(() -> deleteKeySubject(source)); closer.register(() -> deleteValueSubject(source)); try { closer.close(); } catch (final KsqlException e) { throw e; } catch (final Exception e) { throw new KsqlException(e); } } else if (!dropStatement.getIfExists()) { throw new KsqlException("Could not find source to delete topic for: " + statement); } final T withoutDelete = (T) dropStatement.withoutDeleteClause(); final String withoutDeleteText = SqlFormatter.formatSql(withoutDelete) + ";"; return statement.withStatement(withoutDeleteText, withoutDelete); }
// DROP ... IF EXISTS DELETE TOPIC on a missing source must be a silent no-op.
@Test public void shouldNotThrowIfStatementHasIfExistsAndSourceDoesNotExist() { // Given: final ConfiguredStatement<DropStream> dropStatement = givenStatement( "DROP SOMETHING", new DropStream(SourceName.of("SOMETHING_ELSE"), true, true)); // When: deleteInjector.inject(dropStatement); }
// Returns the projection's precomputed output schema.
@Override public LogicalSchema getSchema() { return outputSchema; }
@Test public void shouldBuildPullQueryOutputSchemaSelectKeyNonWindowed() { // Given: selects = ImmutableList.of(new SingleColumn(K_REF, Optional.of(K))); when(keyFormat.isWindowed()).thenReturn(false); when(analysis.getSelectColumnNames()).thenReturn(ImmutableSet.of(K)); // When: final QueryProjectNode projectNode = new QueryProjectNode( NODE_ID, source, selects, metaStore, ksqlConfig, analysis, false, plannerOptions, false ); // Then: final LogicalSchema expected = LogicalSchema.builder() .keyColumn(K, SqlTypes.STRING) .build(); assertThat(expected, is(projectNode.getSchema())); }
// Fetches the classic group for groupId, optionally creating an EMPTY one
// (recording the state-transition metric) when absent and createIfNotExists
// is set. Throws GroupIdNotFoundException when the group is missing and
// creation is disallowed, or when the id belongs to a non-classic group
// (protocol up/downgrade is not supported).
ClassicGroup getOrMaybeCreateClassicGroup( String groupId, boolean createIfNotExists ) throws GroupIdNotFoundException { Group group = groups.get(groupId); if (group == null && !createIfNotExists) { throw new GroupIdNotFoundException(String.format("Classic group %s not found.", groupId)); } if (group == null) { ClassicGroup classicGroup = new ClassicGroup(logContext, groupId, ClassicGroupState.EMPTY, time, metrics); groups.put(groupId, classicGroup); metrics.onClassicGroupStateTransition(null, classicGroup.currentState()); return classicGroup; } else { if (group.type() == CLASSIC) { return (ClassicGroup) group; } else { // We don't support upgrading/downgrading between protocols at the moment so // we throw an exception if a group exists with the wrong type. throw new GroupIdNotFoundException(String.format("Group %s is not a classic group.", groupId)); } } }
// A static leader rejoining a group that was unexpectedly forced to EMPTY
// must get UNKNOWN_MEMBER_ID with no records written.
@Test public void testStaticMemberRejoinWithLeaderIdAndUnexpectedEmptyGroup() throws Exception { GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder() .build(); GroupMetadataManagerTestContext.RebalanceResult rebalanceResult = context.staticMembersJoinAndRebalance( "group-id", "leader-instance-id", "follower-instance-id" ); ClassicGroup group = context.groupMetadataManager.getOrMaybeCreateClassicGroup("group-id", false); group.transitionTo(PREPARING_REBALANCE); group.transitionTo(EMPTY); JoinGroupRequestData request = new GroupMetadataManagerTestContext.JoinGroupRequestBuilder() .withGroupId("group-id") .withGroupInstanceId("leader-instance-id") .withMemberId(rebalanceResult.leaderId) .withDefaultProtocolTypeAndProtocols() .build(); GroupMetadataManagerTestContext.JoinResult joinResult = context.sendClassicGroupJoin(request, true, true); assertTrue(joinResult.records.isEmpty()); assertTrue(joinResult.joinFuture.isDone()); assertEquals(Errors.UNKNOWN_MEMBER_ID.code(), joinResult.joinFuture.get().errorCode()); }
// Integer overload: delegates to the Double overload, propagating null.
@Udf(description = "Returns the cube root of an INT value") public Double cbrt( @UdfParameter( value = "value", description = "The value to get the cube root of." ) final Integer value ) { return cbrt(value == null ? null : value.doubleValue()); }
// All three cbrt overloads must map a null input to a null output.
@Test public void shouldHandleNull() { assertThat(udf.cbrt((Integer)null), is(nullValue())); assertThat(udf.cbrt((Long)null), is(nullValue())); assertThat(udf.cbrt((Double)null), is(nullValue())); }
// Polls message fragments up to both a fragment-count limit and a position
// bound. Resolves the active term buffer from the subscriber position, caps
// the scan at min(term capacity, limit offset), skips padding frames, and
// invokes the handler per fragment with a wrapped Header. Handler exceptions
// go to errorHandler; the subscriber position is advanced in finally by the
// bytes consumed, so progress is committed even after an error.
public int boundedPoll(final FragmentHandler handler, final long limitPosition, final int fragmentLimit) { if (isClosed) { return 0; } final long initialPosition = subscriberPosition.get(); if (initialPosition >= limitPosition) { return 0; } int fragmentsRead = 0; final int initialOffset = (int)initialPosition & termLengthMask; int offset = initialOffset; final UnsafeBuffer termBuffer = activeTermBuffer(initialPosition); final int limitOffset = (int)Math.min(termBuffer.capacity(), (limitPosition - initialPosition) + offset); final Header header = this.header; header.buffer(termBuffer); try { while (fragmentsRead < fragmentLimit && offset < limitOffset) { final int length = frameLengthVolatile(termBuffer, offset); if (length <= 0) { break; } final int frameOffset = offset; final int alignedLength = BitUtil.align(length, FRAME_ALIGNMENT); offset += alignedLength; if (isPaddingFrame(termBuffer, frameOffset)) { continue; } ++fragmentsRead; header.offset(frameOffset); handler.onFragment(termBuffer, frameOffset + HEADER_LENGTH, length - HEADER_LENGTH, header); } } catch (final Exception ex) { errorHandler.onError(ex); } finally { final long resultingPosition = initialPosition + (offset - initialOffset); if (resultingPosition > initialPosition) { subscriberPosition.setOrdered(resultingPosition); } } return fragmentsRead; }
// With two frames inserted but maxPosition ending after the first, exactly
// one fragment is delivered and the position advances by one aligned frame.
@Test void shouldPollFragmentsToBoundedFragmentHandlerWithMaxPositionBeforeNextMessage() { final long initialPosition = computePosition(INITIAL_TERM_ID, 0, POSITION_BITS_TO_SHIFT, INITIAL_TERM_ID); final long maxPosition = initialPosition + ALIGNED_FRAME_LENGTH; position.setOrdered(initialPosition); final Image image = createImage(); insertDataFrame(INITIAL_TERM_ID, offsetForFrame(0)); insertDataFrame(INITIAL_TERM_ID, offsetForFrame(1)); final int fragmentsRead = image.boundedPoll(mockFragmentHandler, maxPosition, Integer.MAX_VALUE); assertThat(fragmentsRead, is(1)); final InOrder inOrder = Mockito.inOrder(position, mockFragmentHandler); inOrder.verify(mockFragmentHandler).onFragment( any(UnsafeBuffer.class), eq(HEADER_LENGTH), eq(DATA.length), any(Header.class)); inOrder.verify(position).setOrdered(initialPosition + ALIGNED_FRAME_LENGTH); }
// Writes the option into the underlying Configuration; returns this for chaining.
@Override public <T> TableConfig set(ConfigOption<T> option, T value) { configuration.set(option, value); return this; }
// "UT+8" is not an accepted time-zone id; reading it back must fail validation.
@Test void testGetInvalidLocalTimeZoneUT() { CONFIG_BY_CONFIGURATION.set("table.local-time-zone", "UT+8"); assertThatThrownBy(CONFIG_BY_CONFIGURATION::getLocalTimeZone) .isInstanceOf(ValidationException.class) .hasMessageContaining("Invalid time zone."); }
// Creates a lazily-opened GCS input file for the given location, sharing this
// IO's client, properties, and metrics.
@Override public InputFile newInputFile(String path) { return GCSInputFile.fromLocation(path, client(), gcpProperties, metrics); }
/**
 * Round-trip: a new input file does not exist until written, reads back the
 * exact bytes written, and no longer exists after deletion.
 */
@Test
public void newInputFile() throws IOException {
  String location = format("gs://%s/path/to/file.txt", TEST_BUCKET);
  byte[] expected = new byte[1024 * 1024];
  random.nextBytes(expected);
  InputFile in = io.newInputFile(location);
  assertThat(in.exists()).isFalse();
  OutputFile out = io.newOutputFile(location);
  try (OutputStream os = out.createOrOverwrite()) {
    IOUtil.writeFully(os, ByteBuffer.wrap(expected));
  }
  assertThat(in.exists()).isTrue();
  byte[] actual = new byte[1024 * 1024];
  try (InputStream is = in.newStream()) {
    IOUtil.readFully(is, actual, 0, actual.length);
  }
  // Fix: AssertJ's assertThat takes the ACTUAL value as its subject; the
  // original asserted on `expected`, inverting the failure message.
  assertThat(actual).isEqualTo(expected);
  io.deleteFile(in);
  assertThat(io.newInputFile(location).exists()).isFalse();
}
// Enqueues a merge task for the given config datum, but only when this node
// is allowed to execute merges (canExecute gate, e.g. leadership/standalone).
public void addMergeTask(String dataId, String groupId, String tenant, String clientIp) { if (!canExecute()) { return; } MergeDataTask task = new MergeDataTask(dataId, groupId, tenant, clientIp); mergeTasks.addTask(task.getId(), task); }
// In embedded-storage cluster mode on a non-leader node, addMergeTask must
// not enqueue anything.
@Test void testAddMergeTaskEmbeddedAndClusterModelNotLeader() { DatasourceConfiguration.setEmbeddedStorage(true); envUtilMockedStatic.when(() -> EnvUtil.getStandaloneMode()).thenReturn(false); TaskManager mockTasker = Mockito.mock(TaskManager.class); ReflectionTestUtils.setField(mergeDatumService, "mergeTasks", mockTasker); //mock not leader CPProtocol cpProtocol = Mockito.mock(CPProtocol.class); when(protocolManager.getCpProtocol()).thenReturn(cpProtocol); when(cpProtocol.isLeader(eq(CONFIG_MODEL_RAFT_GROUP))).thenReturn(false); String dataId = "dataId12345"; String group = "group123"; String tenant = "tenant1234"; String clientIp = "127.0.0.1"; mergeDatumService.addMergeTask(dataId, group, tenant, clientIp); Mockito.verify(mockTasker, times(0)).addTask(anyString(), any(MergeDataTask.class)); }
// Builds the POSIX view on top of the inherited "basic" and "owner" views,
// which the framework guarantees to be present in inheritedViews.
@Override public PosixFileAttributeView view( FileLookup lookup, ImmutableMap<String, FileAttributeView> inheritedViews) { return new View( lookup, (BasicFileAttributeView) inheritedViews.get("basic"), (FileOwnerAttributeView) inheritedViews.get("owner")); }
// End-to-end exercise of the posix view: reading defaults and round-tripping
// owner, group and permissions through both the view and the file attributes.
@Test
public void testView() throws IOException {
    file.setAttribute("owner", "owner", createUserPrincipal("user"));
    PosixFileAttributeView view = provider.view(
        fileLookup(),
        ImmutableMap.of(
            "basic", new BasicAttributeProvider().view(fileLookup(), NO_INHERITED_VIEWS),
            "owner", new OwnerAttributeProvider().view(fileLookup(), NO_INHERITED_VIEWS)));
    assertNotNull(view);
    assertThat(view.name()).isEqualTo("posix");
    assertThat(view.getOwner()).isEqualTo(createUserPrincipal("user"));
    // Defaults as reported through readAttributes().
    PosixFileAttributes attrs = view.readAttributes();
    assertThat(attrs.fileKey()).isEqualTo(0);
    assertThat(attrs.owner()).isEqualTo(createUserPrincipal("user"));
    assertThat(attrs.group()).isEqualTo(createGroupPrincipal("group"));
    assertThat(attrs.permissions()).isEqualTo(PosixFilePermissions.fromString("rw-r--r--"));
    // Setting via the view must be visible both through the view and the file.
    view.setOwner(createUserPrincipal("root"));
    assertThat(view.getOwner()).isEqualTo(createUserPrincipal("root"));
    assertThat(file.getAttribute("owner", "owner")).isEqualTo(createUserPrincipal("root"));
    view.setGroup(createGroupPrincipal("root"));
    assertThat(view.readAttributes().group()).isEqualTo(createGroupPrincipal("root"));
    assertThat(file.getAttribute("posix", "group")).isEqualTo(createGroupPrincipal("root"));
    view.setPermissions(PosixFilePermissions.fromString("rwx------"));
    assertThat(view.readAttributes().permissions())
        .isEqualTo(PosixFilePermissions.fromString("rwx------"));
    assertThat(file.getAttribute("posix", "permissions"))
        .isEqualTo(PosixFilePermissions.fromString("rwx------"));
}
/**
 * Opens the file at the given path for reading.
 *
 * @param path filesystem path of the file to open
 * @return an input stream over the file's contents
 * @throws FileNotFoundException if no file exists at {@code path}
 * @throws IOException on other I/O failures
 */
@Override
public InputStream open(String path) throws IOException {
    final File source = new File(path);
    if (source.exists()) {
        return new FileInputStream(source);
    }
    throw new FileNotFoundException("File " + source + " not found");
}
// Reads the bundled example resource through the provider and checks its text.
@Test
void readsFileContents() throws Exception {
    try (InputStream input = provider.open(getClass().getResource("/example.txt").getFile());
         ByteArrayOutputStream output = new ByteArrayOutputStream()) {
        // Copy the stream in fixed-size chunks.
        byte[] chunk = new byte[1024];
        for (int read = input.read(chunk); read != -1; read = input.read(chunk)) {
            output.write(chunk, 0, read);
        }
        assertThat(new String(output.toByteArray(), StandardCharsets.UTF_8).trim()).isEqualTo("whee");
    }
}
public static Path mergePaths(Path path1, Path path2) { String path2Str = path2.toUri().getPath(); path2Str = path2Str.substring(startPositionWithoutWindowsDrive(path2Str)); // Add path components explicitly, because simply concatenating two path // string is not safe, for example: // "/" + "/foo" yields "//foo", which will be parsed as authority in Path return new Path(path1.toUri().getScheme(), path1.toUri().getAuthority(), path1.toUri().getPath() + path2Str); }
// Covers plain merges, Windows drive-spec handling (platform dependent via
// Shell.WINDOWS), root merging, and that path1's scheme/authority win.
@Test (timeout = 30000)
public void testMergePaths() {
    assertEquals(new Path("/foo/bar"),
        Path.mergePaths(new Path("/foo"), new Path("/bar")));
    assertEquals(new Path("/foo/bar/baz"),
        Path.mergePaths(new Path("/foo/bar"), new Path("/baz")));
    assertEquals(new Path("/foo/bar/baz"),
        Path.mergePaths(new Path("/foo"), new Path("/bar/baz")));
    // On Windows the second path's drive spec is stripped; elsewhere it is kept.
    assertEquals(new Path(Shell.WINDOWS ? "/C:/foo/bar" : "/C:/foo/C:/bar"),
        Path.mergePaths(new Path("/C:/foo"), new Path("/C:/bar")));
    assertEquals(new Path(Shell.WINDOWS ? "/C:/bar" : "/C:/C:/bar"),
        Path.mergePaths(new Path("/C:/"), new Path("/C:/bar")));
    assertEquals(new Path("/bar"),
        Path.mergePaths(new Path("/"), new Path("/bar")));
    // Scheme and authority always come from the first path.
    assertEquals(new Path("viewfs:///foo/bar"),
        Path.mergePaths(new Path("viewfs:///foo"), new Path("file:///bar")));
    assertEquals(new Path("viewfs://vfsauthority/foo/bar"),
        Path.mergePaths(new Path("viewfs://vfsauthority/foo"),
            new Path("file://fileauthority/bar")));
}
/**
 * Returns (or creates) the time limiter registered under {@code name},
 * using the registry-wide default configuration and no extra tags.
 */
@Override
public TimeLimiter timeLimiter(final String name) {
    return timeLimiter(name, getDefaultConfig(), emptyMap());
}
// A null name must be rejected with a NullPointerException carrying the
// standard message, even when a config supplier is provided.
@Test
public void timeLimiterNewWithNullNameAndConfigSupplier() {
    exception.expect(NullPointerException.class);
    exception.expectMessage(NAME_MUST_NOT_BE_NULL);
    TimeLimiterRegistry registry = new InMemoryTimeLimiterRegistry(config);
    registry.timeLimiter(null, () -> config);
}
/**
 * First stage of the balance procedure: submit the initial DistCp of the
 * source snapshot to the destination, or poll a previously submitted job.
 *
 * @throws RetryException if the submitted DistCp job is still running; the
 *     scheduler is expected to call this method again later.
 * @throws IOException on snapshot creation or job submission failure.
 */
void initDistCp() throws IOException, RetryException {
    RunningJobStatus job = getCurrentJob();
    if (job != null) { // the distcp has been submitted.
        if (job.isComplete()) {
            jobId = null; // unset jobId because the job is done.
            if (job.isSuccessful()) {
                // Initial copy succeeded; advance to the incremental diff stage.
                updateStage(Stage.DIFF_DISTCP);
                return;
            } else {
                // Job failed: log and return. jobId was cleared above, so a
                // later invocation presumably resubmits — TODO confirm against
                // getCurrentJob()'s behavior.
                LOG.warn("DistCp failed. Failure={}", job.getFailureInfo());
            }
        } else {
            // Still running; signal the caller to retry later.
            throw new RetryException();
        }
    } else {
        pathCheckBeforeInitDistcp();
        // Snapshot the source so the copy works from a consistent view.
        srcFs.createSnapshot(src, CURRENT_SNAPSHOT_NAME);
        jobId = submitDistCpJob(
            src.toString() + HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR_SEPARATOR
                + CURRENT_SNAPSHOT_NAME,
            dst.toString(), false);
    }
}
// Verifies that initDistCp copies from a snapshot: a file deleted AFTER the
// job is submitted must still appear at the destination.
@Test
public void testInitDistCp() throws Exception {
    String testRoot = nnUri + "/user/foo/testdir." + getMethodName();
    DistributedFileSystem fs =
        (DistributedFileSystem) FileSystem.get(URI.create(nnUri), conf);
    createFiles(fs, testRoot, srcfiles);
    Path src = new Path(testRoot, SRCDAT);
    Path dst = new Path(testRoot, DSTDAT);
    // set permission.
    fs.setPermission(src, FsPermission.createImmutable((short) 020));
    FedBalanceContext context = buildContext(src, dst, MOUNT);
    DistCpProcedure dcProcedure =
        new DistCpProcedure("distcp-procedure", null, 1000, context);
    // submit distcp.
    try {
        dcProcedure.initDistCp();
    } catch (RetryException e) {
        // Expected while the freshly submitted job is still running; the
        // procedure is retried below until it reaches the DIFF_DISTCP stage.
    }
    // Deleted after the snapshot was taken, so the copy must still include it.
    fs.delete(new Path(src, "a"), true);
    // wait until job done.
    executeProcedure(dcProcedure, Stage.DIFF_DISTCP, () -> dcProcedure.initDistCp());
    assertTrue(fs.exists(dst));
    // Because we used snapshot, the file should be copied.
    assertTrue(fs.exists(new Path(dst, "a")));
    cleanup(fs, new Path(testRoot));
}
/**
 * Great-circle distance in meters between this point and {@code other},
 * computed with the haversine formula on a spherical Earth of radius
 * {@code RADIUS_EARTH_METERS}.
 */
public double distanceToAsDouble(final IGeoPoint other) {
    // Convert both coordinates to radians.
    final double phi1 = DEG2RAD * getLatitude();
    final double phi2 = DEG2RAD * other.getLatitude();
    final double lambda1 = DEG2RAD * getLongitude();
    final double lambda2 = DEG2RAD * other.getLongitude();
    // Haversine term; Math.min guards asin against rounding past 1.
    final double haversine = Math.pow(Math.sin((phi2 - phi1) / 2), 2)
            + Math.cos(phi1) * Math.cos(phi2) * Math.pow(Math.sin((lambda2 - lambda1) / 2), 2);
    return RADIUS_EARTH_METERS * 2 * Math.asin(Math.min(1, Math.sqrt(haversine)));
}
// Along the equator the great-circle distance equals the arc length
// radius * delta-longitude (in radians); checks this for ever smaller
// longitude differences within a relative tolerance.
@Test
public void test_distanceTo_Equator_Smaller() {
    final double ratioDelta = 1E-5;
    final int iterations = 10;
    final double latitude = 0;
    double longitudeIncrement = 1;
    for (int i = 0; i < iterations; i++) {
        final double longitude1 = getRandomLongitude();
        final double longitude2 = longitude1 + longitudeIncrement;
        // Shrink the separation by 10x each round.
        longitudeIncrement /= 10.;
        final GeoPoint target = new GeoPoint(latitude, longitude1);
        final GeoPoint other = new GeoPoint(latitude, longitude2);
        final double diff = getCleanLongitudeDiff(longitude1, longitude2);
        final double expected = GeoConstants.RADIUS_EARTH_METERS * diff * MathConstants.DEG2RAD;
        // Skip separations too small to compare meaningfully.
        if (expected < minimumDistance) {
            continue;
        }
        final double delta = expected * ratioDelta;
        assertEquals("distance between " + target + " and " + other,
                expected, target.distanceToAsDouble(other), delta);
    }
}
/**
 * Always returns {@code null} — this implementation holds no values for any
 * header name.
 */
@Override
public Double getDouble(K name) {
    return null;
}
// The empty headers instance must report null for any double-valued header.
@Test
public void testGetDouble() {
    assertNull(HEADERS.getDouble("name1"));
}
/**
 * Finds the worst-case sequence of host losses that leads to an allocation
 * failure, if any, using a greedy heuristic.
 */
public Optional<HostFailurePath> worstCaseHostLossLeadingToFailure() {
    // Budget: how many times each node can be removed before things break.
    final Map<Node, Integer> removalBudget = computeMaximalRepeatedRemovals();
    return greedyHeuristicFindFailurePath(removalBudget);
}
// With only a single IP per host, every allocation failure on the worst-case
// host-loss path must be attributable to insufficient available IPs.
@Test
public void testIpFailurePaths() {
    CapacityCheckerTester tester = new CapacityCheckerTester();
    tester.createNodes(1, 10,
            10, new NodeResources(10, 1000, 10000, 1), 1,
            10, new NodeResources(10, 1000, 10000, 1), 1);
    var failurePath = tester.capacityChecker.worstCaseHostLossLeadingToFailure();
    assertTrue(failurePath.isPresent());
    if (failurePath.get().failureReason.tenant.isPresent()) {
        var failureReasons = failurePath.get().failureReason.allocationFailures;
        // Every single failure must be an IP-exhaustion failure.
        assertEquals("All failures should be due to hosts having a lack of available ip addresses.",
                failureReasons.singularReasonFailures().insufficientAvailableIps(),
                failureReasons.size());
    } else {
        fail();
    }
}
/**
 * Sends the request with no additional per-request context; delegates to
 * {@code send(HttpRequest, context)} with a {@code null} context.
 */
@Override
public HttpResponse send(HttpRequest httpRequest) throws IOException {
    return send(httpRequest, null);
}
// POST against a MockWebServer; the client must surface status, headers
// (including the Content-Length MockWebServer adds), body bytes and the
// final response URL exactly.
@Test
public void send_whenPostRequest_returnsExpectedHttpResponse() throws IOException {
    String responseBody = "{ \"test\": \"json\" }";
    mockWebServer.enqueue(
        new MockResponse()
            .setResponseCode(HttpStatus.OK.code())
            .setHeader(CONTENT_TYPE, MediaType.JSON_UTF_8.toString())
            .setBody(responseBody));
    mockWebServer.start();
    String requestUrl = mockWebServer.url("/test/post").toString();

    HttpResponse response =
        httpClient.send(
            post(requestUrl)
                .setHeaders(
                    HttpHeaders.builder()
                        .addHeader(ACCEPT, MediaType.JSON_UTF_8.toString())
                        .build())
                .build());

    assertThat(response)
        .isEqualTo(
            HttpResponse.builder()
                .setStatus(HttpStatus.OK)
                .setHeaders(
                    HttpHeaders.builder()
                        .addHeader(CONTENT_TYPE, MediaType.JSON_UTF_8.toString())
                        // MockWebServer always adds this response header.
                        .addHeader(CONTENT_LENGTH, String.valueOf(responseBody.length()))
                        .build())
                .setBodyBytes(ByteString.copyFrom(responseBody, UTF_8))
                .setResponseUrl(HttpUrl.parse(requestUrl))
                .build());
}
/**
 * Builds the Redis key under which the selector data of the given plugin is
 * stored: the plugin name followed by the {@code PLUGIN_SELECTOR} suffix.
 *
 * @param pluginName the plugin name
 * @return the composed Redis key
 */
public static String pluginSelectorKey(final String pluginName) {
    // Plain concatenation is clearer than String.join with an empty delimiter.
    return pluginName + PLUGIN_SELECTOR;
}
// pluginSelectorKey must return a non-null key equal to name + PLUGIN_SELECTOR.
@Test
public void testPluginSelectorKey() {
    String mockPlugin = "MockPlugin";
    String mockPluginSelectorKey = RedisKeyConstants.pluginSelectorKey(mockPlugin);
    // Fixed: the original asserted notNullValue() on the local literal, which
    // can never be null; the computed key is what needs checking.
    assertThat(mockPluginSelectorKey, notNullValue());
    assertThat(String.join("", mockPlugin, PLUGIN_SELECTOR), equalTo(mockPluginSelectorKey));
}
/** Returns the minimum aggregated so far on this instance. */
@Override
public Long getLocalValue() {
    return this.min;
}
// A fresh LongMinimum must start at the identity element Long.MAX_VALUE.
@Test
void testGet() {
    LongMinimum min = new LongMinimum();
    assertThat(min.getLocalValue().longValue()).isEqualTo(Long.MAX_VALUE);
}
/**
 * Maps a SeaTunnel {@link Column} definition back to a native Oracle type
 * definition, clamping out-of-range precision/scale values and logging a
 * warning whenever an adjustment is made.
 */
@Override
public BasicTypeDefine reconvert(Column column) {
    BasicTypeDefine.BasicTypeDefineBuilder builder =
            BasicTypeDefine.builder()
                    .name(column.getName())
                    .nullable(column.isNullable())
                    .comment(column.getComment())
                    .defaultValue(column.getDefaultValue());
    switch (column.getDataType().getSqlType()) {
        case BOOLEAN:
            // Oracle has no BOOLEAN column type; represent it as NUMBER(1).
            builder.columnType(String.format("%s(%s)", ORACLE_NUMBER, 1));
            builder.dataType(ORACLE_NUMBER);
            builder.length(1L);
            break;
        case TINYINT:
        case SMALLINT:
        case INT:
        case BIGINT:
            // All integral widths collapse to Oracle INTEGER.
            builder.columnType(ORACLE_INTEGER);
            builder.dataType(ORACLE_INTEGER);
            break;
        case FLOAT:
            builder.columnType(ORACLE_BINARY_FLOAT);
            builder.dataType(ORACLE_BINARY_FLOAT);
            break;
        case DOUBLE:
            builder.columnType(ORACLE_BINARY_DOUBLE);
            builder.dataType(ORACLE_BINARY_DOUBLE);
            break;
        case DECIMAL:
            // Clamp precision into (0, MAX_PRECISION] and scale into
            // [0, MAX_SCALE]; note that reducing precision also reduces scale
            // by the same amount (floored at 0).
            DecimalType decimalType = (DecimalType) column.getDataType();
            long precision = decimalType.getPrecision();
            int scale = decimalType.getScale();
            if (precision <= 0) {
                precision = DEFAULT_PRECISION;
                scale = DEFAULT_SCALE;
                log.warn(
                        "The decimal column {} type decimal({},{}) is out of range, "
                                + "which is precision less than 0, "
                                + "it will be converted to decimal({},{})",
                        column.getName(),
                        decimalType.getPrecision(),
                        decimalType.getScale(),
                        precision,
                        scale);
            } else if (precision > MAX_PRECISION) {
                scale = (int) Math.max(0, scale - (precision - MAX_PRECISION));
                precision = MAX_PRECISION;
                log.warn(
                        "The decimal column {} type decimal({},{}) is out of range, "
                                + "which exceeds the maximum precision of {}, "
                                + "it will be converted to decimal({},{})",
                        column.getName(),
                        decimalType.getPrecision(),
                        decimalType.getScale(),
                        MAX_PRECISION,
                        precision,
                        scale);
            }
            if (scale < 0) {
                scale = 0;
                log.warn(
                        "The decimal column {} type decimal({},{}) is out of range, "
                                + "which is scale less than 0, "
                                + "it will be converted to decimal({},{})",
                        column.getName(),
                        decimalType.getPrecision(),
                        decimalType.getScale(),
                        precision,
                        scale);
            } else if (scale > MAX_SCALE) {
                scale = MAX_SCALE;
                log.warn(
                        "The decimal column {} type decimal({},{}) is out of range, "
                                + "which exceeds the maximum scale of {}, "
                                + "it will be converted to decimal({},{})",
                        column.getName(),
                        decimalType.getPrecision(),
                        decimalType.getScale(),
                        MAX_SCALE,
                        precision,
                        scale);
            }
            builder.columnType(String.format("%s(%s,%s)", ORACLE_NUMBER, precision, scale));
            builder.dataType(ORACLE_NUMBER);
            builder.precision(precision);
            builder.scale(scale);
            break;
        case BYTES:
            // Unknown length -> BLOB; small payloads fit RAW(n); larger -> BLOB.
            if (column.getColumnLength() == null || column.getColumnLength() <= 0) {
                builder.columnType(ORACLE_BLOB);
                builder.dataType(ORACLE_BLOB);
            } else if (column.getColumnLength() <= MAX_RAW_LENGTH) {
                builder.columnType(
                        String.format("%s(%s)", ORACLE_RAW, column.getColumnLength()));
                builder.dataType(ORACLE_RAW);
            } else {
                builder.columnType(ORACLE_BLOB);
                builder.dataType(ORACLE_BLOB);
            }
            break;
        case STRING:
            // Unknown length -> VARCHAR2(MAX_VARCHAR_LENGTH); text longer than
            // the VARCHAR2 limit becomes CLOB.
            if (column.getColumnLength() == null || column.getColumnLength() <= 0) {
                builder.columnType(
                        String.format("%s(%s)", ORACLE_VARCHAR2, MAX_VARCHAR_LENGTH));
                builder.dataType(ORACLE_VARCHAR2);
            } else if (column.getColumnLength() <= MAX_VARCHAR_LENGTH) {
                builder.columnType(
                        String.format("%s(%s)", ORACLE_VARCHAR2, column.getColumnLength()));
                builder.dataType(ORACLE_VARCHAR2);
            } else {
                builder.columnType(ORACLE_CLOB);
                builder.dataType(ORACLE_CLOB);
            }
            break;
        case DATE:
            builder.columnType(ORACLE_DATE);
            builder.dataType(ORACLE_DATE);
            break;
        case TIMESTAMP:
            // Fractional-second precision is clamped to MAX_TIMESTAMP_SCALE;
            // absent/non-positive scale uses the bare type name.
            if (column.getScale() == null || column.getScale() <= 0) {
                builder.columnType(ORACLE_TIMESTAMP_WITH_LOCAL_TIME_ZONE);
            } else {
                int timestampScale = column.getScale();
                if (column.getScale() > MAX_TIMESTAMP_SCALE) {
                    timestampScale = MAX_TIMESTAMP_SCALE;
                    log.warn(
                            "The timestamp column {} type timestamp({}) is out of range, "
                                    + "which exceeds the maximum scale of {}, "
                                    + "it will be converted to timestamp({})",
                            column.getName(),
                            column.getScale(),
                            MAX_TIMESTAMP_SCALE,
                            timestampScale);
                }
                builder.columnType(
                        String.format("TIMESTAMP(%s) WITH LOCAL TIME ZONE", timestampScale));
                builder.scale(timestampScale);
            }
            builder.dataType(ORACLE_TIMESTAMP_WITH_LOCAL_TIME_ZONE);
            break;
        default:
            // No Oracle mapping for this SQL type.
            throw CommonError.convertToConnectorTypeError(
                    DatabaseIdentifier.ORACLE,
                    column.getDataType().getSqlType().name(),
                    column.getName());
    }
    return builder.build();
}
// BYTE (tinyint) columns must reconvert to Oracle INTEGER.
@Test
public void testReconvertByte() {
    Column column = PhysicalColumn.builder().name("test").dataType(BasicType.BYTE_TYPE).build();
    BasicTypeDefine typeDefine = OracleTypeConverter.INSTANCE.reconvert(column);
    Assertions.assertEquals(column.getName(), typeDefine.getName());
    Assertions.assertEquals(OracleTypeConverter.ORACLE_INTEGER, typeDefine.getColumnType());
    Assertions.assertEquals(OracleTypeConverter.ORACLE_INTEGER, typeDefine.getDataType());
}
/**
 * Loads a (possibly password-protected) PEM private key from {@code keyFile},
 * delegating to the three-argument overload with BouncyCastle fallback
 * enabled (the trailing {@code true}).
 */
protected static PrivateKey toPrivateKey(File keyFile, String keyPassword) throws NoSuchAlgorithmException,
                                                                NoSuchPaddingException, InvalidKeySpecException,
                                                                InvalidAlgorithmParameterException,
                                                                KeyException, IOException {
    return toPrivateKey(keyFile, keyPassword, true);
}
// Decrypting an encrypted PEM key with a null password must fail with
// InvalidKeySpecException rather than silently producing a key.
@Test
public void testEncryptedNullPassword() throws Exception {
    assertThrows(InvalidKeySpecException.class, new Executable() {
        @Override
        public void execute() throws Throwable {
            SslContext.toPrivateKey(
                    ResourcesUtil.getFile(getClass(), "test_encrypted_empty_pass.pem"), null);
        }
    });
}
/**
 * Reads a MySQL binlog JSON column value: the byte length is determined by
 * the column meta, the raw bytes are copied into a dedicated buffer, decoded,
 * and the buffer is always released afterwards.
 */
@Override
public Serializable read(final MySQLBinlogColumnDef columnDef, final MySQLPacketPayload payload) {
    final int valueLength = readLengthFromMeta(columnDef.getColumnMeta(), payload);
    final ByteBuf jsonByteBuf = payload.getByteBuf().readBytes(valueLength);
    try {
        return MySQLJsonValueDecoder.decode(jsonByteBuf);
    } finally {
        // Release the copied buffer regardless of decode success.
        jsonByteBuf.release();
    }
}
// With column meta 3, the value length is read as a 3-byte little-endian
// unsigned medium; the decoded JSON must match the expected string.
@Test
void assertReadJsonValueWithMeta3() {
    columnDef.setColumnMeta(3);
    when(byteBuf.readUnsignedMediumLE()).thenReturn(3);
    when(byteBuf.readBytes(3)).thenReturn(jsonValueByteBuf);
    assertThat(new MySQLJsonBinlogProtocolValue().read(columnDef, payload), is(EXPECTED_JSON));
}
/**
 * One-shot migration: strips the legacy dashboard-state field from all view
 * documents that still carry it, then records completion (with the affected
 * ids) in the cluster config so the migration never runs twice.
 */
@Override
public void upgrade() {
    // Idempotence guard: skip if a previous run already completed.
    if (clusterConfigService.get(MigrationCompleted.class) != null) {
        LOG.debug("Migration already completed.");
        return;
    }

    // Collect the ids BEFORE updating, so the completion record lists exactly
    // the documents that had the legacy field.
    final Set<String> legacyViewIds = StreamSupport.stream(viewsCollection.find(exists(FIELD_DASHBOARD_STATE)).spliterator(), false)
            .map(doc -> doc.getObjectId(FIELD_ID))
            .map(ObjectId::toString)
            .collect(Collectors.toSet());

    final UpdateResult updateResult = viewsCollection.updateMany(exists(FIELD_DASHBOARD_STATE), unset(FIELD_DASHBOARD_STATE));
    LOG.debug("Migrated " + updateResult.getModifiedCount() + " views.");

    clusterConfigService.write(MigrationCompleted.create(updateResult.getModifiedCount(), legacyViewIds));
}
// The fixture seeds 4 views carrying dashboard state; after the migration all
// 4 must be modified (field removed), none deleted, and completion recorded.
@Test
@MongoDBFixtures("V20190805115800_RemoveDashboardStateFromViewsTest.json")
public void removesDashboardStateFromExistingViews() {
    final Migration migration = new V20190805115800_RemoveDashboardStateFromViews(clusterConfigService, mongodb.mongoConnection());
    migration.upgrade();
    final ArgumentCaptor<V20190805115800_RemoveDashboardStateFromViews.MigrationCompleted> argumentCaptor =
            ArgumentCaptor.forClass(V20190805115800_RemoveDashboardStateFromViews.MigrationCompleted.class);
    verify(clusterConfigService, times(1)).write(argumentCaptor.capture());
    assertThat(argumentCaptor.getValue().modifiedViewsCount()).isEqualTo(4);
    // Views must be stripped, not removed.
    MongoCollection<Document> collection = mongodb.mongoConnection().getMongoDatabase().getCollection("views");
    assertThat(collection.countDocuments()).isEqualTo(4);
}
/**
 * Whether the given id equals this image's id. A {@code null} argument yields
 * {@code false}; throws NPE if this image has no id — presumably ids are
 * always assigned, TODO confirm.
 */
public boolean isSameImageId(final Long id) {
    final Long ownId = this.id;
    return ownId.equals(id);
}
// Comparing against a different image id must return false.
@Test
void 이미지의_아이디와_다르면_false를_반환한다() {
    // given
    Image image = 이미지를_생성한다();

    // when: compare with an id that is guaranteed to differ.
    boolean result = image.isSameImageId(image.getId() + 1);

    // then
    assertThat(result).isFalse();
}
/** Loads a job-log record by primary key; returns {@code null} if absent. */
@Override
public JobLogDO getJobLog(Long id) {
    return jobLogMapper.selectById(id);
}
// Inserts a random job log, then verifies getJobLog returns an equal record.
@Test
public void testGetJobLog() {
    // mock data
    JobLogDO dbJobLog = randomPojo(JobLogDO.class, o -> o.setExecuteIndex(1));
    jobLogMapper.insert(dbJobLog);
    // prepare parameters
    Long id = dbJobLog.getId();
    // invoke
    JobLogDO jobLog = jobLogService.getJobLog(id);
    // assert
    assertPojoEquals(dbJobLog, jobLog);
}
/**
 * Resolves the task candidate users from the strategy parameter, which is a
 * comma-separated list of user ids (e.g. {@code "1,2"}).
 */
@Override
public Set<Long> calculateUsers(DelegateExecution execution, String param) {
    return StrUtils.splitToLongSet(param);
}
// "1,2" must parse into the user-id set {1, 2}.
@Test
public void testCalculateUsers() {
    // prepare parameters
    String param = "1,2";
    // invoke
    Set<Long> results = strategy.calculateUsers(null, param);
    // assert
    assertEquals(asSet(1L, 2L), results);
}
/**
 * Daemon entry point invoked once the catalog is ready. Deliberately catches
 * {@link Throwable} so that one failed scheduling round never kills the
 * background thread; the error is logged and the next round proceeds.
 */
@Override
protected void runAfterCatalogReady() {
    try {
        process();
    } catch (Throwable e) {
        LOG.warn("Failed to process one round of RoutineLoadScheduler", e);
    }
}
// One full scheduler cycle over a NEED_SCHEDULE Kafka job with 3 custom
// partitions, 2 backends and desired concurrency 3: the partitions must be
// distributed across the created tasks (round-robin: {100,300} and {200}).
@Test
public void testNormalRunOneCycle(@Mocked GlobalStateMgr globalStateMgr,
                                  @Injectable RoutineLoadMgr routineLoadManager,
                                  @Injectable SystemInfoService systemInfoService,
                                  @Injectable Database database,
                                  @Injectable RoutineLoadDesc routineLoadDesc,
                                  @Mocked StreamLoadPlanner planner,
                                  @Injectable OlapTable olapTable) throws LoadException, MetaNotFoundException {
    List<Long> beIds = Lists.newArrayList();
    beIds.add(1L);
    beIds.add(2L);

    List<Integer> partitions = Lists.newArrayList();
    partitions.add(100);
    partitions.add(200);
    partitions.add(300);

    RoutineLoadTaskScheduler routineLoadTaskScheduler = new RoutineLoadTaskScheduler(routineLoadManager);
    Deencapsulation.setField(globalStateMgr, "routineLoadTaskScheduler", routineLoadTaskScheduler);

    // Job in NEED_SCHEDULE state with explicit custom partitions.
    KafkaRoutineLoadJob kafkaRoutineLoadJob = new KafkaRoutineLoadJob(1L, "test", 1L, 1L, "xxx", "test");
    Deencapsulation.setField(kafkaRoutineLoadJob, "state", RoutineLoadJob.JobState.NEED_SCHEDULE);
    List<RoutineLoadJob> routineLoadJobList = new ArrayList<>();
    routineLoadJobList.add(kafkaRoutineLoadJob);
    Deencapsulation.setField(kafkaRoutineLoadJob, "customKafkaPartitions", partitions);
    Deencapsulation.setField(kafkaRoutineLoadJob, "desireTaskConcurrentNum", 3);

    // Stub out catalog/cluster lookups the scheduler performs.
    new Expectations() {
        {
            globalStateMgr.getRoutineLoadMgr();
            minTimes = 0;
            result = routineLoadManager;
            routineLoadManager.getRoutineLoadJobByState(Sets.newHashSet(RoutineLoadJob.JobState.NEED_SCHEDULE));
            minTimes = 0;
            result = routineLoadJobList;
            globalStateMgr.getDb(anyLong);
            minTimes = 0;
            result = database;
            database.getTable(1L);
            minTimes = 0;
            result = olapTable;
            systemInfoService.getBackendIds(true);
            minTimes = 0;
            result = beIds;
            routineLoadManager.getSizeOfIdToRoutineLoadTask();
            minTimes = 0;
            result = 1;
        }
    };

    RoutineLoadScheduler routineLoadScheduler = new RoutineLoadScheduler();
    Deencapsulation.setField(routineLoadScheduler, "routineLoadManager", routineLoadManager);
    routineLoadScheduler.runAfterCatalogReady();

    // Inspect the tasks created on the job and verify the partition split.
    List<RoutineLoadTaskInfo> routineLoadTaskInfoList =
            Deencapsulation.getField(kafkaRoutineLoadJob, "routineLoadTaskInfoList");
    for (RoutineLoadTaskInfo routineLoadTaskInfo : routineLoadTaskInfoList) {
        KafkaTaskInfo kafkaTaskInfo = (KafkaTaskInfo) routineLoadTaskInfo;
        if (kafkaTaskInfo.getPartitions().size() == 2) {
            Assert.assertTrue(kafkaTaskInfo.getPartitions().contains(100));
            Assert.assertTrue(kafkaTaskInfo.getPartitions().contains(300));
        } else {
            Assert.assertTrue(kafkaTaskInfo.getPartitions().contains(200));
        }
    }
}
@Override @CacheEvict(cacheNames = RedisKeyConstants.SMS_TEMPLATE, allEntries = true) // allEntries 清空所有缓存,因为 id 不是直接的缓存 code,不好清理 public void deleteSmsTemplate(Long id) { // 校验存在 validateSmsTemplateExists(id); // 更新 smsTemplateMapper.deleteById(id); }
// Deleting an existing template must remove it from the database.
@Test
public void testDeleteSmsTemplate_success() {
    // mock data
    SmsTemplateDO dbSmsTemplate = randomSmsTemplateDO();
    smsTemplateMapper.insert(dbSmsTemplate); // insert an existing record first
    // prepare parameters
    Long id = dbSmsTemplate.getId();
    // invoke
    smsTemplateService.deleteSmsTemplate(id);
    // verify the record no longer exists
    assertNull(smsTemplateMapper.selectById(id));
}
/**
 * Looks up a plugin by key.
 *
 * @throws IllegalStateException if the repository has not been started
 * @throws IllegalArgumentException if no plugin with that key exists
 */
@Override
public PluginInfo getPluginInfo(String key) {
    checkState(started.get(), NOT_STARTED_YET);
    PluginInfo pluginInfo = pluginInfosByKeys.get(key);
    if (pluginInfo != null) {
        return pluginInfo;
    }
    throw new IllegalArgumentException(format("Plugin [%s] does not exist", key));
}
// Calling getPluginInfo before start() must fail fast with an
// IllegalStateException carrying the "not started yet" message.
@Test
public void getPluginInfo_throws_ISE_if_repo_is_not_started() {
    assertThatThrownBy(() -> underTest.getPluginInfo("foo"))
        .isInstanceOf(IllegalStateException.class)
        .hasMessage("not started yet");
}
/** Returns the chapters parsed so far (live internal list, not a copy). */
public List<Chapter> getChapters() {
    return chapters;
}
// Builds a minimal ID3 tag containing exactly one CHAP frame and verifies the
// reader extracts a single chapter with the expected start time.
@Test
public void testReadFullTagWithChapter() throws IOException, ID3ReaderException {
    byte[] chapter = Id3ReaderTest.concat(
            Id3ReaderTest.generateFrameHeader(ChapterReader.FRAME_ID_CHAPTER, CHAPTER_WITHOUT_SUBFRAME.length),
            CHAPTER_WITHOUT_SUBFRAME);
    byte[] data = Id3ReaderTest.concat(
            Id3ReaderTest.generateId3Header(chapter.length),
            chapter);
    CountingInputStream inputStream = new CountingInputStream(new ByteArrayInputStream(data));
    ChapterReader reader = new ChapterReader(inputStream);
    reader.readInputStream();

    assertEquals(1, reader.getChapters().size());
    assertEquals(CHAPTER_WITHOUT_SUBFRAME_START_TIME, reader.getChapters().get(0).getStart());
}