focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
/**
 * Checks that an encrypted key can be decrypted back to the original private key.
 *
 * @param originalKey  the unencrypted key to compare against
 * @param encryptedKey the encrypted form of the same key
 * @param keyCrypter   the KeyCrypter used to perform the decryption
 * @param aesKey       the AES key derived from the user's password
 * @return true if decryption reproduces the original private key bytes exactly
 */
public static boolean encryptionIsReversible(ECKey originalKey, ECKey encryptedKey, KeyCrypter keyCrypter, AesKey aesKey) {
    try {
        ECKey rebornUnencryptedKey = encryptedKey.decrypt(keyCrypter, aesKey);
        byte[] originalPrivateKeyBytes = originalKey.getPrivKeyBytes();
        byte[] rebornKeyBytes = rebornUnencryptedKey.getPrivKeyBytes();
        if (!Arrays.equals(originalPrivateKeyBytes, rebornKeyBytes)) {
            log.error("The check that encryption could be reversed failed for {}", originalKey);
            return false;
        }
        return true;
    } catch (KeyCrypterException kce) {
        // Fix: log the exception itself (message + stack trace), not just getMessage(),
        // which may be null and always loses the cause chain.
        log.error("The check that encryption could be reversed failed", kce);
        return false;
    }
}
@Test public void testEncryptionIsReversible() { ECKey originalUnencryptedKey = new ECKey(); EncryptedData encryptedPrivateKey = keyCrypter.encrypt(originalUnencryptedKey.getPrivKeyBytes(), keyCrypter.deriveKey(PASSWORD1)); ECKey encryptedKey = ECKey.fromEncrypted(encryptedPrivateKey, keyCrypter, originalUnencryptedKey.getPubKey()); // The key should be encrypted assertTrue("Key not encrypted at start", encryptedKey.isEncrypted()); // Check that the key can be successfully decrypted back to the original. assertTrue("Key encryption is not reversible but it should be", ECKey.encryptionIsReversible(originalUnencryptedKey, encryptedKey, keyCrypter, keyCrypter.deriveKey(PASSWORD1))); // Check that key encryption is not reversible if a password other than the original is used to generate the AES key. assertFalse("Key encryption is reversible with wrong password", ECKey.encryptionIsReversible(originalUnencryptedKey, encryptedKey, keyCrypter, keyCrypter.deriveKey(WRONG_PASSWORD))); // Change one of the encrypted key bytes (this is to simulate a faulty keyCrypter). // Encryption should not be reversible byte[] goodEncryptedPrivateKeyBytes = encryptedPrivateKey.encryptedBytes; // Break the encrypted private key and check it is broken. byte[] badEncryptedPrivateKeyBytes = new byte[goodEncryptedPrivateKeyBytes.length]; encryptedPrivateKey = new EncryptedData(encryptedPrivateKey.initialisationVector, badEncryptedPrivateKeyBytes); ECKey badEncryptedKey = ECKey.fromEncrypted(encryptedPrivateKey, keyCrypter, originalUnencryptedKey.getPubKey()); assertFalse("Key encryption is reversible with faulty encrypted bytes", ECKey.encryptionIsReversible(originalUnencryptedKey, badEncryptedKey, keyCrypter, keyCrypter.deriveKey(PASSWORD1))); }
/**
 * Two NiciraTunGpeNp instances are equal when they carry the same tunGpeNp value.
 */
@Override
public boolean equals(Object obj) {
    if (obj == this) {
        return true;
    }
    // instanceof is false for null, so no explicit null check is needed.
    if (!(obj instanceof NiciraTunGpeNp)) {
        return false;
    }
    NiciraTunGpeNp other = (NiciraTunGpeNp) obj;
    return Objects.equals(tunGpeNp, other.tunGpeNp);
}
@Test
public void testEquals() {
    // Two instances built from np1 must be equal to each other but not to one built from np2.
    final NiciraTunGpeNp first = new NiciraTunGpeNp(np1);
    final NiciraTunGpeNp firstCopy = new NiciraTunGpeNp(np1);
    final NiciraTunGpeNp second = new NiciraTunGpeNp(np2);
    new EqualsTester()
            .addEqualityGroup(first, firstCopy)
            .addEqualityGroup(second)
            .testEquals();
}
/**
 * Forwards the intercepted query to the executor, deriving the cache key and
 * bound SQL itself when the 4-argument query overload was intercepted; the
 * 6-argument overload already carries both.
 */
@Override
public Object intercept(final Invocation invocation) throws Throwable {
    final Object[] args = invocation.getArgs();
    final MappedStatement mappedStatement = (MappedStatement) args[0];
    final Object parameterObject = args[1];
    final RowBounds bounds = (RowBounds) args[2];
    final ResultHandler<?> handler = (ResultHandler<?>) args[3];
    final Executor executor = (Executor) invocation.getTarget();
    final CacheKey cacheKey;
    final BoundSql boundSql;
    if (args.length != 4) {
        // 6-argument overload: cache key and bound SQL were computed upstream.
        cacheKey = (CacheKey) args[4];
        boundSql = (BoundSql) args[5];
    } else {
        boundSql = mappedStatement.getBoundSql(parameterObject);
        cacheKey = executor.createCacheKey(mappedStatement, parameterObject, bounds, boundSql);
    }
    return executor.query(mappedStatement, parameterObject, bounds, handler, cacheKey, boundSql);
}
/**
 * Exercises both interception paths of {@code PostgreSQLQueryInterceptor}:
 * the 4-argument query overload (interceptor derives cache key / bound SQL)
 * and the 6-argument overload (both supplied by the caller).
 */
@Test
public void interceptTest() throws SQLException {
    final PostgreSQLQueryInterceptor postgreSQLQueryInterceptor = new PostgreSQLQueryInterceptor();
    final Invocation invocation = mock(Invocation.class);
    // 4-argument overload: the interceptor must create the cache key and bound SQL itself.
    Object[] args = new Object[4];
    args[0] = mock(MappedStatement.class);
    args[1] = mock(Object.class);
    args[2] = mock(RowBounds.class);
    args[3] = mock(ResultHandler.class);
    when(invocation.getArgs()).thenReturn(args);
    final Executor executor = mock(Executor.class);
    when(invocation.getTarget()).thenReturn(executor);
    when(executor.createCacheKey(any(), any(), any(), any())).thenReturn(mock(CacheKey.class));
    when(executor.query(any(), any(), any(), any(), any(), any())).thenReturn(new ArrayList<>());
    Assertions.assertDoesNotThrow(() -> postgreSQLQueryInterceptor.intercept(invocation));
    // 6-argument overload: cache key and bound SQL are passed in directly.
    args = new Object[6];
    args[0] = mock(MappedStatement.class);
    args[1] = mock(Object.class);
    args[2] = mock(RowBounds.class);
    args[3] = mock(ResultHandler.class);
    args[4] = mock(CacheKey.class);
    args[5] = mock(BoundSql.class);
    when(invocation.getArgs()).thenReturn(args);
    Assertions.assertDoesNotThrow(() -> postgreSQLQueryInterceptor.intercept(invocation));
}
/**
 * Bypasses the proxy for any host matching one of the configured non-proxy
 * patterns; otherwise defers to the default route planning.
 */
@Override
@Nullable
protected HttpHost determineProxy(HttpHost target, HttpContext context) throws HttpException {
    final String hostName = target.getHostName();
    for (Pattern pattern : nonProxyHostPatterns) {
        if (pattern.matcher(hostName).matches()) {
            // Direct connection: the host is explicitly excluded from proxying.
            return null;
        }
    }
    return super.determineProxy(target, context);
}
@Test
void testPlainHostIsMatched() throws Exception {
    // "localhost" matches a non-proxy pattern, so no proxy host may be returned.
    final HttpHost target = new HttpHost("localhost");
    assertThat(routePlanner.determineProxy(target, httpContext)).isNull();
}
/**
 * Runs every default health check and aggregates the per-check details, the
 * overall health flag (all checks must pass) and the current server state
 * into a single response.
 */
public HealthCheckResponse checkHealth() {
    final Map<String, HealthCheckResponseDetail> details = DEFAULT_CHECKS.stream()
        .collect(Collectors.toMap(Check::getName, check -> check.check(this)));
    // Overall health requires every individual check to be healthy.
    final boolean healthy = details.values().stream()
        .allMatch(HealthCheckResponseDetail::getIsHealthy);
    final State state = commandRunner.checkServerState();
    return new HealthCheckResponse(healthy, details, Optional.of(state.toString()));
}
@Test public void shouldReturnUnhealthyIfCommandRunnerCheckFails() { // Given: when(commandRunner.checkCommandRunnerStatus()).thenReturn(CommandRunner.CommandRunnerStatus.DEGRADED); // When: final HealthCheckResponse response = healthCheckAgent.checkHealth(); // Then: assertThat(response.getDetails().get(COMMAND_RUNNER_CHECK_NAME).getIsHealthy(), is(false)); assertThat(response.getIsHealthy(), is(false)); }
/**
 * Executes the retryable with exponential backoff until it succeeds, fails
 * with an unretryable error, or the overall deadline (retryBackoffMaxMs from
 * the start) is exceeded.
 *
 * @param retryable the operation to attempt
 * @return the value produced by the first successful call
 * @throws ExecutionException the first recorded error once attempts are exhausted
 */
public R execute(Retryable<R> retryable) throws ExecutionException {
    // Absolute deadline for the whole retry sequence, not per attempt.
    long endMs = time.milliseconds() + retryBackoffMaxMs;
    int currAttempt = 0;
    ExecutionException error = null;
    while (time.milliseconds() <= endMs) {
        currAttempt++;
        try {
            return retryable.call();
        } catch (UnretryableException e) {
            // We've deemed this error to not be worth retrying, so collect the error and
            // fail immediately.
            if (error == null) error = new ExecutionException(e);
            break;
        } catch (ExecutionException e) {
            log.warn("Error during retry attempt {}", currAttempt, e);
            // Only the first error is kept for reporting; later ones are logged above.
            if (error == null) error = e;
            // Exponential backoff: retryBackoffMs * 2^(attempt-1), capped at the
            // time remaining before the deadline.
            long waitMs = retryBackoffMs * (long) Math.pow(2, currAttempt - 1);
            long diff = endMs - time.milliseconds();
            waitMs = Math.min(waitMs, diff);
            if (waitMs <= 0) break;
            String message = String.format("Attempt %d to make call resulted in an error; sleeping %d ms before retrying", currAttempt, waitMs);
            log.warn(message, e);
            time.sleep(waitMs);
        }
    }
    if (error == null)
        // Really shouldn't ever get to here, but...
        error = new ExecutionException(new IllegalStateException("Exhausted all retry attempts but no attempt returned value or encountered exception"));
    throw error;
}
/**
 * Retries that always fail must throw ExecutionException and consume exactly
 * the configured maximum wait time on the mock clock.
 */
@Test
public void testIOExceptionFailure() {
    // Four consecutive failing attempts.
    Exception[] attempts = new Exception[] {
        new IOException("pretend connect error"),
        new IOException("pretend timeout error"),
        new IOException("pretend read error"),
        new IOException("pretend another read error"),
    };
    long retryWaitMs = 1000;
    // Deadline chosen as 1000 + 2000 + 3999 so the final sleep is truncated
    // to the time remaining before the deadline — TODO confirm against Retry's backoff.
    long maxWaitMs = 1000 + 2000 + 3999;
    Retryable<String> call = createRetryable(attempts);
    Time time = new MockTime(0, 0, 0);
    assertEquals(0L, time.milliseconds());
    Retry<String> r = new Retry<>(time, retryWaitMs, maxWaitMs);
    assertThrows(ExecutionException.class, () -> r.execute(call));
    // The mock clock must have advanced by exactly the full wait budget.
    assertEquals(maxWaitMs, time.milliseconds());
}
/**
 * Adds the element by delegating to the asynchronous variant and blocking
 * until the result is available.
 *
 * @param e element to add
 * @return the result of the asynchronous add operation
 */
@Override
public boolean add(V e) {
    return get(addAsync(e));
}
@Test
public void testLong() {
    // Long values must round-trip through the Redis-backed set.
    Set<Long> longs = redisson.getSet("set");
    longs.add(1L);
    longs.add(2L);
    assertThat(longs).containsOnly(1L, 2L);
}
/**
 * Encodes an InetSocketAddress into the buffer at the given offset using the
 * layout documented below.
 *
 * @param encodingBuffer destination buffer
 * @param offset         absolute write position in the buffer
 * @param address        socket address to encode
 * @return the number of bytes written
 */
static int encodeSocketAddress(
    final UnsafeBuffer encodingBuffer, final int offset, final InetSocketAddress address) {
    int encodedLength = 0;
    /*
     * Stream of values:
     * - port (int) (unsigned short int)
     * - IP address length (int) (4 or 16)
     * - IP address (4 or 16 bytes)
     */
    encodingBuffer.putInt(offset + encodedLength, address.getPort(), LITTLE_ENDIAN);
    encodedLength += SIZE_OF_INT;

    // Raw address bytes: 4 for IPv4, 16 for IPv6.
    final byte[] addressBytes = address.getAddress().getAddress();
    encodingBuffer.putInt(offset + encodedLength, addressBytes.length, LITTLE_ENDIAN);
    encodedLength += SIZE_OF_INT;

    encodingBuffer.putBytes(offset + encodedLength, addressBytes);
    encodedLength += addressBytes.length;

    return encodedLength;
}
/**
 * Encoding a wildcard address with port 15015 at offset 4 must produce the
 * documented layout: port, address length, raw address bytes.
 */
@Test
void encodeSocketAddress() {
    final InetSocketAddress socketAddress = new InetSocketAddress(15015);
    final byte[] address = socketAddress.getAddress().getAddress();
    final int encodedLength = CommonEventEncoder.encodeSocketAddress(buffer, 4, socketAddress);
    // Total length = port int + length int + address bytes.
    assertEquals(SIZE_OF_INT * 2 + address.length, encodedLength);
    assertEquals(15015, buffer.getInt(4, LITTLE_ENDIAN));
    assertEquals(address.length, buffer.getInt(4 + SIZE_OF_INT, LITTLE_ENDIAN));
    final byte[] encodedAddress = new byte[address.length];
    buffer.getBytes(4 + SIZE_OF_INT * 2, encodedAddress);
    assertArrayEquals(address, encodedAddress);
}
public static long timeUnitToMill(String timeStrWithUnit) { // If `timeStrWithUnit` doesn't include time unit, // `Duration.parse` would fail to parse and throw Exception. if (timeStrWithUnit.endsWith("ms")) { return Long.parseLong(timeStrWithUnit.substring(0, timeStrWithUnit.length() - 2)); } return Duration.parse("PT" + timeStrWithUnit).toMillis(); }
@Test
void testTimeUnitToMill_WithoutUnit_2() {
    // A bare number carries no time unit, so Duration.parse must reject it.
    assertThrows(DateTimeParseException.class,
            () -> ZeppelinConfiguration.timeUnitToMill("0"));
}
/**
 * Loads classes present in the nested TFS jar via the jar class loader,
 * delegating everything else to the parent loader.
 */
@Override
public Class<?> loadClass(String name) throws ClassNotFoundException {
    if (!existsInTfsJar(name)) {
        return super.loadClass(name);
    }
    return jarClassLoader.loadClass(name);
}
@Test
public void canLoadClassFromNestedJar() throws Exception {
    // The class lives in the nested jar's default (empty) package.
    Class<?> loaded = nestedJarClassLoader.loadClass(NESTED_JAR_CLASS);
    assertThat(loaded).isNotNull().hasPackage("");
}
/**
 * Reads a single byte, refilling the internal buffer when it is exhausted.
 *
 * @return the next byte as an unsigned int (0-255), or -1 at end of stream
 * @throws IOException if the underlying stream fails
 */
@Override
public int read() throws IOException {
    if (pos >= count) {
        // Buffer exhausted: attempt a refill from the underlying stream.
        fill();
        if (pos >= count)
            // fill() produced no new data, so the stream has ended.
            return -1;
    }
    // Mask to return the byte as an unsigned value.
    return getBufIfOpen()[pos++] & 0xff;
}
/**
 * Reading past the end of a 2-byte source must yield -1 on the extra read.
 */
@Test
public void testEndOfFileForMethodRead() throws IOException {
    ByteBuffer inputBuf = ByteBuffer.allocate(2);
    // One more read than the source holds, to force an EOF result.
    int lengthGreaterThanInput = inputBuf.capacity() + 1;
    try (InputStream is = new ChunkedBytesStream(new ByteBufferInputStream(inputBuf), supplier, 10, false)) {
        int cnt = 0;
        while (cnt++ < lengthGreaterThanInput) {
            int res = is.read();
            // Only reads beyond the source capacity are asserted to hit EOF.
            if (cnt > inputBuf.capacity())
                assertEquals(-1, res, "end of file for read should be -1");
        }
    }
}
/**
 * Extracts the SAML extension attributes (intended audience list, service
 * UUID and optional IdP assertion) from the AuthnRequest's extensions and
 * copies them onto the authentication request. Unknown attributes are ignored.
 */
protected void parseAuthenticationWithExtensions(AuthenticationRequest authenticationRequest) {
    for (XMLObject xmlObject : authenticationRequest.getAuthnRequest().getExtensions().getUnknownXMLObjects(Attribute.DEFAULT_ELEMENT_NAME)) {
        Attribute attribute = (Attribute) xmlObject;
        switch (attribute.getName()) {
            case AttributeTypes.INTENDED_AUDIENCE -> {
                // The first audience value doubles as the service entity id.
                authenticationRequest.setServiceEntityId(getStringValue(attribute.getAttributeValues().get(0)));
                for (XMLObject entityId : attribute.getAttributeValues())
                    authenticationRequest.addIntendedAudience(getStringValue(entityId));
            }
            case AttributeTypes.SERVICE_UUID ->
                authenticationRequest.setServiceUuid(getStringValue(attribute.getAttributeValues().get(0)));
            case AttributeTypes.IDP_ASSERTION -> {
                // The assertion element is nested inside an XSAny wrapper.
                XSAny any = (XSAny) attribute.getAttributeValues().get(0);
                Assertion assertion = (Assertion) any.getUnknownXMLObjects(Assertion.DEFAULT_ELEMENT_NAME).get(0);
                authenticationRequest.setIdpAssertion(assertion);
            }
            default -> {
                // Intentionally ignore attributes this parser does not know about.
            }
        }
    }
}
@Test //entrance protected void parseAuthenticationWithExtensionsTest() throws SamlSessionException, DienstencatalogusException, SharedServiceClientException, UnsupportedEncodingException, SamlParseException, ComponentInitializationException, SamlValidationException, MessageDecodingException { String samlRequest = readXMLFile(authnRequestEntranceExtensionsFile); String decodeSAMLRequest = encodeAuthnRequest(samlRequest); httpServletRequestMock.setParameter("SAMLRequest", decodeSAMLRequest); AuthenticationRequest result = authenticationService.startAuthenticationProcess(httpServletRequestMock); assertNotNull(result); assertEquals(frontChannel.concat(ENTRANCE_REQUEST_AUTHENTICATION_URL), result.getAuthnRequest().getDestination()); }
/**
 * Sets the dialog's start directory to the given path when the file object is
 * a folder; plain files get no start directory (null).
 *
 * @throws KettleException if the folder check fails on the underlying VFS object
 */
void setStartDir( FileDialogOperation fileDialogOperation, FileObject fileObject, String filePath ) throws KettleException {
    try {
        fileDialogOperation.setStartDir( fileObject.isFolder() ? filePath : null );
    } catch ( FileSystemException fse ) {
        // Fix: the message previously said "isFile", but the code checks isFolder().
        throw new KettleException( "failed to check isFolder in setStartDir()", fse );
    }
}
/**
 * A plain file must yield no start directory; a folder's path must become the
 * start directory verbatim.
 */
@Test
public void testSetStartDir() throws Exception {
    // TEST : is file
    FileDialogOperation fileDialogOperation1 = createFileDialogOperation();
    FileObject fileObject1 = mock( FileObject.class );
    String absoluteFilePath = "/home/someuser/somedir";
    when( fileObject1.isFolder() ).thenReturn( false );
    testInstance.setStartDir( fileDialogOperation1, fileObject1, absoluteFilePath );
    // Files must not set a start directory.
    assertNull( fileDialogOperation1.getStartDir() );

    // TEST : is not file
    FileDialogOperation fileDialogOperation2 = createFileDialogOperation();
    FileObject fileObject2 = mock( FileObject.class );
    when( fileObject2.isFolder() ).thenReturn( true );
    when( fileObject2.toString() ).thenReturn( absoluteFilePath );
    testInstance.setStartDir( fileDialogOperation2, fileObject2, absoluteFilePath );
    // Folders become the start directory unchanged.
    assertEquals( absoluteFilePath, fileDialogOperation2.getStartDir() );
}
/**
 * Validates the GitHub App configuration, failing fast when no valid app id
 * is configured; otherwise delegates to the configuration-based check.
 */
public ConfigCheckResult checkConfig() {
    Optional<Long> appId = getAppId();
    if (!appId.isPresent()) {
        // No usable app id: report the failure without attempting further checks.
        return failedApplicationStatus(INVALID_APP_ID_STATUS);
    }
    GithubAppConfiguration configuration = new GithubAppConfiguration(
        appId.get(), gitHubSettings.privateKey(), gitHubSettings.apiURLOrDefault());
    return checkConfig(configuration);
}
@Test
public void checkConfig_whenAppIdIsNull_shouldReturnFailedAppCheck() {
    when(gitHubSettings.appId()).thenReturn(null);

    ConfigCheckResult checkResult = configValidator.checkConfig();

    // Both provisioning modes must report the invalid-app-id failure,
    // and no installations may be listed.
    ConfigStatus expectedFailure = ConfigStatus.failed(INVALID_APP_ID_STATUS);
    assertThat(checkResult.application().autoProvisioning()).isEqualTo(expectedFailure);
    assertThat(checkResult.application().jit()).isEqualTo(expectedFailure);
    assertThat(checkResult.installations()).isEmpty();
}
/**
 * Builds execution units from the rewrite result, dispatching on whether the
 * SQL was rewritten generically or per route.
 */
public static Collection<ExecutionUnit> build(final ShardingSphereDatabase database,
                                              final SQLRewriteResult sqlRewriteResult,
                                              final SQLStatementContext sqlStatementContext) {
    if (sqlRewriteResult instanceof GenericSQLRewriteResult) {
        return build(database, (GenericSQLRewriteResult) sqlRewriteResult, sqlStatementContext);
    }
    return build((RouteSQLRewriteResult) sqlRewriteResult);
}
/**
 * Building from a route rewrite result over a table with no primary-key
 * metadata must still yield exactly one execution unit for the actual data source.
 */
@Test
void assertBuildRouteSQLRewriteResultWithEmptyPrimaryKeyMeta() {
    // Single route unit mapping the logical name to the actual data source/table.
    RouteUnit routeUnit2 = new RouteUnit(new RouteMapper("logicName2", "actualName2"), Collections.singletonList(new RouteMapper("logicName2", "actualName2")));
    SQLRewriteUnit sqlRewriteUnit2 = new SQLRewriteUnit("sql2", Collections.singletonList("parameter2"));
    Map<RouteUnit, SQLRewriteUnit> sqlRewriteUnits = new HashMap<>(2, 1F);
    sqlRewriteUnits.put(routeUnit2, sqlRewriteUnit2);
    // Database fixture deliberately built without primary-key metadata.
    ResourceMetaData resourceMetaData = new ResourceMetaData(Collections.emptyMap());
    RuleMetaData ruleMetaData = new RuleMetaData(Collections.emptyList());
    ShardingSphereDatabase database = new ShardingSphereDatabase(DefaultDatabase.LOGIC_NAME, mock(DatabaseType.class), resourceMetaData, ruleMetaData, buildDatabaseWithoutPrimaryKey());
    Collection<ExecutionUnit> actual = ExecutionContextBuilder.build(database, new RouteSQLRewriteResult(sqlRewriteUnits), mock(SQLStatementContext.class));
    ExecutionUnit expectedUnit2 = new ExecutionUnit("actualName2", new SQLUnit("sql2", Collections.singletonList("parameter2")));
    assertThat(actual, is(Collections.singleton(expectedUnit2)));
}
/**
 * Replays every message from the given mapped files into the compacting log,
 * delegating the keep/drop decision per message to checkAndPutMessage with
 * the supplied offset map, then appends the end-of-log marker.
 *
 * @throws DigestException if message digesting fails during the copy
 */
protected void compaction(List<MappedFile> mappedFileList, OffsetMap offsetMap) throws DigestException {
    compacting = new TopicPartitionLog(this, COMPACTING_SUB_FOLDER);
    for (MappedFile mappedFile : mappedFileList) {
        Iterator<SelectMappedBufferResult> iterator = mappedFile.iterator(0);
        while (iterator.hasNext()) {
            SelectMappedBufferResult smb = null;
            try {
                smb = iterator.next();
                MessageExt msgExt = MessageDecoder.decode(smb.getByteBuffer(), true, true);
                if (msgExt == null) {
                    // A null decode marks the end of valid data in this file.
                    // file end
                    break;
                } else {
                    checkAndPutMessage(smb, msgExt, offsetMap, compacting);
                }
            } finally {
                // Always release the buffer to avoid leaking mapped-file references.
                if (smb != null) {
                    smb.release();
                }
            }
        }
    }
    putEndMessage(compacting.getLog());
}
/**
 * Drives a real compaction pass over a mocked mapped file that serves 1024
 * generated messages, capturing the retained messages instead of writing them,
 * and checks the retained offset range against the offset map.
 */
@Test
public void testCompaction() throws DigestException, NoSuchAlgorithmException, IllegalAccessException {
    // Iterator that serves messages while queueOffset < 1024 (buildMessage advances it).
    Iterator<SelectMappedBufferResult> iterator = mock(Iterator.class);
    SelectMappedBufferResult smb = mock(SelectMappedBufferResult.class);
    when(iterator.hasNext()).thenAnswer((Answer<Boolean>)invocationOnMock -> queueOffset < 1024);
    when(iterator.next()).thenAnswer((Answer<SelectMappedBufferResult>)invocation -> new SelectMappedBufferResult(0, buildMessage(), 0, null));
    MappedFile mf = mock(MappedFile.class);
    List<MappedFile> mappedFileList = Lists.newArrayList(mf);
    doReturn(iterator).when(mf).iterator(0);
    // Store / commit-log stubs required by CompactionLog internals.
    MessageStore messageStore = mock(DefaultMessageStore.class);
    CommitLog commitLog = mock(CommitLog.class);
    when(messageStore.getCommitLog()).thenReturn(commitLog);
    when(commitLog.getCommitLogSize()).thenReturn(1024 * 1024);
    CompactionLog clog = mock(CompactionLog.class);
    FieldUtils.writeField(clog, "defaultMessageStore", messageStore, true);
    doCallRealMethod().when(clog).getOffsetMap(any());
    FieldUtils.writeField(clog, "positionMgr", positionMgr, true);
    // First pass: build the offset map over all 1024 offsets.
    queueOffset = 0;
    CompactionLog.OffsetMap offsetMap = clog.getOffsetMap(mappedFileList);
    assertEquals(1023, offsetMap.getLastOffset());
    // Second pass: run the real compaction path, but capture messages via
    // the stubbed asyncPutMessage instead of writing to disk.
    doCallRealMethod().when(clog).compaction(any(List.class), any(CompactionLog.OffsetMap.class));
    doNothing().when(clog).putEndMessage(any(MappedFileQueue.class));
    doCallRealMethod().when(clog).checkAndPutMessage(any(SelectMappedBufferResult.class), any(MessageExt.class), any(CompactionLog.OffsetMap.class), any(CompactionLog.TopicPartitionLog.class));
    doCallRealMethod().when(clog).shouldRetainMsg(any(MessageExt.class), any(CompactionLog.OffsetMap.class));
    List<MessageExt> compactResult = Lists.newArrayList();
    when(clog.asyncPutMessage(any(ByteBuffer.class), any(MessageExt.class), any(CompactionLog.TopicPartitionLog.class)))
        .thenAnswer((Answer<CompletableFuture<PutMessageResult>>)invocation -> {
            compactResult.add(invocation.getArgument(1));
            return CompletableFuture.completedFuture(new PutMessageResult(PutMessageStatus.PUT_OK, new AppendMessageResult(AppendMessageStatus.PUT_OK)));
        });
    queueOffset = 0;
    clog.compaction(mappedFileList, offsetMap);
    // One retained message per key; retained queue offsets span [1014, 1023].
    assertEquals(keyCount, compactResult.size());
    assertEquals(1014, compactResult.stream().mapToLong(MessageExt::getQueueOffset).min().orElse(1024));
    assertEquals(1023, compactResult.stream().mapToLong(MessageExt::getQueueOffset).max().orElse(0));
}
@Override public void disablePlugin(String pluginId) { if (isPluginDisabled(pluginId)) { // do nothing return; } if (Files.exists(getEnabledFilePath())) { enabledPlugins.remove(pluginId); try { FileUtils.writeLines(enabledPlugins, getEnabledFilePath()); } catch (IOException e) { throw new PluginRuntimeException(e); } } else { disabledPlugins.add(pluginId); try { FileUtils.writeLines(disabledPlugins, getDisabledFilePath()); } catch (IOException e) { throw new PluginRuntimeException(e); } } }
@Test
public void testDisablePlugin() throws Exception {
    // Both status files exist up front.
    createEnabledFile();
    createDisabledFile();
    PluginStatusProvider provider = new DefaultPluginStatusProvider(pluginsPath);

    provider.disablePlugin("plugin-1");

    // plugin-1 was just disabled; the other plugins must read as disabled too
    // (per the fixture files created above).
    assertTrue(provider.isPluginDisabled("plugin-1"));
    assertTrue(provider.isPluginDisabled("plugin-2"));
    assertTrue(provider.isPluginDisabled("plugin-3"));
}
/**
 * Declares the options a user must supply to create this MySQL source:
 * connection coordinates plus the database and table to capture.
 *
 * @return the set of mandatory configuration options
 */
@Override
public Set<ConfigOption<?>> requiredOptions() {
    Set<ConfigOption<?>> options = new HashSet<>();
    options.add(MySqlSourceOptions.HOSTNAME);
    options.add(MySqlSourceOptions.USERNAME);
    options.add(MySqlSourceOptions.PASSWORD);
    options.add(MySqlSourceOptions.DATABASE_NAME);
    options.add(MySqlSourceOptions.TABLE_NAME);
    return options;
}
@Test public void testValidation() { // validate illegal port try { Map<String, String> properties = getAllOptions(); properties.put("port", "123b"); createTableSource(properties); fail("exception expected"); } catch (Throwable t) { assertTrue( ExceptionUtils.findThrowableWithMessage( t, "Could not parse value '123b' for key 'port'.") .isPresent()); } // validate illegal server id try { Map<String, String> properties = getAllOptions(); properties.put("server-id", "123b"); createTableSource(properties); fail("exception expected"); } catch (Throwable t) { assertTrue( ExceptionUtils.findThrowableWithMessage( t, "The value of option 'server-id' is invalid: '123b'") .isPresent()); assertTrue( ExceptionUtils.findThrowableWithMessage( t, "The server id 123b is not a valid numeric.") .isPresent()); } // validate illegal connect.timeout try { Map<String, String> properties = getAllOptions(); properties.put("scan.incremental.snapshot.enabled", "true"); properties.put("connect.timeout", "240ms"); createTableSource(properties); fail("exception expected"); } catch (Throwable t) { assertTrue( ExceptionUtils.findThrowableWithMessage( t, "The value of option 'connect.timeout' cannot be less than PT0.25S, but actual is PT0.24S") .isPresent()); } // validate illegal split size try { Map<String, String> properties = getAllOptions(); properties.put("scan.incremental.snapshot.enabled", "true"); properties.put("scan.incremental.snapshot.chunk.size", "1"); createTableSource(properties); fail("exception expected"); } catch (Throwable t) { assertThat( t, containsMessage( "The value of option 'scan.incremental.snapshot.chunk.size' must larger than 1, but is 1")); } // validate illegal fetch size try { Map<String, String> properties = getAllOptions(); properties.put("scan.incremental.snapshot.enabled", "true"); properties.put("scan.snapshot.fetch.size", "1"); createTableSource(properties); fail("exception expected"); } catch (Throwable t) { assertThat( t, containsMessage( "The value of 
option 'scan.snapshot.fetch.size' must larger than 1, but is 1")); } // validate illegal split meta group size try { Map<String, String> properties = getAllOptions(); properties.put("scan.incremental.snapshot.enabled", "true"); properties.put("chunk-meta.group.size", "1"); createTableSource(properties); fail("exception expected"); } catch (Throwable t) { assertThat( t, containsMessage( "The value of option 'chunk-meta.group.size' must larger than 1, but is 1")); } // validate illegal split meta group size try { Map<String, String> properties = getAllOptions(); properties.put("scan.incremental.snapshot.enabled", "true"); properties.put("split-key.even-distribution.factor.upper-bound", "0.8"); createTableSource(properties); fail("exception expected"); } catch (Throwable t) { assertThat( t, containsMessage( "The value of option 'chunk-key.even-distribution.factor.upper-bound' must larger than or equals 1.0, but is 0.8")); } // validate illegal connection pool size try { Map<String, String> properties = getAllOptions(); properties.put("scan.incremental.snapshot.enabled", "true"); properties.put("connection.pool.size", "1"); createTableSource(properties); fail("exception expected"); } catch (Throwable t) { assertThat( t, containsMessage( "The value of option 'connection.pool.size' must larger than 1, but is 1")); } // validate illegal connect max retry times try { Map<String, String> properties = getAllOptions(); properties.put("scan.incremental.snapshot.enabled", "true"); properties.put("connect.max-retries", "0"); createTableSource(properties); fail("exception expected"); } catch (Throwable t) { assertThat( t, containsMessage( "The value of option 'connect.max-retries' must larger than 0, but is 0")); } // validate missing required Factory factory = new MySqlTableSourceFactory(); for (ConfigOption<?> requiredOption : factory.requiredOptions()) { Map<String, String> properties = getAllOptions(); properties.remove(requiredOption.key()); try { 
createTableSource(properties); fail("exception expected"); } catch (Throwable t) { assertTrue( ExceptionUtils.findThrowableWithMessage( t, "Missing required options are:\n\n" + requiredOption.key()) .isPresent()); } } // validate unsupported option try { Map<String, String> properties = getAllOptions(); properties.put("unknown", "abc"); createTableSource(properties); fail("exception expected"); } catch (Throwable t) { assertTrue( ExceptionUtils.findThrowableWithMessage(t, "Unsupported options:\n\nunknown") .isPresent()); } // validate unsupported option try { Map<String, String> properties = getAllOptions(); properties.put("scan.startup.mode", "abc"); createTableSource(properties); fail("exception expected"); } catch (Throwable t) { String msg = "Invalid value for option 'scan.startup.mode'. Supported values are " + "[initial, snapshot, latest-offset, earliest-offset, specific-offset, timestamp], " + "but was: abc"; assertTrue(ExceptionUtils.findThrowableWithMessage(t, msg).isPresent()); } // validate invalid database-name try { Map<String, String> properties = getAllOptions(); properties.put("database-name", "*_invalid_db"); } catch (Throwable t) { String msg = String.format( "The database-name '%s' is not a valid regular expression", "*_invalid_db"); assertTrue(ExceptionUtils.findThrowableWithMessage(t, msg).isPresent()); } // validate invalid table-name try { Map<String, String> properties = getAllOptions(); properties.put("table-name", "*_invalid_table"); } catch (Throwable t) { String msg = String.format( "The table-name '%s' is not a valid regular expression", "*_invalid_table"); assertTrue(ExceptionUtils.findThrowableWithMessage(t, msg).isPresent()); } }
/**
 * Deep analysis pass for an OAL metrics definition: resolves the metrics
 * class for the aggregation function, builds filter expressions, locates the
 * {@code @Entrance} method and maps its annotated parameters to argument
 * expressions, then collects persistent columns and source columns.
 *
 * @param result the partially-populated analysis result; mutated in place
 * @return the same result instance, fully populated
 * @throws IllegalArgumentException if no entrance method is found, or a
 *         parameter lacks an annotation or carries an unexpected one
 */
public AnalysisResult analysis(AnalysisResult result) {
    // 1. Set sub package name by source.metrics
    Class<? extends Metrics> metricsClass = MetricsHolder.find(result.getAggregationFuncStmt().getAggregationFunctionName());
    String metricsClassSimpleName = metricsClass.getSimpleName();
    result.setMetricsClassName(metricsClassSimpleName);
    // Optional for filter
    List<ConditionExpression> expressions = result.getFilters().getFilterExpressionsParserResult();
    if (expressions != null && expressions.size() > 0) {
        for (ConditionExpression expression : expressions) {
            final FilterMatchers.MatcherInfo matcherInfo = FilterMatchers.INSTANCE.find(
                expression.getExpressionType());
            // Boolean attributes use the isXxx() accessor convention; others use getXxx().
            final String getter = matcherInfo.isBooleanType()
                ? ClassMethodUtil.toIsMethod(expression.getAttributes())
                : ClassMethodUtil.toGetMethod(expression.getAttributes());
            final Expression filterExpression = new Expression();
            filterExpression.setExpressionObject(matcherInfo.getMatcher().getName());
            filterExpression.setLeft(TypeCastUtil.withCast(expression.getCastType(), "source." + getter));
            filterExpression.setRight(expression.getValue());
            result.getFilters().addFilterExpressions(filterExpression);
        }
    }
    // 3. Find Entrance method of this metrics
    Class<?> c = metricsClass;
    Method entranceMethod = null;
    // Walk the class hierarchy; the labelled break exits both loops on the first hit.
    SearchEntrance:
    while (!c.equals(Object.class)) {
        for (Method method : c.getMethods()) {
            Entrance annotation = method.getAnnotation(Entrance.class);
            if (annotation != null) {
                entranceMethod = method;
                break SearchEntrance;
            }
        }
        c = c.getSuperclass();
    }
    if (entranceMethod == null) {
        throw new IllegalArgumentException("Can't find Entrance method in class: " + metricsClass.getName());
    }
    EntryMethod entryMethod = new EntryMethod();
    result.setEntryMethod(entryMethod);
    entryMethod.setMethodName(entranceMethod.getName());
    // 4. Use parameter's annotation of entrance method to generate aggregation entrance.
    for (Parameter parameter : entranceMethod.getParameters()) {
        Class<?> parameterType = parameter.getType();
        Annotation[] parameterAnnotations = parameter.getAnnotations();
        if (parameterAnnotations == null || parameterAnnotations.length == 0) {
            throw new IllegalArgumentException(
                "Entrance method:" + entranceMethod + " doesn't include the annotation.");
        }
        // Only the first annotation on each parameter is considered.
        Annotation annotation = parameterAnnotations[0];
        if (annotation instanceof SourceFrom) {
            // Argument value comes from the source attribute, with an optional cast.
            entryMethod.addArg(
                parameterType,
                TypeCastUtil.withCast(
                    result.getFrom().getSourceCastType(),
                    "source." + ClassMethodUtil.toGetMethod(result.getFrom().getSourceAttribute())
                )
            );
        } else if (annotation instanceof ConstOne) {
            // Constant literal "1" argument.
            entryMethod.addArg(parameterType, "1");
        } else if (annotation instanceof org.apache.skywalking.oap.server.core.analysis.metrics.annotation.Expression) {
            if (isNull(result.getAggregationFuncStmt().getFuncConditionExpressions()) || result.getAggregationFuncStmt().getFuncConditionExpressions().isEmpty()) {
                throw new IllegalArgumentException(
                    "Entrance method:" + entranceMethod + " argument can't find funcParamExpression.");
            } else {
                // Consume the next condition expression for this parameter.
                ConditionExpression expression = result.getAggregationFuncStmt().getNextFuncConditionExpression();
                final FilterMatchers.MatcherInfo matcherInfo = FilterMatchers.INSTANCE.find(
                    expression.getExpressionType());
                final String getter = matcherInfo.isBooleanType()
                    ? ClassMethodUtil.toIsMethod(expression.getAttributes())
                    : ClassMethodUtil.toGetMethod(expression.getAttributes());
                final Expression argExpression = new Expression();
                argExpression.setRight(expression.getValue());
                argExpression.setExpressionObject(matcherInfo.getMatcher().getName());
                argExpression.setLeft(TypeCastUtil.withCast(expression.getCastType(), "source." + getter));
                entryMethod.addArg(argExpression);
            }
        } else if (annotation instanceof Arg) {
            // Consume the next literal function argument.
            entryMethod.addArg(parameterType, result.getAggregationFuncStmt().getNextFuncArg());
        } else {
            throw new IllegalArgumentException(
                "Entrance method:" + entranceMethod + " doesn't the expected annotation.");
        }
    }
    // 5. Get all column declared in MetricsHolder class.
    c = metricsClass;
    while (!c.equals(Object.class)) {
        for (Field field : c.getDeclaredFields()) {
            Column column = field.getAnnotation(Column.class);
            if (column != null) {
                result.addPersistentField(field.getName(), column.name(), field.getType());
            }
        }
        c = c.getSuperclass();
    }
    // 6. Based on Source, generate default columns
    List<SourceColumn> columns = SourceColumnsFactory.getColumns(result.getFrom().getSourceName());
    result.setFieldsFromSource(columns);
    result.generateSerializeFields();
    return result;
}
/**
 * A minimal Service/latency/longAvg OAL definition must resolve to the
 * "combine" entrance with cast argument expressions, one source column and
 * four persistent fields.
 */
@Test
public void testServiceAnalysis() {
    // Minimal analysis input: Service source, latency attribute, longAvg aggregation.
    AnalysisResult result = new AnalysisResult();
    result.getFrom().setSourceName("Service");
    result.getFrom().getSourceAttribute().add("latency");
    result.setMetricsName("ServiceAvg");
    result.getAggregationFuncStmt().setAggregationFunctionName("longAvg");
    DeepAnalysis analysis = new DeepAnalysis();
    result = analysis.analysis(result);
    // Entrance method and its generated argument expressions.
    EntryMethod method = result.getEntryMethod();
    Assertions.assertEquals("combine", method.getMethodName());
    Assertions.assertEquals("(long)(source.getLatency())", method.getArgsExpressions().get(0));
    Assertions.assertEquals("(long)(1)", method.getArgsExpressions().get(1));
    List<SourceColumn> source = result.getFieldsFromSource();
    Assertions.assertEquals(1, source.size());
    List<DataColumn> persistentFields = result.getPersistentFields();
    Assertions.assertEquals(4, persistentFields.size());
}
/**
 * Returns one page of destination filter rules for the given stream,
 * restricted by the parsed query string; entries are filtered per-id through
 * the caller's permission predicate.
 */
public PaginatedList<StreamDestinationFilterRuleDTO> findPaginatedForStream(
        String streamId,
        String queryString,
        Bson sort,
        int perPage,
        int page,
        Predicate<String> permissionSelector
) {
    final var parsedQuery = parseQuery(queryString);
    // Scope the query to the requested stream before applying the user query.
    final var streamScopedFilter = and(eq(FIELD_STREAM_ID, streamId), parsedQuery);
    return paginationHelper.filter(streamScopedFilter)
            .sort(sort)
            .perPage(perPage)
            .page(page, dto -> permissionSelector.test(dto.id()));
}
@Test
@MongoDBFixtures("StreamDestinationFilterServiceTest-2024-07-01-1.json")
void findPaginatedForStreamWithQuery() {
    // Querying by exact title must return only the matching filter.
    final var page = service.findPaginatedForStream(
            "54e3deadbeefdeadbeef1000",
            "title:\"Test Filter 2\"",
            Sorts.ascending("title"),
            10,
            1,
            id -> true);
    assertThat(page.delegate()).hasSize(1);
    assertThat(page.delegate().get(0).title()).isEqualTo("Test Filter 2");
}
@Override
public NSImage fileIcon(final Local file, final Integer size) {
    // Missing files get the generic placeholder icon.
    if(!file.exists()) {
        return this.iconNamed("notfound.tiff", size);
    }
    // Serve from the cache when the icon was loaded before.
    final NSImage cached = this.load(file.getAbsolute(), size);
    if(null != cached) {
        return cached;
    }
    // Cache miss: ask the workspace for the file's icon, scale and cache it.
    return this.cache(file.getName(),
            this.convert(file.getName(), workspace.iconForFile(file.getAbsolute()), size), size);
}
@Test
public void testCacheTiff() {
    final NSImageIconCache cache = new NSImageIconCache();
    // Loading the same file at different sizes must yield distinct, correctly-sized images.
    final NSImage icon32 = cache.fileIcon(new FinderLocal("../../img/ftp.tiff"), 32);
    assertNotNull(icon32);
    assertEquals(32, icon32.size().width.intValue());
    assertEquals(32, icon32.size().height.intValue());
    final NSImage icon16 = cache.fileIcon(new FinderLocal("../../img/ftp.tiff"), 16);
    assertNotNull(icon16);
    assertNotSame(icon16, icon32);
    assertEquals(16, icon16.size().width.intValue());
    assertEquals(16, icon16.size().height.intValue());
    final NSImage icon64 = cache.fileIcon(new FinderLocal("../../img/ftp.tiff"), 64);
    assertNotNull(icon64);
    assertNotSame(icon16, icon64);
    assertNotSame(icon32, icon64);
    assertEquals(64, icon64.size().width.intValue());
    assertEquals(64, icon64.size().height.intValue());
    // Each size is registered in the named-image cache under "<name> (<size>px)".
    assertNotNull(NSImage.imageNamed("ftp.tiff (16px)"));
    assertNotNull(NSImage.imageNamed("ftp.tiff (32px)"));
    assertNotNull(NSImage.imageNamed("ftp.tiff (64px)"));
}
/**
 * Stops a running loopback session on the identified MEP by delegating to the
 * device's {@code CfmMepProgrammable} behaviour.
 *
 * @throws CfmConfigException if the MEP is not present in the store
 */
@Override
public void abortLoopback(MdId mdName, MaIdShort maName, MepId mepId)
        throws CfmConfigException {
    final MepKeyId key = new MepKeyId(mdName, maName, mepId);
    final Optional<Mep> search = mepStore.getMep(key);
    if (!search.isPresent()) {
        throw new CfmConfigException("Mep " + mdName + "/" + maName + "/"
                + mepId + " not found when calling Aborting Loopback");
    }
    final Mep mep = search.get();
    log.debug("Aborting Loopback on MEP {} on Device {}", key, mep.deviceId());
    final CfmMepProgrammable programmable = deviceService.getDevice(mep.deviceId())
            .as(CfmMepProgrammable.class);
    programmable.abortLoopback(mdName, maName, mepId);
}
@Test
public void testAbortLoopback() {
    // Wire up mock services so MEP lookup and device/driver resolution succeed.
    expect(mdService.getMaintenanceAssociation(MDNAME1, MANAME1))
            .andReturn(Optional.ofNullable(ma1))
            .anyTimes();
    replay(mdService);
    expect(deviceService.getDevice(DEVICE_ID1)).andReturn(device1).anyTimes();
    replay(deviceService);
    expect(driverService.getDriver(DEVICE_ID1)).andReturn(testDriver).anyTimes();
    replay(driverService);
    // Happy path: no exception expected when the MEP exists and the device is programmable.
    try {
        mepService.abortLoopback(MDNAME1, MANAME1, MEPID1);
    } catch (CfmConfigException e) {
        fail("Not expecting an exception");
    }
}
public static String addUUID(String pathStr, String uuid) { Preconditions.checkArgument(StringUtils.isNotEmpty(pathStr), "empty path"); Preconditions.checkArgument(StringUtils.isNotEmpty(uuid), "empty uuid"); // In some cases, Spark will add the UUID to the filename itself. if (pathStr.contains(uuid)) { return pathStr; } int dot; // location of the first '.' in the file name int lastSlash = pathStr.lastIndexOf('/'); if (lastSlash >= 0) { Preconditions.checkState(lastSlash + 1 < pathStr.length(), "Bad path: " + pathStr); dot = pathStr.indexOf('.', lastSlash); } else { dot = pathStr.indexOf('.'); } if (dot >= 0) { return pathStr.substring(0, dot) + "-" + uuid + pathStr.substring(dot); } else { return pathStr + "-" + uuid; } }
@Test
public void testUUIDDir() throws Throwable {
    // A path ending in '/' has no file name to insert the UUID into; expect failure.
    intercept(IllegalStateException.class, () ->
        addUUID("/dest/", "UUID"));
}
public static String getIp() {
    // Memoized: once an address has been cached, re-enumeration is skipped.
    if (null != cachedIpAddress) {
        return cachedIpAddress;
    }
    Enumeration<NetworkInterface> netInterfaces;
    try {
        netInterfaces = NetworkInterface.getNetworkInterfaces();
    } catch (final SocketException ex) {
        // Interface enumeration failed entirely; sentinel value, not cached.
        return "UnknownIP";
    }
    String localIpAddress = null;
    while (netInterfaces.hasMoreElements()) {
        NetworkInterface netInterface = netInterfaces.nextElement();
        Enumeration<InetAddress> ipAddresses = netInterface.getInetAddresses();
        while (ipAddresses.hasMoreElements()) {
            InetAddress ipAddress = ipAddresses.nextElement();
            // Prefer the first public address found; remember the latest local one as fallback.
            if (isPublicIpAddress(ipAddress)) {
                String publicIpAddress = ipAddress.getHostAddress();
                cachedIpAddress = publicIpAddress;
                return publicIpAddress;
            }
            if (isLocalIpAddress(ipAddress)) {
                localIpAddress = ipAddress.getHostAddress();
            }
        }
    }
    // NOTE(review): may return null when no public/local address matched; in that
    // case the cache stays null and the next call re-enumerates — confirm callers
    // tolerate a null result.
    cachedIpAddress = localIpAddress;
    return localIpAddress;
}
@Test
void assertGetIp() {
    // The resolver should always produce some address string on a networked host.
    assertNotNull(IpUtils.getIp());
}
public static Predicate parse(String expression) {
    // Infix-to-evaluation parser (shunting-yard style): operands are fully
    // qualified Predicate class names, operators come from OPERATORS with
    // precedences in OPERATOR_PRECEDENCE; '(' / ')' group sub-expressions.
    final Stack<Predicate> predicateStack = new Stack<>();
    final Stack<Character> operatorStack = new Stack<>();

    // Whitespace is stripped up front; the tokenizer returns operators as tokens too.
    final String trimmedExpression = TRIMMER_PATTERN.matcher(expression).replaceAll("");
    final StringTokenizer tokenizer = new StringTokenizer(trimmedExpression, OPERATORS, true);
    boolean isTokenMode = true;

    while (true) {
        final Character operator;
        final String token;

        if (isTokenMode) {
            if (tokenizer.hasMoreTokens()) {
                token = tokenizer.nextToken();
            } else {
                break;
            }
            if (OPERATORS.contains(token)) {
                operator = token.charAt(0);
            } else {
                operator = null;
            }
        } else {
            // Re-process the operator popped during a precedence-forced evaluation.
            operator = operatorStack.pop();
            token = null;
        }
        isTokenMode = true;

        if (operator == null) {
            // Operand: instantiate the named Predicate implementation reflectively.
            try {
                predicateStack.push(Class.forName(token).asSubclass(Predicate.class).getDeclaredConstructor().newInstance());
            } catch (ClassCastException e) {
                throw new RuntimeException(token + " must implement " + Predicate.class.getName(), e);
            } catch (Exception e) {
                throw new RuntimeException(e);
            }
        } else {
            if (operatorStack.empty() || operator == '(') {
                operatorStack.push(operator);
            } else if (operator == ')') {
                // Close group: evaluate until the matching '('.
                while (operatorStack.peek() != '(') {
                    evaluate(predicateStack, operatorStack);
                }
                operatorStack.pop();
            } else {
                // Lower-precedence operator forces evaluation of the stack top first.
                if (OPERATOR_PRECEDENCE.get(operator) < OPERATOR_PRECEDENCE.get(operatorStack.peek())) {
                    evaluate(predicateStack, operatorStack);
                    isTokenMode = false;
                }
                operatorStack.push(operator);
            }
        }
    }

    // Drain remaining operators; malformed input can surface as EmptyStackException here.
    while (!operatorStack.empty()) {
        evaluate(predicateStack, operatorStack);
    }

    if (predicateStack.size() > 1) {
        throw new RuntimeException("Invalid logical expression");
    }
    return predicateStack.pop();
}
@Test(expectedExceptions = EmptyStackException.class)
public void testNotMissingOperandOr() {
    // '!' has no operand before '|', so evaluation pops from an empty predicate stack.
    PredicateExpressionParser.parse("! | com.linkedin.data.it.AlwaysFalsePredicate)");
}
public static String[] parseKey(String groupKey) {
    // Splits a serialized group key "dataId+group[+tenant]" into its parts.
    // '+' separates segments; a literal '+' or '%' inside a segment is
    // percent-encoded as "%2B" / "%25" and decoded here.
    StringBuilder sb = new StringBuilder();
    String dataId = null;
    String group = null;
    String tenant = null;

    for (int i = 0; i < groupKey.length(); ++i) {
        char c = groupKey.charAt(i);
        if (PLUS == c) {
            if (null == dataId) {
                dataId = sb.toString();
                sb.setLength(0);
            } else if (null == group) {
                group = sb.toString();
                sb.setLength(0);
            } else {
                // A third '+' means more than three segments: malformed key.
                throw new IllegalArgumentException("invalid groupkey:" + groupKey);
            }
        } else if (PERCENT == c) {
            // Decode a two-character escape. NOTE(review): a truncated escape at
            // end-of-input throws StringIndexOutOfBoundsException from charAt.
            char next = groupKey.charAt(++i);
            char nextnext = groupKey.charAt(++i);
            if (TWO == next && B == nextnext) {
                sb.append(PLUS);
            } else if (TWO == next && FIVE == nextnext) {
                sb.append(PERCENT);
            } else {
                throw new IllegalArgumentException("invalid groupkey:" + groupKey);
            }
        } else {
            sb.append(c);
        }
    }

    // Remaining buffer is the group when only one '+' was seen, otherwise the tenant.
    if (group == null) {
        group = sb.toString();
    } else {
        tenant = sb.toString();
    }

    if (StringUtils.isBlank(dataId)) {
        throw new IllegalArgumentException("invalid dataId");
    }
    if (StringUtils.isBlank(group)) {
        throw new IllegalArgumentException("invalid group");
    }
    // tenant may be null (two-segment keys).
    return new String[] {dataId, group, tenant};
}
@Test
void testParseKeyIllegalArgumentException3() {
    // More than two '+' separators is not a valid dataId+group+tenant key.
    assertThrows(IllegalArgumentException.class, () -> {
        GroupKey.parseKey("f+o+o+bar");
    });
}
/**
 * Rewrites a phrase-style {@link BooleanQuery} whose clauses mix terms with
 * wildcard/prefix queries into an ordered, zero-slop {@link SpanNearQuery}.
 * Any other query shape is returned unchanged.
 */
public static Query convertToMultiTermSpanQuery(Query query) {
    if (!(query instanceof BooleanQuery)) {
        return query;
    }
    LOGGER.debug("Perform rewriting for the phrase query {}.", query);
    ArrayList<SpanQuery> spanQueries = new ArrayList<>();
    boolean hasMultiTermClause = false;
    for (BooleanClause clause : ((BooleanQuery) query).clauses()) {
        Query clauseQuery = clause.getQuery();
        if (clauseQuery instanceof WildcardQuery || clauseQuery instanceof PrefixQuery) {
            hasMultiTermClause = true;
            spanQueries.add(new SpanMultiTermQueryWrapper<>((AutomatonQuery) clauseQuery));
        } else if (clauseQuery instanceof TermQuery) {
            spanQueries.add(new SpanTermQuery(((TermQuery) clauseQuery).getTerm()));
        } else {
            // Unsupported clause type: bail out and keep the original query.
            LOGGER.info("query can not be handled currently {} ", clauseQuery);
            return query;
        }
    }
    // Nothing to rewrite when every clause is a plain term.
    if (!hasMultiTermClause) {
        return query;
    }
    SpanNearQuery rewritten = new SpanNearQuery(spanQueries.toArray(new SpanQuery[0]), 0, true);
    LOGGER.debug("The phrase query {} is re-written as {}", query, rewritten);
    return rewritten;
}
@Test
public void testQueryIsNotRewritten() {
    // Non-BooleanQuery inputs must pass through the rewriter unchanged.
    // Test 1: Term query is not re-written.
    TermQuery termQuery = new TermQuery(new Term("field", "real"));
    Assert.assertEquals(termQuery, LuceneTextIndexUtils.convertToMultiTermSpanQuery(termQuery));
    // Test 2: Regex query is not re-written.
    RegexpQuery regexpQuery = new RegexpQuery(new Term("field", "\\d+"));
    Assert.assertEquals(regexpQuery, LuceneTextIndexUtils.convertToMultiTermSpanQuery(regexpQuery));
}
/**
 * Looks up the SPI-registered encryption service for the given algorithm name.
 *
 * @return the matching service, or {@code Optional.empty()} when none is registered
 */
public Optional<EncryptionPluginService> findEncryptionService(String algorithmName) {
    final EncryptionPluginService registered = ENCRYPTION_SPI_MAP.get(algorithmName);
    return Optional.ofNullable(registered);
}
@Test
void testFindEncryptionService() {
    // The "aes" algorithm is expected to be registered via SPI.
    EncryptionPluginManager instance = EncryptionPluginManager.instance();
    Optional<EncryptionPluginService> optional = instance.findEncryptionService("aes");
    assertTrue(optional.isPresent());
}
/**
 * Always fails: Debezium endpoints are consume-only, so no producer can be created.
 *
 * @throws UnsupportedOperationException on every invocation
 */
@Override
public Producer createProducer() throws Exception {
    final String message = "Cannot produce from a DebeziumEndpoint: " + getEndpointUri();
    throw new UnsupportedOperationException(message);
}
@Test
void testIfFailsToCreateProducer() {
    // Debezium endpoints are consume-only; producing must be rejected.
    assertThrows(UnsupportedOperationException.class, () -> {
        debeziumEndpoint.createProducer();
    });
}
/**
 * Aborts a running transaction: flips its state to ABORTED under the proper lock
 * ordering (txn write lock outside the manager write lock), persists the change,
 * and notifies per-table listeners with the tablet results.
 *
 * @throws TransactionNotFoundException if the txn is not in the running map
 */
public void abortTransaction(long transactionId, boolean abortPrepared, String reason,
                             TxnCommitAttachment txnCommitAttachment,
                             List<TabletCommitInfo> finishedTablets,
                             List<TabletFailInfo> failedTablets)
        throws UserException {
    if (transactionId < 0) {
        LOG.info("transaction id is {}, less than 0, maybe this is an old type load job, ignore abort operation",
                transactionId);
        return;
    }
    TransactionState transactionState = null;
    readLock();
    try {
        transactionState = idToRunningTransactionState.get(transactionId);
    } finally {
        readUnlock();
    }
    if (transactionState == null) {
        // If the transaction state does not exist, this task might have been aborted by
        // the txntimeoutchecker thread. We need to perform some additional work.
        processNotFoundTxn(transactionId, reason, txnCommitAttachment);
        throw new TransactionNotFoundException(transactionId);
    }

    // update transaction state extra if exists
    if (txnCommitAttachment != null) {
        transactionState.setTxnCommitAttachment(txnCommitAttachment);
    }

    // before state transform
    TxnStateChangeCallback callback = transactionState.beforeStateTransform(TransactionStatus.ABORTED);
    boolean txnOperated = false;
    transactionState.writeLock();
    try {
        writeLock();
        try {
            txnOperated = unprotectAbortTransaction(transactionId, abortPrepared, reason);
        } finally {
            // afterStateTransform runs outside the manager lock but still under the txn lock.
            writeUnlock();
            transactionState.afterStateTransform(TransactionStatus.ABORTED, txnOperated, callback, reason);
        }
        persistTxnStateInTxnLevelLock(transactionState);
    } finally {
        transactionState.writeUnlock();
    }

    // Nothing further to do unless the abort actually took effect.
    if (!txnOperated || transactionState.getTransactionStatus() != TransactionStatus.ABORTED) {
        return;
    }

    LOG.info("transaction:[{}] successfully rollback", transactionState);

    Database db = GlobalStateMgr.getCurrentState().getDb(dbId);
    if (db == null) {
        return;
    }
    // Fan the abort out to each involved table's state listener (tables may have
    // been dropped concurrently, hence the null checks).
    for (Long tableId : transactionState.getTableIdList()) {
        Table table = db.getTable(tableId);
        if (table == null) {
            continue;
        }
        TransactionStateListener listener = stateListenerFactory.create(this, table);
        if (listener != null) {
            listener.postAbort(transactionState, finishedTablets, failedTablets);
        }
    }
}
@Test
public void testAbortTransactionWithNotFoundException() throws UserException {
    DatabaseTransactionMgr masterDbTransMgr =
            masterTransMgr.getDatabaseTransactionMgr(GlobalStateMgrTestUtil.testDbId1);
    long txnId1 = lableToTxnId.get(GlobalStateMgrTestUtil.testTxnLable1);
    // Aborting a txn id that is no longer running must surface "transaction not found".
    expectedEx.expect(UserException.class);
    expectedEx.expectMessage("transaction not found");
    masterDbTransMgr.abortTransaction(txnId1, "test abort transaction", null);
}
/**
 * Applies this extractor to a message: checks the string/regex run condition,
 * executes the extraction, writes result fields, optionally cuts the matched
 * spans out of the source field, and finally runs the converter chain.
 * Timers/counters record condition and execution metrics throughout.
 */
public void runExtractor(Message msg) {
    try(final Timer.Context ignored = completeTimer.time()) {
        final String field;
        try (final Timer.Context ignored2 = conditionTimer.time()) {
            // We can only work on Strings.
            if (!(msg.getField(sourceField) instanceof String)) {
                conditionMissesCounter.inc();
                return;
            }
            field = (String) msg.getField(sourceField);
            // Decide if to extract at all.
            if (conditionType.equals(ConditionType.STRING)) {
                if (field.contains(conditionValue)) {
                    conditionHitsCounter.inc();
                } else {
                    conditionMissesCounter.inc();
                    return;
                }
            } else if (conditionType.equals(ConditionType.REGEX)) {
                if (regexConditionPattern.matcher(field).find()) {
                    conditionHitsCounter.inc();
                } else {
                    conditionMissesCounter.inc();
                    return;
                }
            }
        }
        try (final Timer.Context ignored2 = executionTimer.time()) {
            Result[] results;
            try {
                results = run(field);
            } catch (ExtractorException e) {
                // Extraction failure is recorded on the message, not thrown.
                final String error = "Could not apply extractor <" + getTitle() + " (" + getId() + ")>";
                msg.addProcessingError(new Message.ProcessingError(
                        ProcessingFailureCause.ExtractorException, error, ExceptionUtils.getRootCauseMessage(e)));
                return;
            }
            // No results (or any null value) means nothing to write.
            if (results == null || results.length == 0
                    || Arrays.stream(results).anyMatch(result -> result.getValue() == null)) {
                return;
            } else if (results.length == 1 && results[0].target == null) {
                // results[0].target is null if this extractor cannot produce multiple fields use targetField in that case
                msg.addField(targetField, results[0].getValue());
            } else {
                for (final Result result : results) {
                    msg.addField(result.getTarget(), result.getValue());
                }
            }
            // Remove original from message?
            if (cursorStrategy.equals(CursorStrategy.CUT) && !targetField.equals(sourceField)
                    && !Message.RESERVED_FIELDS.contains(sourceField) && results[0].beginIndex != -1) {
                final StringBuilder sb = new StringBuilder(field);
                final List<Result> reverseList = Arrays.stream(results)
                        .sorted(Comparator.<Result>comparingInt(result -> result.endIndex).reversed())
                        .collect(Collectors.toList());
                // remove all from reverse so that the indices still match
                for (final Result result : reverseList) {
                    sb.delete(result.getBeginIndex(), result.getEndIndex());
                }
                final String builtString = sb.toString();
                final String finalResult = builtString.trim().isEmpty() ? "fullyCutByExtractor" : builtString;
                msg.removeField(sourceField);
                // TODO don't add an empty field back, or rather don't add fullyCutByExtractor
                msg.addField(sourceField, finalResult);
            }
            runConverters(msg);
        }
    }
}
@Test
public void testConverters() throws Exception {
    // A converter that always returns "converted" ...
    final Converter converter = new TestConverter.Builder()
            .callback(new Function<Object, Object>() {
                @Nullable
                @Override
                public Object apply(Object input) {
                    return "converted";
                }
            })
            .build();
    // ... attached to an extractor that produces a single untargeted result.
    final TestExtractor extractor = new TestExtractor.Builder()
            .converters(Lists.newArrayList(converter))
            .callback(new Callable<Result[]>() {
                @Override
                public Result[] call() throws Exception {
                    return new Result[] {
                            new Result("1", -1, -1)
                    };
                }
            })
            .build();
    final Message msg = createMessage("message");
    extractor.runExtractor(msg);
    // The raw value "1" must have been passed through the converter chain.
    assertThat(msg.getField("target")).isEqualTo("converted");
}
/**
 * Runs the processing pipeline and returns the generated segment directories,
 * guaranteeing that intermediate mapper/reducer directories are removed no
 * matter how processing ends, and the segments output dir on failure.
 */
public List<File> process()
    throws Exception {
  try {
    return doProcess();
  } catch (Exception e) {
    // Cleaning up output dir as processing has failed. file managers left from map or reduce phase will be cleaned
    // up in the respective phases.
    FileUtils.deleteQuietly(_segmentsOutputDir);
    throw e;
  } finally {
    // Mapper/reducer scratch space is always removed, even on success.
    FileUtils.deleteDirectory(_mapperOutputDir);
    FileUtils.deleteDirectory(_reducerOutputDir);
  }
}
/**
 * End-to-end checks of the intermediate-file-size threshold: default (no limit)
 * behaviour, one-row-per-segment splitting, failure cleanup, and record-reader
 * ownership semantics of RecordReaderFileConfig.
 */
@Test
public void testConfigurableMapperOutputSize()
    throws Exception {
  File workingDir = new File(TEMP_DIR, "configurable_mapper_test_output");
  FileUtils.forceMkdir(workingDir);
  int expectedTotalDocsCount = 10;

  // Test 1 : Default case i.e. no limit to mapper output file size (single record reader).
  SegmentProcessorConfig config =
      new SegmentProcessorConfig.Builder().setTableConfig(_tableConfig).setSchema(_schema).build();
  SegmentProcessorFramework framework = new SegmentProcessorFramework(_singleSegment, config, workingDir);
  List<File> outputSegments = framework.process();
  assertEquals(outputSegments.size(), 1);
  String[] outputDirs = workingDir.list();
  assertTrue(outputDirs != null && outputDirs.length == 1, Arrays.toString(outputDirs));
  SegmentMetadata segmentMetadata = new SegmentMetadataImpl(outputSegments.get(0));
  assertEquals(segmentMetadata.getTotalDocs(), expectedTotalDocsCount);
  assertEquals(segmentMetadata.getName(), "myTable_1597719600000_1597892400000_0");
  FileUtils.cleanDirectory(workingDir);
  rewindRecordReaders(_singleSegment);

  // Test 2 : Default case i.e. no limit to mapper output file size (multiple record readers).
  config = new SegmentProcessorConfig.Builder().setTableConfig(_tableConfig).setSchema(_schema).build();
  framework = new SegmentProcessorFramework(_multipleSegments, config, workingDir);
  outputSegments = framework.process();
  assertEquals(outputSegments.size(), 1);
  outputDirs = workingDir.list();
  assertTrue(outputDirs != null && outputDirs.length == 1, Arrays.toString(outputDirs));
  segmentMetadata = new SegmentMetadataImpl(outputSegments.get(0));
  assertEquals(segmentMetadata.getTotalDocs(), expectedTotalDocsCount);
  assertEquals(segmentMetadata.getName(), "myTable_1597719600000_1597892400000_0");
  FileUtils.cleanDirectory(workingDir);
  rewindRecordReaders(_multipleSegments);

  // Test 3 : Test mapper with threshold output size (single record reader).
  // Create a segmentConfig with intermediate mapper output size threshold set to the number of bytes in each row
  // from the data. In this way, we can test if each row is written to a separate segment.
  SegmentConfig segmentConfig =
      new SegmentConfig.Builder().setIntermediateFileSizeThreshold(16).setSegmentNamePrefix("testPrefix")
          .setSegmentNamePostfix("testPostfix").build();
  config = new SegmentProcessorConfig.Builder().setSegmentConfig(segmentConfig).setTableConfig(_tableConfig)
      .setSchema(_schema).build();
  framework = new SegmentProcessorFramework(_singleSegment, config, workingDir);
  outputSegments = framework.process();
  assertEquals(outputSegments.size(), expectedTotalDocsCount);
  outputDirs = workingDir.list();
  assertTrue(outputDirs != null && outputDirs.length == 1, Arrays.toString(outputDirs));

  // Verify that each segment has only one row, and the segment name is correct.
  for (int i = 0; i < expectedTotalDocsCount; i++) {
    segmentMetadata = new SegmentMetadataImpl(outputSegments.get(i));
    assertEquals(segmentMetadata.getTotalDocs(), 1);
    assertTrue(segmentMetadata.getName().matches("testPrefix_.*_testPostfix_" + i));
  }
  FileUtils.cleanDirectory(workingDir);
  rewindRecordReaders(_singleSegment);

  // Test 4 : Test mapper with threshold output size (multiple record readers).
  // Create a segmentConfig with intermediate mapper output size threshold set to the number of bytes in each row
  // from the data. In this way, we can test if each row is written to a separate segment.
  segmentConfig = new SegmentConfig.Builder().setIntermediateFileSizeThreshold(16).setSegmentNamePrefix("testPrefix")
      .setSegmentNamePostfix("testPostfix").build();
  config = new SegmentProcessorConfig.Builder().setSegmentConfig(segmentConfig).setTableConfig(_tableConfig)
      .setSchema(_schema).build();
  framework = new SegmentProcessorFramework(_multipleSegments, config, workingDir);
  outputSegments = framework.process();
  assertEquals(outputSegments.size(), expectedTotalDocsCount);
  outputDirs = workingDir.list();
  assertTrue(outputDirs != null && outputDirs.length == 1, Arrays.toString(outputDirs));

  // Verify that each segment has only one row, and the segment name is correct.
  for (int i = 0; i < expectedTotalDocsCount; i++) {
    segmentMetadata = new SegmentMetadataImpl(outputSegments.get(i));
    assertEquals(segmentMetadata.getTotalDocs(), 1);
    assertTrue(segmentMetadata.getName().matches("testPrefix_.*_testPostfix_" + i));
  }
  FileUtils.cleanDirectory(workingDir);
  rewindRecordReaders(_multipleSegments);

  // Test 5 : Test with injected failure in mapper to verify output directory is cleaned up.
  List<RecordReader> testList = new ArrayList<>(_multipleSegments);
  testList.set(1, null);
  segmentConfig = new SegmentConfig.Builder().setIntermediateFileSizeThreshold(16).setSegmentNamePrefix("testPrefix")
      .setSegmentNamePostfix("testPostfix").build();
  config = new SegmentProcessorConfig.Builder().setSegmentConfig(segmentConfig).setTableConfig(_tableConfig)
      .setSchema(_schema).build();
  SegmentProcessorFramework failureTest = new SegmentProcessorFramework(testList, config, workingDir);
  assertThrows(NullPointerException.class, failureTest::process);
  assertTrue(FileUtils.isEmptyDirectory(workingDir));
  rewindRecordReaders(_multipleSegments);

  // Test 6: RecordReader should be closed when recordReader is created inside RecordReaderFileConfig (without mapper
  // output size threshold configured).
  ClassLoader classLoader = getClass().getClassLoader();
  URL resource = classLoader.getResource("data/dimBaseballTeams.csv");
  RecordReaderFileConfig recordReaderFileConfig =
      new RecordReaderFileConfig(FileFormat.CSV, new File(resource.toURI()), null, null, null);
  TableConfig tableConfig =
      new TableConfigBuilder(TableType.OFFLINE).setTableName("myTable").setTimeColumnName("time").build();
  Schema schema = new Schema.SchemaBuilder().setSchemaName("mySchema")
      .addSingleValueDimension("teamId", DataType.STRING, "")
      .addSingleValueDimension("teamName", DataType.STRING, "")
      .addDateTime("time", DataType.LONG, "1:MILLISECONDS:EPOCH", "1:MILLISECONDS").build();
  config = new SegmentProcessorConfig.Builder().setTableConfig(tableConfig).setSchema(schema).build();
  SegmentProcessorFramework frameworkWithRecordReaderFileConfig =
      new SegmentProcessorFramework(config, workingDir, ImmutableList.of(recordReaderFileConfig),
          Collections.emptyList(), null);
  outputSegments = frameworkWithRecordReaderFileConfig.process();

  // Verify the number of segments created and the total docs.
  assertEquals(outputSegments.size(), 1);
  ImmutableSegment segment = ImmutableSegmentLoader.load(outputSegments.get(0), ReadMode.mmap);
  segmentMetadata = segment.getSegmentMetadata();
  assertEquals(segmentMetadata.getTotalDocs(), 52);

  // Verify that the record reader is closed from RecordReaderFileConfig.
  assertTrue(recordReaderFileConfig.isRecordReaderClosedFromRecordReaderFileConfig());
  FileUtils.cleanDirectory(workingDir);

  // Test 7: RecordReader should not be closed when recordReader is passed to RecordReaderFileConfig. (Without
  // mapper output size threshold configured)
  RecordReader recordReader = recordReaderFileConfig.getRecordReader();
  recordReader.rewind();

  // Pass the recordReader to RecordReaderFileConfig.
  recordReaderFileConfig = new RecordReaderFileConfig(recordReader);
  SegmentProcessorFramework frameworkWithDelegateRecordReader =
      new SegmentProcessorFramework(config, workingDir, ImmutableList.of(recordReaderFileConfig),
          Collections.emptyList(), null);
  outputSegments = frameworkWithDelegateRecordReader.process();

  // Verify the number of segments created and the total docs.
  assertEquals(outputSegments.size(), 1);
  segment = ImmutableSegmentLoader.load(outputSegments.get(0), ReadMode.mmap);
  segmentMetadata = segment.getSegmentMetadata();
  assertEquals(segmentMetadata.getTotalDocs(), 52);

  // Verify that the record reader is not closed from RecordReaderFileConfig.
  assertFalse(recordReaderFileConfig.isRecordReaderClosedFromRecordReaderFileConfig());
  FileUtils.cleanDirectory(workingDir);

  // Test 8: RecordReader should be closed when recordReader is created inside RecordReaderFileConfig (With mapper
  // output size threshold configured).
  expectedTotalDocsCount = 52;
  recordReaderFileConfig = new RecordReaderFileConfig(FileFormat.CSV, new File(resource.toURI()), null, null, null);
  segmentConfig = new SegmentConfig.Builder().setIntermediateFileSizeThreshold(19).setSegmentNamePrefix("testPrefix")
      .setSegmentNamePostfix("testPostfix").build();
  config = new SegmentProcessorConfig.Builder().setSegmentConfig(segmentConfig).setTableConfig(tableConfig)
      .setSchema(schema).build();
  frameworkWithRecordReaderFileConfig =
      new SegmentProcessorFramework(config, workingDir, ImmutableList.of(recordReaderFileConfig),
          Collections.emptyList(), null);
  outputSegments = frameworkWithRecordReaderFileConfig.process();

  // Verify that each segment has only one row.
  for (int i = 0; i < expectedTotalDocsCount; i++) {
    segmentMetadata = new SegmentMetadataImpl(outputSegments.get(i));
    assertEquals(segmentMetadata.getTotalDocs(), 1);
  }

  // Verify that the record reader is closed from RecordReaderFileConfig.
  assertTrue(recordReaderFileConfig.isRecordReaderClosedFromRecordReaderFileConfig());
  FileUtils.cleanDirectory(workingDir);

  // Test 9: RecordReader should not be closed when recordReader is passed to RecordReaderFileConfig (With mapper
  // output size threshold configured).
  recordReader = recordReaderFileConfig.getRecordReader();
  recordReader.rewind();

  // Pass the recordReader to RecordReaderFileConfig.
  recordReaderFileConfig = new RecordReaderFileConfig(recordReader);
  frameworkWithDelegateRecordReader =
      new SegmentProcessorFramework(config, workingDir, ImmutableList.of(recordReaderFileConfig),
          Collections.emptyList(), null);
  outputSegments = frameworkWithDelegateRecordReader.process();

  // Verify that each segment has only one row.
  for (int i = 0; i < expectedTotalDocsCount; i++) {
    segmentMetadata = new SegmentMetadataImpl(outputSegments.get(i));
    assertEquals(segmentMetadata.getTotalDocs(), 1);
  }

  // Verify that the record reader is not closed from RecordReaderFileConfig.
  assertFalse(recordReaderFileConfig.isRecordReaderClosedFromRecordReaderFileConfig());
  FileUtils.cleanDirectory(workingDir);
}
protected Object updateSchemaIn(Object keyOrValue, Schema updatedSchema) { if (keyOrValue instanceof Struct) { Struct origStruct = (Struct) keyOrValue; Struct newStruct = new Struct(updatedSchema); for (Field field : updatedSchema.fields()) { // assume both schemas have exact same fields with same names and schemas ... newStruct.put(field, getFieldValue(origStruct, field)); } return newStruct; } return keyOrValue; }
@Test
public void updateSchemaOfNonStruct() {
    // Non-Struct values must pass through unchanged (same instance, not a copy).
    Object value = 1;
    Object updatedValue = xform.updateSchemaIn(value, Schema.INT32_SCHEMA);
    assertSame(value, updatedValue);
}
/**
 * Reloads a theme from disk: (1) waits for the theme's existing Setting and
 * AnnotationSetting resources to be deleted, (2) re-reads theme.yaml and updates
 * the stored Theme (keeping its live resource version), then (3) re-creates the
 * Setting/AnnotationSetting resources shipped with the theme bundle.
 */
@Override
public Mono<Theme> reloadTheme(String name) {
    return client.fetch(Theme.class, name)
        .flatMap(oldTheme -> {
            String settingName = oldTheme.getSpec().getSettingName();
            return waitForSettingDeleted(settingName)
                .then(waitForAnnotationSettingsDeleted(name));
        })
        .then(Mono.defer(() -> {
            Path themePath = themeRoot.get().resolve(name);
            Path themeManifestPath = ThemeUtils.resolveThemeManifest(themePath);
            if (themeManifestPath == null) {
                throw new IllegalArgumentException(
                    "The manifest file [theme.yaml] is required.");
            }
            Unstructured unstructured = loadThemeManifest(themeManifestPath);
            Theme newTheme = Unstructured.OBJECT_MAPPER.convertValue(unstructured, Theme.class);
            return client.fetch(Theme.class, name)
                .map(oldTheme -> {
                    // Carry over the live resource version so the update does not conflict.
                    newTheme.getMetadata().setVersion(oldTheme.getMetadata().getVersion());
                    return newTheme;
                })
                .flatMap(client::update);
        }))
        .flatMap(theme -> {
            String settingName = theme.getSpec().getSettingName();
            // Re-create only the theme's own Setting plus all AnnotationSettings.
            return Flux.fromIterable(ThemeUtils.loadThemeResources(getThemePath(theme)))
                .filter(unstructured ->
                    (Setting.KIND.equals(unstructured.getKind())
                        && unstructured.getMetadata().getName().equals(settingName))
                        || AnnotationSetting.KIND.equals(unstructured.getKind())
                )
                .doOnNext(unstructured -> populateThemeNameLabel(unstructured, name))
                .flatMap(client::create)
                .then(Mono.just(theme));
        });
}
@Test
void reloadThemeWhenSettingNameSetBeforeThenDeleteSetting() throws IOException {
    // Existing theme referencing "fake-setting" which exists and must be deleted on reload.
    Theme theme = new Theme();
    theme.setMetadata(new Metadata());
    theme.getMetadata().setName("fake-theme");
    theme.setSpec(new Theme.ThemeSpec());
    theme.getSpec().setDisplayName("Hello");
    theme.getSpec().setSettingName("fake-setting");
    when(client.fetch(Theme.class, "fake-theme"))
        .thenReturn(Mono.just(theme));
    when(client.delete(any(Setting.class))).thenReturn(Mono.empty());
    Setting setting = new Setting();
    setting.setMetadata(new Metadata());
    setting.setSpec(new Setting.SettingSpec());
    setting.getSpec().setForms(List.of());
    when(client.fetch(Setting.class, "fake-setting"))
        .thenReturn(Mono.just(setting));
    // Lay down a theme bundle on disk whose manifest has no settingName.
    Path themeWorkDir = themeRoot.get().resolve(theme.getMetadata().getName());
    if (!Files.exists(themeWorkDir)) {
        Files.createDirectories(themeWorkDir);
    }
    Files.writeString(themeWorkDir.resolve("settings.yaml"), """
        apiVersion: v1alpha1
        kind: Setting
        metadata:
          name: fake-setting
        spec:
          forms:
            - group: sns
              label: 社交资料
              formSchema:
                - $el: h1
                  children: Register
        """);
    Files.writeString(themeWorkDir.resolve("theme.yaml"), """
        apiVersion: v1alpha1
        kind: Theme
        metadata:
          name: fake-theme
        spec:
          displayName: Fake Theme
        """);
    when(client.update(any(Theme.class)))
        .thenAnswer((Answer<Mono<Theme>>) invocation -> {
            Theme argument = invocation.getArgument(0);
            return Mono.just(argument);
        });
    when(client.list(eq(AnnotationSetting.class), any(), eq(null))).thenReturn(Flux.empty());

    // Reload must emit the theme freshly read from theme.yaml.
    themeService.reloadTheme("fake-theme")
        .as(StepVerifier::create)
        .consumeNextWith(themeUpdated -> {
            try {
                JSONAssert.assertEquals("""
                    {
                        "spec": {
                            "displayName": "Fake Theme",
                            "version": "*",
                            "requires": "*"
                        },
                        "apiVersion": "theme.halo.run/v1alpha1",
                        "kind": "Theme",
                        "metadata": {
                            "name": "fake-theme"
                        }
                    }
                    """,
                    JsonUtils.objectToJson(themeUpdated),
                    true);
            } catch (JSONException e) {
                throw new RuntimeException(e);
            }
        })
        .verifyComplete();

    // delete fake-setting
    verify(client, times(1)).delete(any(Setting.class));
    // Will not be created
    verify(client, times(0)).create(any(Setting.class));
}
/**
 * Looks a rule up by key. Validates the key, lazily hydrates the in-memory
 * cache on first use, then answers from the cache.
 */
@Override
public Optional<Rule> findByKey(RuleKey key) {
    verifyKeyArgument(key);
    ensureInitialized();
    final Rule rule = rulesByKey.get(key);
    return Optional.ofNullable(rule);
}
@Test
public void first_call_to_findByKey_triggers_call_to_db_and_any_subsequent_get_or_find_call_does_not() {
    underTest.findByKey(AB_RULE.getKey());

    // Lazy initialization: exactly one bulk load from the DAO ...
    verify(ruleDao, times(1)).selectAll(any(DbSession.class));
    // ... after which lookups are served from the in-memory cache.
    verifyNoMethodCallTriggersCallToDB();
}
/**
 * Evicts finished transactions that are expired at {@code currentMillis} or in
 * excess of Config.label_keep_max_num, oldest first. Eviction stops at the first
 * transaction that is neither expired nor over the count limit.
 */
public void removeExpiredTxns(long currentMillis) {
    writeLock();
    try {
        StringBuilder expiredTxnMsgs = new StringBuilder(1024);
        String prefix = "";
        // Positive when more finished txns are kept than the configured maximum.
        int numJobsToRemove = getTransactionNum() - Config.label_keep_max_num;
        while (!finalStatusTransactionStateDeque.isEmpty()) {
            TransactionState transactionState = finalStatusTransactionStateDeque.getFirst();
            if (transactionState.isExpired(currentMillis) || numJobsToRemove > 0) {
                finalStatusTransactionStateDeque.pop();
                clearTransactionState(transactionState);
                --numJobsToRemove;
                expiredTxnMsgs.append(prefix);
                prefix = ", ";
                expiredTxnMsgs.append(transactionState.getTransactionId());
                // Flush the id list to the log in chunks so the buffer stays bounded.
                if (expiredTxnMsgs.length() > 4096) {
                    LOG.info("transaction list [{}] are expired, remove them from transaction manager",
                            expiredTxnMsgs);
                    expiredTxnMsgs = new StringBuilder(1024);
                }
            } else {
                break;
            }
        }
        if (expiredTxnMsgs.length() > 0) {
            LOG.info("transaction list [{}] are expired, remove them from transaction manager",
                    expiredTxnMsgs);
        }
    } finally {
        writeUnlock();
    }
}
@Test
public void testRemoveExpiredTxns() throws AnalysisException {
    DatabaseTransactionMgr masterDbTransMgr =
            masterTransMgr.getDatabaseTransactionMgr(GlobalStateMgrTestUtil.testDbId1);
    // A negative keep time makes every finished transaction immediately expired.
    Config.label_keep_max_second = -1;
    long currentMillis = System.currentTimeMillis();
    masterDbTransMgr.removeExpiredTxns(currentMillis);
    assertEquals(0, masterDbTransMgr.getFinishedTxnNums());
    assertEquals(7, masterDbTransMgr.getTransactionNum());
    // The label of the evicted transaction no longer resolves to a txn id.
    assertNull(masterDbTransMgr.unprotectedGetTxnIdsByLabel(GlobalStateMgrTestUtil.testTxnLable1));
}
/**
 * Re-keys the stream on the given expressions, adding a repartition step only
 * when needed: a forced internal key format change, an explicit forceRepartition,
 * or a key expression that differs from the current key. Returns {@code this}
 * unchanged when no work is required.
 *
 * @throws KsqlException for implicit repartitions of windowed sources (unsupported)
 */
public SchemaKStream<K> selectKey(
    final FormatInfo valueFormat,
    final List<Expression> keyExpression,
    final Optional<KeyFormat> forceInternalKeyFormat,
    final Stacker contextStacker,
    final boolean forceRepartition
) {
  final boolean keyFormatChange = forceInternalKeyFormat.isPresent()
      && !forceInternalKeyFormat.get().equals(keyFormat);

  final boolean repartitionNeeded = repartitionNeeded(keyExpression);
  if (!keyFormatChange && !forceRepartition && !repartitionNeeded) {
    return this;
  }

  // Windowed sources only tolerate a pure key-format change (force without repartition).
  if ((repartitionNeeded || !forceRepartition) && keyFormat.isWindowed()) {
    throw new KsqlException(
        "Implicit repartitioning of windowed sources is not supported. "
            + "See https://github.com/confluentinc/ksql/issues/4385."
    );
  }

  final ExecutionStep<KStreamHolder<K>> step = ExecutionStepFactory
      .streamSelectKey(contextStacker, sourceStep, keyExpression);

  final KeyFormat newKeyFormat = forceInternalKeyFormat.orElse(keyFormat);
  return new SchemaKStream<>(
      step,
      resolveSchema(step),
      SerdeFeaturesFactory.sanitizeKeyFormat(
          newKeyFormat,
          toSqlTypes(keyExpression),
          true),
      ksqlConfig,
      functionRegistry
  );
}
@Test public void shouldNotRepartitionIfSameKeyField() { // Given: givenInitialKStreamOf( "SELECT col0, col2, col3 FROM test1 PARTITION BY col0 EMIT CHANGES;"); // When: final SchemaKStream<?> result = initialSchemaKStream .selectKey( valueFormat.getFormatInfo(), ImmutableList.of(new UnqualifiedColumnReferenceExp(ColumnName.of("COL0"))), Optional.empty(), childContextStacker, false ); // Then: assertThat(result, is(initialSchemaKStream)); }
@Override public void importData(JsonReader reader) throws IOException { logger.info("Reading configuration for 1.2"); // this *HAS* to start as an object reader.beginObject(); while (reader.hasNext()) { JsonToken tok = reader.peek(); switch (tok) { case NAME: String name = reader.nextName(); // find out which member it is if (name.equals(CLIENTS)) { readClients(reader); } else if (name.equals(GRANTS)) { readGrants(reader); } else if (name.equals(WHITELISTEDSITES)) { readWhitelistedSites(reader); } else if (name.equals(BLACKLISTEDSITES)) { readBlacklistedSites(reader); } else if (name.equals(AUTHENTICATIONHOLDERS)) { readAuthenticationHolders(reader); } else if (name.equals(ACCESSTOKENS)) { readAccessTokens(reader); } else if (name.equals(REFRESHTOKENS)) { readRefreshTokens(reader); } else if (name.equals(SYSTEMSCOPES)) { readSystemScopes(reader); } else { for (MITREidDataServiceExtension extension : extensions) { if (extension.supportsVersion(THIS_VERSION)) { extension.importExtensionData(name, reader); break; } } // unknown token, skip it reader.skipValue(); } break; case END_OBJECT: // the object ended, we're done here reader.endObject(); continue; default: logger.debug("Found unexpected entry"); reader.skipValue(); continue; } } fixObjectReferences(); for (MITREidDataServiceExtension extension : extensions) { if (extension.supportsVersion(THIS_VERSION)) { extension.fixExtensionObjectReferences(maps); break; } } maps.clearAll(); }
/**
 * Imports two grants (approved sites) from JSON and verifies the persisted
 * entities match the expected fixtures. Fix applied: the saved-sites list was
 * built with a raw-typed {@code new ArrayList(...)}, compiling with an
 * unchecked warning; now uses the diamond constructor.
 */
@Test
public void testImportGrants() throws IOException, ParseException {
    // Expected fixture #1 — mirrors the first grant in the JSON below.
    Date creationDate1 = formatter.parse("2014-09-10T22:49:44.090+00:00", Locale.ENGLISH);
    Date accessDate1 = formatter.parse("2014-09-10T23:49:44.090+00:00", Locale.ENGLISH);

    OAuth2AccessTokenEntity mockToken1 = mock(OAuth2AccessTokenEntity.class);
    when(mockToken1.getId()).thenReturn(1L);

    ApprovedSite site1 = new ApprovedSite();
    site1.setId(1L);
    site1.setClientId("foo");
    site1.setCreationDate(creationDate1);
    site1.setAccessDate(accessDate1);
    site1.setUserId("user1");
    site1.setAllowedScopes(ImmutableSet.of("openid", "phone"));
    when(mockToken1.getApprovedSite()).thenReturn(site1);

    // Expected fixture #2 — second grant, including a timeout date.
    Date creationDate2 = formatter.parse("2014-09-11T18:49:44.090+00:00", Locale.ENGLISH);
    Date accessDate2 = formatter.parse("2014-09-11T20:49:44.090+00:00", Locale.ENGLISH);
    Date timeoutDate2 = formatter.parse("2014-10-01T20:49:44.090+00:00", Locale.ENGLISH);

    ApprovedSite site2 = new ApprovedSite();
    site2.setId(2L);
    site2.setClientId("bar");
    site2.setCreationDate(creationDate2);
    site2.setAccessDate(accessDate2);
    site2.setUserId("user2");
    site2.setAllowedScopes(ImmutableSet.of("openid", "offline_access", "email", "profile"));
    site2.setTimeoutDate(timeoutDate2);

    String configJson = "{" +
        "\"" + MITREidDataService.CLIENTS + "\": [], " +
        "\"" + MITREidDataService.ACCESSTOKENS + "\": [], " +
        "\"" + MITREidDataService.REFRESHTOKENS + "\": [], " +
        "\"" + MITREidDataService.WHITELISTEDSITES + "\": [], " +
        "\"" + MITREidDataService.BLACKLISTEDSITES + "\": [], " +
        "\"" + MITREidDataService.SYSTEMSCOPES + "\": [], " +
        "\"" + MITREidDataService.AUTHENTICATIONHOLDERS + "\": [], " +
        "\"" + MITREidDataService.GRANTS + "\": [" +
        "{\"id\":1,\"clientId\":\"foo\",\"creationDate\":\"2014-09-10T22:49:44.090+00:00\",\"accessDate\":\"2014-09-10T23:49:44.090+00:00\"," +
        "\"userId\":\"user1\",\"whitelistedSiteId\":null,\"allowedScopes\":[\"openid\",\"phone\"], \"whitelistedSiteId\":1," +
        "\"approvedAccessTokens\":[1]}," +
        "{\"id\":2,\"clientId\":\"bar\",\"creationDate\":\"2014-09-11T18:49:44.090+00:00\",\"accessDate\":\"2014-09-11T20:49:44.090+00:00\"," +
        "\"timeoutDate\":\"2014-10-01T20:49:44.090+00:00\",\"userId\":\"user2\"," +
        "\"allowedScopes\":[\"openid\",\"offline_access\",\"email\",\"profile\"]}" +
        " ]" +
        "}";

    logger.debug(configJson);

    JsonReader reader = new JsonReader(new StringReader(configJson));

    // In-memory stand-in for the repository: saves assign sequential ids.
    final Map<Long, ApprovedSite> fakeDb = new HashMap<>();
    when(approvedSiteRepository.save(isA(ApprovedSite.class))).thenAnswer(new Answer<ApprovedSite>() {
        Long id = 364L;
        @Override
        public ApprovedSite answer(InvocationOnMock invocation) throws Throwable {
            ApprovedSite _site = (ApprovedSite) invocation.getArguments()[0];
            if (_site.getId() == null) {
                _site.setId(id++);
            }
            fakeDb.put(_site.getId(), _site);
            return _site;
        }
    });
    when(approvedSiteRepository.getById(anyLong())).thenAnswer(new Answer<ApprovedSite>() {
        @Override
        public ApprovedSite answer(InvocationOnMock invocation) throws Throwable {
            Long _id = (Long) invocation.getArguments()[0];
            return fakeDb.get(_id);
        }
    });
    when(wlSiteRepository.getById(isNull(Long.class))).thenAnswer(new Answer<WhitelistedSite>() {
        Long id = 432L;
        @Override
        public WhitelistedSite answer(InvocationOnMock invocation) throws Throwable {
            WhitelistedSite _site = mock(WhitelistedSite.class);
            when(_site.getId()).thenReturn(id++);
            return _site;
        }
    });
    when(tokenRepository.getAccessTokenById(isNull(Long.class))).thenAnswer(new Answer<OAuth2AccessTokenEntity>() {
        Long id = 245L;
        @Override
        public OAuth2AccessTokenEntity answer(InvocationOnMock invocation) throws Throwable {
            OAuth2AccessTokenEntity _token = mock(OAuth2AccessTokenEntity.class);
            when(_token.getId()).thenReturn(id++);
            return _token;
        }
    });

    dataService.importData(reader);

    //2 for sites, 1 for updating access token ref on #1
    verify(approvedSiteRepository, times(3)).save(capturedApprovedSites.capture());

    // FIX: diamond constructor replaces raw-typed `new ArrayList(...)`.
    // NOTE(review): positional asserts below rely on HashMap iteration order of
    // the generated ids (364, 365); stable in practice but not spec-guaranteed.
    List<ApprovedSite> savedSites = new ArrayList<>(fakeDb.values());

    assertThat(savedSites.size(), is(2));

    assertThat(savedSites.get(0).getClientId(), equalTo(site1.getClientId()));
    assertThat(savedSites.get(0).getAccessDate(), equalTo(site1.getAccessDate()));
    assertThat(savedSites.get(0).getCreationDate(), equalTo(site1.getCreationDate()));
    assertThat(savedSites.get(0).getAllowedScopes(), equalTo(site1.getAllowedScopes()));
    assertThat(savedSites.get(0).getTimeoutDate(), equalTo(site1.getTimeoutDate()));

    assertThat(savedSites.get(1).getClientId(), equalTo(site2.getClientId()));
    assertThat(savedSites.get(1).getAccessDate(), equalTo(site2.getAccessDate()));
    assertThat(savedSites.get(1).getCreationDate(), equalTo(site2.getCreationDate()));
    assertThat(savedSites.get(1).getAllowedScopes(), equalTo(site2.getAllowedScopes()));
    assertThat(savedSites.get(1).getTimeoutDate(), equalTo(site2.getTimeoutDate()));
}
/**
 * Reads the attributes of {@code file} via WebDAV PROPFIND, falling back to a
 * HEAD request when the server rejects PROPFIND.
 *
 * @param file     the remote path; must not conflict in type (file vs.
 *                 directory) with what the server reports
 * @param listener progress listener (not used directly here)
 * @return the attributes of the first matching DAV resource
 * @throws NotfoundException when the listing is empty or the reported MIME
 *         type contradicts the requested path type
 */
@Override
public PathAttributes find(final Path file, final ListProgressListener listener) throws BackgroundException {
    // The root collection has no attributes of its own.
    if (file.isRoot()) {
        return PathAttributes.EMPTY;
    }
    try {
        try {
            // Only the first resource returned is ever inspected; the loop
            // returns (or throws) on its first iteration when non-empty.
            for (final DavResource resource : this.list(file)) {
                if (resource.isDirectory()) {
                    // Server reports a collection but caller asked for a file.
                    if (!file.getType().contains(Path.Type.directory)) {
                        throw new NotfoundException(String.format("File %s has set MIME type %s",
                            file.getAbsolute(), DavResource.HTTPD_UNIX_DIRECTORY_CONTENT_TYPE));
                    }
                } else {
                    // Server reports a plain resource but caller asked for a directory.
                    if (!file.getType().contains(Path.Type.file)) {
                        throw new NotfoundException(String.format("File %s has set MIME type %s",
                            file.getAbsolute(), resource.getContentType()));
                    }
                }
                return this.toAttributes(resource);
            }
            // Empty listing: the file does not exist.
            throw new NotfoundException(file.getAbsolute());
        } catch(SardineException e) {
            try {
                throw new DAVExceptionMappingService().map("Failure to read attributes of {0}", e, file);
            } catch(InteroperabilityException | ConflictException i) {
                // PROPFIND Method not allowed
                if(log.isWarnEnabled()) {
                    log.warn(String.format("Failure with PROPFIND request for %s. %s", file, i.getMessage()));
                }
                // Fall back to a HEAD request for basic attributes.
                final PathAttributes attr = this.head(file);
                if(PathAttributes.EMPTY == attr) {
                    // HEAD yielded nothing either; surface the original failure.
                    throw i;
                }
                return attr;
            }
        }
    } catch(SardineException e) {
        // Covers SardineException thrown by the HEAD fallback path above.
        throw new DAVExceptionMappingService().map("Failure to read attributes of {0}", e, file);
    } catch(IOException e) {
        throw new HttpExceptionMappingService().map(e, file);
    }
}
/**
 * Creates a file, verifies it has no lock id, then acquires and releases a
 * WebDAV lock, asserting the lock id becomes visible through the attributes
 * finder. Servers without lock support raise InteroperabilityException, which
 * is tolerated.
 */
@Test
public void testFindLock() throws Exception {
    // Create a fresh file with a random name in the home directory.
    final Path test = new DAVTouchFeature(session).touch(new Path(new DefaultHomeFinderService(session).find(),
        new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file)), new TransferStatus());
    final DAVAttributesFinderFeature f = new DAVAttributesFinderFeature(session);
    // No lock yet.
    assertNull(f.find(test).getLockId());
    try {
        final String lockId = new DAVLockFeature(session).lock(test);
        // Lock id must now surface in the attributes.
        assertNotNull(f.find(test).getLockId());
        new DAVLockFeature(session).unlock(test, lockId);
    } catch(InteroperabilityException e) {
        // No lock support
    }
    // Clean up the test file regardless of lock support.
    new DAVDeleteFeature(session).delete(Collections.singletonList(test),
        new DisabledPasswordCallback(), new Delete.DisabledCallback());
}
/**
 * Attaches stored cookies matching the request origin before the request is
 * executed, then records any cookies set by the response for later requests.
 */
public Response filter(FilterableRequestSpecification requestSpec,
                       FilterableResponseSpecification responseSpec, FilterContext ctx) {
    final CookieOrigin origin = cookieOriginFromUri(requestSpec.getURI());
    for (Cookie stored : cookieStore.getCookies()) {
        // Skip cookies that don't apply to this origin.
        if (!cookieSpec.match(stored, origin)) {
            continue;
        }
        // Avoid clobbering a cookie the caller set explicitly, unless
        // duplicates are allowed.
        if (allowMultipleCookiesWithTheSameNameOrCookieNotPreviouslyDefined(requestSpec, stored)) {
            requestSpec.cookie(stored.getName(), stored.getValue());
        }
    }
    final Response response = ctx.next(requestSpec, responseSpec);
    final List<Cookie> fromResponse = extractResponseCookies(response, origin);
    cookieStore.addCookies(fromResponse.toArray(new Cookie[0]));
    return response;
}
/**
 * Verifies that a cookie set on the request before the filter runs is kept,
 * while cookies captured from an earlier response are also attached.
 */
@Test
public void preserveCookies() {
    // Pre-set a cookie on the request under test.
    reqOriginDomain.cookie("cookieName1", "cookieInitialValue");
    // First pass populates the filter's cookie store from the stubbed response.
    cookieFilter.filter((FilterableRequestSpecification) given(), response, testFilterContext);
    // Second pass applies stored cookies to the request under test.
    cookieFilter.filter(reqOriginDomain, response, testFilterContext);
    assertThat(reqOriginDomain.getCookies().size(), Matchers.is(2));
    // The pre-set cookie must keep its original value (not be overwritten).
    assertThat(reqOriginDomain.getCookies().hasCookieWithName("cookieName1"), Matchers.is(true));
    assertThat(reqOriginDomain.getCookies().getValue("cookieName1"), Matchers.is("cookieInitialValue"));
    // The response cookie must have been added alongside it.
    assertThat(reqOriginDomain.getCookies().hasCookieWithName("cookieName2"), Matchers.is(true));
    assertThat(reqOriginDomain.getCookies().getValue("cookieName2"), Matchers.is("cookieValue2"));
}
/**
 * Starts all step threads of this transformation according to its execution
 * type (Normal, SerialSingleThreaded, SingleThreaded), wiring up listeners
 * that track active/finished step counts, performance snapshots, and
 * end-of-transformation cleanup.
 *
 * @throws KettleException when an extension point or startup step fails
 */
public void startThreads() throws KettleException {
  // Now prepare to start all the threads...
  //
  nrOfFinishedSteps = 0;
  nrOfActiveSteps = 0;

  ExtensionPointHandler.callExtensionPoint( log, KettleExtensionPoint.TransformationStartThreads.id, this );

  fireTransStartedListeners();

  for ( int i = 0; i < steps.size(); i++ ) {
    final StepMetaDataCombi sid = steps.get( i );
    sid.step.markStart();
    sid.step.initBeforeStart();

    // also attach a Step Listener to detect when we're done...
    //
    StepListener stepListener = new StepListener() {
      @Override
      public void stepActive( Trans trans, StepMeta stepMeta, StepInterface step ) {
        nrOfActiveSteps++;
        if ( nrOfActiveSteps == 1 ) {
          // Transformation goes from in-active to active...
          // PDI-5229 sync added
          synchronized ( transListeners ) {
            for ( TransListener listener : transListeners ) {
              listener.transActive( Trans.this );
            }
          }
        }
      }

      @Override
      public void stepFinished( Trans trans, StepMeta stepMeta, StepInterface step ) {
        // Guarded on the Trans monitor so the finished count and the
        // finished-flag update are atomic across step threads.
        synchronized ( Trans.this ) {
          nrOfFinishedSteps++;

          if ( nrOfFinishedSteps >= steps.size() ) {
            // Set the finished flag
            //
            setFinished( true );

            // Grab the performance statistics one last time (if enabled)
            //
            addStepPerformanceSnapShot();

            try {
              fireTransFinishedListeners();
            } catch ( Exception e ) {
              step.setErrors( step.getErrors() + 1L );
              log.logError( getName()
                  + " : " + BaseMessages.getString( PKG, "Trans.Log.UnexpectedErrorAtTransformationEnd" ), e );
            }
          }

          // If a step fails with an error, we want to kill/stop the others
          // too...
          //
          if ( step.getErrors() > 0 ) {
            log.logMinimal( BaseMessages.getString( PKG, "Trans.Log.TransformationDetectedErrors" ) );
            log.logMinimal( BaseMessages.getString( PKG, "Trans.Log.TransformationIsKillingTheOtherSteps" ) );
            killAllNoWait();
          }
        }
      }
    };
    // Make sure this is called first!
    //
    // BaseStep exposes its listener list directly, so the listener can be
    // inserted at position 0; other implementations only support append.
    if ( sid.step instanceof BaseStep ) {
      ( (BaseStep) sid.step ).getStepListeners().add( 0, stepListener );
    } else {
      sid.step.addStepListener( stepListener );
    }
  }

  if ( transMeta.isCapturingStepPerformanceSnapShots() ) {
    stepPerformanceSnapshotSeqNr = new AtomicInteger( 0 );
    stepPerformanceSnapShots = new ConcurrentHashMap<>();

    // Calculate the maximum number of snapshots to be kept in memory
    //
    String limitString = environmentSubstitute( transMeta.getStepPerformanceCapturingSizeLimit() );
    if ( Utils.isEmpty( limitString ) ) {
      limitString = EnvUtil.getSystemProperty( Const.KETTLE_STEP_PERFORMANCE_SNAPSHOT_LIMIT );
    }
    stepPerformanceSnapshotSizeLimit = Const.toInt( limitString, 0 );

    // Set a timer to collect the performance data from the running threads...
    //
    stepPerformanceSnapShotTimer = new Timer( "stepPerformanceSnapShot Timer: " + transMeta.getName() );
    TimerTask timerTask = new TimerTask() {
      @Override
      public void run() {
        if ( !isFinished() ) {
          addStepPerformanceSnapShot();
        }
      }
    };
    stepPerformanceSnapShotTimer.schedule( timerTask, 100, transMeta.getStepPerformanceCapturingDelay() );
  }

  // Now start a thread to monitor the running transformation...
  //
  setFinished( false );
  setPaused( false );
  setStopped( false );

  transFinishedBlockingQueue = new ArrayBlockingQueue<>( TRANS_FINISHED_BLOCKING_QUEUE_SIZE );

  TransListener transListener = new TransAdapter() {
    @Override
    public void transFinished( Trans trans ) {
      try {
        shutdownHeartbeat( trans != null ? trans.heartbeat : null );

        // Release the metadata file cache of a standalone top-level trans.
        if ( trans != null && transMeta.getParent() == null && trans.parentJob == null
            && trans.parentTrans == null ) {
          if ( log.isDetailed() && transMeta.getMetaFileCache() != null ) {
            transMeta.getMetaFileCache().logCacheSummary( log );
          }
          transMeta.setMetaFileCache( null );
        }

        ExtensionPointHandler.callExtensionPoint( log, KettleExtensionPoint.TransformationFinish.id, trans );
      } catch ( KettleException e ) {
        throw new RuntimeException( "Error calling extension point at end of transformation", e );
      }

      // First of all, stop the performance snapshot timer if there is is
      // one...
      //
      if ( transMeta.isCapturingStepPerformanceSnapShots() && stepPerformanceSnapShotTimer != null ) {
        stepPerformanceSnapShotTimer.cancel();
      }

      transMeta.disposeEmbeddedMetastoreProvider();

      setFinished( true );
      setRunning( false ); // no longer running

      log.snap( Metrics.METRIC_TRANSFORMATION_EXECUTION_STOP );

      // If the user ran with metrics gathering enabled and a metrics logging table is configured, add another
      // listener...
      //
      MetricsLogTable metricsLogTable = transMeta.getMetricsLogTable();
      if ( metricsLogTable.isDefined() ) {
        try {
          writeMetricsInformation();
        } catch ( Exception e ) {
          log.logError( "Error writing metrics information", e );
          errors.incrementAndGet();
        }
      }

      // Close the unique connections when running database transactionally.
      // This will commit or roll back the transaction based on the result of this transformation.
      //
      if ( transMeta.isUsingUniqueConnections() ) {
        trans.closeUniqueDatabaseConnections( getResult() );
      }

      // release unused vfs connections
      KettleVFS.freeUnusedResources();
    }
  };

  // This should always be done first so that the other listeners achieve a clean state to start from (setFinished and
  // so on)
  //
  transListeners.add( 0, transListener );

  setRunning( true );

  switch ( transMeta.getTransformationType() ) {
    case Normal:

      // Now start all the threads...
      //
      for ( int i = 0; i < steps.size(); i++ ) {
        final StepMetaDataCombi combi = steps.get( i );
        RunThread runThread = new RunThread( combi );
        Thread thread = new Thread( runThread );
        thread.setName( getName() + " - " + combi.stepname );
        ExtensionPointHandler.callExtensionPoint( log, KettleExtensionPoint.StepBeforeStart.id, combi );
        // Call an extension point at the end of the step
        //
        combi.step.addStepListener( new StepAdapter() {
          @Override
          public void stepFinished( Trans trans, StepMeta stepMeta, StepInterface step ) {
            try {
              ExtensionPointHandler.callExtensionPoint( log, KettleExtensionPoint.StepFinished.id, combi );
            } catch ( KettleException e ) {
              throw new RuntimeException( "Unexpected error in calling extension point upon step finish", e );
            }
          }
        } );
        thread.start();
      }
      break;

    case SerialSingleThreaded:
      new Thread( new Runnable() {
        @Override
        public void run() {
          try {
            // Always disable thread priority management, it will always slow us
            // down...
            //
            for ( StepMetaDataCombi combi : steps ) {
              combi.step.setUsingThreadPriorityManagment( false );
            }

            //
            // This is a single threaded version...
            //

            // Sort the steps from start to finish...
            //
            Collections.sort( steps, new Comparator<StepMetaDataCombi>() {
              @Override
              public int compare( StepMetaDataCombi c1, StepMetaDataCombi c2 ) {
                boolean c1BeforeC2 = transMeta.findPrevious( c2.stepMeta, c1.stepMeta );
                if ( c1BeforeC2 ) {
                  return -1;
                } else {
                  return 1;
                }
              }
            } );

            boolean[] stepDone = new boolean[ steps.size() ];
            int nrDone = 0;
            // Round-robin over the steps, one row at a time, until all report done.
            while ( nrDone < steps.size() && !isStopped() ) {
              for ( int i = 0; i < steps.size() && !isStopped(); i++ ) {
                StepMetaDataCombi combi = steps.get( i );
                if ( !stepDone[ i ] ) {
                  boolean cont = combi.step.processRow( combi.meta, combi.data );
                  if ( !cont ) {
                    stepDone[ i ] = true;
                    nrDone++;
                  }
                }
              }
            }
          } catch ( Exception e ) {
            errors.addAndGet( 1 );
            log.logError( "Error executing single threaded", e );
          } finally {
            for ( StepMetaDataCombi combi : steps ) {
              combi.step.dispose( combi.meta, combi.data );
              combi.step.markStop();
            }
          }
        }
      } ).start();
      break;

    case SingleThreaded:
      // Don't do anything, this needs to be handled by the transformation
      // executor!
      //
      break;
    default:
      break;

  }

  ExtensionPointHandler.callExtensionPoint( log, KettleExtensionPoint.TransformationStart.id, this );

  heartbeat = startHeartbeat( getHeartbeatIntervalInSeconds() );

  // With no steps at all, the finish listeners would never fire; do it now.
  if ( steps.isEmpty() ) {
    fireTransFinishedListeners();
  }

  if ( log.isDetailed() ) {
    log.logDetailed( BaseMessages.getString( PKG, "Trans.Log.TransformationHasAllocated", String.valueOf( steps
        .size() ), String.valueOf( rowsets.size() ) ) );
  }
}
@Test
public void testTransFinishListenersConcurrentModification() throws Exception {
    // Race one thread adding finish listeners against one firing them;
    // neither side may lose updates or hit ConcurrentModificationException.
    final CountDownLatch start = new CountDownLatch( 1 );
    final TransFinishListenerAdder adder = new TransFinishListenerAdder( trans, start );
    final TransFinishListenerFirer firer = new TransFinishListenerFirer( trans, start );

    startThreads( adder, firer, start );

    assertEquals( "All listeners are added: no ConcurrentModificationException", count, adder.c );
    assertEquals( "All Finish listeners are iterated over: no ConcurrentModificationException", count, firer.c );
}
/**
 * Injects schema information into CREATE SOURCE / CREATE ... AS SELECT
 * statements; all other statement types pass through untouched. KsqlExceptions
 * raised during injection are wrapped with the masked statement text.
 */
@SuppressWarnings("unchecked")
@Override
public <T extends Statement> ConfiguredStatement<T> inject(
    final ConfiguredStatement<T> statement
) {
  final boolean isCreateSource = statement.getStatement() instanceof CreateSource;
  final boolean isCreateAsSelect = statement.getStatement() instanceof CreateAsSelect;
  if (!isCreateSource && !isCreateAsSelect) {
    return statement;
  }

  try {
    if (isCreateSource) {
      final ConfiguredStatement<CreateSource> cs =
          (ConfiguredStatement<CreateSource>) statement;
      return (ConfiguredStatement<T>) forCreateStatement(cs).orElse(cs);
    }
    final ConfiguredStatement<CreateAsSelect> cas =
        (ConfiguredStatement<CreateAsSelect>) statement;
    return (ConfiguredStatement<T>) forCreateAsStatement(cas).orElse(cas);
  } catch (final KsqlStatementException e) {
    // Already carries statement context; rethrow unchanged.
    throw e;
  } catch (final KsqlException e) {
    throw new KsqlStatementException(
        ErrorMessageUtil.buildErrorMessage(e),
        statement.getMaskedStatementText(),
        e.getCause());
  }
}
/**
 * Verifies that for a CREATE TABLE with explicit key columns, value-schema
 * inference appends the inferred value columns while preserving the declared
 * key columns, and that the rewritten statement text reflects both.
 */
@Test
public void shouldInjectValueAndMaintainKeyColumnsForCt() {
    // Given: only value inference is available; the CT declares key columns.
    givenValueButNotKeyInferenceSupported();
    when(ct.getElements()).thenReturn(SOME_KEY_ELEMENTS_TABLE);

    // When:
    final ConfiguredStatement<CreateTable> result = injector.inject(ctStatement);

    // Then: declared keys come first, inferred value columns follow.
    assertThat(result.getStatement().getElements(),
        is(combineElements(SOME_KEY_ELEMENTS_TABLE, INFERRED_KSQL_VALUE_SCHEMA)));
    assertThat(result.getMaskedStatementText(), is(
        "CREATE TABLE `ct` ("
            + "`bob` STRING PRIMARY KEY, "
            + "`intField` INTEGER, "
            + "`bigIntField` BIGINT, "
            + "`doubleField` DOUBLE, "
            + "`stringField` STRING, "
            + "`booleanField` BOOLEAN, "
            + "`arrayField` ARRAY<INTEGER>, "
            + "`mapField` MAP<STRING, BIGINT>, "
            + "`structField` STRUCT<`s0` BIGINT>, "
            + "`decimalField` DECIMAL(4, 2)) "
            + "WITH (KAFKA_TOPIC='some-topic', KEY_FORMAT='kafka', VALUE_FORMAT='json_sr');"
    ));
}
/**
 * Before the action runs, logs a deprecation message for the endpoint itself
 * (when it is deprecated) and for each of its parameters as applicable.
 */
@Override
public void preAction(WebService.Action action, Request request) {
    final Level level = getLogLevel();
    final String deprecatedSince = action.deprecatedSince();
    if (deprecatedSince != null) {
        logWebServiceMessage(level, deprecatedSince);
    }
    for (WebService.Param param : action.params()) {
        logParamMessage(request, level, param);
    }
}
/**
 * When the request uses the NEW parameter key (and not the deprecated one),
 * no deprecation message must be logged at any level.
 */
@Test
public void preAction_whenNewParamWithDeprecatedKeyIsUsed_shouldLogNothing() {
    // Endpoint itself is not deprecated.
    WebService.Action action = mock(WebService.Action.class);
    when(action.deprecatedSince()).thenReturn(null);
    // Param has a deprecated *key* but the param itself is current.
    WebService.Param mockParam = mock(WebService.Param.class);
    when(mockParam.key()).thenReturn("sansTop25New");
    when(mockParam.deprecatedSince()).thenReturn(null);
    when(mockParam.deprecatedKeySince()).thenReturn("9.7");
    when(mockParam.deprecatedKey()).thenReturn("sansTop25");
    when(action.params()).thenReturn(List.of(mockParam));
    // Request supplies only the new key.
    Request request = mock(Request.class);
    when(request.hasParam("sansTop25New")).thenReturn(true);
    when(request.hasParam("sansTop25")).thenReturn(false);

    underTest.preAction(action, request);

    verifyNoDeprecatedMsgInLogs(Level.DEBUG);
    verifyNoDeprecatedMsgInLogs(Level.WARN);
}
/**
 * Registers a hook on the current thread's hook list, creating the list on
 * first use. The list lives in a ThreadLocal, so no synchronization is needed.
 *
 * @param transactionHook the hook to register; must not be null
 * @throws NullPointerException if {@code transactionHook} is null
 */
public static void registerHook(TransactionHook transactionHook) {
    if (transactionHook == null) {
        throw new NullPointerException("transactionHook must not be null");
    }
    // FIX: read the thread-local once instead of calling get() twice
    // (the original re-fetched the list after setting it).
    List<TransactionHook> transactionHooks = LOCAL_HOOKS.get();
    if (transactionHooks == null) {
        transactionHooks = new ArrayList<>();
        LOCAL_HOOKS.set(transactionHooks);
    }
    transactionHooks.add(transactionHook);
}
/**
 * Registering a null hook must be rejected with a NullPointerException.
 */
@Test
public void testNPE() {
    Assertions.assertThrows(NullPointerException.class, () -> TransactionHookManager.registerHook(null));
}
/**
 * Builds the effective configuration by overlaying parsed CLI options onto the
 * base configuration and pinning the deployment target to the Kubernetes
 * session executor.
 *
 * @param args raw command line arguments
 * @throws CliArgsException when the arguments cannot be parsed
 */
Configuration getEffectiveConfiguration(String[] args) throws CliArgsException {
    final CommandLine commandLine = cli.parseCommandLineOptions(args, true);

    final Configuration configuration = new Configuration(baseConfiguration);
    configuration.addAll(cli.toConfiguration(commandLine));
    // CLI-selected options never override the fixed execution target.
    configuration.set(DeploymentOptions.TARGET, KubernetesSessionClusterExecutor.NAME);

    return configuration;
}
/**
 * Verifies that total-process-memory values given with an explicit "m" unit
 * via -D flags are parsed into the expected megabyte sizes for both the
 * JobManager and the TaskManager in the resulting cluster specification.
 */
@Test
void testHeapMemoryPropertyWithUnitMB() throws Exception {
    final String[] args = new String[] {
        "-e", KubernetesSessionClusterExecutor.NAME,
        "-D" + JobManagerOptions.TOTAL_PROCESS_MEMORY.key() + "=1024m",
        "-D" + TaskManagerOptions.TOTAL_PROCESS_MEMORY.key() + "=2048m"
    };
    final KubernetesSessionCli cli = createFlinkKubernetesCustomCliWithJmAndTmTotalMemory(1024);
    final Configuration executorConfig = cli.getEffectiveConfiguration(args);
    final ClusterClientFactory<String> clientFactory = getClusterClientFactory(executorConfig);
    final ClusterSpecification clusterSpecification = clientFactory.getClusterSpecification(executorConfig);
    // -D flags take precedence over the 1024 default passed to the CLI factory.
    assertThat(clusterSpecification.getMasterMemoryMB()).isEqualTo(1024);
    assertThat(clusterSpecification.getTaskManagerMemoryMB()).isEqualTo(2048);
}
/**
 * Returns the Bot API method name for this request, as declared by the
 * class-level {@code PATH} constant.
 */
@Override
public String getMethod() {
    return PATH;
}
@Test
public void testGetChatMenuButtonForChat() {
    // Build a fully-populated request via the builder.
    final SetChatMenuButton method = SetChatMenuButton.builder()
            .chatId("123456")
            .menuButton(MenuButtonDefault.builder().build())
            .build();

    // API method name must match the Bot API endpoint, and a complete
    // request must pass validation.
    assertEquals("setChatMenuButton", method.getMethod());
    assertDoesNotThrow(method::validate);
}
/**
 * Maps a Xugu column definition to a SeaTunnel {@link Column}, translating the
 * vendor data type to the corresponding SeaTunnel type and carrying over
 * name, nullability, default value, comment, length, precision and scale.
 *
 * @param typeDefine the Xugu column metadata
 * @return the converted physical column
 * @throws io.seatunnel CommonError-raised runtime exception for unsupported types
 */
@Override
public Column convert(BasicTypeDefine typeDefine) {
    PhysicalColumn.PhysicalColumnBuilder builder =
            PhysicalColumn.builder()
                    .name(typeDefine.getName())
                    .sourceType(typeDefine.getColumnType())
                    .nullable(typeDefine.isNullable())
                    .defaultValue(typeDefine.getDefaultValue())
                    .comment(typeDefine.getComment());
    String xuguDataType = typeDefine.getDataType().toUpperCase();
    switch (xuguDataType) {
        case XUGU_BOOLEAN:
        case XUGU_BOOL:
            builder.dataType(BasicType.BOOLEAN_TYPE);
            break;
        case XUGU_TINYINT:
            builder.dataType(BasicType.BYTE_TYPE);
            break;
        case XUGU_SMALLINT:
            builder.dataType(BasicType.SHORT_TYPE);
            break;
        case XUGU_INT:
        case XUGU_INTEGER:
            builder.dataType(BasicType.INT_TYPE);
            break;
        case XUGU_BIGINT:
            builder.dataType(BasicType.LONG_TYPE);
            break;
        case XUGU_FLOAT:
            builder.dataType(BasicType.FLOAT_TYPE);
            break;
        case XUGU_DOUBLE:
            builder.dataType(BasicType.DOUBLE_TYPE);
            break;
        case XUGU_NUMBER:
        case XUGU_DECIMAL:
        case XUGU_NUMERIC:
            // Fall back to the converter defaults when precision is absent.
            DecimalType decimalType;
            if (typeDefine.getPrecision() != null && typeDefine.getPrecision() > 0) {
                decimalType =
                        new DecimalType(
                                typeDefine.getPrecision().intValue(), typeDefine.getScale());
            } else {
                decimalType = new DecimalType(DEFAULT_PRECISION, DEFAULT_SCALE);
            }
            builder.dataType(decimalType);
            builder.columnLength(Long.valueOf(decimalType.getPrecision()));
            builder.scale(decimalType.getScale());
            break;
        case XUGU_CHAR:
        case XUGU_NCHAR:
            builder.dataType(BasicType.STRING_TYPE);
            // Missing length: treat as a single character (4-byte worst case).
            if (typeDefine.getLength() == null || typeDefine.getLength() <= 0) {
                builder.columnLength(TypeDefineUtils.charTo4ByteLength(1L));
            } else {
                builder.columnLength(typeDefine.getLength());
            }
            break;
        case XUGU_VARCHAR:
        case XUGU_VARCHAR2:
            builder.dataType(BasicType.STRING_TYPE);
            // Missing length: assume the vendor maximum.
            if (typeDefine.getLength() == null || typeDefine.getLength() <= 0) {
                builder.columnLength(TypeDefineUtils.charTo4ByteLength(MAX_VARCHAR_LENGTH));
            } else {
                builder.columnLength(typeDefine.getLength());
            }
            break;
        case XUGU_CLOB:
            builder.dataType(BasicType.STRING_TYPE);
            builder.columnLength(BYTES_2GB - 1);
            break;
        case XUGU_JSON:
        case XUGU_GUID:
            builder.dataType(BasicType.STRING_TYPE);
            break;
        case XUGU_BINARY:
            builder.dataType(PrimitiveByteArrayType.INSTANCE);
            builder.columnLength(MAX_BINARY_LENGTH);
            break;
        case XUGU_BLOB:
            builder.dataType(PrimitiveByteArrayType.INSTANCE);
            builder.columnLength(BYTES_2GB - 1);
            break;
        case XUGU_DATE:
            builder.dataType(LocalTimeType.LOCAL_DATE_TYPE);
            break;
        case XUGU_TIME:
        case XUGU_TIME_WITH_TIME_ZONE:
            builder.dataType(LocalTimeType.LOCAL_TIME_TYPE);
            break;
        case XUGU_DATETIME:
        case XUGU_DATETIME_WITH_TIME_ZONE:
            builder.dataType(LocalTimeType.LOCAL_DATE_TIME_TYPE);
            break;
        case XUGU_TIMESTAMP:
        case XUGU_TIMESTAMP_WITH_TIME_ZONE:
            builder.dataType(LocalTimeType.LOCAL_DATE_TIME_TYPE);
            if (typeDefine.getScale() == null) {
                builder.scale(TIMESTAMP_DEFAULT_SCALE);
            } else {
                builder.scale(typeDefine.getScale());
            }
            break;
        default:
            throw CommonError.convertToSeaTunnelTypeError(
                    DatabaseIdentifier.XUGU, xuguDataType, typeDefine.getName());
    }
    return builder.build();
}
@Test public void testConvertUnsupported() { BasicTypeDefine<Object> typeDefine = BasicTypeDefine.builder().name("test").columnType("aaa").dataType("aaa").build(); try { XuguTypeConverter.INSTANCE.convert(typeDefine); Assertions.fail(); } catch (SeaTunnelRuntimeException e) { // ignore } catch (Throwable e) { Assertions.fail(); } }
static Map<String, Object> appendSerializerToConfig(Map<String, Object> configs, Serializer<?> keySerializer, Serializer<?> valueSerializer) { // validate serializer configuration, if the passed serializer instance is null, the user must explicitly set a valid serializer configuration value Map<String, Object> newConfigs = new HashMap<>(configs); if (keySerializer != null) newConfigs.put(KEY_SERIALIZER_CLASS_CONFIG, keySerializer.getClass()); else if (newConfigs.get(KEY_SERIALIZER_CLASS_CONFIG) == null) throw new ConfigException(KEY_SERIALIZER_CLASS_CONFIG, null, "must be non-null."); if (valueSerializer != null) newConfigs.put(VALUE_SERIALIZER_CLASS_CONFIG, valueSerializer.getClass()); else if (newConfigs.get(VALUE_SERIALIZER_CLASS_CONFIG) == null) throw new ConfigException(VALUE_SERIALIZER_CLASS_CONFIG, null, "must be non-null."); return newConfigs; }
/**
 * Covers all four combinations of serializer sources: both from config, key
 * instance + value config, key config + value instance, and both instances.
 * In every case the resulting map must carry both serializer classes.
 */
@Test
public void testAppendSerializerToConfig() {
    // Both serializers supplied via config only.
    Map<String, Object> configs = new HashMap<>();
    configs.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, keySerializerClass);
    configs.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, valueSerializerClass);
    Map<String, Object> newConfigs = ProducerConfig.appendSerializerToConfig(configs, null, null);
    assertEquals(newConfigs.get(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG), keySerializerClass);
    assertEquals(newConfigs.get(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG), valueSerializerClass);

    // Key from instance, value from config.
    configs.clear();
    configs.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, valueSerializerClass);
    newConfigs = ProducerConfig.appendSerializerToConfig(configs, keySerializer, null);
    assertEquals(newConfigs.get(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG), keySerializerClass);
    assertEquals(newConfigs.get(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG), valueSerializerClass);

    // Key from config, value from instance.
    configs.clear();
    configs.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, keySerializerClass);
    newConfigs = ProducerConfig.appendSerializerToConfig(configs, null, valueSerializer);
    assertEquals(newConfigs.get(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG), keySerializerClass);
    assertEquals(newConfigs.get(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG), valueSerializerClass);

    // Both from instances; empty config.
    configs.clear();
    newConfigs = ProducerConfig.appendSerializerToConfig(configs, keySerializer, valueSerializer);
    assertEquals(newConfigs.get(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG), keySerializerClass);
    assertEquals(newConfigs.get(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG), valueSerializerClass);
}
/**
 * Reads the named field as a short. A missing field yields the default value 0;
 * a BYTE field is widened to short for compatibility with evolved class
 * definitions; any other declared type is an incompatible change.
 *
 * @param fieldName the portable field name
 * @throws IncompatibleClassChangeError when the field exists but is neither
 *         SHORT nor BYTE
 */
@Override
public short readShort(@Nonnull String fieldName) throws IOException {
    FieldDefinition fd = cd.getField(fieldName);
    if (fd == null) {
        // Unknown field: default value, matching portable evolution semantics.
        return 0;
    }
    switch (fd.getType()) {
        case SHORT:
            return super.readShort(fieldName);
        case BYTE:
            // Widening byte -> short is always safe.
            return super.readByte(fieldName);
        default:
            throw createIncompatibleClassChangeError(fd, SHORT);
    }
}
/**
 * Reading a string-typed field as a short must raise
 * IncompatibleClassChangeError.
 */
@Test(expected = IncompatibleClassChangeError.class)
public void testReadShort_IncompatibleClass() throws Exception {
    reader.readShort("string");
}
/**
 * Accumulates another metric's sum, count, max and min into this one.
 * A null argument is a no-op; mismatched units are a programming error.
 *
 * @param metric the metric to merge, or null
 * @throws IllegalStateException if the units differ
 */
public void mergeWith(RuntimeMetric metric)
{
    if (metric == null) {
        return;
    }
    checkState(unit == metric.getUnit(), "The metric to be merged must have the same unit type as the current one.");
    // Each field merges atomically, but the four updates together are not one
    // atomic operation; concurrent readers may observe a partial merge.
    sum.addAndGet(metric.getSum());
    count.addAndGet(metric.getCount());
    max.accumulateAndGet(metric.getMax(), Math::max);
    min.accumulateAndGet(metric.getMin(), Math::min);
}
/**
 * Merging metrics with different units (NANO vs BYTE) must fail with
 * IllegalStateException.
 */
@Test(expectedExceptions = {IllegalStateException.class})
public void testMergeWithWithConflictUnits()
{
    RuntimeMetric metric1 = new RuntimeMetric(TEST_METRIC_NAME, NANO, 5, 2, 4, 1);
    RuntimeMetric metric2 = new RuntimeMetric(TEST_METRIC_NAME, BYTE, 20, 2, 11, 9);
    metric1.mergeWith(metric2);
}
/**
 * Returns whether this runtime can evaluate the given input, i.e. whether an
 * executable model or a redirect is registered for the input's model URI.
 *
 * @param toEvaluate     the input whose model URI is checked
 * @param runtimeContext the context providing the generated-resources lookup
 */
public static boolean canManageEfestoInput(EfestoInput toEvaluate, EfestoRuntimeContext runtimeContext) {
    return isPresentExecutableOrRedirect(toEvaluate.getModelLocalUriId(), runtimeContext);
}
/**
 * A PMML input whose model URI points at a known compiled model must be
 * reported as manageable by the runtime helper.
 */
@Test
void canManageEfestoInput() {
    // Model URI derived from a PMML file/model pair present in test resources.
    modelLocalUriId = getModelLocalUriIdFromPmmlIdFactory(FILE_NAME, MODEL_NAME);
    EfestoRuntimeContext runtimeContext =
            EfestoRuntimeContextUtils.buildWithParentClassLoader(Thread.currentThread().getContextClassLoader());
    PMMLRequestData pmmlRequestData = new PMMLRequestData();
    EfestoInput<PMMLRequestData> inputPMML = new BaseEfestoInput<>(modelLocalUriId, pmmlRequestData);
    assertThat(PMMLRuntimeHelper.canManageEfestoInput(inputPMML, runtimeContext)).isTrue();
}
/**
 * Creates a TableElements from the given elements. Backed by an immutable
 * copy; validation (e.g. duplicate-name checks) is performed by the
 * TableElements constructor.
 */
public static TableElements of(final TableElement... elements) {
    return new TableElements(ImmutableList.copyOf(elements));
}
@Test public void shouldThrowOnDuplicateValueColumns() { // Given: final List<TableElement> elements = ImmutableList.of( tableElement("v0", INT_TYPE), tableElement("v0", INT_TYPE), tableElement("v1", INT_TYPE), tableElement("v1", INT_TYPE) ); // When: final Exception e = assertThrows( KsqlException.class, () -> TableElements.of(elements) ); // Then: assertThat(e.getMessage(), containsString( "Duplicate column names:")); assertThat(e.getMessage(), containsString( "v0")); assertThat(e.getMessage(), containsString( "v1")); }
/**
 * Returns the value boxed as the requested type. Only Object, double and
 * Double are supported; any other type yields an empty Optional.
 *
 * @param type the requested value type; must not be null
 */
@Override
public <T> Optional<T> valueAs(Class<T> type) {
    checkNotNull(type);

    final boolean convertible =
        type == Object.class || type == double.class || type == Double.class;
    if (!convertible) {
        return Optional.empty();
    }

    // Safe: for all accepted types the boxed Double is assignable to T.
    @SuppressWarnings("unchecked")
    final T boxed = (T) Double.valueOf(this.value);
    return Optional.of(boxed);
}
// A continuous bandwidth resource should expose its numeric value as a Double.
@Test public void testValueAsDouble() { ContinuousResource resource = Resources.continuous(D1, P1, Bandwidth.class) .resource(BW1.bps()); Optional<Double> value = resource.valueAs(Double.class); assertThat(value.get(), is(BW1.bps())); }
/**
 * Registers a listener for plugin load/unload events. Pure delegation to the
 * underlying plugin loader, which owns the listener list.
 */
@Override public void addPluginChangeListener(PluginChangeListener pluginChangeListener) { pluginLoader.addPluginChangeListener(pluginChangeListener); }
// Registration must be forwarded verbatim to the plugin loader (verified via mock).
@Test void shouldAllowRegistrationOfPluginChangeListeners() { PluginManager pluginManager = new DefaultPluginManager(monitor, registry, goPluginOSGiFramework, jarChangeListener, null, systemEnvironment, pluginLoader); final PluginChangeListener pluginChangeListener = mock(PluginChangeListener.class); pluginManager.addPluginChangeListener(pluginChangeListener); verify(pluginLoader).addPluginChangeListener(pluginChangeListener); }
/**
 * Populates the ZooKeeper server config builder from the configured config servers.
 * Requires the host list and zookeeper-id list to be the same length and all ids
 * non-negative. Sets this host's own id (myid) when its hostname matches an entry,
 * registers every server, and applies optional client port, hosted-Vespa TLS config,
 * dynamic-reconfiguration flags, snapshot method and jute max buffer.
 */
@Override
public void getConfig(ZookeeperServerConfig.Builder builder) {
    ConfigServer[] configServers = getConfigServers();
    int[] zookeeperIds = getConfigServerZookeeperIds();
    if (configServers.length != zookeeperIds.length) {
        throw new IllegalArgumentException(String.format("Number of provided config server hosts (%d) must be the " +
                "same as number of provided config server zookeeper ids (%d)", configServers.length, zookeeperIds.length));
    }
    String myhostname = HostName.getLocalhost();
    // TODO: Server index should be in interval [1, 254] according to doc,
    // however, we cannot change this id for an existing server
    for (int i = 0; i < configServers.length; i++) {
        if (zookeeperIds[i] < 0) {
            throw new IllegalArgumentException(String.format("Zookeeper ids cannot be negative, was %d for %s", zookeeperIds[i], configServers[i].hostName));
        }
        if (configServers[i].hostName.equals(myhostname)) {
            builder.myid(zookeeperIds[i]);
        }
        builder.server(getZkServer(configServers[i], zookeeperIds[i]));
    }
    if (options.zookeeperClientPort().isPresent()) {
        builder.clientPort(options.zookeeperClientPort().get());
    }
    if (options.hostedVespa().orElse(false)) {
        builder.vespaTlsConfigFile(Defaults.getDefaults().underVespaHome("var/zookeeper/conf/tls.conf.json"));
    }
    boolean isHostedVespa = options.hostedVespa().orElse(false);
    builder.dynamicReconfiguration(isHostedVespa);
    builder.reconfigureEnsemble(!isHostedVespa);
    builder.snapshotMethod(options.zooKeeperSnapshotMethod());
    builder.juteMaxBuffer(options.zookeeperJuteMaxBuffer());
}
// Default health-monitor snapshot interval is expected to be 60 seconds.
@Test void testHealthMonitorConfig() { HealthMonitorConfig config = getConfig(HealthMonitorConfig.class); assertEquals(60, (int) config.snapshot_interval()); }
/**
 * Loads a single value by key via SQL. Blocks until initialization has succeeded,
 * runs the configured load query, and enforces that at most one row matches
 * (throws IllegalStateException otherwise). Returns null when no row is found.
 * The SqlResult is closed by try-with-resources.
 */
@Override
public V load(K key) {
    awaitSuccessfulInit();
    try (SqlResult queryResult = sqlService.execute(queries.load(), key)) {
        Iterator<SqlRow> it = queryResult.iterator();
        V value = null;
        if (it.hasNext()) {
            SqlRow sqlRow = it.next();
            if (it.hasNext()) {
                throw new IllegalStateException("multiple matching rows for a key " + key);
            }
            // If there is a single column as the value, return that column as the value
            if (queryResult.getRowMetadata().getColumnCount() == 2 && genericMapStoreProperties.singleColumnAsValue) {
                value = sqlRow.getObject(1);
            } else {
                //noinspection unchecked
                value = (V) toGenericRecord(sqlRow, genericMapStoreProperties);
            }
        }
        return value;
    }
}
// Loading key 0 from a pre-populated table should return a GenericRecord with the row's fields.
@Test public void givenRow_whenLoad_thenReturnGenericRecord() { ObjectSpec spec = objectProvider.createObject(mapName, false); objectProvider.insertItems(spec, 1); mapLoader = createMapLoader(); GenericRecord genericRecord = mapLoader.load(0); assertThat(genericRecord.getInt32("id")).isZero(); assertThat(genericRecord.getString("name")).isEqualTo("name-0"); }
/**
 * Uploads a local file to COS under the given object key, using the bucket from
 * the client configuration. Returns the COS put-object result.
 */
public PutObjectResult putObject(String key, String localFilePath) {
    final File localFile = new File(localFilePath);
    final PutObjectRequest request = new PutObjectRequest(cosClientConfig.getBucket(), key, localFile);
    return cosClient.putObject(request);
}
// Smoke test: uploads a local file under key "test"; passes if no exception is thrown.
@Test void putObject() { cosManager.putObject("test", "test.json"); }
/**
 * Deserializes a Metadata object from JSON read from the given reader.
 * The parser's max string length is capped by the Tika configuration, and the
 * caller's reader is shielded from being closed by the parser (CloseShieldReader).
 *
 * @throws IOException on malformed JSON or read failure
 */
public static Metadata fromJson(Reader reader) throws IOException { Metadata m = null; try (JsonParser jParser = new JsonFactory() .setStreamReadConstraints(StreamReadConstraints .builder() .maxStringLength(TikaConfig.getMaxJsonStringFieldLength()) .build()) .createParser(CloseShieldReader.wrap(reader))) { m = readMetadataObject(jParser); } return m; }
// Malformed JSON (unquoted 500,000) must surface as an IOException from fromJson.
@Test
public void testDeserializationException() {
    // malformed json; 500,000 should be in quotes
    String json = "{\"k1\":[\"v1\",\"v2\"],\"k3\":\"v3\",\"k4\":500,000}";
    boolean ex = false;
    try {
        Metadata deserialized = JsonMetadata.fromJson(new StringReader(json));
    } catch (IOException e) {
        ex = true;
    }
    assertTrue(ex);
}
/**
 * Creates a new post from the request VO and returns the generated id.
 * Validation of name/code is delegated before insertion.
 */
@Override
public Long createPost(PostSaveReqVO createReqVO) {
    // Validate correctness (delegated name/code checks)
    validatePostForCreateOrUpdate(null, createReqVO.getName(), createReqVO.getCode());
    // Insert the post
    PostDO post = BeanUtils.toBean(createReqVO, PostDO.class);
    postMapper.insert(post);
    return post.getId();
}
// Creating a post whose name already exists must raise POST_NAME_DUPLICATE.
@Test
public void testValidatePost_nameDuplicateForCreate() {
    // Mock data: seed one existing post first
    PostDO postDO = randomPostDO();
    postMapper.insert(postDO);
    // Prepare a request that reuses the existing name
    PostSaveReqVO reqVO = randomPojo(PostSaveReqVO.class,
            o -> o.setName(postDO.getName()));
    assertServiceException(() -> postService.createPost(reqVO), POST_NAME_DUPLICATE);
}
/**
 * Serializes this object to JSON, then compacts the first JSON array (the histogram)
 * onto a single line by stripping the line breaks Gson inserted between entries.
 * NOTE(review): relies on the first '[' / ']' pair in the output being the histogram
 * array, and on String.replace targeting that exact substring — confirm the serialized
 * form always contains exactly one array.
 */
@Override
public String asJson() {
    // use the static Gson to prevent frequent use of expensive java.lang.reflect calls
    String json = GSON_CONVERTER.toJson(this);
    // Adjust the JSON string to remove all breaklines between entries in the histogram array
    int startIndex = json.indexOf('[') + 1;
    int endIndex = json.indexOf(']');
    String histString = json.substring(startIndex, endIndex);
    String histStringWithoutBreaklines = histString.replace("\n ", "").trim();
    return json.replace(histString, histStringWithoutBreaklines);
}
// Round-trip check: serializing the example object must yield the canonical example JSON.
@Test public void convertDailyMetricsToJson() { DailyMetrics dailyMetrics = testExampleAsObject(); assertEquals(dailyMetrics.asJson(), testExampleAsJson()); }
/**
 * Locates all distinct Kafka Streams hosts across every persistent query.
 * Returns an empty list when there are no queries. Null per-query host metadata
 * is skipped; each host is mapped to a KsqlNode flagged local when it matches
 * this instance, and duplicates are removed via distinct().
 */
public List<KsqlNode> locate() { final List<PersistentQueryMetadata> currentQueries = allPersistentQueries.get(); if (currentQueries.isEmpty()) { return Collections.emptyList(); } return currentQueries.stream() .map(QueryMetadata::getAllStreamsHostMetadata) .filter(Objects::nonNull) .flatMap(Collection::stream) .map(StreamsMetadata::hostInfo) .map(hi -> new Node(isLocalhost(hi), buildLocation(hi))) .distinct() .collect(Collectors.toList()); }
// Locating across two queries with overlapping hosts must deduplicate and mark the
// localhost:8088 node (matching the locator's own URL) as local.
@Test
public void shouldLocate() throws MalformedURLException {
    // Given:
    final AllHostsLocator locator = new AllHostsLocator(
            () -> ImmutableList.of(metadata1, metadata2), new URL("http://localhost:8088"));
    when(metadata1.getAllStreamsHostMetadata())
            .thenReturn(ImmutableList.of(streamsMetadata1, streamsMetadata2));
    when(metadata2.getAllStreamsHostMetadata())
            .thenReturn(ImmutableList.of(streamsMetadata3));
    when(streamsMetadata1.hostInfo())
            .thenReturn(new HostInfo("abc", 101), new HostInfo("localhost", 8088));
    when(streamsMetadata2.hostInfo()).thenReturn(new HostInfo("localhost", 8088));
    when(streamsMetadata3.hostInfo()).thenReturn(new HostInfo("localhost", 8089));
    // When:
    final List<KsqlNode> nodes = ImmutableList.copyOf(locator.locate());
    // Then:
    assertThat(nodes.size(), is(3));
    assertThat(nodes.get(0).isLocal(), is(false));
    assertThat(nodes.get(0).location().toString(), is("http://abc:101/"));
    assertThat(nodes.get(1).isLocal(), is(true));
    assertThat(nodes.get(1).location().toString(), is("http://localhost:8088/"));
    assertThat(nodes.get(2).isLocal(), is(false));
    assertThat(nodes.get(2).location().toString(), is("http://localhost:8089/"));
}
/**
 * Resolves the remote home directory by canonicalizing "." over SFTP.
 * When the canonical path equals the path delimiter it is typed as a volume root,
 * otherwise as a plain directory. IOExceptions are mapped to BackgroundException.
 */
@Override
public Path find() throws BackgroundException {
    try {
        // "." as referring to the current directory
        final String directory = session.sftp().canonicalize(".");
        return new Path(PathNormalizer.normalize(directory), directory.equals(String.valueOf(Path.DELIMITER))
                ? EnumSet.of(Path.Type.volume, Path.Type.directory) : EnumSet.of(Path.Type.directory));
    } catch(IOException e) {
        throw new SFTPExceptionMappingService().map(e);
    }
}
// The test session's home directory is expected to resolve to "/".
@Test public void testFind() throws Exception { assertEquals(new Path("/", EnumSet.of(Path.Type.directory)), new SFTPHomeDirectoryService(session).find()); }
/**
 * Strips DWG/MTEXT inline formatting codes from extracted text, applying passes in order:
 * underline/overstrike/strike-through toggles, semicolon-terminated markers, \P newline
 * markers (replaced with '\n'), stacked fractions (rewritten as "a/b"), unescaped curly
 * braces, remaining escape characters, and finally unescaping of doubled backslashes.
 * Each pass skips matches preceded by a backslash (escaped sequences), checked via group(1).
 * The pass order matters: escape characters must be removed before backslash unescaping.
 */
public String cleanupDwgString(String dwgString) {
    String cleanString = dwgString;
    StringBuilder sb = new StringBuilder();
    // Strip off start/stop underline/overstrike/strike throughs
    Matcher m = Pattern.compile(underlineStrikeThrough).matcher(cleanString);
    while (m.find()) {
        if (! m.group(1).endsWith("\\")) {
            m.appendReplacement(sb, "");
        }
    }
    m.appendTail(sb);
    cleanString = sb.toString();
    // Strip off semi-colon ended markers
    m = Pattern.compile(endMarks).matcher(cleanString);
    sb.setLength(0);
    while (m.find()) {
        if (! m.group(1).endsWith("\\")) {
            m.appendReplacement(sb, "");
        }
    }
    m.appendTail(sb);
    cleanString = sb.toString();
    // new line marker \P: replace with actual new line
    m = Pattern.compile(newLine).matcher(cleanString);
    sb.setLength(0);
    while (m.find()) {
        if (m.group(1).endsWith("P")) {
            m.appendReplacement(sb, "\n");
        }
    }
    m.appendTail(sb);
    cleanString = sb.toString();
    // stacking fractions
    m = Pattern.compile(stackFrac).matcher(cleanString);
    sb.setLength(0);
    while (m.find()) {
        if (m.group(1) == null) {
            m.appendReplacement(sb, m.group(2) + "/" + m.group(3));
        }
    }
    m.appendTail(sb);
    cleanString = sb.toString();
    // strip brackets around text, make sure they aren't escaped
    m = Pattern.compile(curlyBraces).matcher(cleanString);
    sb.setLength(0);
    while (m.find()) {
        if (m.group(1) == null) {
            m.appendReplacement(sb, "");
        }
    }
    m.appendTail(sb);
    cleanString = sb.toString();
    // now get rid of escape characters
    cleanString = cleanString.replaceAll(escapeChars, "");
    // now unescape backslash
    cleanString = cleanString.replaceAll("(\\\\\\\\)", "\\\\");
    return cleanString;
}
// Doubled backslashes must survive as a single backslash while formatting codes are stripped.
@Test public void testEscapedSlashes() { String formatted = "the quick \\\\ \\A3;\\fAIGDT|b0|i0;\\H2.5000;brown fox"; DWGReadFormatRemover dwgReadFormatter = new DWGReadFormatRemover(); String expected = "the quick \\ brown fox"; assertEquals(expected, dwgReadFormatter.cleanupDwgString(formatted)); }
/**
 * Returns the wrapper for the requested plugin, but only when the caller asks for
 * its own plugin id; any foreign id is rejected with an IllegalAccessError so that
 * plugins cannot reach into each other through this wrapped manager.
 */
@Override
public PluginWrapper getPlugin(String pluginId) {
    if (!currentPluginId.equals(pluginId)) {
        throw new IllegalAccessError(PLUGIN_PREFIX + currentPluginId + " tried to execute getPlugin for foreign pluginId!");
    }
    return original.getPlugin(pluginId);
}
// Asking for a foreign plugin id must throw; asking for the own id must succeed.
@Test public void getPlugin() { pluginManager.loadPlugins(); assertThrows(IllegalAccessError.class, () -> wrappedPluginManager.getPlugin(OTHER_PLUGIN_ID)); assertEquals(THIS_PLUGIN_ID, wrappedPluginManager.getPlugin(THIS_PLUGIN_ID).getPluginId()); }
/**
 * Creates an independent copy of this accumulator carrying the same current minimum.
 */
@Override
public LongMinimum clone() {
    final LongMinimum copy = new LongMinimum();
    copy.min = this.min;
    return copy;
}
// A clone must carry the same accumulated minimum as the original.
@Test void testClone() { LongMinimum min = new LongMinimum(); long value = 4242424242424242L; min.add(value); LongMinimum clone = min.clone(); assertThat(clone.getLocalValue().longValue()).isEqualTo(value); }
/**
 * Confirms a meeting's schedule. Resolves the meeting by UUID and the attendee by id,
 * then validates in order: the attendee is the host, the meeting is not already
 * confirmed, the meeting is locked, and the requested time and date ranges are valid
 * for the meeting. On success persists the ConfirmedMeeting and returns its response.
 * Each failed lookup/validation raises a MomoException with a specific error code.
 */
@Transactional public MeetingConfirmResponse create(String uuid, long attendeeId, MeetingConfirmRequest request) { LocalDateTime startDateTime = request.toStartDateTime(); LocalDateTime endDateTime = request.toEndDateTime(); Meeting meeting = meetingRepository.findByUuid(uuid) .orElseThrow(() -> new MomoException(MeetingErrorCode.INVALID_UUID)); Attendee attendee = attendeeRepository.findByIdAndMeeting(attendeeId, meeting) .orElseThrow(() -> new MomoException(AttendeeErrorCode.INVALID_ATTENDEE)); validateHostPermission(attendee); validateNotAlreadyConfirmed(meeting); validateMeetingLocked(meeting); validateTimeRange(meeting, startDateTime, endDateTime); validateDateRange(meeting, startDateTime, endDateTime); ConfirmedMeeting confirmedMeeting = new ConfirmedMeeting(meeting, startDateTime, endDateTime); confirmedMeetingRepository.save(confirmedMeeting); return MeetingConfirmResponse.from(confirmedMeeting); }
// Confirming with a date outside the meeting's available dates must fail with INVALID_DATETIME_RANGE.
@DisplayName("약속에 존재하지 않는 날짜로 일정을 확정 시 예외가 발생한다.") @Test void confirmScheduleThrowsExceptionWhen_InvalidDate() { LocalDate invalidDate = LocalDate.now().plusDays(30); MeetingConfirmRequest request = new MeetingConfirmRequest( invalidDate, Timeslot.TIME_0100.startTime(), invalidDate, Timeslot.TIME_0130.startTime() ); assertThatThrownBy(() -> meetingConfirmService.create(meeting.getUuid(), attendee.getId(), request)) .isInstanceOf(MomoException.class) .hasMessage(MeetingErrorCode.INVALID_DATETIME_RANGE.message()); }
/**
 * Builds cluster allocation disk settings. When thresholds are disabled the watermark
 * settings are omitted (null); otherwise they are derived from the low/high/flood-stage
 * strings.
 */
public static ClusterAllocationDiskSettings create(boolean enabled, String low, String high, String floodStage) {
    return enabled
            ? ClusterAllocationDiskSettings.create(enabled, createWatermarkSettings(low, high, floodStage))
            : ClusterAllocationDiskSettings.create(enabled, null);
}
// Percentage-style watermark strings ("75%", ...) should produce PercentageWatermarkSettings
// with the parsed numeric thresholds.
@Test public void createPercentageWatermarkSettings() throws Exception { ClusterAllocationDiskSettings settings = ClusterAllocationDiskSettingsFactory.create(true, "75%", "85%", "99%"); assertThat(settings).isInstanceOf(ClusterAllocationDiskSettings.class); assertThat(settings.ThresholdEnabled()).isTrue(); assertThat(settings.watermarkSettings()).isInstanceOf(PercentageWatermarkSettings.class); assertThat(settings.watermarkSettings().type()).isEqualTo(WatermarkSettings.SettingsType.PERCENTAGE); assertThat(settings.watermarkSettings().low()).isEqualTo(75D); assertThat(settings.watermarkSettings().high()).isEqualTo(85D); assertThat(settings.watermarkSettings().floodStage()).isEqualTo(99D); }
/**
 * Evicts every blocked node whose end timestamp has been reached (endTimestamp <=
 * currentTimestamp) from the tracker and returns the evicted nodes.
 */
@Override
public Collection<BlockedNode> removeTimeoutNodes(long currentTimestamp) {
    final Collection<BlockedNode> removedNodes = new ArrayList<>();
    blockedNodes.values().removeIf(blockedNode -> {
        if (currentTimestamp < blockedNode.getEndTimestamp()) {
            return false;
        }
        removedNodes.add(blockedNode);
        return true;
    });
    return removedNodes;
}
// At timestamp 2, nodes ending at 1 and 2 are evicted; the node ending at 3 remains.
@Test void testRemoveTimeoutNodes() { BlockedNode blockedNode1 = new BlockedNode("node1", "cause1", 1L); BlockedNode blockedNode2 = new BlockedNode("node2", "cause1", 2L); BlockedNode blockedNode3 = new BlockedNode("node3", "cause1", 3L); BlocklistTracker blocklistTracker = new DefaultBlocklistTracker(); blocklistTracker.addNewBlockedNodes( Arrays.asList(blockedNode1, blockedNode2, blockedNode3)); assertThat(blocklistTracker.getAllBlockedNodeIds()) .containsExactlyInAnyOrder("node1", "node2", "node3"); Collection<BlockedNode> removedNodes = blocklistTracker.removeTimeoutNodes(2L); assertThat(removedNodes).containsExactlyInAnyOrder(blockedNode1, blockedNode2); assertThat(blocklistTracker.getAllBlockedNodeIds()).containsExactlyInAnyOrder("node3"); }
/**
 * Returns the element stored at the given index. No bounds checking beyond the
 * array's own; an out-of-range index raises ArrayIndexOutOfBoundsException.
 */
public SelType get(int idx) { return val[idx]; }
// Element at index 1 of the fixture is expected to be the STRING "bar".
@Test public void testGet() { assertEquals("STRING: bar", one.get(1).type() + ": " + one.get(1)); }
/**
 * Determines the TaskManager's ResourceID. If TASK_MANAGER_RESOURCE_ID is configured,
 * that value is used and wrapped as deterministic. Otherwise a nondeterministic id is
 * generated: "<host>-<6-char random>" when no RPC address is set, else
 * "<rpcAddress>:<rpcPort>-<6-char random>". Metadata from
 * TASK_MANAGER_RESOURCE_ID_METADATA (default "") is attached in both cases.
 */
@VisibleForTesting static DeterminismEnvelope<ResourceID> getTaskManagerResourceID( Configuration config, String rpcAddress, int rpcPort) { final String metadata = config.get(TaskManagerOptionsInternal.TASK_MANAGER_RESOURCE_ID_METADATA, ""); return config.getOptional(TaskManagerOptions.TASK_MANAGER_RESOURCE_ID) .map( value -> DeterminismEnvelope.deterministicValue( new ResourceID(value, metadata))) .orElseGet( FunctionUtils.uncheckedSupplier( () -> { final String hostName = InetAddress.getLocalHost().getHostName(); final String value = StringUtils.isNullOrWhitespaceOnly(rpcAddress) ? hostName + "-" + new AbstractID() .toString() .substring(0, 6) : rpcAddress + ":" + rpcPort + "-" + new AbstractID() .toString() .substring(0, 6); return DeterminismEnvelope.nondeterministicValue( new ResourceID(value, metadata)); })); }
// When the resource id is set in the configuration, it must be used verbatim.
@Test void testGenerateTaskManagerResourceIDWithConfig() throws Exception { final Configuration configuration = createConfiguration(); final String resourceID = "test"; configuration.set(TaskManagerOptions.TASK_MANAGER_RESOURCE_ID, resourceID); final ResourceID taskManagerResourceID = TaskManagerRunner.getTaskManagerResourceID(configuration, "", -1).unwrap(); assertThat(taskManagerResourceID.getResourceIdString()).isEqualTo(resourceID); }
/**
 * FEEL includes(range, point): true when the point lies strictly inside the range, or
 * sits on a CLOSED boundary endpoint. Null range or point yields an InvalidParametersEvent
 * error result. Any comparison failure (incomparable types — and, by the same catch,
 * a range with null endpoints) is reported as "point cannot be compared to range"
 * rather than propagating the exception.
 */
public FEELFnResult<Boolean> invoke(@ParameterName( "range" ) Range range, @ParameterName( "point" ) Comparable point) { if ( point == null ) { return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "point", "cannot be null")); } if ( range == null ) { return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "range", "cannot be null")); } try { boolean result = (range.getLowEndPoint().compareTo(point) < 0 && range.getHighEndPoint().compareTo(point) > 0) || (range.getLowEndPoint().compareTo(point) == 0 && range.getLowBoundary() == RangeBoundary.CLOSED) || (range.getHighEndPoint().compareTo(point) == 0 && range.getHighBoundary() == RangeBoundary.CLOSED); return FEELFnResult.ofResult( result ); } catch( Exception e ) { // points are not comparable return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "point", "cannot be compared to range")); } }
// Null range or null point must produce an InvalidParametersEvent error result.
@Test void invokeParamIsNull() { FunctionTestUtil.assertResultError(includesFunction.invoke(null, "b"), InvalidParametersEvent.class); FunctionTestUtil.assertResultError(includesFunction.invoke(new RangeImpl(), (Comparable) null), InvalidParametersEvent.class); }
/**
 * Deletes files, directories and buckets in Backblaze B2, in two passes.
 * Pass 1 handles non-container paths: multipart-upload placeholders have their pending
 * parts removed; directories have their placeholder object (.bzEmpty) deleted, with
 * not-found and API failures logged and skipped; files are either hidden (when
 * versioning is off or no version id is known — an "already_hidden" API error is
 * tolerated) or have the specific version deleted. The cached version id is cleared
 * for each handled path. Pass 2 deletes the buckets themselves.
 * B2 API and IO failures in delete paths are mapped to BackgroundExceptions.
 */
@Override
public void delete(final Map<Path, TransferStatus> files, final PasswordCallback prompt, final Callback callback) throws BackgroundException {
    for(Path file : files.keySet()) {
        if(containerService.isContainer(file)) {
            continue;
        }
        callback.delete(file);
        if(file.getType().contains(Path.Type.upload)) {
            new B2LargeUploadPartService(session, fileid).delete(file.attributes().getVersionId());
        }
        else {
            if(file.isDirectory()) {
                // Delete /.bzEmpty if any
                final String placeholder;
                try {
                    placeholder = fileid.getVersionId(file);
                }
                catch(NotfoundException e) {
                    log.warn(String.format("Ignore failure %s deleting placeholder file for %s", e, file));
                    continue;
                }
                if(null == placeholder) {
                    continue;
                }
                try {
                    session.getClient().deleteFileVersion(containerService.getKey(file), placeholder);
                }
                catch(B2ApiException e) {
                    log.warn(String.format("Ignore failure %s deleting placeholder file for %s", e.getMessage(), file));
                }
                catch(IOException e) {
                    throw new DefaultIOExceptionMappingService().map("Cannot delete {0}", e, file);
                }
            }
            else if(file.isFile()) {
                try {
                    if(!versioning.isEnabled() || null == file.attributes().getVersionId()) {
                        // Add hide marker
                        if(log.isDebugEnabled()) {
                            log.debug(String.format("Add hide marker %s of %s", file.attributes().getVersionId(), file));
                        }
                        try {
                            session.getClient().hideFile(fileid.getVersionId(containerService.getContainer(file)), containerService.getKey(file));
                        }
                        catch(B2ApiException e) {
                            if("already_hidden".equalsIgnoreCase(e.getCode())) {
                                log.warn(String.format("Ignore failure %s hiding file %s already hidden", e.getMessage(), file));
                            }
                            else {
                                throw e;
                            }
                        }
                    }
                    else {
                        // Delete specific version
                        if(log.isDebugEnabled()) {
                            log.debug(String.format("Delete version %s of %s", file.attributes().getVersionId(), file));
                        }
                        session.getClient().deleteFileVersion(containerService.getKey(file), file.attributes().getVersionId());
                    }
                }
                catch(B2ApiException e) {
                    throw new B2ExceptionMappingService(fileid).map("Cannot delete {0}", e, file);
                }
                catch(IOException e) {
                    throw new DefaultIOExceptionMappingService().map("Cannot delete {0}", e, file);
                }
            }
            fileid.cache(file, null);
        }
    }
    for(Path file : files.keySet()) {
        try {
            if(containerService.isContainer(file)) {
                callback.delete(file);
                // Finally delete bucket itself
                session.getClient().deleteBucket(fileid.getVersionId(file));
            }
        }
        catch(B2ApiException e) {
            throw new B2ExceptionMappingService(fileid).map("Cannot delete {0}", e, file);
        }
        catch(IOException e) {
            throw new DefaultIOExceptionMappingService().map("Cannot delete {0}", e, file);
        }
    }
}
// Deleting an unversioned file (hide marker path) must make it unfindable; cleans up the bucket afterwards.
@Test public void testHide() throws Exception { final B2VersionIdProvider fileid = new B2VersionIdProvider(session); final Path bucket = new B2DirectoryFeature(session, fileid).mkdir(new Path(new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory, Path.Type.volume)), new TransferStatus()); final Path file = new Path(bucket, String.format("%s %s", new AlphanumericRandomStringService().random(), "1"), EnumSet.of(Path.Type.file)); new B2TouchFeature(session, fileid).touch(file, new TransferStatus()); new B2DeleteFeature(session, fileid).delete(Collections.singletonList(file.withAttributes(PathAttributes.EMPTY)), new DisabledLoginCallback(), new Delete.DisabledCallback()); assertFalse(new B2FindFeature(session, fileid).find(file)); assertFalse(new DefaultFindFeature(session).find(file)); new B2DeleteFeature(session, fileid).delete(new B2ObjectListService(session, fileid).list(bucket, new DisabledListProgressListener()).toList(), new DisabledLoginCallback(), new Delete.DisabledCallback()); new B2DeleteFeature(session, new B2VersionIdProvider(session)).delete(Collections.singletonList(bucket), new DisabledLoginCallback(), new Delete.DisabledCallback()); }
/**
 * Maps a Java reflection Type to its ParamType schema using the default
 * Java-to-argument-type mapping. Delegation to the two-argument overload.
 */
public static ParamType getSchemaFromType(final Type type) { return getSchemaFromType(type, JAVA_TO_ARG_TYPE); }
// Connect's Struct class must map to the generic ANY_STRUCT param type.
@Test public void shouldGetStringSchemaFromStructClass() { assertThat( UdfUtil.getSchemaFromType(Struct.class), equalTo(StructType.ANY_STRUCT) ); }
/**
 * Adds an element to the estimator synchronously by delegating to addAsync and
 * blocking until the async operation completes. join() rethrows any failure
 * wrapped in a CompletionException.
 */
public void add(String element) { addAsync(element).toCompletableFuture().join(); }
// HyperLogLog-backed count: distinct adds raise the estimate, duplicates do not.
@Test public void testAdd() throws Exception { final FaultTolerantRedisCluster redisCluster = REDIS_CLUSTER_EXTENSION.getRedisCluster(); final CardinalityEstimator estimator = new CardinalityEstimator(redisCluster, "test", Duration.ofSeconds(1)); estimator.add("1"); long count = redisCluster.withCluster(conn -> conn.sync().pfcount("cardinality_estimator::test")); assertThat(count).isEqualTo(1).isEqualTo(estimator.estimate()); estimator.add("2"); count = redisCluster.withCluster(conn -> conn.sync().pfcount("cardinality_estimator::test")); assertThat(count).isEqualTo(2).isEqualTo(estimator.estimate()); estimator.add("1"); count = redisCluster.withCluster(conn -> conn.sync().pfcount("cardinality_estimator::test")); assertThat(count).isEqualTo(2).isEqualTo(estimator.estimate()); }
@Override public void validateUserList(Collection<Long> ids) { if (CollUtil.isEmpty(ids)) { return; } // 获得岗位信息 List<AdminUserDO> users = userMapper.selectBatchIds(ids); Map<Long, AdminUserDO> userMap = CollectionUtils.convertMap(users, AdminUserDO::getId); // 校验 ids.forEach(id -> { AdminUserDO user = userMap.get(id); if (user == null) { throw exception(USER_NOT_EXISTS); } if (!CommonStatusEnum.ENABLE.getStatus().equals(user.getStatus())) { throw exception(USER_IS_DISABLE, user.getNickname()); } }); }
// Validating an id with no matching user must raise USER_NOT_EXISTS.
@Test
public void testValidateUserList_notFound() {
    // Prepare arguments
    List<Long> ids = singletonList(randomLongId());
    // Call and assert the expected service exception
    assertServiceException(() -> userService.validateUserList(ids), USER_NOT_EXISTS);
}
/**
 * Normal cumulative distribution function: P(X <= value) for X ~ N(mean, standardDeviation²),
 * computed via the error function as 0.5 * (1 + erf((value - mean) / (sd * sqrt(2)))).
 * Requires standardDeviation > 0 (NaN also fails this check and is rejected).
 */
@Description("Normal cdf given a mean, standard deviation, and value") @ScalarFunction @SqlType(StandardTypes.DOUBLE) public static double normalCdf( @SqlType(StandardTypes.DOUBLE) double mean, @SqlType(StandardTypes.DOUBLE) double standardDeviation, @SqlType(StandardTypes.DOUBLE) double value) { checkCondition(standardDeviation > 0, INVALID_FUNCTION_ARGUMENT, "normalCdf Function: standardDeviation must be > 0"); return 0.5 * (1 + Erf.erf((value - mean) / (standardDeviation * Math.sqrt(2)))); }
// Spot checks against known CDF values, infinity/NaN propagation, and rejection of sd <= 0 / NaN.
@Test public void testNormalCdf() { assertFunction("normal_cdf(0, 1, 1.96)", DOUBLE, 0.9750021048517796); assertFunction("normal_cdf(10, 9, 10)", DOUBLE, 0.5); assertFunction("normal_cdf(-1.5, 2.1, -7.8)", DOUBLE, 0.0013498980316301035); assertFunction("normal_cdf(0, 1, infinity())", DOUBLE, 1.0); assertFunction("normal_cdf(0, 1, -infinity())", DOUBLE, 0.0); assertFunction("normal_cdf(infinity(), 1, 0)", DOUBLE, 0.0); assertFunction("normal_cdf(-infinity(), 1, 0)", DOUBLE, 1.0); assertFunction("normal_cdf(0, infinity(), 0)", DOUBLE, 0.5); assertFunction("normal_cdf(nan(), 1, 0)", DOUBLE, Double.NaN); assertFunction("normal_cdf(0, 1, nan())", DOUBLE, Double.NaN); assertInvalidFunction("normal_cdf(0, 0, 0.1985)", "normalCdf Function: standardDeviation must be > 0"); assertInvalidFunction("normal_cdf(0, nan(), 0.1985)", "normalCdf Function: standardDeviation must be > 0"); }
/**
 * Increments and returns the counter for the slot this ip hashes into.
 * A null ip always uses slot 0. The slot index is |hashCode % slotCount|, which is
 * always a valid index since the remainder's magnitude is strictly below slotCount.
 */
public int incrementAndGet(String ip) {
    final int index = (ip == null) ? 0 : Math.abs(ip.hashCode() % slotCount);
    return data[index].incrementAndGet();
}
// Counters are per-slot: repeated increments of the same ip accumulate, a different ip starts fresh.
@Test void testIncrementAndGet() { SimpleIpFlowData simpleIpFlowData = new SimpleIpFlowData(5, 10000); assertEquals(1, simpleIpFlowData.incrementAndGet("127.0.0.1")); assertEquals(2, simpleIpFlowData.incrementAndGet("127.0.0.1")); assertEquals(3, simpleIpFlowData.incrementAndGet("127.0.0.1")); assertEquals(1, simpleIpFlowData.incrementAndGet("127.0.0.2")); assertEquals(2, simpleIpFlowData.incrementAndGet("127.0.0.2")); }
/**
 * Returns a page of member tags matching the request. Pure delegation to the mapper.
 */
@Override public PageResult<MemberTagDO> getTagPage(MemberTagPageReqVO pageReqVO) { return memberTagMapper.selectPage(pageReqVO); }
// Paging must filter by name and createTime range, returning only the matching record.
@Test
public void testGetTagPage() {
    // Mock data: the record the query should match
    MemberTagDO dbTag = randomPojo(MemberTagDO.class, o -> {
        o.setName("test");
        o.setCreateTime(buildTime(2023, 2, 18));
    });
    tagMapper.insert(dbTag);
    // Record whose name does not match
    tagMapper.insert(cloneIgnoreId(dbTag, o -> o.setName("ne")));
    // Record whose createTime does not match
    tagMapper.insert(cloneIgnoreId(dbTag, o -> o.setCreateTime(null)));
    // Prepare query parameters
    MemberTagPageReqVO reqVO = new MemberTagPageReqVO();
    reqVO.setName("test");
    reqVO.setCreateTime(buildBetweenTime(2023, 2, 1, 2023, 2, 28));
    // Call
    PageResult<MemberTagDO> pageResult = tagService.getTagPage(reqVO);
    // Assert only the matching record is returned
    assertEquals(1, pageResult.getTotal());
    assertEquals(1, pageResult.getList().size());
    assertPojoEquals(dbTag, pageResult.getList().get(0));
}
/**
 * Binds a shorthand (*) projection by expanding it into the actual column projections.
 * With an owner (e.g. "o.*") only that table's visible columns are expanded; without
 * an owner, all no-owner projections for the bound table are used. Works on a copy of
 * the input segment.
 */
public static ShorthandProjectionSegment bind(final ShorthandProjectionSegment segment, final TableSegment boundTableSegment, final Map<String, TableSegmentBinderContext> tableBinderContexts) { ShorthandProjectionSegment result = copy(segment); if (segment.getOwner().isPresent()) { expandVisibleColumns(getProjectionSegmentsByTableAliasOrName(tableBinderContexts, segment.getOwner().get().getIdentifier().getValue()), result); } else { expandNoOwnerProjections(boundTableSegment, tableBinderContexts, result); } return result; }
// "o.*" must expand to the owner's visible columns only; the invisible column is dropped.
@Test void assertBindWithOwner() { ShorthandProjectionSegment shorthandProjectionSegment = new ShorthandProjectionSegment(0, 0); shorthandProjectionSegment.setOwner(new OwnerSegment(0, 0, new IdentifierValue("o"))); ColumnProjectionSegment invisibleColumn = new ColumnProjectionSegment(new ColumnSegment(0, 0, new IdentifierValue("status"))); invisibleColumn.setVisible(false); Map<String, TableSegmentBinderContext> tableBinderContexts = Collections.singletonMap( "o", new SimpleTableSegmentBinderContext(Arrays.asList(new ColumnProjectionSegment(new ColumnSegment(0, 0, new IdentifierValue("order_id"))), invisibleColumn))); ShorthandProjectionSegment actual = ShorthandProjectionSegmentBinder.bind(shorthandProjectionSegment, mock(TableSegment.class), tableBinderContexts); assertThat(actual.getActualProjectionSegments().size(), is(1)); ProjectionSegment visibleColumn = actual.getActualProjectionSegments().iterator().next(); assertThat(visibleColumn.getColumnLabel(), is("order_id")); assertTrue(visibleColumn.isVisible()); }
/**
 * Returns the code unit this element belongs to. Simple accessor; part of the public API.
 */
@Override @PublicAPI(usage = ACCESS) public JavaClass getOwner() { return owner; }
// A target without a throws declaration resolves to an empty throws clause that still
// reports the correct owner and declaring class.
@Test public void no_throws_clause_is_resolved() { CodeUnitCallTarget target = getTarget("withoutThrowsDeclaration"); ThrowsClause<? extends CodeUnitCallTarget> throwsClause = target.getThrowsClause(); assertThatThrowsClause(throwsClause).as("throws clause").isEmpty(); assertThat(throwsClause.getTypes()).isEmpty(); assertThat(throwsClause.getOwner()).isEqualTo(target); assertThatType(throwsClause.getDeclaringClass()).matches(Target.class); }
/**
 * Validates a service method invocation with Bean Validation. Builds the group list
 * from the per-method marker class (if any), any @MethodValidated groups on the method,
 * plus Default and the service interface itself; validates the synthesized parameter
 * bean and each argument against those groups, and throws ConstraintViolationException
 * listing all violations when any are found.
 */
@Override
public void validate(String methodName, Class<?>[] parameterTypes, Object[] arguments) throws Exception {
    List<Class<?>> groups = new ArrayList<>();
    Class<?> methodClass = methodClass(methodName);
    if (methodClass != null) {
        groups.add(methodClass);
    }
    Method method = clazz.getMethod(methodName, parameterTypes);
    Class<?>[] methodClasses;
    if (method.isAnnotationPresent(MethodValidated.class)) {
        methodClasses = method.getAnnotation(MethodValidated.class).value();
        groups.addAll(Arrays.asList(methodClasses));
    }
    // add into default group
    groups.add(0, Default.class);
    groups.add(1, clazz);
    // convert list to array
    Class<?>[] classGroups = groups.toArray(new Class[0]);
    Set<ConstraintViolation<?>> violations = new HashSet<>();
    Object parameterBean = getMethodParameterBean(clazz, method, arguments);
    if (parameterBean != null) {
        violations.addAll(validator.validate(parameterBean, classGroups));
    }
    for (Object arg : arguments) {
        validate(violations, arg, classGroups);
    }
    if (!violations.isEmpty()) {
        logger.info("Failed to validate service: " + clazz.getName() + ", method: " + methodName + ", cause: " + violations);
        throw new ConstraintViolationException(
                "Failed to validate service: " + clazz.getName() + ", method: " + methodName + ", cause: " + violations, violations);
    }
}
// Passing nulls for constrained parameters must trigger a ValidationException.
@Test void testItWithPrimitiveArg() { Assertions.assertThrows(ValidationException.class, () -> { URL url = URL.valueOf( "test://test:11/org.apache.dubbo.validation.support.jvalidation.mock.JValidatorTestTarget"); JValidator jValidator = new JValidator(url); jValidator.validate("someMethod6", new Class<?>[] {Integer.class, String.class, Long.class}, new Object[] { null, null, null }); }); }
/**
 * Validates a readwrite-splitting rule configuration: first that every referenced
 * data source exists, then that the configured load balancer is valid.
 */
@Override public void check(final String databaseName, final ReadwriteSplittingRuleConfiguration ruleConfig, final Map<String, DataSource> dataSourceMap, final Collection<ShardingSphereRule> builtRules) { checkDataSources(databaseName, ruleConfig.getDataSourceGroups(), dataSourceMap, builtRules); checkLoadBalancer(databaseName, ruleConfig); }
// A group referencing a write data source that does not exist must fail the check.
@SuppressWarnings({"rawtypes", "unchecked"}) @Test void assertCheckWhenConfigInvalidWriteDataSource() { ReadwriteSplittingRuleConfiguration config = mock(ReadwriteSplittingRuleConfiguration.class); List<ReadwriteSplittingDataSourceGroupRuleConfiguration> configs = Arrays.asList(createDataSourceGroupRuleConfiguration( "write_ds_0", Arrays.asList("read_ds_0", "read_ds_1")), createDataSourceGroupRuleConfiguration("write_ds_2", Arrays.asList("read_ds_0", "read_ds_1"))); when(config.getDataSourceGroups()).thenReturn(configs); RuleConfigurationChecker checker = OrderedSPILoader.getServicesByClass(RuleConfigurationChecker.class, Collections.singleton(config.getClass())).get(config.getClass()); assertThrows(ReadwriteSplittingActualDataSourceNotFoundException.class, () -> checker.check("test", config, mockDataSources(), Collections.emptyList())); }
/**
 * Offers an event to the client's queue, returning the queue's acceptance result.
 * Throws IllegalStateException when this client was created without a queue.
 */
public boolean offer(Serializable event) { if (queue == null) { throw new IllegalStateException("client has no event queue"); } return queue.offer(event); }
// Events offered before the client runs must be written to the stream in FIFO order.
@Test public void testOfferEventSequenceAndRun() throws Exception { for (int i = 0; i < 10; i++) { client.offer(TEST_EVENT + i); } Thread thread = new Thread(client); thread.start(); thread.join(1000); Assertions.assertFalse(thread.isAlive()); ObjectInputStream ois = new ObjectInputStream(new ByteArrayInputStream(outputStream.toByteArray())); for (int i = 0; i < 10; i++) { Assertions.assertEquals(TEST_EVENT + i, ois.readObject()); } }
/**
 * Sets the capacity of the scheduled-task queue. Rejects zero and negative values
 * via checkPositive.
 */
public void setScheduledTaskQueueCapacity(int scheduledTaskQueueCapacity) { this.scheduledTaskQueueCapacity = checkPositive(scheduledTaskQueueCapacity, "scheduledTaskQueueCapacity"); }
// A negative capacity must be rejected with IllegalArgumentException.
@Test public void test_setScheduledTaskQueueCapacity_whenNegative() { ReactorBuilder builder = newBuilder(); assertThrows(IllegalArgumentException.class, () -> builder.setScheduledTaskQueueCapacity(-1)); }
/**
 * Interpolates property references in the text using the default syntax style.
 * Delegation to the three-argument overload.
 */
public static String interpolate(String text, Properties properties) { return interpolate(text, properties, SyntaxStyle.DEFAULT); }
// MSBuild-style $(...) references: known keys are substituted, unknown keys erase to "".
@Test public void testInterpolateNonexistentErasedMSBuild() { Properties prop = new Properties(); prop.setProperty("key", "value"); String text = "This is a test of '$(key)' and '$(nothing)'"; String expResults = "This is a test of 'value' and ''"; String results = InterpolationUtil.interpolate(text, prop, InterpolationUtil.SyntaxStyle.MSBUILD); assertEquals(expResults, results); }
/**
 * Applies a Beam PTransform to a single Flink DataStream and returns the resulting
 * DataStream. Wraps the input under the name "input", runs the internal multi-IO
 * apply on the stream's execution environment, and unwraps the single "output".
 * The raw (DataStream) cast bridges Beam's and Flink's generic signatures.
 */
public <InputT, OutputT, CollectionT extends PCollection<? extends InputT>> DataStream<OutputT> applyBeamPTransform( DataStream<InputT> input, PTransform<CollectionT, PCollection<OutputT>> transform) { return (DataStream) getNonNull( applyBeamPTransformInternal( ImmutableMap.of("input", input), (pipeline, map) -> (CollectionT) getNonNull(map, "input"), (output) -> ImmutableMap.of("output", output), transform, input.getExecutionEnvironment()), "output"); }
// End-to-end check: a simple prefixing Beam transform applied through the Flink adapter
// produces "x"-prefixed copies of all input elements (order not asserted).
@Test public void testApplySimpleTransform() throws Exception { StreamExecutionEnvironment env = StreamExecutionEnvironment.createLocalEnvironment(); DataStream<String> input = env.fromCollection(ImmutableList.of("a", "b", "c")); DataStream<String> result = new BeamFlinkDataStreamAdapter().applyBeamPTransform(input, withPrefix("x")); assertThat( ImmutableList.copyOf(result.executeAndCollect()), containsInAnyOrder("xa", "xb", "xc")); }
/**
 * Generates obfuscation-removal mappings for every class in the resource.
 *
 * <p>Classes are processed one inheritance family at a time so that overridden members
 * receive consistent names across the hierarchy.
 *
 * @param workspace        optional workspace enabling class lookup in the mappings adapter
 * @param resource         resource whose classes are to be renamed
 * @param inheritanceGraph graph used for hierarchy-aware member naming
 * @param generator        strategy producing the new names
 * @param filter           optional filter limiting what gets renamed; enum methods are always excluded
 * @return the generated mappings
 */
@Nonnull public Mappings generate(@Nullable Workspace workspace, @Nonnull WorkspaceResource resource,
        @Nonnull InheritanceGraph inheritanceGraph, @Nonnull NameGenerator generator,
        @Nullable NameGeneratorFilter filter) {
    // Adapt filter to handle baseline cases.
    filter = new ExcludeEnumMethodsFilter(filter);
    // Setup adapter to store our mappings in.
    MappingsAdapter mappings = new MappingsAdapter(true, true);
    mappings.enableHierarchyLookup(inheritanceGraph);
    if (workspace != null)
        mappings.enableClassLookup(workspace);
    // Sorted map gives a deterministic processing order (firstKey below).
    SortedMap<String, ClassInfo> classMap = new TreeMap<>();
    resource.versionedJvmClassBundleStream()
            .flatMap(Bundle::stream)
            .forEach(c -> classMap.put(c.getName(), c));
    classMap.putAll(resource.getJvmClassBundle());
    // Pull a class, create mappings for its inheritance family, then remove those classes from the map.
    // When the map is empty everything has been run through the mapping generation process.
    while (!classMap.isEmpty()) {
        // Get family from the class.
        String className = classMap.firstKey();
        Set<InheritanceVertex> family = inheritanceGraph.getVertexFamily(className, false);
        // Create mappings for the family
        generateFamilyMappings(mappings, family, generator, filter);
        // Remove all family members from the class map.
        // An empty family still removes the current class so the loop always makes progress.
        if (family.isEmpty())
            classMap.remove(className);
        else
            family.forEach(vertex -> classMap.remove(vertex.getName()));
    }
    return mappings;
}
@Test void testGeneral() { // Apply and assert no unexpected values exist Mappings mappings = mappingGenerator.generate(workspace, resource, inheritanceGraph, nameGenerator, null); // Should not generate names for internal classes assertNull(mappings.getMappedClassName("java/lang/Object")); assertNull(mappings.getMappedClassName("java/lang/enum")); // Should not generate names for constructors/override/library methods // - but still generate names for members String className = AccessibleFields.class.getName().replace('.', '/'); assertNull(mappings.getMappedMethodName(className, "hashCode", "()I")); assertNull(mappings.getMappedMethodName(className, "<init>>", "()V")); assertNotNull(mappings.getMappedFieldName(className, "CONSTANT_FIELD", "I")); assertNotNull(mappings.getMappedFieldName(className, "privateFinalField", "I")); assertNotNull(mappings.getMappedFieldName(className, "protectedField", "I")); assertNotNull(mappings.getMappedFieldName(className, "publicField", "I")); assertNotNull(mappings.getMappedFieldName(className, "packageField", "I")); }
/**
 * Sets the host of the protocol under construction.
 *
 * @param host the host name or address to bind/export to
 * @return this builder (via {@code getThis()}) for call chaining
 */
public ProtocolBuilder host(String host) {
    this.host = host;
    return getThis();
}
// Verifies that the host configured on the builder is carried into the built ProtocolConfig.
@Test void host() { ProtocolBuilder builder = new ProtocolBuilder(); builder.host("host"); Assertions.assertEquals("host", builder.build().getHost()); }
/**
 * Single-argument form of the function: delegates to the two-argument overload with a
 * scale of zero (i.e. rounding to a whole number).
 *
 * @param n the number to round
 * @return the rounded result wrapped in a FEEL function result
 */
public FEELFnResult<BigDecimal> invoke(@ParameterName( "n" ) BigDecimal n) {
    return invoke(n, BigDecimal.ZERO);
}
// Round-up semantics: 10.27 with default scale 0 -> 11; 10.27 with scale 1 -> 10.3.
@Test void invokeRoundingUp() { FunctionTestUtil.assertResult(roundUpFunction.invoke(BigDecimal.valueOf(10.27)), BigDecimal.valueOf(11)); FunctionTestUtil.assertResult(roundUpFunction.invoke(BigDecimal.valueOf(10.27), BigDecimal.ONE), BigDecimal.valueOf(10.3)); }
/**
 * Decorates a {@link CheckedSupplier} so that any {@link Throwable} thrown by it is routed
 * through the given handler, whose result becomes the supplier's return value.
 *
 * <p>If the handler itself throws, that throwable propagates to the caller unchanged.
 *
 * @param supplier         the supplier to protect
 * @param exceptionHandler maps a thrown {@link Throwable} to a fallback value (or rethrows)
 * @return a supplier that never propagates the original supplier's throwable directly
 */
public static <T> CheckedSupplier<T> recover(CheckedSupplier<T> supplier,
        CheckedFunction<Throwable, T> exceptionHandler) {
    return () -> {
        try {
            return supplier.get();
        } catch (Throwable cause) {
            // Delegate every failure, including Errors, to the recovery function.
            return exceptionHandler.apply(cause);
        }
    };
}
// When the recovery handler itself throws, that exception must propagate to the caller:
// the IOException from the supplier is replaced by the handler's RuntimeException.
@Test(expected = RuntimeException.class) public void shouldRethrowException() throws Throwable { CheckedSupplier<String> callable = () -> { throw new IOException("BAM!"); }; CheckedSupplier<String> callableWithRecovery = CheckedFunctionUtils.recover(callable, (ex) -> { throw new RuntimeException(); }); callableWithRecovery.get(); }
/**
 * Returns the effective pagination offset.
 *
 * <p>Without an offset segment the offset is {@code 0}. When the segment declares an open
 * lower bound (exclusive), the stored offset is shifted down by one to make it inclusive.
 *
 * @return the actual zero-based offset to apply
 */
public long getActualOffset() {
    if (offsetSegment == null) {
        return 0L;
    }
    long result = actualOffset;
    if (offsetSegment.isBoundOpened()) {
        // Open (exclusive) bound: convert to the inclusive offset actually used.
        result = result - 1;
    }
    return result;
}
// Verifies the actual offset computed from a number-literal pagination segment equals 30.
@Test void assertGetActualOffsetWithNumberLiteralPaginationValueSegment() { assertThat(new PaginationContext(getOffsetSegmentWithNumberLiteralPaginationValueSegment(), getRowCountSegmentWithNumberLiteralPaginationValueSegment(), getParameters()).getActualOffset(), is(30L)); }
/**
 * Busy-waits until the clock advances strictly past the given timestamp.
 *
 * <p>Used to avoid issuing IDs with a timestamp that is not greater than the last one seen
 * (e.g. after clock read ties).
 *
 * @param lastTimestamp the last timestamp handed out
 * @return the first generated timestamp strictly greater than {@code lastTimestamp}
 */
private long tilNextMillis(final long lastTimestamp) {
    long current;
    do {
        current = timeGen();
    } while (current <= lastTimestamp);
    return current;
}
// Invokes the private tilNextMillis via reflection and checks the returned timestamp is
// strictly greater than the supplied one.
@Test public void testTilNextMillis() throws Exception { Class<?> uUIDUtilsClass = UUIDUtils.getInstance().getClass(); Class<?>[] p = {long.class}; Method method = uUIDUtilsClass.getDeclaredMethod("tilNextMillis", p); method.setAccessible(true); long lastTimestamp = System.currentTimeMillis(); long result = (long) method.invoke(UUIDUtils.getInstance(), lastTimestamp); assertThat(result, greaterThan(lastTimestamp)); }
/**
 * Returns the human-readable name of a binlog event type code.
 *
 * <p>Unrecognized codes yield {@code "Unknown type:" + type} instead of throwing, so this is
 * safe to call on arbitrary input (e.g. for logging). Both heartbeat variants map to the
 * same "Heartbeat" label.
 *
 * @param type the numeric event-type constant
 * @return the display name for the event type
 */
public static String getTypeName(final int type) { switch (type) { case START_EVENT_V3: return "Start_v3"; case STOP_EVENT: return "Stop"; case QUERY_EVENT: return "Query"; case ROTATE_EVENT: return "Rotate"; case INTVAR_EVENT: return "Intvar"; case LOAD_EVENT: return "Load"; case NEW_LOAD_EVENT: return "New_load"; case SLAVE_EVENT: return "Slave"; case CREATE_FILE_EVENT: return "Create_file"; case APPEND_BLOCK_EVENT: return "Append_block"; case DELETE_FILE_EVENT: return "Delete_file"; case EXEC_LOAD_EVENT: return "Exec_load"; case RAND_EVENT: return "RAND"; case XID_EVENT: return "Xid"; case USER_VAR_EVENT: return "User var"; case FORMAT_DESCRIPTION_EVENT: return "Format_desc"; case TABLE_MAP_EVENT: return "Table_map"; case PRE_GA_WRITE_ROWS_EVENT: return "Write_rows_event_old"; case PRE_GA_UPDATE_ROWS_EVENT: return "Update_rows_event_old"; case PRE_GA_DELETE_ROWS_EVENT: return "Delete_rows_event_old"; case WRITE_ROWS_EVENT_V1: return "Write_rows_v1"; case UPDATE_ROWS_EVENT_V1: return "Update_rows_v1"; case DELETE_ROWS_EVENT_V1: return "Delete_rows_v1"; case BEGIN_LOAD_QUERY_EVENT: return "Begin_load_query"; case EXECUTE_LOAD_QUERY_EVENT: return "Execute_load_query"; case INCIDENT_EVENT: return "Incident"; case HEARTBEAT_LOG_EVENT: case HEARTBEAT_LOG_EVENT_V2: return "Heartbeat"; case IGNORABLE_LOG_EVENT: return "Ignorable"; case ROWS_QUERY_LOG_EVENT: return "Rows_query"; case WRITE_ROWS_EVENT: return "Write_rows"; case UPDATE_ROWS_EVENT: return "Update_rows"; case DELETE_ROWS_EVENT: return "Delete_rows"; case GTID_LOG_EVENT: return "Gtid"; case ANONYMOUS_GTID_LOG_EVENT: return "Anonymous_Gtid"; case PREVIOUS_GTIDS_LOG_EVENT: return "Previous_gtids"; case PARTIAL_UPDATE_ROWS_EVENT: return "Update_rows_partial"; case TRANSACTION_CONTEXT_EVENT : return "Transaction_context"; case VIEW_CHANGE_EVENT : return "view_change"; case XA_PREPARE_LOG_EVENT : return "Xa_prepare"; case TRANSACTION_PAYLOAD_EVENT : return "transaction_payload"; default: return "Unknown type:" + 
type; } }
// Verifies that event-type code 34 maps to the "Anonymous_Gtid" display name.
@Test
public void getTypeNameInputPositiveOutputNotNull12() {
    // Arrange
    final int type = 34;
    // Act
    final String actual = LogEvent.getTypeName(type);
    // Assert result
    Assert.assertEquals("Anonymous_Gtid", actual);
}
/**
 * Creates a pre-configured {@link ProducerBuilder} for a function-result producer.
 *
 * <p>Applies (in order): the optional default configurer, the fixed baseline settings
 * (non-blocking-safe queueing, batching, function result routing, infinite send timeout),
 * the optional producer name, and finally any per-producer overrides from
 * {@code producerConfig} (compression, pending-message limits, crypto, batcher type).
 *
 * @param topic        destination topic
 * @param schema       message schema
 * @param producerName producer name, or {@code null} to let the client assign one
 * @return the configured builder (not yet created/connected)
 */
public <T> ProducerBuilder<T> createProducerBuilder(String topic, Schema<T> schema, String producerName) {
    ProducerBuilder<T> builder = client.newProducer(schema);
    // Caller-supplied defaults are applied first so the settings below take precedence.
    if (defaultConfigurer != null) {
        defaultConfigurer.accept(builder);
    }
    builder.blockIfQueueFull(true)
            .enableBatching(true)
            .batchingMaxPublishDelay(10, TimeUnit.MILLISECONDS)
            .hashingScheme(HashingScheme.Murmur3_32Hash)
            // .messageRoutingMode(MessageRoutingMode.CustomPartition)
            .messageRouter(FunctionResultRouter.of())
            // set send timeout to be infinity to prevent potential deadlock with consumer
            // that might happen when consumer is blocked due to unacked messages
            .sendTimeout(0, TimeUnit.SECONDS)
            .topic(topic);
    if (producerName != null) {
        builder.producerName(producerName);
    }
    if (producerConfig != null) {
        if (producerConfig.getCompressionType() != null) {
            builder.compressionType(producerConfig.getCompressionType());
        } else {
            // TODO: address this inconsistency.
            // PR https://github.com/apache/pulsar/pull/19470 removed the default compression type of LZ4
            // from the top level. This default is only used if producer config is provided.
            builder.compressionType(CompressionType.LZ4);
        }
        // A configured value of 0 is treated as "keep the client default".
        if (producerConfig.getMaxPendingMessages() != null && producerConfig.getMaxPendingMessages() != 0) {
            builder.maxPendingMessages(producerConfig.getMaxPendingMessages());
        }
        if (producerConfig.getMaxPendingMessagesAcrossPartitions() != null
                && producerConfig.getMaxPendingMessagesAcrossPartitions() != 0) {
            builder.maxPendingMessagesAcrossPartitions(producerConfig.getMaxPendingMessagesAcrossPartitions());
        }
        if (producerConfig.getCryptoConfig() != null) {
            builder.cryptoKeyReader(crypto.keyReader);
            builder.cryptoFailureAction(crypto.failureAction);
            for (String encryptionKeyName : crypto.getEncryptionKeys()) {
                builder.addEncryptionKey(encryptionKeyName);
            }
        }
        if (producerConfig.getBatchBuilder() != null) {
            // Anything other than "KEY_BASED" falls back to the default batcher.
            if (producerConfig.getBatchBuilder().equals("KEY_BASED")) {
                builder.batcherBuilder(BatcherBuilder.KEY_BASED);
            } else {
                builder.batcherBuilder(BatcherBuilder.DEFAULT);
            }
        }
    }
    return builder;
}
// With no default configurer and no producer config, only the common baseline builder calls
// (checked by verifyCommon) must be made on the mocked builder — nothing else.
@Test public void testCreateProducerBuilder() { ProducerBuilderFactory builderFactory = new ProducerBuilderFactory(pulsarClient, null, null, null); builderFactory.createProducerBuilder("topic", Schema.STRING, "producerName"); verifyCommon(); verifyNoMoreInteractions(producerBuilder); }
/**
 * Drains buffered completed fetches into a {@code ShareFetch}, returning at most
 * {@code fetchConfig.maxPollRecords} records in total.
 *
 * <p>Completed fetches are initialized lazily; records are pulled from the current
 * "next in line" fetch until it is exhausted or the record budget runs out.
 * A {@code KafkaException} is swallowed if some records were already collected, so the
 * caller receives data first and the error resurfaces on the next poll.
 *
 * @param fetchBuffer buffer of completed fetch responses to drain
 * @return the collected (possibly empty) fetch
 */
public ShareFetch<K, V> collect(final ShareFetchBuffer fetchBuffer) {
    ShareFetch<K, V> fetch = ShareFetch.empty();
    int recordsRemaining = fetchConfig.maxPollRecords;
    try {
        while (recordsRemaining > 0) {
            final ShareCompletedFetch nextInLineFetch = fetchBuffer.nextInLineFetch();
            if (nextInLineFetch == null || nextInLineFetch.isConsumed()) {
                // Current fetch exhausted (or none yet): promote the next buffered fetch.
                final ShareCompletedFetch completedFetch = fetchBuffer.peek();
                if (completedFetch == null) {
                    break;
                }
                if (!completedFetch.isInitialized()) {
                    try {
                        fetchBuffer.setNextInLineFetch(initialize(completedFetch));
                    } catch (Exception e) {
                        // Drop the failed fetch only if nothing was collected yet; otherwise
                        // leave it buffered so the error can be surfaced again later.
                        if (fetch.isEmpty()) {
                            fetchBuffer.poll();
                        }
                        throw e;
                    }
                } else {
                    fetchBuffer.setNextInLineFetch(completedFetch);
                }
                fetchBuffer.poll();
            } else {
                final TopicIdPartition tp = nextInLineFetch.partition;
                ShareInFlightBatch<K, V> batch = nextInLineFetch.fetchRecords(
                        deserializers,
                        recordsRemaining,
                        fetchConfig.checkCrcs);
                if (batch.isEmpty()) {
                    nextInLineFetch.drain();
                }
                recordsRemaining -= batch.numRecords();
                fetch.add(tp, batch);
                if (batch.getException() != null) {
                    throw batch.getException();
                } else if (batch.hasCachedException()) {
                    // Stop collecting; the cached exception will be raised on a later call.
                    break;
                }
            }
        }
    } catch (KafkaException e) {
        // Prefer returning collected records over failing; rethrow only if empty-handed.
        if (fetch.isEmpty()) {
            throw e;
        }
    }
    return fetch;
}
// A completed fetch carrying UNKNOWN_LEADER_EPOCH must produce an empty result
// (no records, no exception thrown from collect).
@Test public void testFetchWithUnknownLeaderEpoch() { buildDependencies(); subscribeAndAssign(topicAPartition0); ShareCompletedFetch completedFetch = completedFetchBuilder .error(Errors.UNKNOWN_LEADER_EPOCH) .build(); fetchBuffer.add(completedFetch); ShareFetch<String, String> fetch = fetchCollector.collect(fetchBuffer); assertTrue(fetch.isEmpty()); }
/**
 * Converts a MySQL column type definition into a SeaTunnel {@code Column}.
 *
 * <p>Normalizes the raw data type (strips a trailing ZEROFILL, appends " UNSIGNED" when the
 * definition is flagged unsigned) and then maps it to the corresponding SeaTunnel type,
 * carrying over length/precision/scale where meaningful. Unsupported types raise a
 * convert-to-SeaTunnel-type error.
 *
 * @param typeDefine the MySQL column definition
 * @return the converted column
 */
@Override public Column convert(BasicTypeDefine typeDefine) {
    PhysicalColumn.PhysicalColumnBuilder builder =
            PhysicalColumn.builder()
                    .name(typeDefine.getName())
                    .sourceType(typeDefine.getColumnType())
                    .nullable(typeDefine.isNullable())
                    .defaultValue(typeDefine.getDefaultValue())
                    .comment(typeDefine.getComment());
    String mysqlDataType = typeDefine.getDataType().toUpperCase();
    // ZEROFILL is display-only in MySQL; drop it before matching.
    if (mysqlDataType.endsWith("ZEROFILL")) {
        mysqlDataType = mysqlDataType.substring(0, mysqlDataType.length() - "ZEROFILL".length()).trim();
    }
    // Fold the unsigned flag into the type string so one switch handles both forms.
    if (typeDefine.isUnsigned() && !(mysqlDataType.endsWith(" UNSIGNED"))) {
        mysqlDataType = mysqlDataType + " UNSIGNED";
    }
    switch (mysqlDataType) {
        case MYSQL_NULL:
            builder.dataType(BasicType.VOID_TYPE);
            break;
        case MYSQL_BIT:
            if (typeDefine.getLength() == null || typeDefine.getLength() <= 0) {
                builder.dataType(BasicType.BOOLEAN_TYPE);
            } else if (typeDefine.getLength() == 1) {
                builder.dataType(BasicType.BOOLEAN_TYPE);
            } else {
                builder.dataType(PrimitiveByteArrayType.INSTANCE);
                // BIT(M) -> BYTE(M/8)
                long byteLength = typeDefine.getLength() / 8;
                byteLength += typeDefine.getLength() % 8 > 0 ? 1 : 0;
                builder.columnLength(byteLength);
            }
            break;
        case MYSQL_TINYINT:
            // tinyint(1) is MySQL's conventional boolean.
            if (typeDefine.getColumnType().equalsIgnoreCase("tinyint(1)")) {
                builder.dataType(BasicType.BOOLEAN_TYPE);
            } else {
                builder.dataType(BasicType.BYTE_TYPE);
            }
            break;
        case MYSQL_TINYINT_UNSIGNED:
        case MYSQL_SMALLINT:
            builder.dataType(BasicType.SHORT_TYPE);
            break;
        case MYSQL_SMALLINT_UNSIGNED:
        case MYSQL_MEDIUMINT:
        case MYSQL_MEDIUMINT_UNSIGNED:
        case MYSQL_INT:
        case MYSQL_INTEGER:
        case MYSQL_YEAR:
            builder.dataType(BasicType.INT_TYPE);
            break;
        case MYSQL_INT_UNSIGNED:
        case MYSQL_INTEGER_UNSIGNED:
        case MYSQL_BIGINT:
            builder.dataType(BasicType.LONG_TYPE);
            break;
        case MYSQL_BIGINT_UNSIGNED:
            // Unsigned bigint exceeds long's range; use DECIMAL(20,0).
            DecimalType intDecimalType = new DecimalType(20, 0);
            builder.dataType(intDecimalType);
            builder.columnLength(Long.valueOf(intDecimalType.getPrecision()));
            builder.scale(intDecimalType.getScale());
            break;
        case MYSQL_FLOAT:
            builder.dataType(BasicType.FLOAT_TYPE);
            break;
        case MYSQL_FLOAT_UNSIGNED:
            log.warn("{} will probably cause value overflow.", MYSQL_FLOAT_UNSIGNED);
            builder.dataType(BasicType.FLOAT_TYPE);
            break;
        case MYSQL_DOUBLE:
            builder.dataType(BasicType.DOUBLE_TYPE);
            break;
        case MYSQL_DOUBLE_UNSIGNED:
            log.warn("{} will probably cause value overflow.", MYSQL_DOUBLE_UNSIGNED);
            builder.dataType(BasicType.DOUBLE_TYPE);
            break;
        case MYSQL_DECIMAL:
            Preconditions.checkArgument(typeDefine.getPrecision() > 0);
            DecimalType decimalType;
            // Clamp oversized precision to the supported maximum.
            if (typeDefine.getPrecision() > DEFAULT_PRECISION) {
                log.warn("{} will probably cause value overflow.", MYSQL_DECIMAL);
                decimalType = new DecimalType(DEFAULT_PRECISION, DEFAULT_SCALE);
            } else {
                decimalType =
                        new DecimalType(
                                typeDefine.getPrecision().intValue(),
                                typeDefine.getScale() == null ? 0 : typeDefine.getScale().intValue());
            }
            builder.dataType(decimalType);
            builder.columnLength(Long.valueOf(decimalType.getPrecision()));
            builder.scale(decimalType.getScale());
            break;
        case MYSQL_DECIMAL_UNSIGNED:
            Preconditions.checkArgument(typeDefine.getPrecision() > 0);
            log.warn("{} will probably cause value overflow.", MYSQL_DECIMAL_UNSIGNED);
            // +1 precision to accommodate the extra positive range of unsigned decimals.
            DecimalType decimalUnsignedType =
                    new DecimalType(
                            typeDefine.getPrecision().intValue() + 1,
                            typeDefine.getScale() == null ? 0 : typeDefine.getScale().intValue());
            builder.dataType(decimalUnsignedType);
            builder.columnLength(Long.valueOf(decimalUnsignedType.getPrecision()));
            builder.scale(decimalUnsignedType.getScale());
            break;
        case MYSQL_ENUM:
            builder.dataType(BasicType.STRING_TYPE);
            if (typeDefine.getLength() == null || typeDefine.getLength() <= 0) {
                builder.columnLength(100L);
            } else {
                builder.columnLength(typeDefine.getLength());
            }
            break;
        case MYSQL_CHAR:
        case MYSQL_VARCHAR:
            if (typeDefine.getLength() == null || typeDefine.getLength() <= 0) {
                builder.columnLength(TypeDefineUtils.charTo4ByteLength(1L));
            } else {
                builder.columnLength(typeDefine.getLength());
            }
            builder.dataType(BasicType.STRING_TYPE);
            break;
        case MYSQL_TINYTEXT:
            builder.dataType(BasicType.STRING_TYPE);
            builder.columnLength(POWER_2_8 - 1);
            break;
        case MYSQL_TEXT:
            builder.dataType(BasicType.STRING_TYPE);
            builder.columnLength(POWER_2_16 - 1);
            break;
        case MYSQL_MEDIUMTEXT:
            builder.dataType(BasicType.STRING_TYPE);
            builder.columnLength(POWER_2_24 - 1);
            break;
        case MYSQL_LONGTEXT:
            builder.dataType(BasicType.STRING_TYPE);
            builder.columnLength(POWER_2_32 - 1);
            break;
        case MYSQL_JSON:
            builder.dataType(BasicType.STRING_TYPE);
            break;
        case MYSQL_BINARY:
        case MYSQL_VARBINARY:
            if (typeDefine.getLength() == null || typeDefine.getLength() <= 0) {
                builder.columnLength(1L);
            } else {
                builder.columnLength(typeDefine.getLength());
            }
            builder.dataType(PrimitiveByteArrayType.INSTANCE);
            break;
        case MYSQL_TINYBLOB:
            builder.dataType(PrimitiveByteArrayType.INSTANCE);
            builder.columnLength(POWER_2_8 - 1);
            break;
        case MYSQL_BLOB:
            builder.dataType(PrimitiveByteArrayType.INSTANCE);
            builder.columnLength(POWER_2_16 - 1);
            break;
        case MYSQL_MEDIUMBLOB:
            builder.dataType(PrimitiveByteArrayType.INSTANCE);
            builder.columnLength(POWER_2_24 - 1);
            break;
        case MYSQL_LONGBLOB:
            builder.dataType(PrimitiveByteArrayType.INSTANCE);
            builder.columnLength(POWER_2_32 - 1);
            break;
        case MYSQL_GEOMETRY:
            builder.dataType(PrimitiveByteArrayType.INSTANCE);
            break;
        case MYSQL_DATE:
            builder.dataType(LocalTimeType.LOCAL_DATE_TYPE);
            break;
        case MYSQL_TIME:
            builder.dataType(LocalTimeType.LOCAL_TIME_TYPE);
            builder.scale(typeDefine.getScale());
            break;
        case MYSQL_DATETIME:
        case MYSQL_TIMESTAMP:
            builder.dataType(LocalTimeType.LOCAL_DATE_TIME_TYPE);
            builder.scale(typeDefine.getScale());
            break;
        default:
            throw CommonError.convertToSeaTunnelTypeError(
                    DatabaseIdentifier.MYSQL, mysqlDataType, typeDefine.getName());
    }
    return builder.build();
}
// char(2) and varchar(2) both map to STRING with column length 2, preserving the source type.
@Test public void testConvertChar() { BasicTypeDefine<Object> typeDefine = BasicTypeDefine.builder() .name("test") .columnType("char(2)") .dataType("char") .length(2L) .build(); Column column = MySqlTypeConverter.DEFAULT_INSTANCE.convert(typeDefine); Assertions.assertEquals(typeDefine.getName(), column.getName()); Assertions.assertEquals(BasicType.STRING_TYPE, column.getDataType()); Assertions.assertEquals(2, column.getColumnLength()); Assertions.assertEquals(typeDefine.getColumnType(), column.getSourceType()); typeDefine = BasicTypeDefine.builder() .name("test") .columnType("varchar(2)") .dataType("varchar") .length(2L) .build(); column = MySqlTypeConverter.DEFAULT_INSTANCE.convert(typeDefine); Assertions.assertEquals(typeDefine.getName(), column.getName()); Assertions.assertEquals(BasicType.STRING_TYPE, column.getDataType()); Assertions.assertEquals(2, column.getColumnLength()); Assertions.assertEquals(typeDefine.getColumnType(), column.getSourceType()); }
/**
 * Structural equality of two flat-record object nodes.
 *
 * <p>Two nulls compare equal; a null never equals a non-null node. Non-null nodes must share
 * the same schema name; the common schema is then extracted before the field-wise comparison.
 *
 * @param left  first node, may be {@code null}
 * @param right second node, may be {@code null}
 * @return {@code true} if both nodes are structurally equal
 */
public static boolean equals(FlatRecordTraversalObjectNode left, FlatRecordTraversalObjectNode right) {
    // Handles both-null (equal) and exactly-one-null (not equal) in a single guard.
    if (left == null || right == null) {
        return left == right;
    }
    String leftSchemaName = left.getSchema().getName();
    String rightSchemaName = right.getSchema().getName();
    if (!leftSchemaName.equals(rightSchemaName)) {
        return false;
    }
    extractCommonObjectSchema(left, right);
    return compare(left, right);
}
// Two traversal nodes built from identical flat records must compare equal.
@Test public void shouldEqualOnTheSameFlatRecord() { FlatRecord flatRecord1 = createTestFlatRecord1(); FlatRecord flatRecord2 = createTestFlatRecord1(); Assertions.assertThat(FlatRecordTraversalObjectNodeEquality.equals(new FlatRecordTraversalObjectNode(flatRecord1), new FlatRecordTraversalObjectNode(flatRecord2))).isTrue(); }
/**
 * Human-readable representation for debugging/logging.
 *
 * <p>Wraps the detailed description produced by the two-argument {@code toString(null, null)}
 * overload in a Guava {@code ToStringHelper} (which prefixes the class name).
 * NOTE(review): the semantics of the two null arguments are defined by the overload, which is
 * not visible here — confirm before relying on the exact output format.
 */
@Override public String toString() {
    MoreObjects.ToStringHelper helper = MoreObjects.toStringHelper(this);
    helper.addValue(toString(null, null));
    return helper.toString();
}
// A freshly constructed transaction with no inputs must say "No inputs!" in its toString().
@Test public void testToStringWhenThereAreZeroInputs() { Transaction tx = new Transaction(); assertTrue(tx.toString().contains("No inputs!")); }
/**
 * Opens an SFTP output stream for uploading to {@code file}, honoring append/overwrite
 * semantics from the transfer status and positioning the stream at the requested offset.
 *
 * <p>The returned stream is chunked, caps the number of unconfirmed writes, and guards
 * against double-close while ensuring the remote handle is released.
 *
 * @param file     remote file to write
 * @param status   transfer status (append flag, existence, offset)
 * @param callback connection prompt callback (unused here)
 * @return a status-tracking output stream positioned at {@code status.getOffset()}
 * @throws BackgroundException if the remote open or any SFTP operation fails
 */
@Override public StatusOutputStream<Void> write(final Path file, final TransferStatus status, final ConnectionCallback callback) throws BackgroundException {
    try {
        final EnumSet<OpenMode> flags;
        if(status.isAppend()) {
            if(status.isExists()) {
                // No append flag. Otherwise the offset field of SSH_FXP_WRITE requests is ignored.
                flags = EnumSet.of(OpenMode.WRITE);
            }
            else {
                // Allocate offset
                flags = EnumSet.of(OpenMode.CREAT, OpenMode.WRITE);
            }
        }
        else {
            // A new file is created; if the file already exists, it is opened and truncated to preserve ownership of file.
            if(status.isExists()) {
                if(file.isSymbolicLink()) {
                    // Workaround for #7327
                    session.sftp().remove(file.getAbsolute());
                    flags = EnumSet.of(OpenMode.CREAT, OpenMode.TRUNC, OpenMode.WRITE);
                }
                else {
                    flags = EnumSet.of(OpenMode.TRUNC, OpenMode.WRITE);
                }
            }
            else {
                flags = EnumSet.of(OpenMode.CREAT, OpenMode.TRUNC, OpenMode.WRITE);
            }
        }
        final RemoteFile handle = session.sftp().open(file.getAbsolute(), flags);
        final int maxUnconfirmedWrites = this.getMaxUnconfirmedWrites(status);
        if(log.isInfoEnabled()) {
            log.info(String.format("Using %d unconfirmed writes", maxUnconfirmedWrites));
        }
        if(log.isInfoEnabled()) {
            log.info(String.format("Skipping %d bytes", status.getOffset()));
        }
        // Open stream at offset
        return new VoidStatusOutputStream(new ChunkedOutputStream(handle.new RemoteFileOutputStream(status.getOffset(), maxUnconfirmedWrites) {
            // Guards against closing the underlying stream (and handle) twice.
            private final AtomicBoolean close = new AtomicBoolean();

            @Override
            public void close() throws IOException {
                if(close.get()) {
                    log.warn(String.format("Skip double close of stream %s", this));
                    return;
                }
                try {
                    super.close();
                }
                finally {
                    // Release the remote handle even if flushing the stream failed.
                    handle.close();
                    close.set(true);
                }
            }
        }, preferences.getInteger("sftp.write.chunksize")));
    }
    catch(IOException e) {
        throw new SFTPExceptionMappingService().map("Upload {0} failed", e, file);
    }
}
// Uploads a 64000-byte payload in two ranges (first 1024 bytes, then the remainder as an
// appended chunk at offset 1024) and verifies the reassembled remote content byte-for-byte.
@Test
public void testWriteContentRange() throws Exception {
    final SFTPWriteFeature feature = new SFTPWriteFeature(session);
    final Path test = new Path(new SFTPHomeDirectoryService(session).find(), new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
    final byte[] content = RandomUtils.nextBytes(64000);
    {
        final TransferStatus status = new TransferStatus();
        status.setLength(1024L);
        status.setOffset(0L);
        final OutputStream out = feature.write(test, status, new DisabledConnectionCallback());
        // Write first 1024
        new StreamCopier(status, status).withOffset(status.getOffset()).withLimit(status.getLength()).transfer(new ByteArrayInputStream(content), out);
        out.flush();
        out.close();
    }
    assertTrue(new DefaultFindFeature(session).find(test));
    assertEquals(1024L, new DefaultAttributesFinderFeature(session).find(test).getSize());
    {
        // Remaining chunked transfer with offset
        final TransferStatus status = new TransferStatus().exists(true);
        status.setLength(content.length - 1024L);
        status.setOffset(1024L);
        status.setAppend(true);
        final OutputStream out = feature.write(test, status, new DisabledConnectionCallback());
        new StreamCopier(status, status).withOffset(status.getOffset()).withLimit(status.getLength()).transfer(new ByteArrayInputStream(content), out);
        out.flush();
        out.close();
    }
    final ByteArrayOutputStream out = new ByteArrayOutputStream(content.length);
    IOUtils.copy(new SFTPReadFeature(session).read(test, new TransferStatus().withLength(content.length), new DisabledConnectionCallback()), out);
    assertArrayEquals(content, out.toByteArray());
    new SFTPDeleteFeature(session).delete(Collections.singletonList(test), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
/**
 * Returns a stage that completes with the handler's result when the given stage completes
 * exceptionally, and with the original value otherwise.
 *
 * @param completionStage  the stage to protect
 * @param exceptionHandler maps the failure cause to a fallback value
 * @return a stage that never completes exceptionally unless the handler itself throws
 */
public static <T> CompletionStage<T> recover(CompletionStage<T> completionStage, Function<Throwable, T> exceptionHandler){
    // CompletionStage#exceptionally implements exactly this recovery contract.
    CompletionStage<T> recovered = completionStage.exceptionally(exceptionHandler);
    return recovered;
}
// Exercises the three-argument recover overload (stage, result predicate, result mapper):
// a successfully completed future whose value matches the predicate is mapped to "Bla".
@Test public void shouldRecoverSupplierFromSpecificResult() throws InterruptedException, ExecutionException, TimeoutException { CompletableFuture<String> future = CompletableFuture.completedFuture("Wrong Result"); String result = recover(future, (r) -> r.equals("Wrong Result"), (r) -> "Bla").toCompletableFuture() .get(1, TimeUnit.SECONDS); assertThat(result).isEqualTo("Bla"); }