repo_name
stringlengths
6
101
path
stringlengths
4
300
text
stringlengths
7
1.31M
sam-glendenning/iam
iam-login-service/src/main/webapp/resources/iam/apps/dashboard-app/components/tokens/refreshlist/tokens.refreshlist.component.js
/* * Copyright (c) <NAME> (INFN). 2016-2019 * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ (function() { 'use strict'; function RevokeRefreshTokenController($rootScope, $uibModalInstance, TokensService, token) { var self = this; self.token = token; self.enabled = true; self.error = undefined; self.doRevoke = function (token) { self.error = undefined; self.enabled = false; TokensService.revokeRefreshToken(token.id).then(function (response) { $uibModalInstance.close(token); $rootScope.refreshTokensCount--; self.enabled = true; }).catch(function (error) { console.error(error); self.error = error; self.enabled = true; }); }; self.cancel = function () { $uibModalInstance.dismiss('Dismissed'); }; } function RefreshTokensListController($q, $scope, $rootScope, $uibModal, ModalService, TokensService, scimFactory, clipboardService, Utils, toaster) { var self = this; // pagination controls self.currentPage = 1; self.currentOffset = 1; self.itemsPerPage = 10; self.totalResults = self.total; self.$onInit = function() { console.debug("init RefreshTokensListController", self.tokens, self.currentPage, self.currentOffset, self.totalResults); }; $scope.$on('refreshRefreshTokensList', function(e) { console.debug("received refreshRefreshTokensList event"); self.searchTokens(1); }); self.copyToClipboard = function(toCopy) { clipboardService.copyToClipboard(toCopy); toaster.pop({ type: 'success', body: 'Token copied to clipboard!' 
}); }; self.updateRefreshTokenCount = function(responseValue) { if (self.clientSelected || self.userSelected) { if (responseValue > $rootScope.refreshTokensCount) { $rootScope.refreshTokensCount = responseValue; } } else { $rootScope.refreshTokensCount = responseValue; } }; self.searchTokens = function(page) { console.debug("page = ", page); $rootScope.pageLoadingProgress = 0; self.loaded = false; self.tokens = []; self.currentPage = page; self.currentOffset = ( page - 1 ) * self.itemsPerPage + 1; var handleResponse = function(response){ self.totalResults = response.data.totalResults; angular.forEach(response.data.Resources, function(token){ self.tokens.push(token); }); $rootScope.pageLoadingProgress = 100; self.updateRefreshTokenCount(response.data.totalResults); self.loaded = true; self.loadingModal.dismiss("Cancel"); }; var handleError = function(error) { self.loadingModal.dismiss("Error"); toaster.pop({type: 'error', body: error}); }; self.loadingModal = $uibModal.open({ animation: false, templateUrl : '/resources/iam/apps/dashboard-app/templates/loading-modal.html' }); self.loadingModal.opened.then(function(){ self.getRefreshTokenList(self.currentOffset, self.itemsPerPage).then(handleResponse, handleError); }); } self.getRefreshTokenList = function(startIndex, count) { if (self.clientSelected && self.userSelected) { return TokensService.getRefreshTokensFilteredByUserAndClient(startIndex, count, self.userSelected.userName, self.clientSelected.clientId); } if (self.clientSelected) { return TokensService.getRefreshTokensFilteredByClient(startIndex, count, self.clientSelected.clientId); } if (self.userSelected) { return TokensService.getRefreshTokensFilteredByUser(startIndex, count, self.userSelected.userName); } return TokensService.getRefreshTokens(startIndex, count); } self.handleRevokeSuccess = function(token) { self.enabled = true; toaster.pop({ type: 'success', body: 'Token Revoked' }); self.totalResults--; if (self.currentOffset > self.totalResults) { if 
(self.currentPage > 1) { self.currentPage--; } } self.searchTokens(self.currentPage); }; self.openRevokeRefreshTokenDialog = function (token) { var modalInstance = $uibModal.open({ templateUrl: '/resources/iam/apps/dashboard-app/components/tokens/refreshlist/token.revoke.dialog.html', controller: RevokeRefreshTokenController, controllerAs: '$ctrl', resolve: { token: token } }); modalInstance.result.then(self.handleRevokeSuccess); }; } angular .module('dashboardApp') .component( 'tokensRefreshlist', { require : { $parent : '^tokens' }, bindings: { clients: '=', users: '=', tokens: '<', total: '<' }, templateUrl : '/resources/iam/apps/dashboard-app/components/tokens/refreshlist/tokens.refreshlist.component.html', controller : [ '$q', '$scope', '$rootScope', '$uibModal', 'ModalService', 'TokensService', 'scimFactory', 'clipboardService', 'Utils', 'toaster', RefreshTokensListController ] }); })();
mvr/jiggle
src/AABB.c
#include "jiggle.h"

/* Axis-aligned bounding box helpers.
 *
 * A jgAABB carries an isValid flag: an "invalid" (null) box represents the
 * empty set and must never contribute its (0,0)-(0,0) coordinates to any
 * geometric query.  jgAABBContains already honored the flag; this file now
 * honors it consistently in jgAABBCombine and jgAABBIntersects as well.
 */

/* Construct a box from explicit min/max corners.  Caller guarantees
 * min <= max component-wise; this is not checked here. */
jgAABB jgAABBNewFromVector2(jgVector2 min, jgVector2 max)
{
     return (jgAABB){min, max, true};
}

/* Construct a box from four scalar coordinates (x1,y1)=min, (x2,y2)=max. */
jgAABB jgAABBNewFromFloat(float x1, float y1, float x2, float y2)
{
     return jgAABBNewFromVector2(jgVector2New(x1, y1), jgVector2New(x2, y2));
}

/* The empty (invalid) box.  Coordinates are zeroed but meaningless;
 * isValid=false is the authoritative marker. */
jgAABB jgAABBNull()
{
     return (jgAABB){jgVector2New(0.0, 0.0), jgVector2New(0.0, 0.0), false};
}

/* Return a grown (if needed) to contain pt.  Expanding an invalid box
 * yields the degenerate box {pt, pt}. */
jgAABB jgAABBExpandToInclude(jgAABB a, jgVector2 pt)
{
     if(a.isValid)
     {
          jgAABB ret = a;
          /* min/max are disjoint per axis for a valid box, so else-if is safe. */
          if(pt.x < a.min.x)
               ret.min.x = pt.x;
          else if(pt.x > a.max.x)
               ret.max.x = pt.x;
          if(pt.y < a.min.y)
               ret.min.y = pt.y;
          else if(pt.y > a.max.y)
               ret.max.y = pt.y;
          return ret;
     }
     else
     {
          return (jgAABB){pt, pt, true};
     }
}

/* Union of two boxes.
 * Fix: an invalid b previously injected its zeroed corners into a,
 * silently expanding a to include the origin.  The empty set is now a
 * proper identity element for the union. */
jgAABB jgAABBCombine(jgAABB a, jgAABB b)
{
     if(!b.isValid)
          return a;
     a = jgAABBExpandToInclude(a, b.min);
     a = jgAABBExpandToInclude(a, b.max);
     return a;
}

/* Point-in-box test (inclusive of the boundary).  Invalid boxes contain
 * nothing. */
bool jgAABBContains(jgAABB a, jgVector2 pt)
{
     if(!a.isValid)
          return false;

     return ((pt.x >= a.min.x) && (pt.x <= a.max.x) &&
             (pt.y >= a.min.y) && (pt.y <= a.max.y));
}

/* Overlap test (touching edges count as intersecting).
 * Fix: the empty set intersects nothing — previously an invalid box was
 * treated as the degenerate box at the origin and could report spurious
 * intersections, inconsistent with jgAABBContains. */
bool jgAABBIntersects(jgAABB a, jgAABB b)
{
     if(!a.isValid || !b.isValid)
          return false;

     return (a.min.x <= b.max.x) && (a.max.x >= b.min.x) &&
            (a.min.y <= b.max.y) && (a.max.y >= b.min.y);
}

/* Midpoint of the box.  Result is meaningless for an invalid box
 * (returns the origin); callers should check isValid first. */
jgVector2 jgAABBCenter(jgAABB a)
{
     return (jgVector2){(a.min.x + a.max.x) / 2,
                        (a.min.y + a.max.y) / 2};
}
Yash-Wasalwar-07/Boss2D
Boss2D/addon/webrtc-jumpingyang001_for_boss/media/engine/fakewebrtcvideoengine.cc
<gh_stars>0 /* * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved. * * Use of this source code is governed by a BSD-style license * that can be found in the LICENSE file in the root of the source * tree. An additional intellectual property rights grant can be found * in the file PATENTS. All contributing project authors may * be found in the AUTHORS file in the root of the source tree. */ #include BOSS_WEBRTC_U_media__engine__fakewebrtcvideoengine_h //original-code:"media/engine/fakewebrtcvideoengine.h" #include BOSS_WEBRTC_U_media__base__codec_h //original-code:"media/base/codec.h" #include BOSS_WEBRTC_U_media__engine__simulcast_encoder_adapter_h //original-code:"media/engine/simulcast_encoder_adapter.h" #include BOSS_WEBRTC_U_media__engine__webrtcvideodecoderfactory_h //original-code:"media/engine/webrtcvideodecoderfactory.h" #include BOSS_WEBRTC_U_media__engine__webrtcvideoencoderfactory_h //original-code:"media/engine/webrtcvideoencoderfactory.h" #include BOSS_WEBRTC_U_modules__video_coding__include__video_error_codes_h //original-code:"modules/video_coding/include/video_error_codes.h" #include BOSS_WEBRTC_U_rtc_base__gunit_h //original-code:"rtc_base/gunit.h" #include BOSS_WEBRTC_U_rtc_base__stringutils_h //original-code:"rtc_base/stringutils.h" namespace cricket { namespace { static const int kEventTimeoutMs = 10000; bool IsFormatSupported( const std::vector<webrtc::SdpVideoFormat>& supported_formats, const webrtc::SdpVideoFormat& format) { for (const webrtc::SdpVideoFormat& supported_format : supported_formats) { if (IsSameCodec(format.name, format.parameters, supported_format.name, supported_format.parameters)) { return true; } } return false; } } // namespace // Decoder. 
FakeWebRtcVideoDecoder::FakeWebRtcVideoDecoder( FakeWebRtcVideoDecoderFactory* factory) : num_frames_received_(0), factory_(factory) {} FakeWebRtcVideoDecoder::~FakeWebRtcVideoDecoder() { if (factory_) { factory_->DecoderDestroyed(this); } } int32_t FakeWebRtcVideoDecoder::InitDecode(const webrtc::VideoCodec*, int32_t) { return WEBRTC_VIDEO_CODEC_OK; } int32_t FakeWebRtcVideoDecoder::Decode(const webrtc::EncodedImage&, bool, const webrtc::CodecSpecificInfo*, int64_t) { num_frames_received_++; return WEBRTC_VIDEO_CODEC_OK; } int32_t FakeWebRtcVideoDecoder::RegisterDecodeCompleteCallback( webrtc::DecodedImageCallback*) { return WEBRTC_VIDEO_CODEC_OK; } int32_t FakeWebRtcVideoDecoder::Release() { return WEBRTC_VIDEO_CODEC_OK; } int FakeWebRtcVideoDecoder::GetNumFramesReceived() const { return num_frames_received_; } // Decoder factory. FakeWebRtcVideoDecoderFactory::FakeWebRtcVideoDecoderFactory() : num_created_decoders_(0) {} std::vector<webrtc::SdpVideoFormat> FakeWebRtcVideoDecoderFactory::GetSupportedFormats() const { std::vector<webrtc::SdpVideoFormat> formats; for (const webrtc::SdpVideoFormat& format : supported_codec_formats_) { // Don't add same codec twice. 
if (!IsFormatSupported(formats, format)) formats.push_back(format); } return formats; } std::unique_ptr<webrtc::VideoDecoder> FakeWebRtcVideoDecoderFactory::CreateVideoDecoder( const webrtc::SdpVideoFormat& format) { if (IsFormatSupported(supported_codec_formats_, format)) { num_created_decoders_++; std::unique_ptr<FakeWebRtcVideoDecoder> decoder = absl::make_unique<FakeWebRtcVideoDecoder>(this); decoders_.push_back(decoder.get()); return decoder; } return nullptr; } void FakeWebRtcVideoDecoderFactory::DecoderDestroyed( FakeWebRtcVideoDecoder* decoder) { decoders_.erase(std::remove(decoders_.begin(), decoders_.end(), decoder), decoders_.end()); } void FakeWebRtcVideoDecoderFactory::AddSupportedVideoCodecType( const webrtc::SdpVideoFormat& format) { supported_codec_formats_.push_back(format); } int FakeWebRtcVideoDecoderFactory::GetNumCreatedDecoders() { return num_created_decoders_; } const std::vector<FakeWebRtcVideoDecoder*>& FakeWebRtcVideoDecoderFactory::decoders() { return decoders_; } // Encoder. 
FakeWebRtcVideoEncoder::FakeWebRtcVideoEncoder( FakeWebRtcVideoEncoderFactory* factory) : init_encode_event_(false, false), num_frames_encoded_(0), factory_(factory) {} FakeWebRtcVideoEncoder::~FakeWebRtcVideoEncoder() { if (factory_) { factory_->EncoderDestroyed(this); } } int32_t FakeWebRtcVideoEncoder::InitEncode( const webrtc::VideoCodec* codecSettings, int32_t numberOfCores, size_t maxPayloadSize) { rtc::CritScope lock(&crit_); codec_settings_ = *codecSettings; init_encode_event_.Set(); return WEBRTC_VIDEO_CODEC_OK; } int32_t FakeWebRtcVideoEncoder::Encode( const webrtc::VideoFrame& inputImage, const webrtc::CodecSpecificInfo* codecSpecificInfo, const std::vector<webrtc::FrameType>* frame_types) { rtc::CritScope lock(&crit_); ++num_frames_encoded_; init_encode_event_.Set(); return WEBRTC_VIDEO_CODEC_OK; } int32_t FakeWebRtcVideoEncoder::RegisterEncodeCompleteCallback( webrtc::EncodedImageCallback* callback) { return WEBRTC_VIDEO_CODEC_OK; } int32_t FakeWebRtcVideoEncoder::Release() { return WEBRTC_VIDEO_CODEC_OK; } int32_t FakeWebRtcVideoEncoder::SetChannelParameters(uint32_t packetLoss, int64_t rtt) { return WEBRTC_VIDEO_CODEC_OK; } int32_t FakeWebRtcVideoEncoder::SetRateAllocation( const webrtc::VideoBitrateAllocation& allocation, uint32_t framerate) { return WEBRTC_VIDEO_CODEC_OK; } bool FakeWebRtcVideoEncoder::WaitForInitEncode() { return init_encode_event_.Wait(kEventTimeoutMs); } webrtc::VideoCodec FakeWebRtcVideoEncoder::GetCodecSettings() { rtc::CritScope lock(&crit_); return codec_settings_; } int FakeWebRtcVideoEncoder::GetNumEncodedFrames() { rtc::CritScope lock(&crit_); return num_frames_encoded_; } // Video encoder factory. 
FakeWebRtcVideoEncoderFactory::FakeWebRtcVideoEncoderFactory() : created_video_encoder_event_(false, false), num_created_encoders_(0), encoders_have_internal_sources_(false), vp8_factory_mode_(false) {} std::vector<webrtc::SdpVideoFormat> FakeWebRtcVideoEncoderFactory::GetSupportedFormats() const { std::vector<webrtc::SdpVideoFormat> formats; for (const webrtc::SdpVideoFormat& format : formats_) { // Don't add same codec twice. if (!IsFormatSupported(formats, format)) formats.push_back(format); } return formats; } std::unique_ptr<webrtc::VideoEncoder> FakeWebRtcVideoEncoderFactory::CreateVideoEncoder( const webrtc::SdpVideoFormat& format) { rtc::CritScope lock(&crit_); std::unique_ptr<webrtc::VideoEncoder> encoder; if (IsFormatSupported(formats_, format)) { if (CodecNamesEq(format.name.c_str(), kVp8CodecName) && !vp8_factory_mode_) { // The simulcast adapter will ask this factory for multiple VP8 // encoders. Enter vp8_factory_mode so that we now create these encoders // instead of more adapters. 
vp8_factory_mode_ = true; encoder = absl::make_unique<webrtc::SimulcastEncoderAdapter>(this, format); } else { num_created_encoders_++; created_video_encoder_event_.Set(); encoder = absl::make_unique<FakeWebRtcVideoEncoder>(this); encoders_.push_back(static_cast<FakeWebRtcVideoEncoder*>(encoder.get())); } } return encoder; } webrtc::VideoEncoderFactory::CodecInfo FakeWebRtcVideoEncoderFactory::QueryVideoEncoder( const webrtc::SdpVideoFormat& format) const { webrtc::VideoEncoderFactory::CodecInfo info; info.has_internal_source = encoders_have_internal_sources_; info.is_hardware_accelerated = true; return info; } bool FakeWebRtcVideoEncoderFactory::WaitForCreatedVideoEncoders( int num_encoders) { int64_t start_offset_ms = rtc::TimeMillis(); int64_t wait_time = kEventTimeoutMs; do { if (GetNumCreatedEncoders() >= num_encoders) return true; wait_time = kEventTimeoutMs - (rtc::TimeMillis() - start_offset_ms); } while (wait_time > 0 && created_video_encoder_event_.Wait(wait_time)); return false; } void FakeWebRtcVideoEncoderFactory::EncoderDestroyed( FakeWebRtcVideoEncoder* encoder) { rtc::CritScope lock(&crit_); encoders_.erase(std::remove(encoders_.begin(), encoders_.end(), encoder), encoders_.end()); } void FakeWebRtcVideoEncoderFactory::set_encoders_have_internal_sources( bool internal_source) { encoders_have_internal_sources_ = internal_source; } void FakeWebRtcVideoEncoderFactory::AddSupportedVideoCodec( const webrtc::SdpVideoFormat& format) { formats_.push_back(format); } void FakeWebRtcVideoEncoderFactory::AddSupportedVideoCodecType( const std::string& name) { // This is to match the default H264 params of cricket::VideoCodec. 
cricket::VideoCodec video_codec(name); formats_.push_back( webrtc::SdpVideoFormat(video_codec.name, video_codec.params)); } int FakeWebRtcVideoEncoderFactory::GetNumCreatedEncoders() { rtc::CritScope lock(&crit_); return num_created_encoders_; } const std::vector<FakeWebRtcVideoEncoder*> FakeWebRtcVideoEncoderFactory::encoders() { rtc::CritScope lock(&crit_); return encoders_; } } // namespace cricket
dkzhang/RmsGo
ResourceSM/model/resGNode/json.go
package resGNode

import (
	"encoding/json"
	"fmt"
)

// ToJson serializes gn to a compact JSON string.
// Returns an empty string and a wrapped error if marshalling fails.
func ToJson(gn ResGNode) (string, error) {
	bj, err := json.Marshal(gn)
	if err != nil {
		return "", fmt.Errorf("json Marshal ResGNode error: %v", err)
	}
	return string(bj), nil
}

// ToJsonIndent serializes gn to an indented (pretty-printed) JSON string.
// Returns an empty string and a wrapped error if marshalling fails.
func ToJsonIndent(gn ResGNode) (string, error) {
	bj, err := json.MarshalIndent(gn, "", "\t")
	if err != nil {
		// Fix: previously reported "json Marshal", misattributing the
		// failing call; this path uses MarshalIndent.
		return "", fmt.Errorf("json MarshalIndent ResGNode error: %v", err)
	}
	return string(bj), nil
}

// LoadFromJson deserializes bJson into a ResGNode.
// Returns the zero ResGNode and a wrapped error if unmarshalling fails.
func LoadFromJson(bJson []byte) (gn ResGNode, err error) {
	err = json.Unmarshal(bJson, &gn)
	if err != nil {
		return ResGNode{}, fmt.Errorf("json UnMarshal ResGNode error: %v", err)
	}
	return gn, nil
}
olegstepanov/intellij-community
platform/testRunner/src/com/intellij/execution/testframework/ui/AbstractTestTreeBuilderBase.java
<reponame>olegstepanov/intellij-community
// Copyright 2000-2018 JetBrains s.r.o. Use of this source code is governed by the Apache 2.0 license that can be found in the LICENSE file.
package com.intellij.execution.testframework.ui;

import com.intellij.execution.testframework.AbstractTestProxy;
import com.intellij.execution.testframework.TestFrameworkRunningModel;
import com.intellij.openapi.Disposable;

/**
 * Minimal contract for the test-results tree builder.
 * Extends {@link Disposable} so the builder's resources are released with its UI.
 */
public interface AbstractTestTreeBuilderBase extends Disposable {
  /**
   * Allow test animator to update the tree.
   *
   * @param testProxy the test node whose row (and ancestor rows) should be repainted
   */
  void repaintWithParents(AbstractTestProxy testProxy);

  /**
   * Update comparator used in tree.
   *
   * @param model the running-test model whose current settings determine the sort order
   */
  void setTestsComparator(TestFrameworkRunningModel model);
}
phatblat/macOSPrivateFrameworks
PrivateFrameworks/Rapport/RPStreamServer.h
// // Generated by class-dump 3.5 (64 bit). // // class-dump is Copyright (C) 1997-1998, 2000-2001, 2004-2013 by <NAME>. // #import "NSObject.h" @class NSObject<OS_dispatch_queue>; @interface RPStreamServer : NSObject { NSObject<OS_dispatch_queue> *_dispatchQueue; BOOL _invalidateCalled; BOOL _invalidateDone; id _selfRef; struct NSMutableDictionary *_streamSessions; unsigned int _streamFlags; CDUnknownBlockType _invalidationHandler; id <RPMessageable> _messenger; CDUnknownBlockType _streamAcceptHandler; CDUnknownBlockType _streamPrepareHandlerEx; CDUnknownBlockType _streamPrepareHandler; } @property(copy, nonatomic) CDUnknownBlockType streamPrepareHandler; // @synthesize streamPrepareHandler=_streamPrepareHandler; @property(copy, nonatomic) CDUnknownBlockType streamPrepareHandlerEx; // @synthesize streamPrepareHandlerEx=_streamPrepareHandlerEx; @property(nonatomic) unsigned int streamFlags; // @synthesize streamFlags=_streamFlags; @property(copy, nonatomic) CDUnknownBlockType streamAcceptHandler; // @synthesize streamAcceptHandler=_streamAcceptHandler; @property(retain, nonatomic) id <RPMessageable> messenger; // @synthesize messenger=_messenger; @property(copy, nonatomic) CDUnknownBlockType invalidationHandler; // @synthesize invalidationHandler=_invalidationHandler; - (void).cxx_destruct; - (void)_handleStopRequest:(id)arg1 options:(id)arg2 responseHandler:(CDUnknownBlockType)arg3; - (void)_handleStartRequest:(id)arg1 options:(id)arg2 responseHandler:(CDUnknownBlockType)arg3; - (void)_invalidated; - (void)_invalidate; - (void)invalidate; - (void)_activateWithCompletion:(CDUnknownBlockType)arg1; - (void)activateWithCompletion:(CDUnknownBlockType)arg1; - (id)init; @end
metux/chromium-deb
chrome/browser/chromeos/net/wake_on_wifi_manager.cc
// Copyright 2014 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "chrome/browser/chromeos/net/wake_on_wifi_manager.h" #include <memory> #include <string> #include "base/macros.h" #include "base/memory/ptr_util.h" #include "base/sys_info.h" #include "base/values.h" #include "chrome/browser/chrome_notification_types.h" #include "chrome/browser/chromeos/net/wake_on_wifi_connection_observer.h" #include "chrome/browser/gcm/gcm_profile_service_factory.h" #include "chrome/browser/profiles/profile.h" #include "chromeos/chromeos_switches.h" #include "chromeos/login/login_state.h" #include "chromeos/network/device_state.h" #include "chromeos/network/network_device_handler.h" #include "chromeos/network/network_handler.h" #include "chromeos/network/network_state_handler.h" #include "chromeos/network/network_type_pattern.h" #include "components/gcm_driver/gcm_driver.h" #include "components/gcm_driver/gcm_profile_service.h" #include "content/public/browser/browser_thread.h" #include "content/public/browser/notification_service.h" #include "content/public/browser/notification_source.h" #include "third_party/cros_system_api/dbus/service_constants.h" namespace chromeos { namespace { const char kWakeOnWifiNone[] = "none"; const char kWakeOnWifiPacket[] = "packet"; const char kWakeOnWifiDarkConnect[] = "darkconnect"; const char kWakeOnWifiPacketAndDarkConnect[] = "packet_and_darkconnect"; std::string WakeOnWifiFeatureToString( WakeOnWifiManager::WakeOnWifiFeature feature) { switch (feature) { case WakeOnWifiManager::WAKE_ON_WIFI_NONE: return kWakeOnWifiNone; case WakeOnWifiManager::WAKE_ON_WIFI_PACKET: return kWakeOnWifiPacket; case WakeOnWifiManager::WAKE_ON_WIFI_DARKCONNECT: return kWakeOnWifiDarkConnect; case WakeOnWifiManager::WAKE_ON_WIFI_PACKET_AND_DARKCONNECT: return kWakeOnWifiPacketAndDarkConnect; case WakeOnWifiManager::INVALID: return std::string(); case 
WakeOnWifiManager::NOT_SUPPORTED: NOTREACHED(); return std::string(); } NOTREACHED() << "Unknown wake on wifi feature: " << feature; return std::string(); } // Weak pointer. This class is owned by ChromeBrowserMainPartsChromeos. WakeOnWifiManager* g_wake_on_wifi_manager = NULL; } // namespace // static WakeOnWifiManager* WakeOnWifiManager::Get() { DCHECK(g_wake_on_wifi_manager); DCHECK_CURRENTLY_ON(content::BrowserThread::UI); return g_wake_on_wifi_manager; } // static bool WakeOnWifiManager::IsWakeOnPacketEnabled(WakeOnWifiFeature feature) { return feature & WakeOnWifiManager::WAKE_ON_WIFI_PACKET; } WakeOnWifiManager::WakeOnWifiManager() : current_feature_(WakeOnWifiManager::INVALID), wifi_properties_received_(false), extension_event_observer_(new ExtensionEventObserver()), weak_ptr_factory_(this) { // This class must be constructed before any users are logged in, i.e., before // any profiles are created or added to the ProfileManager. Additionally, // IsUserLoggedIn always returns true when we are not running on a Chrome OS // device so this check should only run on real devices. 
CHECK(!base::SysInfo::IsRunningOnChromeOS() || !LoginState::Get()->IsUserLoggedIn()); DCHECK(!g_wake_on_wifi_manager); DCHECK_CURRENTLY_ON(content::BrowserThread::UI); g_wake_on_wifi_manager = this; registrar_.Add(this, chrome::NOTIFICATION_PROFILE_ADDED, content::NotificationService::AllBrowserContextsAndSources()); registrar_.Add(this, chrome::NOTIFICATION_PROFILE_DESTROYED, content::NotificationService::AllBrowserContextsAndSources()); NetworkHandler::Get()->network_state_handler()->AddObserver(this, FROM_HERE); GetWifiDeviceProperties(); } WakeOnWifiManager::~WakeOnWifiManager() { DCHECK(g_wake_on_wifi_manager); DCHECK_CURRENTLY_ON(content::BrowserThread::UI); if (current_feature_ != NOT_SUPPORTED) { NetworkHandler::Get()->network_state_handler()->RemoveObserver(this, FROM_HERE); } g_wake_on_wifi_manager = NULL; } void WakeOnWifiManager::OnPreferenceChanged( WakeOnWifiManager::WakeOnWifiFeature feature) { DCHECK_CURRENTLY_ON(content::BrowserThread::UI); if (current_feature_ == NOT_SUPPORTED) return; if (!switches::WakeOnWifiEnabled()) feature = WAKE_ON_WIFI_NONE; if (feature == current_feature_) return; current_feature_ = feature; // Update value of member variable feature for all connection observers. 
for (const auto& kv_pair : connection_observers_) { kv_pair.second->set_feature(current_feature_); } if (wifi_properties_received_) HandleWakeOnWifiFeatureUpdated(); } bool WakeOnWifiManager::WakeOnWifiSupported() { return current_feature_ != NOT_SUPPORTED && current_feature_ != INVALID; } void WakeOnWifiManager::Observe(int type, const content::NotificationSource& source, const content::NotificationDetails& details) { switch (type) { case chrome::NOTIFICATION_PROFILE_ADDED: { OnProfileAdded(content::Source<Profile>(source).ptr()); break; } case chrome::NOTIFICATION_PROFILE_DESTROYED: { OnProfileDestroyed(content::Source<Profile>(source).ptr()); break; } default: NOTREACHED(); } } void WakeOnWifiManager::DeviceListChanged() { if (current_feature_ != NOT_SUPPORTED) GetWifiDeviceProperties(); } void WakeOnWifiManager::DevicePropertiesUpdated(const DeviceState* device) { if (device->Matches(NetworkTypePattern::WiFi()) && current_feature_ != NOT_SUPPORTED) { GetWifiDeviceProperties(); } } void WakeOnWifiManager::HandleWakeOnWifiFeatureUpdated() { const DeviceState* device = NetworkHandler::Get()->network_state_handler()->GetDeviceStateByType( NetworkTypePattern::WiFi()); if (!device) return; std::string feature_string(WakeOnWifiFeatureToString(current_feature_)); DCHECK(!feature_string.empty()); NetworkHandler::Get()->network_device_handler()->SetDeviceProperty( device->path(), shill::kWakeOnWiFiFeaturesEnabledProperty, base::Value(feature_string), base::Bind(&base::DoNothing), network_handler::ErrorCallback()); bool wake_on_packet_enabled = IsWakeOnPacketEnabled(current_feature_); for (const auto& kv_pair : connection_observers_) { Profile* profile = kv_pair.first; gcm::GCMProfileServiceFactory::GetForProfile(profile) ->driver() ->WakeFromSuspendForHeartbeat(wake_on_packet_enabled); } extension_event_observer_->SetShouldDelaySuspend(wake_on_packet_enabled); } void WakeOnWifiManager::GetWifiDeviceProperties() { const DeviceState* device = 
NetworkHandler::Get()->network_state_handler()->GetDeviceStateByType( NetworkTypePattern::WiFi()); if (!device) return; NetworkHandler::Get()->network_device_handler()->GetDeviceProperties( device->path(), base::Bind(&WakeOnWifiManager::GetDevicePropertiesCallback, weak_ptr_factory_.GetWeakPtr()), network_handler::ErrorCallback()); } void WakeOnWifiManager::GetDevicePropertiesCallback( const std::string& device_path, const base::DictionaryValue& properties) { std::string enabled; if (!properties.HasKey(shill::kWakeOnWiFiFeaturesEnabledProperty) || !properties.GetString(shill::kWakeOnWiFiFeaturesEnabledProperty, &enabled) || enabled == shill::kWakeOnWiFiFeaturesEnabledNotSupported) { current_feature_ = NOT_SUPPORTED; connection_observers_.clear(); NetworkHandler::Get()->network_state_handler()->RemoveObserver(this, FROM_HERE); registrar_.RemoveAll(); extension_event_observer_.reset(); return; } // We always resend the wake on wifi setting unless it hasn't been set yet. // This covers situations where shill restarts or ends up recreating the wifi // device (crbug.com/475199). if (current_feature_ != INVALID) HandleWakeOnWifiFeatureUpdated(); if (wifi_properties_received_) return; wifi_properties_received_ = true; NetworkHandler::Get() ->network_device_handler() ->RemoveAllWifiWakeOnPacketConnections(base::Bind(&base::DoNothing), network_handler::ErrorCallback()); for (const auto& kv_pair : connection_observers_) { kv_pair.second->HandleWifiDevicePropertiesReady(); } } void WakeOnWifiManager::OnProfileAdded(Profile* profile) { auto result = connection_observers_.find(profile); // Only add the profile if it is not already present. 
if (result != connection_observers_.end()) return; connection_observers_[profile] = base::WrapUnique(new WakeOnWifiConnectionObserver( profile, wifi_properties_received_, current_feature_, NetworkHandler::Get()->network_device_handler())); gcm::GCMProfileServiceFactory::GetForProfile(profile) ->driver() ->WakeFromSuspendForHeartbeat( IsWakeOnPacketEnabled(current_feature_)); } void WakeOnWifiManager::OnProfileDestroyed(Profile* profile) { connection_observers_.erase(profile); } } // namespace chromeos
Spoony1337/OrbitHCF
src/main/java/rip/orbit/hcteams/events/killtheking/KingEvent.java
<reponame>Spoony1337/OrbitHCF package rip.orbit.hcteams.events.killtheking; import lombok.Getter; import lombok.Setter; import org.bukkit.Bukkit; import org.bukkit.ChatColor; import org.bukkit.Material; import org.bukkit.enchantments.Enchantment; import org.bukkit.entity.Player; import org.bukkit.inventory.ItemStack; import org.bukkit.inventory.PlayerInventory; import org.bukkit.potion.PotionEffect; import org.bukkit.potion.PotionEffectType; import rip.orbit.hcteams.server.SpawnTagHandler; import rip.orbit.hcteams.util.object.ItemBuilder; public class KingEvent { @Setter private static boolean started; @Getter @Setter private static boolean scoreboardInfo = true; @Getter @Setter private static Player focusedPlayer; @Getter @Setter private static String reward; @Setter private static long time; public static long getTime() { return time - System.currentTimeMillis(); } public static void equipPlayer() { if (focusedPlayer == null || !focusedPlayer.isOnline()) { return; } PlayerInventory inventory = focusedPlayer.getInventory(); inventory.clear(); inventory.setArmorContents(null); inventory.setHelmet(new ItemBuilder(Material.DIAMOND_HELMET).enchantment(Enchantment.PROTECTION_ENVIRONMENTAL, 4).name("&c&lKing Helmet").enchantment(Enchantment.DURABILITY, 10).build()); inventory.setChestplate(new ItemBuilder(Material.DIAMOND_CHESTPLATE).enchantment(Enchantment.PROTECTION_ENVIRONMENTAL, 4).name("&c&lKing ChestPlate").enchantment(Enchantment.DURABILITY, 10).build()); inventory.setLeggings(new ItemBuilder(Material.DIAMOND_LEGGINGS).enchantment(Enchantment.PROTECTION_ENVIRONMENTAL, 4).name("&c&lKing Leggings").enchantment(Enchantment.DURABILITY, 10).build()); inventory.setBoots(new ItemBuilder(Material.DIAMOND_BOOTS).enchantment(Enchantment.PROTECTION_FALL, 5).name("&c&lKing Boots").enchantment(Enchantment.PROTECTION_ENVIRONMENTAL, 4).enchantment(Enchantment.DURABILITY, 10).build()); for (int i = 0; i < 36; i++) { inventory.addItem(new 
// NOTE(review): this chunk begins inside a method whose head (and the loop the
// first '}' closes) lies outside the visible region; the lines below finish
// equipping the focused "king" player with gear and permanent effects.
ItemBuilder(Material.POTION).data(16421).build()); }
        // Hotbar loadout for the king: enchanted sword, pearls, gapples, knockback stick.
        inventory.setItem(0, new ItemBuilder(Material.DIAMOND_SWORD).name("&c&lKing Sword").enchantment(Enchantment.DAMAGE_ALL, 5).enchantment(Enchantment.FIRE_ASPECT, 2).lore("&7" + focusedPlayer.getName() + '\'' + (focusedPlayer.getName().endsWith("s") ? "" : "s") + " sword").build());
        inventory.setItem(1, new ItemStack(Material.ENDER_PEARL, 32));
        inventory.setItem(7, new ItemBuilder(Material.GOLDEN_APPLE).amount(32).name("&6&lGapples").build());
        inventory.setItem(8, new ItemBuilder(Material.STICK).name("&c&lKing Knockback").enchantment(Enchantment.KNOCKBACK, 8).build());
        // Effectively-permanent potion effects (Integer.MAX_VALUE ticks) applied to the king.
        focusedPlayer.addPotionEffect(new PotionEffect(PotionEffectType.SPEED, Integer.MAX_VALUE, 1));
        focusedPlayer.addPotionEffect(new PotionEffect(PotionEffectType.FIRE_RESISTANCE, Integer.MAX_VALUE, 0));
        focusedPlayer.addPotionEffect(new PotionEffect(PotionEffectType.INCREASE_DAMAGE, Integer.MAX_VALUE, 0));
        focusedPlayer.addPotionEffect(new PotionEffect(PotionEffectType.SLOW, Integer.MAX_VALUE, 0));
        focusedPlayer.updateInventory();
    }

    /**
     * Returns whether the event is currently running.
     *
     * @param ignore when {@code true}, only the raw {@code started} flag is consulted;
     *               when {@code false}, the event also needs a non-null reward and a
     *               non-null focused (king) player to count as started.
     */
    public static boolean isStarted(boolean ignore) {
        if (ignore) {
            return started;
        }
        return started && getReward() != null && getFocusedPlayer() != null;
    }

    /**
     * Tears the event down: clears all event state and, if the king is still
     * online, strips their effects/inventory, teleports them back to spawn and
     * removes their spawn-protection tag.
     */
    public static void clean() {
        setStarted(false);
        setReward(null);
        if (focusedPlayer != null && focusedPlayer.isOnline()) {
            focusedPlayer.getActivePotionEffects().forEach(effect -> focusedPlayer.removePotionEffect(effect.getType()));
            focusedPlayer.getInventory().clear();
            focusedPlayer.getInventory().setArmorContents(null);
            focusedPlayer.updateInventory();
            // Spawn of the first (default) world.
            focusedPlayer.teleport(Bukkit.getWorlds().get(0).getSpawnLocation());
            SpawnTagHandler.removeTag(focusedPlayer);
        }
        setFocusedPlayer(null);
        setTime(0L);
    }

    /**
     * Chat banner broadcast when the event starts. The \u2588 (full block)
     * characters draw a large "K" pixel-art logo next to the announcement text.
     */
    public static String[] getStartedAlert() {
        return new String[]{
                ChatColor.GRAY + "\u2588\u2588\u2588\u2588\u2588\u2588\u2588",
                ChatColor.GRAY + "\u2588" + ChatColor.RED + "\u2588" + ChatColor.GRAY + "\u2588\u2588\u2588" + ChatColor.RED + "\u2588" + ChatColor.GRAY + "\u2588",
                ChatColor.GRAY + "\u2588" + ChatColor.RED + "\u2588" + ChatColor.GRAY + "\u2588\u2588" + ChatColor.RED + "\u2588" + ChatColor.GRAY + "\u2588\u2588" + " " + ChatColor.DARK_RED + "[Kill The King Event]",
                ChatColor.GRAY + "\u2588" + ChatColor.RED + "\u2588\u2588\u2588" + ChatColor.GRAY + "\u2588\u2588\u2588",
                ChatColor.GRAY + "\u2588" + ChatColor.RED + "\u2588" + ChatColor.GRAY + "\u2588\u2588" + ChatColor.RED + "\u2588" + ChatColor.GRAY + "\u2588\u2588" + " " + ChatColor.GOLD + "If you kill " + focusedPlayer.getName() + ',',
                ChatColor.GRAY + "\u2588" + ChatColor.RED + "\u2588" + ChatColor.GRAY + "\u2588\u2588\u2588" + ChatColor.RED + "\u2588" + ChatColor.GRAY + "\u2588 " + ChatColor.GOLD + "you'll win the event!",
                ChatColor.GRAY + "\u2588" + ChatColor.RED + "\u2588" + ChatColor.GRAY + "\u2588\u2588\u2588" + ChatColor.RED + "\u2588" + ChatColor.GRAY + "\u2588",
                ChatColor.GRAY + "\u2588\u2588\u2588\u2588\u2588\u2588\u2588"
        };
    }

    /**
     * Chat banner broadcast when the event ends, naming the winner.
     *
     * @param playerName display name of the player who killed the king
     */
    static String[] getWinnerAlert(String playerName) {
        return new String[]{
                ChatColor.GRAY + "\u2588\u2588\u2588\u2588\u2588\u2588\u2588",
                ChatColor.GRAY + "\u2588" + ChatColor.RED + "\u2588" + ChatColor.GRAY + "\u2588\u2588\u2588" + ChatColor.RED + "\u2588" + ChatColor.GRAY + "\u2588",
                ChatColor.GRAY + "\u2588" + ChatColor.RED + "\u2588" + ChatColor.GRAY + "\u2588\u2588" + ChatColor.RED + "\u2588" + ChatColor.GRAY + "\u2588\u2588" + " " + ChatColor.DARK_RED + "[Kill The King Event]",
                ChatColor.GRAY + "\u2588" + ChatColor.RED + "\u2588\u2588\u2588" + ChatColor.GRAY + "\u2588\u2588\u2588" + " " + ChatColor.YELLOW + playerName + " won the event!",
                ChatColor.GRAY + "\u2588" + ChatColor.RED + "\u2588" + ChatColor.GRAY + "\u2588\u2588" + ChatColor.RED + "\u2588" + ChatColor.GRAY + "\u2588\u2588",
                ChatColor.GRAY + "\u2588" + ChatColor.RED + "\u2588" + ChatColor.GRAY + "\u2588\u2588\u2588" + ChatColor.RED + "\u2588" + ChatColor.GRAY + "\u2588",
                ChatColor.GRAY + "\u2588" + ChatColor.RED + "\u2588" + ChatColor.GRAY + "\u2588\u2588\u2588" + ChatColor.RED + "\u2588" + ChatColor.GRAY + "\u2588",
                ChatColor.GRAY + "\u2588\u2588\u2588\u2588\u2588\u2588\u2588"
        };
    }
}
Ibon2/UniTrivia_frontend
src/game/src/components/colourWheel/ColourWheel.js
<filename>src/game/src/components/colourWheel/ColourWheel.js
// NOTES:
// -- Array-destructuring assignment won't work w vanilla ie11; needs babel-polyfill lol
import React, {Component, useState} from 'react'
import Button from '@material-ui/core/Button';
import PopupState from 'material-ui-popup-state';
import Quiz from '../../Quiz/Quiz'
import PropTypes from 'prop-types'

// Utils:
import { calculateBounds, colourToRgbObj, convertObjToString, getCasillaNumber, getCoordByCasilla, getEffectiveRadius, produceRgbShades } from '../../utils/utils'
import hexStrings from '../../utils/hexStrings'
import {Card, CardContent, Modal, Typography} from '@material-ui/core'
import {conn} from '../../../../Play'
import {getToken} from "../../../../Utils/Common";
import {green} from "@material-ui/core/colors";
import {CountdownCircleTimer} from "react-countdown-circle-timer";
import axios from "axios";

// Global-vars:
const fullCircle = 2 * Math.PI
const quarterCircle = fullCircle / 4
const debug = false;

// Canvas-based Trivial-style game board rendered as a colour wheel.
// NOTE(review): many methods below mutate this.state directly instead of using
// setState; that is a React anti-pattern but it is load-bearing here (the
// mutated values are read synchronously by the drawing code) — do not "fix"
// casually.
class ColourWheel extends Component {
  constructor (props) {
    super(props)

    this.state = {
      rgb: null,
      innerWheelOpen: false,
      centerCircleOpen: false,
      numPlayers: 0,
      // Per-player token coordinates on the canvas (up to 6 players).
      positionsX: [250, 250, 250, 250, 250, 250],
      positionsY: [250, 250+10 ,250+20, 250+30, 250+40, 250+50],
      // Whether this client is allowed to move (it is this player's turn).
      puedoMover: false,
      playerName: ['1','2','3','4','5','6'],
      dado: 0,
      quienSoy: 0,
      // Possible moves returned by the server for the current die roll.
      posiblesJugadas: null,
      // Info for the board square that was just clicked.
      casillaActualInfo: null,
      desactivado:true,
      // Whether the question modal is open.
      open: false,
      // Token image names, one per player.
      imagenes: [],
      mostrarFicha:false,
      pintarQuesito:false,
      prueba: false,
      botonCerrar: true,
      contestada: false
    }

    // Initialised just before the DOM has loaded; after constructor().
    this.outerWheelBounds = null
    this.innerWheelBounds = null
    this.centerCircleBounds = null

    this.outerWheelRadius = null
    this.innerWheelRadius = null
    this.centerCircleRadius = null
    this.firstSpacerRadius = null
    this.secondSpacerRadius = null

    // Initialised once the DOM has loaded.
    this.canvasEl = null
    this.ctx = null

    // Bindings:
    this.onCanvasHover = this.onCanvasHover.bind(this)
    this.onCanvasClick = this.onCanvasClick.bind(this)
  }

  // MARK - Common:
  // Converts a client (viewport) mouse position into canvas-relative
  // coordinates plus the distance from the canvas centre.
  getRelativeMousePos (clientX, clientY) {
    const { radius } = this.props

    const canvasPos = this.canvasEl.getBoundingClientRect()
    const h = radius * 2
    const w = radius * 2

    // evtPos relative to our canvas.
    const onCanvas = {
      x: clientX - canvasPos.left,
      y: clientY - canvasPos.top
    }

    // e is our mouse-position relative to the center of the canvasEl; using pythag
    const fromCenter = Math.sqrt(
      (onCanvas.x - w / 2) * (onCanvas.x - w / 2) +
      (onCanvas.y - h / 2) * (onCanvas.y - h / 2)
    )

    // This returns an object in which we have both mouse-pos relative to the canvas, as well as the true-middle.
    return {
      fromCenter,
      onCanvas
    }
  }

  // Clears the canvas and redraws the static wheel/spacers.
  initCanvas () {
    // NOTE(review): direct state mutation — copies the numPlayers prop into state.
    this.state.numPlayers=this.props.numPlayers
    const { radius } = this.props
    const width = radius * 2
    const height = radius * 2

    this.ctx.clearRect(0, 0, width, height)
    this.drawOuterWheel(1)
    this.drawSpacers()
    //this.drawCenterCircle()
    //this.drawPlayers()
  }

  // MARK - Life-cycle methods:
  // NOTE(review): componentWillMount is deprecated in modern React; kept as-is.
  componentWillMount () {
    const { radius, lineWidth, padding } = this.props

    // Setting effective radii:
    this.outerWheelRadius = radius
    this.innerWheelRadius = this.outerWheelRadius - lineWidth - padding
    this.centerCircleRadius = this.innerWheelRadius - lineWidth - padding
    this.firstSpacerRadius = this.outerWheelRadius - lineWidth // NOTE: effectiveRadius will take into account padding as lineWidth.
    this.secondSpacerRadius = this.innerWheelRadius - lineWidth

    // Defining our bounds-objects, exposes a .inside(e) -> boolean method:
    this.outerWheelBounds = calculateBounds(radius - lineWidth, radius)
    this.innerWheelBounds = calculateBounds(
      this.innerWheelRadius - this.centerCircleRadius,
      this.innerWheelRadius
    )
    this.centerCircleBounds = calculateBounds(0, this.centerCircleRadius)
    this.firstSpacerBounds = calculateBounds(
      this.firstSpacerRadius - padding,
      this.firstSpacerRadius
    )
    this.secondSpacerBounds = calculateBounds(
      this.secondSpacerRadius - padding,
      this.secondSpacerRadius
    )
  }

  // Grabs the canvas, draws the initial board, and subscribes to the server's
  // "jugada" (move) events, which relocate another player's token.
  componentDidMount () {
    // Giving this context to our parent component.
    this.props.onRef(this)

    // Initialising our canvas & context objs.
    this.canvasEl = document.getElementById('colour-picker')
    this.ctx = this.canvasEl.getContext('2d')

    if (this.props.preset) {
      const rgb = colourToRgbObj(this.props.presetColour)

      this.setState({ rgb }, () => {
        this.drawInnerWheel() // clear the previous drawing
        this.drawOuterWheel(1) // draw the wheel
        this.drawRadius() // draw the spokes
        this.drawSpacers() // draw the spacer
        this.drawCenterCircle() // draw the center circle with the players
      })
    } else {
      this.drawOuterWheel(1)
      this.drawSpacers()
    }
    /*conn.on('comienzoPartida', () => { setTimeout(()=>{this.comienzo()}, 100);a })*/
    conn.on("jugada",(res)=>{
      // Find the index of the player the server says has moved.
      let indice=0;
      for(var i=0;i<this.state.numPlayers;i++){
        if(this.state.playerName[i]===res.user){
          indice=i
        }
      }
      let coords=getCoordByCasilla(res.casilla,indice)
      const vecx=this.state.positionsX;
      vecx[indice]=coords.x;
      const vecy=this.state.positionsY;
      vecy[indice]=coords.y;
      this.setState({positionsX: vecx,positionsY: vecy})
      this.inicializarTablero()
      this.props.onResponse({quesito: res.ques, user: res.user});
    })
  }

  /*comienzo(){ console.log("Comienza la partida"); let quienSoy=0 for(var i=0;i<JSON.parse(getPlayers()).length;i++){ if(JSON.parse(getPlayers())[i]===getUser()){ quienSoy=i } } this.setValores(JSON.parse(getPlayers()),JSON.parse(getPlayers()).length,quienSoy) console.log('dibujand') //this.drawCenterCircle() this.inicializarTablero() }*/

  componentWillUnmount () {
    this.props.onRef(undefined)
  }

  // MARK - mouse-events:
  // Updates the cursor depending on which ring the mouse is over.
  onCanvasHover ({ clientX, clientY }) {
    const evt = this.getRelativeMousePos(clientX, clientY)

    // Cases for mouse-location:
    if (this.outerWheelBounds.inside(evt.fromCenter)) {
      this.canvasEl.style.cursor = 'crosshair'
    } else if (
      this.innerWheelBounds.inside(evt.fromCenter) &&
      this.state.innerWheelOpen
    ) {
      this.canvasEl.style.cursor = 'crosshair'
    } else if (
      this.centerCircleBounds.inside(evt.fromCenter) &&
      this.state.centerCircleOpen
    ) {
      // TODO: Have it clear on click?
      this.canvasEl.style.cursor = 'pointer'
    } else {
      this.canvasEl.style.cursor = 'auto'
    }
  }

  // Dispatches a click to the outer-ring or spoke (inner) handler.
  onCanvasClick ({ clientX, clientY }) {
    const evt = this.getRelativeMousePos(clientX, clientY)

    // Cases for click-events:
    if (this.outerWheelBounds.inside(evt.fromCenter)) {
      this.outerWheelClicked(evt.onCanvas)
    } else if (
      this.innerWheelBounds.inside(evt.fromCenter)
    ) {
      this.innerWheelClicked(evt.onCanvas)
    }
  }

  // MARK - Clicks & action methods:
  pregunta=true

  // Handles a click on the outer ring: reads the clicked pixel's colour to
  // identify the board square, and — if it is this player's turn and the
  // square is a legal move — either re-rolls (a "Dado" square) or opens the
  // question modal, then moves this player's token and redraws.
  outerWheelClicked (evtPos) {
    const { radius,colours } = this.props
    // returns an rgba array of the pixel-clicked.
    const rgbaArr = this.ctx.getImageData(evtPos.x, evtPos.y, 1, 1).data
    const [r, g, b,opac] = rgbaArr
    const rgb = { r, g, b }

    // Whether the user wants rgb-strings or rgb objects returned.
    const rgbArg = convertObjToString(rgb) // TODO: Let user set different return values in props; e.g. rbg obj, string, etc.
    this.pregunta=false;
    this.props.onColourSelected(rgbArg)
    let opa
    /*if (r == 255 && g == 255 && b == 255) { opa = 0.1 } else { opa = 1 }*/
    opa=1
    this.state.desactivado=false
    // opac===255 means the pixel is fully opaque, i.e. a highlighted (legal) square.
    if(opac===255 && this.state.puedoMover===true){
      for (let j=0; j<this.state.posiblesJugadas.length; j++){
        var arrayPosiblesJugadas=this.state.posiblesJugadas
        if(arrayPosiblesJugadas[j].casilla.num===getCasillaNumber(r, g, b)){
          this.state.casillaActualInfo=this.state.posiblesJugadas[j];
          this.setState({casillaActualInfo: arrayPosiblesJugadas[j]})
        }
      }
      if(this.state.casillaActualInfo.casilla.tipo==="Dado"){
        // "Roll again" square: notify the server, keep the turn and re-enable the die.
        this.props.activarDado();
        this.state.desactivado=true
        this.state.puedoMover=true;
        conn.emit("actualizarJugada",
          {casilla: this.state.casillaActualInfo.casilla.num,
            quesito: "",
            finTurno: false
            ,
          }, (res)=>{
            //console.log("Jugada actualizada: " + res['res'] + " " + res['info']);
            this.props.activarDado();
          });
      }else{
        // Question square: open the quiz modal.
        this.state.puedoMover=false;
        this.handleOpen()
      }
      this.drawInnerWheel()
      this.drawOuterWheel(opa)
      this.changePosition(evtPos.x, evtPos.y,this.state.quienSoy)
      this.drawRadius()
      this.drawSpacers()
      this.drawCenterCircle()
    }
    this.setState(
      {
        rgb,
        innerWheelOpen: true,
        centerCircleOpen: false,
      },
      () => {
        /*if(opac===255 && this.state.puedoMover===true){ for (let j=0; j<this.state.posiblesJugadas.length; j++){ console.log('nume'+this.state.posiblesJugadas[j].casilla.num) if(this.state.posiblesJugadas[j].casilla.num===getCasillaNumber(r, g, b)){ this.state.casillaActualInfo=this.state.posiblesJugadas[j]; } } if(this.state.casillaActualInfo.casilla.tipo==="Dado"){ this.state.puedoMover=true; conn.emit("actualizarJugada", {casilla: this.state.casillaActualInfo.casilla.num, quesito: "", finTurno: false , }, (res)=>{ console.log("Jugada actualizada: " + res['res'] + " " + res['info']); }); }else{ this.state.puedoMover=false; } this.drawInnerWheel() this.drawOuterWheel(opa) this.changePosition(evtPos.x, evtPos.y) this.drawRadius() this.drawSpacers() this.drawCenterCircle() }*/
      }
    )
  }

  // Handles a click on a spoke (inner wheel) square: same colour-to-square
  // lookup as outerWheelClicked, but always opens the question modal.
  innerWheelClicked (evtPos) {
    const rgbaArr = this.ctx.getImageData(evtPos.x, evtPos.y, 1, 1).data
    const [r, g, b,opac] = rgbaArr
    const rgb = { r, g, b }
    const rgbArg = convertObjToString(rgb)

    this.props.onColourSelected(rgbArg)
    this.state.desactivado=false
    this.setState(
      {
        rgb,
        centerCircleOpen: true
      },
      () => {
      }
    )
    if(opac===255 && this.state.puedoMover===true ){
      this.state.puedoMover=false;
      this.handleOpen()
      for (let j=0; j<this.state.posiblesJugadas.length; j++){
        if(this.state.posiblesJugadas[j].casilla.num===getCasillaNumber(r, g, b)){
          this.state.casillaActualInfo=this.state.posiblesJugadas[j];
        }
      }
      this.drawInnerWheel()
      this.drawOuterWheel(1)
      this.changePosition(evtPos.x, evtPos.y,this.state.quienSoy)
      this.drawRadius()
      this.drawSpacers()
      this.drawCenterCircle()
    }
  }

  // Resets the wheel to its blank state and optionally invokes a callback.
  clear (callback = false) {
    this.setState(
      {
        rgb: '#ffffff',
        innerWheelOpen: false,
        centerCircleOpen: false
      },
      () => {
        // Reset state & re-draw.
        this.initCanvas()
        this.drawCenterCircle()
        if (callback) callback()
      }
    )
  }

  // Redraws the whole board, highlighting only the squares in casillasMarcadas.
  dibujarTablero(casillasMarcadas=[]){
    // Reset state & re-draw.
    //this.initCanvas()
    this.drawInnerWheel()
    // NOTE(review): num1/num2 are computed but never used.
    const num1=Math.floor(Math.random() * 24) + 1;
    const num2 = Math.floor(Math.random() * 24) + 1;
    this.drawOuterWheel(1,casillasMarcadas)
    this.drawRadius(0.1,casillasMarcadas)
    this.drawSpacers()
    this.drawCenterCircle()
    //this.drawPlayers() // added
  }

  // Called after a die roll: asks the server for the legal moves and, if it is
  // this player's turn, highlights them on the board.
  jugada (dado,callback = false) {
    this.setState(
      {
        rgb: '#ffffff',
        innerWheelOpen: true,
        centerCircleOpen: false,
      },
      () => {
        // draw the board
        //this.drawPosition()
        if (callback) callback()
      }
    )
    conn.emit("posiblesJugadas", dado, (res)=>{
      this.state.posiblesJugadas=res['info'];
      if(res['res']!=='err'){
        if(res['info']==="No es el turno."){
          this.state.puedoMover=false;
          alert('No es tu turno')
        }else{
          this.state.puedoMover=true;
        }
        let casillas=[];
        for(var i=0;i<res['info'].length;i++){
          casillas[i]=res['info'][i].casilla.num
        }
        if(this.state.puedoMover){ // if it's my turn
          this.dibujarTablero(casillas)
        }
      }else{
        if(res['info']==="No es el turno."){
          this.state.puedoMover=false;
          alert('No es tu turno')
        }
      }
      this.props.desactivarDado();
    })
  }

  // Redraws the full board with every square at normal opacity.
  inicializarTablero(){
    this.drawInnerWheel() // clear the previous drawing
    this.drawOuterWheel(1) // draw the wheel
    this.drawRadius() // draw the spokes
    this.drawSpacers() // draw the spacer
    this.drawCenterCircle() // draw the center circle with the players
    //this.drawPlayers()
  }

  // NOTE(review): writes state key 'quiensoy' but the rest of the class reads
  // 'quienSoy' — this setter likely never takes effect; confirm with callers.
  setQuienSoy(quiensoy){
    this.setState({quiensoy: quiensoy});
  }

  // Stores player names, token images, player count and this client's index.
  setValores(players,numplayers,quiensoy){
    let imgs=[]
    let playernames=[]
    for(var i=0;i<numplayers;i++){
      playernames.push(players[i].nombre)
      imgs.push(players[i].ficha)
    }
    /*this.state.playerName=playernames this.state.numPlayers=numplayers this.state.numPlayers=numplayers*/
    this.setState({quienSoy: quiensoy})
    this.setState({playerName:playernames,numPlayers:numplayers})
    this.setState({imagenes: imgs})
  }

  // Resets the wheel state at the start of a game.
  iniciarPartida (callback = false) {
    const { radius } = this.props
    this.setState(
      {
        rgb: '#ffffff',
        innerWheelOpen: true,
        centerCircleOpen: false,
      },
      () => {
        // Reset state & re-draw.
      }
    )
    //this.inicializarTablero()
  }

  // Restores a saved game: places every player's token on its saved square.
  cargarPartida(casillas,quiensoy){
    for(var i=0; i< casillas.length;i++){
      let coords=getCoordByCasilla(casillas[i],i)
      //this.state.positionsX[player]=coords.x;
      //this.state.positionsY[player]=coords.y;
      const vecx=this.state.positionsX;
      vecx[i]=coords.x;
      const vecy=this.state.positionsY;
      vecy[i]=coords.y;
      this.setState({positionsX: vecx,positionsY: vecy})
    }
    this.inicializarTablero()
  }

  // MARK - Drawing:
  // Draws the outer ring of coloured arc segments. When casillasMarcadas is
  // given, only those squares are drawn at full opacity; otherwise every
  // segment uses the opacity `opa`.
  drawOuterWheel (opa,casillasMarcadas) {
    // TODO: Draw outline; separate method.
    const { radius, colours, lineWidth } = this.props
    const height = radius * 2
    const width = radius * 2

    // This value ensures that the stroke accounts for the lineWidth provided to produce an accurately represented radius.
    const effectiveRadius = getEffectiveRadius(radius, lineWidth)

    // Converting each colour into a relative rgb-object we can iterate through.
    const rgbArr = colours.map((colour) => colourToRgbObj(colour))

    rgbArr.forEach((rgb, i) => {
      this.ctx.beginPath()

      // Creates strokes 1 / rgbArr.length of the circle circumference.
      const startAngle = (fullCircle / rgbArr.length) * i
      const endAngle = (fullCircle / rgbArr.length) * (i + 1)

      this.ctx.arc(
        width / 2,
        height / 2,
        effectiveRadius,
        startAngle + 3,
        endAngle + 3
      )
      this.ctx.lineWidth = lineWidth // This is the width of the innerWheel.
      //this.ctx.fillText('holaas dfa sdf',50+i*5 ,50+i*5);
      // Stroke-style changes based on the shade:
      //this.ctx.strokeText('h',50+i*5 ,50+i*5) ;
      //rgb.r=rgb.r-100;
      if (casillasMarcadas != null) {
        let op = 0.1
        casillasMarcadas.forEach((val, j) => {
          if (getCasillaNumber(rgb.r, rgb.g, rgb.b) === (val)) {
            op = 1
          }
          this.ctx.strokeStyle = `rgb(${rgb.r}, ${rgb.g}, ${rgb.b},${op})`
        })
        if(op!=1){
          this.ctx.strokeStyle = `rgb(${rgb.r}, ${rgb.g}, ${rgb.b},${op})`
        }
      }else{
        this.ctx.strokeStyle = `rgb(${rgb.r}, ${rgb.g}, ${rgb.b},${opa})`
      }
      this.ctx.stroke()
      this.ctx.closePath()
    })
  }

  drawSpacers () {
    if (this.props.spacers) {
      this.drawSpacer(this.firstSpacerRadius)
      //this.drawSpacer(this.secondSpacerRadius)
    }
  }

  // Draws one ring separator with a drop shadow.
  drawSpacer (spacerRadius) {
    const { radius, padding, spacers: { colour, shadowColour, shadowBlur } } = this.props
    const height = radius * 2
    const width = radius * 2

    const effectiveRadius = getEffectiveRadius(spacerRadius, padding)

    this.ctx.beginPath()
    this.ctx.arc(width / 2, height / 2, effectiveRadius, 0, fullCircle)
    this.ctx.lineWidth = padding
    this.ctx.shadowColor = shadowColour
    this.ctx.shadowBlur = shadowBlur
    this.ctx.strokeStyle = colour
    this.ctx.stroke()
    this.ctx.closePath()

    // To reset our shadowColor for other strokes.
    this.ctx.shadowColor = 'transparent'
  }

  // Draws the six board spokes (three squares each) at fixed offsets/angles.
  drawRadius (opa,casillasMarcadas=[]) {
    this.drawRad(-27,-90,0,['#ff6403','#ecd703','#0091df'],opa,casillasMarcadas) // spoke 0
    this.drawRad(27,90,180,['#ecd704','#ff00ec','#0091de'],opa,casillasMarcadas) // spoke 3
    this.drawRad(65,-70,60,['#ff00ed','#008810','#0091dd'],opa,casillasMarcadas) // spoke 1
    this.drawRad(94,20,120,['#3b3884','#ff00ee','#ff6404'],opa,casillasMarcadas) // spoke 2
    this.drawRad(-65,70,240,['#ff6405','#3b3883','#00880f'],opa,casillasMarcadas) // spoke 4
    this.drawRad(-94,-20,-60,['#00880e','#3b3882','#ff00ef'],opa,casillasMarcadas) // spoke 5
  }

  // Draws one spoke: rotates the canvas around (radius+xs, radius+ys) by
  // `angle` degrees and fills three 50x50 squares, highlighting any square in
  // casillasMarcadas and dimming the rest.
  drawRad (xs,ys,angle,colors,opa=1,casillasMarcadas=[]) {
    // raf setup.
    const { radius } = this.props
    this.ctx.beginPath()
    //65,-70 y 60º
    //90,27 y 120º
    //-65,70 y 240
    //-90,-27 y -60
    let x = [xs]//-27,27,
    let y = [ys]//-90,90
    this.ctx.translate(radius + x[0], radius + y[0]);
    this.ctx.rotate(angle * Math.PI/180 );
    this.ctx.translate(-(radius + x[0]),-( radius + y[0]));
    for (var i=0;i<3;i++){
      const rgb = colourToRgbObj(colors[i])
      if (casillasMarcadas.length != 0) {
        let marcada=0
        casillasMarcadas.forEach((val, j) => {
          if (getCasillaNumber(rgb.r, rgb.g, rgb.b) === (val)) {
            this.ctx.fillStyle = `rgb(${rgb.r},${rgb.g},${rgb.b},${1})`
            marcada=1
          }
        })
        if(marcada!=1){
          this.ctx.fillStyle = `rgb(${rgb.r}, ${rgb.g}, ${rgb.b},${0.1})`
        }
      }else{
        this.ctx.fillStyle = `rgb(${rgb.r}, ${rgb.g}, ${rgb.b},${opa})`
      }
      this.ctx.fillRect( radius + x[0], radius + y[0]-50*i, 50, 50 );
      this.ctx.stroke()
    }
    // Reset the transform so later drawing is unaffected by the rotation.
    this.ctx.setTransform(1, 0, 0, 1, 0, 0);
    this.ctx.closePath()
  }

  // Moves `player`'s token to the coordinates of the square stored in
  // state.casillaActualInfo. The x/y params are unused.
  changePosition(x,y,player=0) {
    let coords=getCoordByCasilla(this.state.casillaActualInfo.casilla.num,player)
    //this.state.positionsX[player]=coords.x;
    //this.state.positionsY[player]=coords.y;
    const vecx=this.state.positionsX;
    vecx[player]=coords.x;
    const vecy=this.state.positionsY;
    vecy[player]=coords.y;
    this.setState({positionsX: vecx,positionsY: vecy})
  }

  // Clears the canvas; the shade-drawing bodies below are currently disabled,
  // so in practice this acts as a full-canvas erase.
  drawInnerWheel (animationPercentage = 0) {
    // raf setup.
    let requestAnimationFrame = window.requestAnimationFrame || window.mozRequestAnimationFrame || window.webkitRequestAnimationFrame || window.msRequestAnimationFrame
    window.requestAnimationFrame = requestAnimationFrame

    const { rgb: { r, g, b } } = this.state
    const { radius, lineWidth, shades, animated } = this.props

    const height = radius * 2
    const width = radius * 2

    const effectiveRadius = getEffectiveRadius(
      this.innerWheelRadius,
      lineWidth
    )

    // Re-initialising canvas.
    this.ctx.clearRect(0, 0, width, height)
    //this.drawOuterWheel(1);
    //this.drawRadius();
    //this.drawSpacers()

    const rgbShades = produceRgbShades(r, g, b, shades)

    // Different functions for drawing our inner-wheel of shades.
    function drawShades () {
      rgbShades.forEach((rgb, i) => {
        this.ctx.beginPath()

        const startAngle = (fullCircle / rgbShades.length) * i + quarterCircle
        const endAngle = (fullCircle / rgbShades.length) * (i + 1) + (1 / 2) * Math.PI

        this.ctx.arc(
          width / 2,
          height / 2,
          effectiveRadius,
          startAngle,
          endAngle
        )
        this.ctx.lineWidth = lineWidth // This is the width of the innerWheel.

        // Stroke style changes based on the shade:
        this.ctx.strokeStyle = `rgb(${rgb.r}, ${rgb.g}, ${rgb.b})`
        this.ctx.stroke()
        this.ctx.closePath()
      })
    }

    function animateShades () {
      rgbShades.forEach((rgb, i) => {
        this.ctx.beginPath()

        const startAngle = (fullCircle / rgbShades.length) * i + quarterCircle
        const endAngle = (fullCircle / rgbShades.length) * (i + 1) + (1 / 2) * Math.PI

        this.ctx.arc(
          width / 2,
          height / 2,
          effectiveRadius,
          startAngle,
          endAngle
        )
        this.ctx.lineWidth = lineWidth * animationPercentage // This is the width of the innerWheel.

        // Stroke style changes based on the shade:
        this.ctx.strokeStyle = `rgb(${rgb.r}, ${rgb.g}, ${rgb.b})`
        this.ctx.stroke()
        this.ctx.closePath()
      })

      // TODO: Make this animation speed dynamic.
      animationPercentage += 1 / 10 // i.e. 1 / x frames

      // Essentially re-draws rgbShades.forEach until the animationPercentage reaches 1, i.e. 100%
      if (animationPercentage < 1) requestAnimationFrame(animateShades)
    }

    animateShades = animateShades.bind(this)
    drawShades = drawShades.bind(this)

    if (animated) {
      //animateShades()
    } else {
      // TODO: Refactor into its own func.
      //drawShades()
    }
  }

  // Draws the white 50px centre circle and then the player tokens on top.
  drawCenterCircle () {
    const { rgb } = this.state
    const { radius } = this.props

    const height = radius * 2
    const width = radius * 2
    this.ctx.lineWidth = 0

    this.ctx.beginPath()
    this.ctx.arc(
      width / 2,
      height / 2,
      50,
      0,
      2 * Math.PI
    )
    this.ctx.fillStyle = `rgb(${255}, ${255}, ${255})`
    this.ctx.fill()
    this.ctx.lineWidth = 0.1
    this.ctx.strokeStyle = `rgb(${rgb.r}, ${rgb.g}, ${rgb.b})`
    this.ctx.stroke()
    this.ctx.closePath()
    this.drawPlayers()
  }

  // Builds the per-player token <img> elements; a state toggle ('prueba')
  // forces a re-render once each image finishes loading.
  cargarImagen(){
    let images=[]
    for(var i=0; i<this.state.imagenes.length;i++){
      const imageObj1 = new Image();
      imageObj1.onload = ()=>{
        this.setState({prueba: !this.state.prueba})
      };
      imageObj1.src= '/images/fichas/'+this.state.imagenes[i]+'.png';
      images.push(imageObj1)
    }
    //imageObj1.src= 'http://i.stack.imgur.com/h5RjZ.png';
    //imageObj1.src= '/images/fichas/ficha0.png';
    return images
  }

  // Draws every player's name and token image at its current board position.
  drawPlayers () {
    const { radius } = this.props
    const height = radius * 2
    const width = radius * 2
    //this.ctx.beginPath()
    this.ctx.beginPath()
    this.ctx.fillStyle = `rgb(${0}, ${0}, ${0})`
    //const imageObj1 = new Image();
    //imageObj1.src= 'http://i.stack.imgur.com/h5RjZ.png';
    //imageObj1.src= '/images/avatars/avatar_6.png';
    //const imageObj1=this.cargarImagen()
    // NOTE(review): duplicates cargarImagen() inline; `var i` is re-declared below.
    let images=[]
    for(var i=0; i<this.state.imagenes.length;i++){
      const imageObj1 = new Image();
      imageObj1.onload = ()=>{
        this.setState({prueba: !this.state.prueba})
      };
      imageObj1.src= '/images/fichas/'+this.state.imagenes[i]+'.png';
      images.push(imageObj1)
    }
    for(var i=0;i<this.state.numPlayers;i++){
      //console.log(this.state.playerName[i])
      this.ctx.fillText(
        this.state.playerName[i],
        this.state.positionsX[i]+25,
        this.state.positionsY[i]+12.5
      )
      //imageObj1.crossOrigin = "Anonymous";
      this.ctx.drawImage(images[i],this.state.positionsX[i],this.state.positionsY[i],25,25)
    }
    //this.ctx.stroke()
    this.ctx.closePath()
  }

  // Returns the info object for the square that was just clicked.
  getPosiblesJugadas(){
    return this.state.casillaActualInfo
  }

  // Called by <Quiz> with the answer result: reports the move to the server,
  // awards a coin on a correct answer, and (for "Quesito" squares) notifies
  // the parent that a wedge was won.
  handleResponseFromQuiz= (response)=>{
    this.setState({botonCerrar:false})
    this.setState({pintarQuesito:true})
    conn.emit("actualizarJugada",
      {casilla: response.casillaInfo.casilla.num,
        quesito: response.casillaInfo.casilla.tipo==="Quesito"&&response.result===1?response.casillaInfo.casilla.categoria:"",
        finTurno: response.result===0
        //?true:false
        ,
      }, (res)=>{
        console.assert(!debug, "Jugada actualizada: " + res['res'] + " " + res['info']);
        console.assert(!debug, res['info']);
      });
    if(response.result===1){
      this.props.activarDado();
      axios.post('https://unitrivia.herokuapp.com/api/tienda/insertarMonedas',{},{headers: {
          cantidad: 1,jwt: getToken()
        }}).then((response) => {
      }).catch((code) => {
        console.assert(!debug, code.response)
      });
    }
    if(response.result===1 && response.casillaInfo.casilla.tipo==="Quesito"){
      this.props.onResponse({quesito: response.casillaInfo.casilla.categoria,user: this.props.username});
    }
    this.setState({contestada: true})
    //this.handleClose()
  }

  // NOTE(review): mutates state directly, so the modal opens on the NEXT
  // render triggered elsewhere, not immediately.
  handleOpen = () => {
    this.state.open=true;
  };

  handleClose = () => {
    this.setState({open: false})
    this.setState({botonCerrar:true})
    this.setState({contestada: false})
  };

  getOpen(){
    return this.state.open
  }

  // Renders the board canvas plus the question modal (with a 60s countdown
  // that forfeits the turn if the question is not answered in time).
  render () {
    const { radius, dynamicCursor } = this.props

    return dynamicCursor ? (
      <div>
        <canvas id="colour-picker" onClick={this.onCanvasClick} onMouseMove={this.onCanvasHover} width={`${radius * 2}px`} height={`${radius * 2}px`} />
        <div>
          <PopupState variant="popover" popupId="demo-popup-popover">
            {(popupState) => (
              <div>
                <Modal open={this.getOpen()} onClose={this.handleClose} aria-labelledby="simple-modal-title" aria-describedby="simple-modal-description" disableBackdropClick={this.getOpen()} >
                  <Card style={{ color: green[500] }} >
                    <CardContent>
                      <Typography align={'left'}>Responda a la pregunta.</Typography>
                      <div align={'right'}>
                        <CountdownCircleTimer onComplete={() => { if(!this.state.contestada){ conn.emit("actualizarJugada", {casilla: this.state.casillaActualInfo.casilla.num, quesito: "", finTurno: true , }, (res)=>{ console.assert(!debug, "Jugada actualizada: " + res['res'] + " " + res['info']); this.props.activarDado(); }); } this.handleClose() }} isPlaying duration={60} initialRemainingTime={60} colors="#A30000" size={80} />
                      </div>
                      <Quiz pregunta={this.getPosiblesJugadas()} onResponse={this.handleResponseFromQuiz.bind(this)} > </Quiz>
                      <Button onClick={this.handleClose} style={{color: 'red'}} disabled={this.state.botonCerrar}>CERRAR</Button>
                    </CardContent>
                  </Card>
                </Modal>
              </div>
            )}
          </PopupState>
        </div>
      </div>
    ) : (
      <canvas id="colour-picker" onClick={this.onCanvasClick} width={`${radius * 2}px`} height={`${radius * 2}px`} />
    )
  }
}

ColourWheel.propTypes = {
  playerName: PropTypes.array,
  numPlayers: PropTypes.number,
  radius: PropTypes.number.isRequired,
  lineWidth: PropTypes.number.isRequired,
  colours: PropTypes.array,
  shades: PropTypes.number,
  padding: PropTypes.number,
  dynamicCursor: PropTypes.bool,
  spacers: PropTypes.object,
  onColourSelected: PropTypes.func,
  preset: PropTypes.bool
  // presetColour: PropTypes.string
}

ColourWheel.defaultProps = {
  colours: hexStrings,
  shades: 16,
  padding: 0,
  dynamicCursor: true,
  preset: false,
  animate: false
}

export default ColourWheel
esotericist/TIS-3D
src/main/java/li/cil/tis3d/util/TooltipUtils.java
package li.cil.tis3d.util; import net.minecraft.item.ItemStack; import net.minecraft.util.text.*; import java.util.List; public final class TooltipUtils { public static void tryAddDescription(final ItemStack stack, final List<ITextComponent> tooltip) { if (stack.isEmpty()) { return; } final String translationKey = stack.getDescriptionId() + ".desc"; final LanguageMap languagemap = LanguageMap.getInstance(); if (languagemap.has(translationKey)) { final TranslationTextComponent description = new TranslationTextComponent(translationKey); tooltip.add(makeGray(description)); } } private static IFormattableTextComponent makeGray(final IFormattableTextComponent text) { return text.withStyle(s -> s.withColor(TextFormatting.GRAY)); } }
iridium-browser/iridium-browser
chrome/browser/safe_browsing/download_protection/deep_scanning_browsertest.cc
// Copyright 2020 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include <memory> #include "base/base64.h" #include "base/path_service.h" #include "base/test/metrics/histogram_tester.h" #include "base/test/scoped_feature_list.h" #include "base/threading/thread_restrictions.h" #include "base/values.h" #include "chrome/browser/browser_process.h" #include "chrome/browser/download/download_prefs.h" #include "chrome/browser/enterprise/connectors/connectors_service.h" #include "chrome/browser/extensions/api/safe_browsing_private/safe_browsing_private_event_router.h" #include "chrome/browser/extensions/api/safe_browsing_private/safe_browsing_private_event_router_factory.h" #include "chrome/browser/policy/dm_token_utils.h" #include "chrome/browser/profiles/profile.h" #include "chrome/browser/safe_browsing/cloud_content_scanning/binary_fcm_service.h" #include "chrome/browser/safe_browsing/cloud_content_scanning/binary_upload_service.h" #include "chrome/browser/safe_browsing/cloud_content_scanning/binary_upload_service_factory.h" #include "chrome/browser/safe_browsing/cloud_content_scanning/deep_scanning_browsertest_base.h" #include "chrome/browser/safe_browsing/cloud_content_scanning/deep_scanning_test_utils.h" #include "chrome/browser/safe_browsing/download_protection/ppapi_download_request.h" #include "chrome/browser/safe_browsing/test_safe_browsing_service.h" #include "chrome/browser/ui/browser.h" #include "chrome/common/chrome_paths.h" #include "chrome/common/pref_names.h" #include "chrome/test/base/in_process_browser_test.h" #include "chrome/test/base/ui_test_utils.h" #include "components/autofill/core/common/mojom/autofill_types.mojom-shared.h" #include "components/download/public/common/download_danger_type.h" #include "components/enterprise/common/proto/connectors.pb.h" #include "components/policy/core/common/cloud/mock_cloud_policy_client.h" #include 
"components/prefs/pref_service.h" #include "components/safe_browsing/core/common/safe_browsing_prefs.h" #include "components/safe_browsing/core/db/test_database_manager.h" #include "components/safe_browsing/core/features.h" #include "components/safe_browsing/core/proto/csd.pb.h" #include "components/signin/public/identity_manager/identity_test_environment.h" #include "content/public/browser/browser_context.h" #include "content/public/browser/download_manager.h" #include "content/public/test/browser_test.h" #include "content/public/test/download_test_observer.h" #include "content/public/test/test_utils.h" #include "services/network/test/test_utils.h" namespace safe_browsing { namespace { constexpr char kUserName[] = "<EMAIL>"; // Extract the metadata proto from the raw request string. Returns true on // success. bool GetUploadMetadata( const std::string& upload_request, enterprise_connectors::ContentAnalysisRequest* out_proto) { // The request is of the following format, see multipart_uploader.h for // details: // ---MultipartBoundary--- // <Headers for metadata> // // <Base64-encoded metadata> // ---MultipartBoundary--- // <Headers for uploaded data> // // <Uploaded data> // ---MultipartBoundary--- size_t boundary_end = upload_request.find("\r\n"); std::string multipart_boundary = upload_request.substr(0, boundary_end); size_t headers_end = upload_request.find("\r\n\r\n"); size_t metadata_end = upload_request.find("\r\n" + multipart_boundary, headers_end); std::string encoded_metadata = upload_request.substr(headers_end + 4, metadata_end - headers_end - 4); std::string serialized_metadata; base::Base64Decode(encoded_metadata, &serialized_metadata); return out_proto->ParseFromString(serialized_metadata); } } // namespace class FakeBinaryFCMService : public BinaryFCMService { public: FakeBinaryFCMService() {} void GetInstanceID(GetInstanceIDCallback callback) override { std::move(callback).Run("test_instance_id"); } void UnregisterInstanceID(const std::string& token, 
UnregisterInstanceIDCallback callback) override { // Always successfully unregister. std::move(callback).Run(true); } }; // Integration tests for download deep scanning behavior, only mocking network // traffic and FCM dependencies. class DownloadDeepScanningBrowserTestBase : public DeepScanningBrowserTestBase, public content::DownloadManager::Observer, public download::DownloadItem::Observer { public: // |connectors_machine_scope| indicates whether the Connector prefs such as // OnFileDownloadedEnterpriseConnector and OnSecurityEventEnterpriseConnector // should be set at the machine or user scope. explicit DownloadDeepScanningBrowserTestBase(bool connectors_machine_scope) : connectors_machine_scope_(connectors_machine_scope) { if (!connectors_machine_scope) { scoped_feature_list_.InitAndEnableFeature( enterprise_connectors::kPerProfileConnectorsEnabled); } } void OnDownloadCreated(content::DownloadManager* manager, download::DownloadItem* item) override { item->AddObserver(this); download_items_.insert(item); } void OnDownloadDestroyed(download::DownloadItem* item) override { download_items_.erase(item); } void SetUpReporting() { SetOnSecurityEventReporting(browser()->profile()->GetPrefs(), /*enabled*/ true, /*enabled_event_names*/ {}, connectors_machine_scope()); client_ = std::make_unique<policy::MockCloudPolicyClient>(); client_->SetDMToken("dm_token"); #if BUILDFLAG(IS_CHROMEOS_ASH) extensions::SafeBrowsingPrivateEventRouterFactory::GetForProfile( browser()->profile()) ->SetBrowserCloudPolicyClientForTesting(client_.get()); #else if (connectors_machine_scope()) { extensions::SafeBrowsingPrivateEventRouterFactory::GetForProfile( browser()->profile()) ->SetBrowserCloudPolicyClientForTesting(client_.get()); } else { extensions::SafeBrowsingPrivateEventRouterFactory::GetForProfile( browser()->profile()) ->SetProfileCloudPolicyClientForTesting(client_.get()); } #endif identity_test_environment_ = std::make_unique<signin::IdentityTestEnvironment>(); 
identity_test_environment_->MakePrimaryAccountAvailable(kUserName); extensions::SafeBrowsingPrivateEventRouterFactory::GetForProfile( browser()->profile()) ->SetIdentityManagerForTesting( identity_test_environment_->identity_manager()); } policy::MockCloudPolicyClient* client() { return client_.get(); } protected: void SetUp() override { test_sb_factory_ = std::make_unique<TestSafeBrowsingServiceFactory>(); test_sb_factory_->UseV4LocalDatabaseManager(); SafeBrowsingService::RegisterFactory(test_sb_factory_.get()); InProcessBrowserTest::SetUp(); } void TearDown() override { InProcessBrowserTest::TearDown(); SafeBrowsingService::RegisterFactory(nullptr); } void SetUpOnMainThread() override { embedded_test_server()->ServeFilesFromDirectory(GetTestDataDirectory()); ASSERT_TRUE(embedded_test_server()->Start()); SetBinaryUploadServiceTestFactory(); SetUrlLoaderInterceptor(); ObserveDownloadManager(); AuthorizeForDeepScanning(); #if BUILDFLAG(IS_CHROMEOS_ASH) SetDMTokenForTesting( policy::DMToken::CreateValidTokenForTesting("dm_token")); #else if (connectors_machine_scope()) { SetDMTokenForTesting( policy::DMToken::CreateValidTokenForTesting("dm_token")); } else { SetProfileDMToken(browser()->profile(), "dm_token"); } #endif SetAnalysisConnector(browser()->profile()->GetPrefs(), enterprise_connectors::FILE_DOWNLOADED, R"({ "service_provider": "google", "enable": [ { "url_list": ["*"], "tags": ["dlp", "malware"] } ], "block_password_protected": true })", connectors_machine_scope()); } void WaitForDownloadToFinish() { content::DownloadManager* download_manager = content::BrowserContext::GetDownloadManager(browser()->profile()); content::DownloadTestObserverTerminal observer( download_manager, 1, content::DownloadTestObserver::ON_DANGEROUS_DOWNLOAD_QUIT); observer.WaitForFinished(); } void WaitForDeepScanRequest(bool is_advanced_protection) { if (is_advanced_protection) waiting_for_app_ = true; else waiting_for_enterprise_ = true; base::RunLoop 
run_loop(base::RunLoop::Type::kNestableTasksAllowed); waiting_for_upload_closure_ = run_loop.QuitClosure(); run_loop.Run(); waiting_for_app_ = false; waiting_for_enterprise_ = false; } void WaitForMetadataCheck() { base::RunLoop run_loop(base::RunLoop::Type::kNestableTasksAllowed); waiting_for_metadata_closure_ = run_loop.QuitClosure(); run_loop.Run(); } void ExpectMetadataResponse(const ClientDownloadResponse& response) { test_sb_factory_->test_safe_browsing_service() ->GetTestUrlLoaderFactory() ->AddResponse(PPAPIDownloadRequest::GetDownloadRequestUrl().spec(), response.SerializeAsString()); } void ExpectContentAnalysisSynchronousResponse( bool is_advanced_protection, const enterprise_connectors::ContentAnalysisResponse& response, const std::vector<std::string>& tags) { connector_url_ = "https://safebrowsing.google.com/safebrowsing/uploads/" "scan?device_token=dm_token&connector=OnFileDownloaded"; for (const std::string& tag : tags) connector_url_ += ("&tag=" + tag); test_sb_factory_->test_safe_browsing_service() ->GetTestUrlLoaderFactory() ->AddResponse(connector_url_, response.SerializeAsString()); } base::FilePath GetTestDataDirectory() { base::FilePath test_file_directory; base::PathService::Get(chrome::DIR_TEST_DATA, &test_file_directory); return test_file_directory; } FakeBinaryFCMService* binary_fcm_service() { return binary_fcm_service_; } TestSafeBrowsingServiceFactory* test_sb_factory() { return test_sb_factory_.get(); } const enterprise_connectors::ContentAnalysisRequest& last_app_request() const { return last_app_request_; } const enterprise_connectors::ContentAnalysisRequest& last_enterprise_request() const { return last_enterprise_request_; } const base::flat_set<download::DownloadItem*>& download_items() { return download_items_; } void SetBinaryUploadServiceTestFactory() { BinaryUploadServiceFactory::GetInstance()->SetTestingFactory( browser()->profile(), base::BindRepeating( &DownloadDeepScanningBrowserTestBase::CreateBinaryUploadService, 
base::Unretained(this))); } void ObserveDownloadManager() { content::DownloadManager* download_manager = content::BrowserContext::GetDownloadManager(browser()->profile()); download_manager->AddObserver(this); } void SetUrlLoaderInterceptor() { test_sb_factory()->test_safe_browsing_service()->SetUseTestUrlLoaderFactory( true); test_sb_factory() ->test_safe_browsing_service() ->GetTestUrlLoaderFactory() ->SetInterceptor(base::BindRepeating( &DownloadDeepScanningBrowserTestBase::InterceptRequest, base::Unretained(this))); } template <typename T> void SendFcmMessage(const T& response) { std::string encoded_proto; base::Base64Encode(response.SerializeAsString(), &encoded_proto); gcm::IncomingMessage gcm_message; gcm_message.data["proto"] = encoded_proto; binary_fcm_service()->OnMessage("app_id", gcm_message); } void AuthorizeForDeepScanning() { BinaryUploadServiceFactory::GetForProfile(browser()->profile()) ->SetAuthForTesting("dm_token", /*authorized=*/true); } bool connectors_machine_scope() const { return connectors_machine_scope_; } private: std::unique_ptr<KeyedService> CreateBinaryUploadService( content::BrowserContext* browser_context) { std::unique_ptr<FakeBinaryFCMService> binary_fcm_service = std::make_unique<FakeBinaryFCMService>(); binary_fcm_service_ = binary_fcm_service.get(); Profile* profile = Profile::FromBrowserContext(browser_context); return std::make_unique<BinaryUploadService>( g_browser_process->safe_browsing_service()->GetURLLoaderFactory(), profile, std::move(binary_fcm_service)); } void InterceptRequest(const network::ResourceRequest& request) { if (request.url == BinaryUploadService::GetUploadUrl(/*is_consumer_scan_eligible=*/true)) { ASSERT_TRUE(GetUploadMetadata(network::GetUploadData(request), &last_app_request_)); if (waiting_for_app_) std::move(waiting_for_upload_closure_).Run(); } if (request.url == BinaryUploadService::GetUploadUrl( /*is_consumer_scan_eligible=*/false)) { ASSERT_TRUE(GetUploadMetadata(network::GetUploadData(request), 
&last_enterprise_request_)); if (waiting_for_enterprise_) std::move(waiting_for_upload_closure_).Run(); } if (request.url == connector_url_) { ASSERT_TRUE(GetUploadMetadata(network::GetUploadData(request), &last_enterprise_request_)); if (waiting_for_enterprise_) std::move(waiting_for_upload_closure_).Run(); } if (request.url == PPAPIDownloadRequest::GetDownloadRequestUrl()) { if (waiting_for_metadata_closure_) std::move(waiting_for_metadata_closure_).Run(); } } std::unique_ptr<TestSafeBrowsingServiceFactory> test_sb_factory_; FakeBinaryFCMService* binary_fcm_service_; bool waiting_for_app_; enterprise_connectors::ContentAnalysisRequest last_app_request_; bool waiting_for_enterprise_; enterprise_connectors::ContentAnalysisRequest last_enterprise_request_; std::string connector_url_; base::OnceClosure waiting_for_upload_closure_; base::OnceClosure waiting_for_metadata_closure_; base::flat_set<download::DownloadItem*> download_items_; std::unique_ptr<policy::MockCloudPolicyClient> client_; std::unique_ptr<signin::IdentityTestEnvironment> identity_test_environment_; bool connectors_machine_scope_; base::test::ScopedFeatureList scoped_feature_list_; }; class DownloadDeepScanningBrowserTest : public DownloadDeepScanningBrowserTestBase, public testing::WithParamInterface<bool> { public: DownloadDeepScanningBrowserTest() : DownloadDeepScanningBrowserTestBase(GetParam()) {} }; INSTANTIATE_TEST_SUITE_P(, DownloadDeepScanningBrowserTest, testing::Bool()); IN_PROC_BROWSER_TEST_P(DownloadDeepScanningBrowserTest, SafeDownloadHasCorrectDangerType) { // This allows the blocking DM token reads happening on profile-Connector // triggers. base::ScopedAllowBlockingForTesting allow_blocking; // The file is SAFE according to the metadata check ClientDownloadResponse metadata_response; metadata_response.set_verdict(ClientDownloadResponse::SAFE); ExpectMetadataResponse(metadata_response); // The DLP scan runs synchronously, but doesn't find anything. 
enterprise_connectors::ContentAnalysisResponse sync_response; auto* dlp_result = sync_response.add_results(); dlp_result->set_tag("dlp"); dlp_result->set_status( enterprise_connectors::ContentAnalysisResponse::Result::SUCCESS); ExpectContentAnalysisSynchronousResponse(/*is_advanced_protection=*/false, sync_response, {"dlp", "malware"}); GURL url = embedded_test_server()->GetURL( "/safe_browsing/download_protection/zipfile_two_archives.zip"); ui_test_utils::NavigateToURLWithDisposition( browser(), url, WindowOpenDisposition::CURRENT_TAB, ui_test_utils::BROWSER_TEST_WAIT_FOR_LOAD_STOP); WaitForDeepScanRequest(/*is_advanced_protection=*/false); // The malware scan finishes asynchronously, and doesn't find anything. enterprise_connectors::ContentAnalysisResponse async_response; async_response.set_request_token(last_enterprise_request().request_token()); auto* malware_result = async_response.add_results(); malware_result->set_tag("malware"); malware_result->set_status( enterprise_connectors::ContentAnalysisResponse::Result::SUCCESS); SendFcmMessage(async_response); WaitForDownloadToFinish(); // The file should be deep scanned, and safe. ASSERT_EQ(download_items().size(), 1u); download::DownloadItem* item = *download_items().begin(); EXPECT_EQ( item->GetDangerType(), download::DownloadDangerType::DOWNLOAD_DANGER_TYPE_DEEP_SCANNED_SAFE); EXPECT_EQ(item->GetState(), download::DownloadItem::COMPLETE); } IN_PROC_BROWSER_TEST_P(DownloadDeepScanningBrowserTest, FailedScanFailsOpen) { // This allows the blocking DM token reads happening on profile-Connector // triggers. base::ScopedAllowBlockingForTesting allow_blocking; // The file is SAFE according to the metadata check ClientDownloadResponse metadata_response; metadata_response.set_verdict(ClientDownloadResponse::SAFE); ExpectMetadataResponse(metadata_response); // The DLP scan runs synchronously, but doesn't find anything. 
enterprise_connectors::ContentAnalysisResponse sync_response; auto* dlp_result = sync_response.add_results(); dlp_result->set_tag("dlp"); dlp_result->set_status( enterprise_connectors::ContentAnalysisResponse::Result::SUCCESS); ExpectContentAnalysisSynchronousResponse(/*is_advanced_protection=*/false, sync_response, {"dlp", "malware"}); GURL url = embedded_test_server()->GetURL( "/safe_browsing/download_protection/zipfile_two_archives.zip"); ui_test_utils::NavigateToURLWithDisposition( browser(), url, WindowOpenDisposition::CURRENT_TAB, ui_test_utils::BROWSER_TEST_WAIT_FOR_LOAD_STOP); WaitForDeepScanRequest(/*is_advanced_protection=*/false); // The malware scan finishes asynchronously, and fails enterprise_connectors::ContentAnalysisResponse async_response; async_response.set_request_token(last_enterprise_request().request_token()); auto* malware_result = async_response.add_results(); malware_result->set_tag("malware"); malware_result->set_status( enterprise_connectors::ContentAnalysisResponse::Result::FAILURE); SendFcmMessage(async_response); WaitForDownloadToFinish(); // The file should be safe, but not deep scanned. ASSERT_EQ(download_items().size(), 1u); download::DownloadItem* item = *download_items().begin(); EXPECT_EQ(item->GetDangerType(), download::DownloadDangerType::DOWNLOAD_DANGER_TYPE_NOT_DANGEROUS); EXPECT_EQ(item->GetState(), download::DownloadItem::COMPLETE); } IN_PROC_BROWSER_TEST_P(DownloadDeepScanningBrowserTest, PartialFailureShowsMalwareWarning) { // This allows the blocking DM token reads happening on profile-Connector // triggers. base::ScopedAllowBlockingForTesting allow_blocking; // The file is SAFE according to the metadata check ClientDownloadResponse metadata_response; metadata_response.set_verdict(ClientDownloadResponse::SAFE); ExpectMetadataResponse(metadata_response); // The DLP scan runs synchronously, and fails. 
enterprise_connectors::ContentAnalysisResponse sync_response; auto* dlp_result = sync_response.add_results(); dlp_result->set_tag("dlp"); dlp_result->set_status( enterprise_connectors::ContentAnalysisResponse::Result::FAILURE); ExpectContentAnalysisSynchronousResponse(/*is_advanced_protection=*/false, sync_response, {"dlp", "malware"}); GURL url = embedded_test_server()->GetURL( "/safe_browsing/download_protection/zipfile_two_archives.zip"); ui_test_utils::NavigateToURLWithDisposition( browser(), url, WindowOpenDisposition::CURRENT_TAB, ui_test_utils::BROWSER_TEST_WAIT_FOR_LOAD_STOP); WaitForDeepScanRequest(/*is_advanced_protection=*/false); // The malware scan finishes asynchronously, and finds malware. enterprise_connectors::ContentAnalysisResponse async_response; async_response.set_request_token(last_enterprise_request().request_token()); auto* malware_result = async_response.add_results(); malware_result->set_tag("malware"); malware_result->set_status( enterprise_connectors::ContentAnalysisResponse::Result::SUCCESS); auto* malware_rule = malware_result->add_triggered_rules(); malware_rule->set_action(enterprise_connectors::TriggeredRule::BLOCK); malware_rule->set_rule_name("malware"); SendFcmMessage(async_response); WaitForDownloadToFinish(); // The file should be dangerous. ASSERT_EQ(download_items().size(), 1u); download::DownloadItem* item = *download_items().begin(); EXPECT_EQ( item->GetDangerType(), download::DownloadDangerType::DOWNLOAD_DANGER_TYPE_DANGEROUS_CONTENT); EXPECT_EQ(item->GetState(), download::DownloadItem::IN_PROGRESS); } IN_PROC_BROWSER_TEST_P(DownloadDeepScanningBrowserTest, PartialFailureShowsDlpWarning) { // This allows the blocking DM token reads happening on profile-Connector // triggers. 
base::ScopedAllowBlockingForTesting allow_blocking; // The file is SAFE according to the metadata check ClientDownloadResponse metadata_response; metadata_response.set_verdict(ClientDownloadResponse::SAFE); ExpectMetadataResponse(metadata_response); // The DLP scan runs synchronously, and finds a violation. enterprise_connectors::ContentAnalysisResponse sync_response; auto* dlp_result = sync_response.add_results(); dlp_result->set_tag("dlp"); dlp_result->set_status( enterprise_connectors::ContentAnalysisResponse::Result::SUCCESS); auto* dlp_rule = dlp_result->add_triggered_rules(); dlp_rule->set_action(enterprise_connectors::TriggeredRule::BLOCK); ExpectContentAnalysisSynchronousResponse(/*is_advanced_protection=*/false, sync_response, {"dlp", "malware"}); GURL url = embedded_test_server()->GetURL( "/safe_browsing/download_protection/zipfile_two_archives.zip"); ui_test_utils::NavigateToURLWithDisposition( browser(), url, WindowOpenDisposition::CURRENT_TAB, ui_test_utils::BROWSER_TEST_WAIT_FOR_LOAD_STOP); WaitForDeepScanRequest(/*is_advanced_protection=*/false); // The malware scan finishes asynchronously, and fails. enterprise_connectors::ContentAnalysisResponse async_response; async_response.set_request_token(last_enterprise_request().request_token()); auto* malware_result = async_response.add_results(); malware_result->set_tag("malware"); malware_result->set_status( enterprise_connectors::ContentAnalysisResponse::Result::FAILURE); SendFcmMessage(async_response); WaitForDownloadToFinish(); // The file should be blocked for sensitive content. 
ASSERT_EQ(download_items().size(), 1u); download::DownloadItem* item = *download_items().begin(); EXPECT_EQ(item->GetDangerType(), download::DownloadDangerType:: DOWNLOAD_DANGER_TYPE_SENSITIVE_CONTENT_BLOCK); EXPECT_EQ(item->GetState(), download::DownloadItem::INTERRUPTED); } IN_PROC_BROWSER_TEST_P(DownloadDeepScanningBrowserTest, DangerousHostNotMalwareScanned) { // This allows the blocking DM token reads happening on profile-Connector // triggers. base::ScopedAllowBlockingForTesting allow_blocking; // The file is DANGEROUS_HOST according to the metadata check ClientDownloadResponse metadata_response; metadata_response.set_verdict(ClientDownloadResponse::DANGEROUS_HOST); ExpectMetadataResponse(metadata_response); // The DLP scan still runs, but finds nothing enterprise_connectors::ContentAnalysisResponse sync_response; auto* result = sync_response.add_results(); result->set_tag("dlp"); result->set_status( enterprise_connectors::ContentAnalysisResponse::Result::SUCCESS); ExpectContentAnalysisSynchronousResponse(/*is_advanced_protection=*/false, sync_response, {"dlp", "malware"}); GURL url = embedded_test_server()->GetURL( "/safe_browsing/download_protection/signed.exe"); ui_test_utils::NavigateToURLWithDisposition( browser(), url, WindowOpenDisposition::CURRENT_TAB, ui_test_utils::BROWSER_TEST_WAIT_FOR_LOAD_STOP); WaitForDownloadToFinish(); // The file should be blocked. ASSERT_EQ(download_items().size(), 1u); download::DownloadItem* item = *download_items().begin(); EXPECT_EQ(item->GetDangerType(), download::DownloadDangerType::DOWNLOAD_DANGER_TYPE_DANGEROUS_HOST); EXPECT_EQ(item->GetState(), download::DownloadItem::IN_PROGRESS); } IN_PROC_BROWSER_TEST_P(DownloadDeepScanningBrowserTest, PasswordProtectedTxtFilesAreBlocked) { // This allows the blocking DM token reads happening on profile-Connector // triggers. 
base::ScopedAllowBlockingForTesting allow_blocking; // The file is SAFE according to the metadata check ClientDownloadResponse metadata_response; metadata_response.set_verdict(ClientDownloadResponse::SAFE); ExpectMetadataResponse(metadata_response); GURL url = embedded_test_server()->GetURL( "/safe_browsing/download_protection/encrypted_txt.zip"); ui_test_utils::NavigateToURLWithDisposition( browser(), url, WindowOpenDisposition::CURRENT_TAB, ui_test_utils::BROWSER_TEST_WAIT_FOR_LOAD_STOP); WaitForDownloadToFinish(); // The file should be blocked for containing a password protected file. ASSERT_EQ(download_items().size(), 1u); download::DownloadItem* item = *download_items().begin(); EXPECT_EQ(item->GetDangerType(), download::DownloadDangerType:: DOWNLOAD_DANGER_TYPE_BLOCKED_PASSWORD_PROTECTED); EXPECT_EQ(item->GetState(), download::DownloadItem::INTERRUPTED); } IN_PROC_BROWSER_TEST_P(DownloadDeepScanningBrowserTest, MultipleFCMResponses) { // This allows the blocking DM token reads happening on profile-Connector // triggers. base::ScopedAllowBlockingForTesting allow_blocking; SetUpReporting(); base::HistogramTester histograms; // The file is SAFE according to the metadata check ClientDownloadResponse metadata_response; metadata_response.set_verdict(ClientDownloadResponse::SAFE); ExpectMetadataResponse(metadata_response); // No scan runs synchronously. enterprise_connectors::ContentAnalysisResponse sync_response; ExpectContentAnalysisSynchronousResponse(/*is_advanced_protection=*/false, sync_response, {"dlp", "malware"}); GURL url = embedded_test_server()->GetURL( "/safe_browsing/download_protection/zipfile_two_archives.zip"); ui_test_utils::NavigateToURLWithDisposition( browser(), url, WindowOpenDisposition::CURRENT_TAB, ui_test_utils::BROWSER_TEST_WAIT_FOR_LOAD_STOP); WaitForDeepScanRequest(/*is_advanced_protection=*/false); // The malware scan finishes asynchronously, and finds malware. 
enterprise_connectors::ContentAnalysisResponse async_response_1; async_response_1.set_request_token(last_enterprise_request().request_token()); auto* result = async_response_1.add_results(); result->set_tag("malware"); result->set_status( enterprise_connectors::ContentAnalysisResponse::Result::SUCCESS); auto* malware_rule_1 = result->add_triggered_rules(); malware_rule_1->set_action(enterprise_connectors::TriggeredRule::BLOCK); malware_rule_1->set_rule_name("malware"); SendFcmMessage(async_response_1); // A single unsafe event should be recorded for this request. std::set<std::string> zip_types = {"application/zip", "application/x-zip-compressed"}; EventReportValidator validator(client()); validator.ExpectDangerousDeepScanningResult( /*url*/ url.spec(), /*filename*/ (*download_items().begin())->GetTargetFilePath().AsUTF8Unsafe(), // sha256sum chrome/test/data/safe_browsing/download_protection/\ // zipfile_two_archives.zip | tr '[:lower:]' '[:upper:]' /*sha*/ "339C8FFDAE735C4F1846D0E6FF07FBD85CAEE6D96045AAEF5B30F3220836643C", /*threat_type*/ "DANGEROUS", /*trigger*/ extensions::SafeBrowsingPrivateEventRouter::kTriggerFileDownload, /*mimetypes*/ &zip_types, /*size*/ 276, /*result*/ EventResultToString(EventResult::WARNED), /*username*/ kUserName); // The DLP scan finishes asynchronously, and finds nothing. The malware result // is attached to the response again. 
enterprise_connectors::ContentAnalysisResponse async_response_2; async_response_2.set_request_token(last_enterprise_request().request_token()); auto* malware_result = async_response_2.add_results(); malware_result->set_tag("malware"); malware_result->set_status( enterprise_connectors::ContentAnalysisResponse::Result::SUCCESS); auto* malware_rule_2 = malware_result->add_triggered_rules(); malware_rule_2->set_action(enterprise_connectors::TriggeredRule::BLOCK); malware_rule_2->set_rule_name("malware"); auto* dlp_result = async_response_2.add_results(); dlp_result->set_tag("dlp"); dlp_result->set_status( enterprise_connectors::ContentAnalysisResponse::Result::SUCCESS); SendFcmMessage(async_response_2); // The file should be blocked. ASSERT_EQ(download_items().size(), 1u); download::DownloadItem* item = *download_items().begin(); EXPECT_EQ( item->GetDangerType(), download::DownloadDangerType::DOWNLOAD_DANGER_TYPE_DANGEROUS_CONTENT); EXPECT_EQ(item->GetState(), download::DownloadItem::IN_PROGRESS); // UMAs for this request should only be recorded once. histograms.ExpectUniqueSample("SafeBrowsingBinaryUploadRequest.Result", BinaryUploadService::Result::SUCCESS, 1); histograms.ExpectUniqueSample("SafeBrowsingBinaryUploadRequest.DlpResult", true, 1); histograms.ExpectUniqueSample("SafeBrowsingBinaryUploadRequest.MalwareResult", true, 1); } IN_PROC_BROWSER_TEST_P(DownloadDeepScanningBrowserTest, DlpAndMalwareViolations) { // This allows the blocking DM token reads happening on profile-Connector // triggers. 
base::ScopedAllowBlockingForTesting allow_blocking; SetUpReporting(); base::HistogramTester histograms; // The file is DANGEROUS_HOST according to the metadata check ClientDownloadResponse metadata_response; metadata_response.set_verdict(ClientDownloadResponse::DANGEROUS_HOST); ExpectMetadataResponse(metadata_response); GURL url = embedded_test_server()->GetURL( "/safe_browsing/download_protection/zipfile_two_archives.zip"); ui_test_utils::NavigateToURLWithDisposition( browser(), url, WindowOpenDisposition::CURRENT_TAB, ui_test_utils::BROWSER_TEST_WAIT_FOR_LOAD_STOP); // The DLP scan finishes synchronously and find a violation. enterprise_connectors::ContentAnalysisResponse sync_response; auto* result = sync_response.add_results(); result->set_tag("dlp"); result->set_status( enterprise_connectors::ContentAnalysisResponse::Result::SUCCESS); auto* dlp_rule = result->add_triggered_rules(); dlp_rule->set_action(enterprise_connectors::TriggeredRule::WARN); dlp_rule->set_rule_name("dlp_rule_name"); ExpectContentAnalysisSynchronousResponse(/*is_advanced_protection=*/false, sync_response, {"dlp"}); WaitForMetadataCheck(); WaitForDeepScanRequest(/*is_advanced_protection=*/false); // Both the DLP and malware violations generate an event. 
std::set<std::string> zip_types = {"application/zip", "application/x-zip-compressed"}; EventReportValidator validator(client()); validator.ExpectSensitiveDataEventAndDangerousDeepScanningResult( /*url*/ url.spec(), /*filename*/ (*download_items().begin())->GetTargetFilePath().AsUTF8Unsafe(), // sha256sum chrome/test/data/safe_browsing/download_protection/\ // zipfile_two_archives.zip | tr '[:lower:]' '[:upper:]' /*sha*/ "339C8FFDAE735C4F1846D0E6FF07FBD85CAEE6D96045AAEF5B30F3220836643C", /*threat_type*/ "DANGEROUS_HOST", /*trigger*/ extensions::SafeBrowsingPrivateEventRouter::kTriggerFileDownload, /*dlp_verdict*/ *result, /*mimetypes*/ &zip_types, /*size*/ 276, /*result*/ EventResultToString(EventResult::WARNED), /*username*/ kUserName); WaitForDownloadToFinish(); // The file should be blocked. ASSERT_EQ(download_items().size(), 1u); download::DownloadItem* item = *download_items().begin(); EXPECT_EQ(item->GetDangerType(), download::DownloadDangerType::DOWNLOAD_DANGER_TYPE_DANGEROUS_HOST); EXPECT_EQ(item->GetState(), download::DownloadItem::IN_PROGRESS); // UMAs for this request should only be recorded once. The malware metric // should not be recorded since no deep malware scan occurred. 
histograms.ExpectUniqueSample("SafeBrowsingBinaryUploadRequest.Result", BinaryUploadService::Result::SUCCESS, 1); histograms.ExpectUniqueSample("SafeBrowsingBinaryUploadRequest.DlpResult", true, 1); histograms.ExpectUniqueSample("SafeBrowsingBinaryUploadRequest.MalwareResult", true, 0); } class DownloadRestrictionsDeepScanningBrowserTest : public DownloadDeepScanningBrowserTestBase, public testing::WithParamInterface<bool> { public: DownloadRestrictionsDeepScanningBrowserTest() : DownloadDeepScanningBrowserTestBase(GetParam()) {} ~DownloadRestrictionsDeepScanningBrowserTest() override = default; void SetUpOnMainThread() override { DownloadDeepScanningBrowserTestBase::SetUpOnMainThread(); browser()->profile()->GetPrefs()->SetInteger( prefs::kDownloadRestrictions, static_cast<int>(DownloadPrefs::DownloadRestriction::DANGEROUS_FILES)); SetAnalysisConnector(browser()->profile()->GetPrefs(), enterprise_connectors::FILE_DOWNLOADED, R"({ "service_provider": "google", "enable": [ { "url_list": ["*"], "tags": ["malware"] } ] })", connectors_machine_scope()); } }; INSTANTIATE_TEST_SUITE_P(, DownloadRestrictionsDeepScanningBrowserTest, testing::Bool()); IN_PROC_BROWSER_TEST_P(DownloadRestrictionsDeepScanningBrowserTest, ReportsDownloadsBlockedByDownloadRestrictions) { // This allows the blocking DM token reads happening on profile-Connector // triggers. 
base::ScopedAllowBlockingForTesting allow_blocking; SetUpReporting(); // The file is DANGEROUS according to the metadata check ClientDownloadResponse metadata_response; metadata_response.set_verdict(ClientDownloadResponse::DANGEROUS); ExpectMetadataResponse(metadata_response); GURL url = embedded_test_server()->GetURL( "/safe_browsing/download_protection/zipfile_two_archives.zip"); ui_test_utils::NavigateToURLWithDisposition( browser(), url, WindowOpenDisposition::CURRENT_TAB, ui_test_utils::BROWSER_TEST_WAIT_FOR_LOAD_STOP); WaitForMetadataCheck(); EventReportValidator validator(client()); std::set<std::string> zip_types = {"application/zip", "application/x-zip-compressed"}; validator.ExpectDangerousDownloadEvent( /*url*/ url.spec(), (*download_items().begin())->GetTargetFilePath().AsUTF8Unsafe(), // sha256sum chrome/test/data/safe_browsing/download_protection/\ // zipfile_two_archives.zip | tr '[:lower:]' '[:upper:]' /*sha*/ "339C8FFDAE735C4F1846D0E6FF07FBD85CAEE6D96045AAEF5B30F3220836643C", /*threat_type*/ "DANGEROUS_FILE_TYPE", /*trigger*/ extensions::SafeBrowsingPrivateEventRouter::kTriggerFileDownload, /*mimetypes*/ &zip_types, /*size*/ 276, /*result*/ EventResultToString(EventResult::BLOCKED), /*username*/ kUserName); WaitForDownloadToFinish(); ASSERT_EQ(download_items().size(), 1u); download::DownloadItem* item = *download_items().begin(); EXPECT_EQ(item->GetDangerType(), download::DownloadDangerType::DOWNLOAD_DANGER_TYPE_NOT_DANGEROUS); EXPECT_EQ(item->GetState(), download::DownloadItem::INTERRUPTED); } class AllowlistedUrlDeepScanningBrowserTest : public DownloadDeepScanningBrowserTestBase, public testing::WithParamInterface<bool> { public: AllowlistedUrlDeepScanningBrowserTest() : DownloadDeepScanningBrowserTestBase(GetParam()) {} ~AllowlistedUrlDeepScanningBrowserTest() override = default; void SetUpOnMainThread() override { DownloadDeepScanningBrowserTestBase::SetUpOnMainThread(); base::ListValue domain_list; 
domain_list.AppendString(embedded_test_server()->base_url().host_piece()); browser()->profile()->GetPrefs()->Set(prefs::kSafeBrowsingAllowlistDomains, domain_list); } }; INSTANTIATE_TEST_SUITE_P(, AllowlistedUrlDeepScanningBrowserTest, testing::Bool()); IN_PROC_BROWSER_TEST_P(AllowlistedUrlDeepScanningBrowserTest, AllowlistedUrlStillDoesDlp) { // This allows the blocking DM token reads happening on profile-Connector // triggers. base::ScopedAllowBlockingForTesting allow_blocking; // The file is SAFE according to the metadata check ClientDownloadResponse metadata_response; metadata_response.set_verdict(ClientDownloadResponse::SAFE); ExpectMetadataResponse(metadata_response); // The DLP scan runs synchronously, and finds a violation. enterprise_connectors::ContentAnalysisResponse sync_response; auto* result = sync_response.add_results(); result->set_tag("dlp"); result->set_status( enterprise_connectors::ContentAnalysisResponse::Result::SUCCESS); auto* dlp_rule = result->add_triggered_rules(); dlp_rule->set_action(enterprise_connectors::TriggeredRule::BLOCK); ExpectContentAnalysisSynchronousResponse(/*is_advanced_protection=*/false, sync_response, {"dlp"}); GURL url = embedded_test_server()->GetURL( "/safe_browsing/download_protection/zipfile_two_archives.zip"); ui_test_utils::NavigateToURLWithDisposition( browser(), url, WindowOpenDisposition::CURRENT_TAB, ui_test_utils::BROWSER_TEST_WAIT_FOR_LOAD_STOP); WaitForDeepScanRequest(/*is_advanced_protection=*/false); WaitForDownloadToFinish(); // The file should be blocked for sensitive content. 
ASSERT_EQ(download_items().size(), 1u); download::DownloadItem* item = *download_items().begin(); EXPECT_EQ(item->GetDangerType(), download::DownloadDangerType:: DOWNLOAD_DANGER_TYPE_SENSITIVE_CONTENT_BLOCK); EXPECT_EQ(item->GetState(), download::DownloadItem::INTERRUPTED); } enum class ScanningVerdict { MALWARE, UNWANTED, SAFE }; // This test validates that metadata check verdicts and deep scanning verdicts // override each other correctly and only report up to 1 event. class MetadataCheckAndDeepScanningBrowserTest : public DownloadDeepScanningBrowserTestBase, public testing::WithParamInterface< std::tuple<ClientDownloadResponse::Verdict, ScanningVerdict, bool>> { public: MetadataCheckAndDeepScanningBrowserTest() : DownloadDeepScanningBrowserTestBase(std::get<2>(GetParam())) {} ClientDownloadResponse::Verdict metadata_check_verdict() const { return std::get<0>(GetParam()); } ScanningVerdict scanning_verdict() const { return std::get<1>(GetParam()); } enterprise_connectors::ContentAnalysisResponse scanning_response() const { enterprise_connectors::ContentAnalysisResponse response; response.set_request_token(last_enterprise_request().request_token()); auto* result = response.add_results(); result->set_tag("malware"); result->set_status( enterprise_connectors::ContentAnalysisResponse::Result::SUCCESS); if (scanning_verdict() == ScanningVerdict::MALWARE) { auto* rule = result->add_triggered_rules(); rule->set_action(enterprise_connectors::TriggeredRule::BLOCK); rule->set_rule_name("malware"); } else if (scanning_verdict() == ScanningVerdict::UNWANTED) { auto* rule = result->add_triggered_rules(); rule->set_action(enterprise_connectors::TriggeredRule::WARN); rule->set_rule_name("uws"); } return response; } std::string metadata_check_threat_type() const { switch (metadata_check_verdict()) { case ClientDownloadResponse::UNKNOWN: case ClientDownloadResponse::SAFE: return ""; case ClientDownloadResponse::DANGEROUS: return "DANGEROUS"; case ClientDownloadResponse::UNCOMMON: 
return "UNCOMMON"; case ClientDownloadResponse::POTENTIALLY_UNWANTED: return "POTENTIALLY_UNWANTED"; case ClientDownloadResponse::DANGEROUS_HOST: return "DANGEROUS_HOST"; } } std::string expected_threat_type() const { // These results exempt the file from being deep scanned. if (metadata_check_verdict() == ClientDownloadResponse::DANGEROUS || metadata_check_verdict() == ClientDownloadResponse::DANGEROUS_HOST) { return metadata_check_threat_type(); } switch (scanning_verdict()) { case ScanningVerdict::MALWARE: return "DANGEROUS"; case ScanningVerdict::UNWANTED: return "POTENTIALLY_UNWANTED"; case ScanningVerdict::SAFE: return metadata_check_threat_type(); } } download::DownloadDangerType expected_danger_type() const { switch (metadata_check_verdict()) { case ClientDownloadResponse::DANGEROUS: return download::DownloadDangerType:: DOWNLOAD_DANGER_TYPE_DANGEROUS_CONTENT; case ClientDownloadResponse::DANGEROUS_HOST: return download::DownloadDangerType:: DOWNLOAD_DANGER_TYPE_DANGEROUS_HOST; case ClientDownloadResponse::UNCOMMON: if (scanning_verdict() != ScanningVerdict::MALWARE) { return download::DownloadDangerType:: DOWNLOAD_DANGER_TYPE_UNCOMMON_CONTENT; } break; case ClientDownloadResponse::POTENTIALLY_UNWANTED: if (scanning_verdict() != ScanningVerdict::MALWARE) { return download::DownloadDangerType:: DOWNLOAD_DANGER_TYPE_POTENTIALLY_UNWANTED; } break; case ClientDownloadResponse::UNKNOWN: case ClientDownloadResponse::SAFE: break; } switch (scanning_verdict()) { case ScanningVerdict::MALWARE: return download::DownloadDangerType:: DOWNLOAD_DANGER_TYPE_DANGEROUS_CONTENT; case ScanningVerdict::UNWANTED: return download::DownloadDangerType:: DOWNLOAD_DANGER_TYPE_POTENTIALLY_UNWANTED; case ScanningVerdict::SAFE: return download::DownloadDangerType:: DOWNLOAD_DANGER_TYPE_DEEP_SCANNED_SAFE; } } bool deep_scan_needed() const { return metadata_check_verdict() != ClientDownloadResponse::DANGEROUS && metadata_check_verdict() != ClientDownloadResponse::DANGEROUS_HOST; } }; 
INSTANTIATE_TEST_SUITE_P( , MetadataCheckAndDeepScanningBrowserTest, testing::Combine( testing::Values(ClientDownloadResponse::SAFE, ClientDownloadResponse::DANGEROUS, ClientDownloadResponse::UNCOMMON, ClientDownloadResponse::POTENTIALLY_UNWANTED, ClientDownloadResponse::DANGEROUS_HOST, ClientDownloadResponse::UNKNOWN), testing::Values(ScanningVerdict::MALWARE, ScanningVerdict::UNWANTED, ScanningVerdict::SAFE), testing::Bool())); IN_PROC_BROWSER_TEST_P(MetadataCheckAndDeepScanningBrowserTest, Test) { // This allows the blocking DM token reads happening on profile-Connector // triggers. base::ScopedAllowBlockingForTesting allow_blocking; SetUpReporting(); SetAnalysisConnector(browser()->profile()->GetPrefs(), enterprise_connectors::FILE_DOWNLOADED, R"({ "service_provider": "google", "enable": [ { "url_list": ["*"], "tags": ["malware"] } ] })", connectors_machine_scope()); base::HistogramTester histograms; // Set up the metadata response. ClientDownloadResponse metadata_response; metadata_response.set_verdict(metadata_check_verdict()); ExpectMetadataResponse(metadata_response); // Nothing is returned synchronously. if (deep_scan_needed()) { enterprise_connectors::ContentAnalysisResponse sync_response; sync_response.set_request_token(last_enterprise_request().request_token()); ExpectContentAnalysisSynchronousResponse(/*is_advanced_protection=*/false, sync_response, {"malware"}); } GURL url = embedded_test_server()->GetURL( "/safe_browsing/download_protection/zipfile_two_archives.zip"); ui_test_utils::NavigateToURLWithDisposition( browser(), url, WindowOpenDisposition::CURRENT_TAB, ui_test_utils::BROWSER_TEST_WAIT_FOR_LOAD_STOP); WaitForMetadataCheck(); if (deep_scan_needed()) WaitForDeepScanRequest(/*is_advanced_protection=*/false); // Both the DLP and malware violations generate an event. 
std::set<std::string> zip_types = {"application/zip", "application/x-zip-compressed"}; EventReportValidator validator(client()); std::string threat_type = expected_threat_type(); if (threat_type.empty()) { validator.ExpectNoReport(); } else { validator.ExpectDangerousDeepScanningResult( /*url*/ url.spec(), /*filename*/ (*download_items().begin())->GetTargetFilePath().AsUTF8Unsafe(), // sha256sum chrome/test/data/safe_browsing/download_protection/\ // zipfile_two_archives.zip | tr '[:lower:]' '[:upper:]' /*sha*/ "339C8FFDAE735C4F1846D0E6FF07FBD85CAEE6D96045AAEF5B30F3220836643C", /*threat_type*/ threat_type, /*trigger*/ extensions::SafeBrowsingPrivateEventRouter::kTriggerFileDownload, /*mimetypes*/ &zip_types, /*size*/ 276, /*result*/ EventResultToString(EventResult::WARNED), /*username*/ kUserName); } // The deep scanning malware verdict is returned asynchronously. It is not // done if the previous verdict is DANGEROUS or DANGEROUS_HOST. if (deep_scan_needed()) { SendFcmMessage(scanning_response()); } else { base::RunLoop run_loop(base::RunLoop::Type::kNestableTasksAllowed); validator.SetDoneClosure(run_loop.QuitClosure()); run_loop.Run(); } // The file should be blocked. ASSERT_EQ(download_items().size(), 1u); download::DownloadItem* item = *download_items().begin(); EXPECT_EQ(item->GetDangerType(), expected_danger_type()); EXPECT_EQ(item->GetState(), download::DownloadItem::IN_PROGRESS); if (metadata_check_verdict() == ClientDownloadResponse::UNCOMMON) { // UNCOMMON is not a verdict that's considered malicious, so the download // will not allow Chrome to close before being accepted or cancelled first // (see DownloadManagerImpl::NonMaliciousInProgressCount). This makes the // test crash after it runs as some callbacks are left unresolved, so a // "cancel" is simulated. 
item->SimulateErrorForTesting( download::DOWNLOAD_INTERRUPT_REASON_USER_CANCELED); } if (threat_type.empty()) { // Safe verdicts on both SB and deep scanning tests need to wait for the // download to complete so they don't crash after being done. WaitForDownloadToFinish(); EXPECT_EQ(item->GetDangerType(), expected_danger_type()); EXPECT_EQ(item->GetState(), download::DownloadItem::COMPLETE); } // UMAs for this request should only be recorded once, and only if the malware // deep scan takes place. int samples = deep_scan_needed() ? 1 : 0; histograms.ExpectUniqueSample("SafeBrowsingBinaryUploadRequest.Result", BinaryUploadService::Result::SUCCESS, samples); histograms.ExpectUniqueSample("SafeBrowsingBinaryUploadRequest.MalwareResult", true, samples); } } // namespace safe_browsing
MewX/contendo-viewer-v1.6.3
net/a/a/e/c/b/k.java
package net.a.a.e.c.b;

import net.a.a.c;
import net.a.a.c.d;
import net.a.a.e.c.a;
import org.apache.batik.dom.AbstractDocument;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.w3c.dom.Node;
import org.w3c.dom.mathml.MathMLStyleElement;

/**
 * Decompiled (obfuscated) DOM element class implementing the MathML
 * {@code <mstyle>} presentation element ({@link MathMLStyleElement}).
 *
 * <p>The constants below name the attributes this element handles:
 * "scriptminsize", "scriptlevel", "scriptsizemultiplier", "displaystyle",
 * and the element tag itself, "mstyle".</p>
 *
 * <p>NOTE(review): this is JD-Core decompiler output (see trailer comment).
 * It contains decompilation artifacts that do not resolve in this source
 * alone — {@code k.i()} (presumably the synthetic accessor for the static
 * log {@code w}) and {@code this.a} inside the inner class (presumably the
 * synthetic outer-instance reference {@code k.this}). Do not hand-edit the
 * logic; regenerate from the original source if changes are needed.</p>
 */
public final class k
  extends a
  implements MathMLStyleElement
{
  // Attribute-name constants (obfuscated field names r..v).
  public static final String r = "scriptminsize";
  public static final String s = "scriptlevel";
  public static final String t = "scriptsizemultiplier";
  public static final String u = "displaystyle";
  // Element tag name.
  public static final String v = "mstyle";
  // Class logger (accessed from the inner class via the synthetic k.i()).
  private static final Log w = LogFactory.getLog(k.class);
  // Presumably serialVersionUID — TODO confirm against original source.
  private static final long x = 1L;

  /**
   * Creates an mstyle element node.
   *
   * @param paramString            qualified node name
   * @param paramAbstractDocument  owner document (Batik DOM)
   */
  public k(String paramString, AbstractDocument paramAbstractDocument) {
    super(paramString, paramAbstractDocument);
    // Obfuscated superclass helper; presumably registers the
    // "displaystyle" attribute with an empty default — TODO confirm.
    a("displaystyle", "");
  }

  /** Batik DOM hook: returns a fresh node of this class for cloning. */
  protected Node newNode() {
    return (Node)new k(this.nodeName, this.ownerDocument);
  }

  /** @return the raw "scriptlevel" attribute value. */
  public String getScriptlevel() {
    return a("scriptlevel");
  }

  /** Sets the "scriptlevel" attribute. */
  public void setScriptlevel(String paramString) {
    setAttribute("scriptlevel", paramString);
  }

  /** @return the raw "scriptminsize" attribute value. */
  public String getScriptminsize() {
    return a("scriptminsize");
  }

  /** Sets the "scriptminsize" attribute. */
  public void setScriptminsize(String paramString) {
    setAttribute("scriptminsize", paramString);
  }

  /**
   * Inner wrapper around a delegate context {@code c} that overlays this
   * mstyle element's attributes (displaystyle, scriptlevel, scriptminsize)
   * on top of the values produced by the wrapped context.
   *
   * <p>NOTE(review): shadows the superclass name {@code a}; references to
   * {@code this.a} below are the decompiler's rendering of the synthetic
   * outer-instance field (i.e. {@code k.this}) — TODO confirm.</p>
   */
  private class a
    implements c {
    // The wrapped (parent) context being decorated.
    private final c b;

    protected a(k this$0, c param1c) {
      this.b = param1c;
    }

    /**
     * Looks up parameter {@code param1d} in the parent context, then
     * overrides it when the corresponding mstyle attribute is set:
     * d.a -> displaystyle, d.e -> scriptlevel, d.c -> scriptminsize
     * (mapping inferred from the helpers called — TODO confirm key names).
     */
    public Object a(d param1d) {
      Object object = this.a.b(this.b).a(param1d);
      if (d.a.equals(param1d)) {
        object = c(object);
      } else if (d.e.equals(param1d)) {
        object = b(object);
      } else if (d.c.equals(param1d)) {
        object = a(object);
      }
      return object;
    }

    /**
     * scriptminsize override: when the attribute is non-empty, parses it
     * as a length in "pt" via the obfuscated unit-conversion helper;
     * otherwise passes the inherited value through.
     */
    private Object a(Object param1Object) {
      String str = this.a.getScriptminsize();
      if (str != null && str.length() > 0) {
        return Float.valueOf(net.a.a.e.d.a.a.a(str, this.b, "pt"));
      }
      return param1Object;
    }

    /**
     * scriptlevel override. Implements the MathML attribute syntax:
     * a leading '+' or '-' means a relative adjustment of the inherited
     * level; a bare integer sets it absolutely. Malformed values are
     * logged and the inherited value is kept.
     */
    private Object b(Object param1Object) {
      Object object = param1Object;
      String str = this.a.getScriptlevel();
      if (str == null) {
        str = "";
      }
      str = str.trim();
      if (str.length() > 0) {
        char c1 = str.charAt(0);
        // bool == true -> relative adjustment ("+n" or "-n").
        boolean bool = false;
        if (c1 == '+') {
          bool = true;
          // Strip '+': Integer.parseInt cannot handle it here; the '-'
          // branch keeps the sign so parseInt yields a negative delta.
          str = str.substring(1);
        } else if (c1 == '-') {
          bool = true;
        }
        try {
          int i = Integer.parseInt(str);
          if (bool) {
            object = Integer.valueOf(((Integer)object).intValue() + i);
          } else {
            object = Integer.valueOf(i);
          }
        } catch (NumberFormatException numberFormatException) {
          // k.i() is a decompiler-generated synthetic accessor,
          // presumably returning the static log w — TODO confirm.
          k.i()
            .warn("Error in scriptlevel attribute for mstyle: " + str);
        }
      }

      return object;
    }

    /**
     * displaystyle override: "true"/"false" (case-insensitive) map to the
     * two obfuscated display-mode constants; any other value leaves the
     * inherited setting untouched.
     */
    private Object c(Object param1Object) {
      Object object = param1Object;
      String str = this.a.getDisplaystyle();
      if ("true".equalsIgnoreCase(str)) {
        object = net.a.a.c.a.a;
      }
      if ("false".equalsIgnoreCase(str)) {
        object = net.a.a.c.a.b;
      }
      return object;
    }
  }

  /**
   * Returns the rendering/layout context for child {@code paramInt},
   * wrapping the given parent context so this element's style attributes
   * take effect (the child index is unused here).
   */
  public c a(int paramInt, c paramc) {
    return new a(this, paramc);
  }

  /** MathMLStyleElement: background delegates to mathbackground. */
  public String getBackground() {
    return getMathbackground();
  }

  /** Obfuscated accessor; delegates to mathcolor. */
  public String a() {
    return getMathcolor();
  }

  /** @return the raw "displaystyle" attribute value. */
  public String getDisplaystyle() {
    return a("displaystyle");
  }

  /** @return the raw "scriptsizemultiplier" attribute value. */
  public String getScriptsizemultiplier() {
    return a("scriptsizemultiplier");
  }

  /** MathMLStyleElement: background delegates to mathbackground. */
  public void setBackground(String paramString) {
    setMathbackground(paramString);
  }

  /** Obfuscated mutator; delegates to mathcolor. */
  public void b(String paramString) {
    setMathcolor(paramString);
  }

  /** Sets the "displaystyle" attribute. */
  public void setDisplaystyle(String paramString) {
    setAttribute("displaystyle", paramString);
  }

  /** Sets the "scriptsizemultiplier" attribute. */
  public void setScriptsizemultiplier(String paramString) {
    setAttribute("scriptsizemultiplier", paramString);
  }

  // ---------------------------------------------------------------------
  // The deprecated MathML 1.x named-mathspace accessors required by
  // MathMLStyleElement are intentionally unimplemented: all of the
  // getters/setters below unconditionally throw
  // UnsupportedOperationException.
  // ---------------------------------------------------------------------

  public String getMediummathspace() {
    throw new UnsupportedOperationException();
  }

  public String getNegativemediummathspace() {
    throw new UnsupportedOperationException();
  }

  public String getNegativethickmathspace() {
    throw new UnsupportedOperationException();
  }

  public String getNegativethinmathspace() {
    throw new UnsupportedOperationException();
  }

  public String getNegativeverythickmathspace() {
    throw new UnsupportedOperationException();
  }

  public String getNegativeverythinmathspace() {
    throw new UnsupportedOperationException();
  }

  public String getNegativeveryverythickmathspace() {
    throw new UnsupportedOperationException();
  }

  public String getNegativeveryverythinmathspace() {
    throw new UnsupportedOperationException();
  }

  public String getThickmathspace() {
    throw new UnsupportedOperationException();
  }

  public String getThinmathspace() {
    throw new UnsupportedOperationException();
  }

  public String getVerythickmathspace() {
    throw new UnsupportedOperationException();
  }

  public String getVerythinmathspace() {
    throw new UnsupportedOperationException();
  }

  public String getVeryverythickmathspace() {
    throw new UnsupportedOperationException();
  }

  public String getVeryverythinmathspace() {
    throw new UnsupportedOperationException();
  }

  public void setMediummathspace(String paramString) {
    throw new UnsupportedOperationException();
  }

  public void setNegativemediummathspace(String paramString) {
    throw new UnsupportedOperationException();
  }

  public void setNegativethickmathspace(String paramString) {
    throw new UnsupportedOperationException();
  }

  public void setNegativethinmathspace(String paramString) {
    throw new UnsupportedOperationException();
  }

  public void setNegativeverythickmathspace(String paramString) {
    throw new UnsupportedOperationException();
  }

  public void setNegativeverythinmathspace(String paramString) {
    throw new UnsupportedOperationException();
  }

  public void setNegativeveryverythickmathspace(String paramString) {
    throw new UnsupportedOperationException();
  }

  public void setNegativeveryverythinmathspace(String paramString) {
    throw new UnsupportedOperationException();
  }

  public void setThickmathspace(String paramString) {
    throw new UnsupportedOperationException();
  }

  public void setThinmathspace(String paramString) {
    throw new UnsupportedOperationException();
  }

  public void setVerythickmathspace(String paramString) {
    throw new UnsupportedOperationException();
  }

  public void setVerythinmathspace(String paramString) {
    throw new UnsupportedOperationException();
  }

  public void setVeryverythickmathspace(String paramString) {
    throw new UnsupportedOperationException();
  }

  public void setVeryverythinmathspace(String paramString) {
    throw new UnsupportedOperationException();
  }
}


/* Location:              /mnt/r/ConTenDoViewer.jar!/net/a/a/e/c/b/k.class
 * Java compiler version: 6 (50.0)
 * JD-Core Version:       1.1.3
 */
larsw/rya
sail/src/test/java/org/apache/rya/RdfCloudTripleStoreConnectionTest.java
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.rya; import static org.apache.rya.api.RdfCloudTripleStoreConstants.NAMESPACE; import java.io.InputStream; import java.util.List; import org.apache.accumulo.core.client.Connector; import org.apache.accumulo.core.client.Instance; import org.apache.accumulo.core.client.mock.MockInstance; import org.apache.rya.accumulo.AccumuloRdfConfiguration; import org.apache.rya.accumulo.AccumuloRyaDAO; import org.apache.rya.api.RdfCloudTripleStoreConfiguration; import org.apache.rya.api.RdfCloudTripleStoreConstants; import org.apache.rya.rdftriplestore.RdfCloudTripleStore; import org.apache.rya.rdftriplestore.RyaSailRepository; import org.apache.rya.rdftriplestore.inference.InferenceEngine; import org.apache.rya.rdftriplestore.namespace.NamespaceManager; import org.eclipse.rdf4j.model.IRI; import org.eclipse.rdf4j.model.Literal; import org.eclipse.rdf4j.model.Model; import org.eclipse.rdf4j.model.Statement; import org.eclipse.rdf4j.model.Value; import org.eclipse.rdf4j.model.impl.SimpleValueFactory; import org.eclipse.rdf4j.model.vocabulary.OWL; import org.eclipse.rdf4j.model.vocabulary.RDF; import org.eclipse.rdf4j.model.vocabulary.RDFS; import org.eclipse.rdf4j.query.BindingSet; import 
org.eclipse.rdf4j.query.QueryLanguage; import org.eclipse.rdf4j.query.QueryResultHandlerException; import org.eclipse.rdf4j.query.TupleQuery; import org.eclipse.rdf4j.query.TupleQueryResultHandler; import org.eclipse.rdf4j.query.TupleQueryResultHandlerException; import org.eclipse.rdf4j.query.Update; import org.eclipse.rdf4j.repository.Repository; import org.eclipse.rdf4j.repository.RepositoryConnection; import org.eclipse.rdf4j.repository.RepositoryResult; import org.eclipse.rdf4j.repository.sail.SailRepository; import org.eclipse.rdf4j.rio.RDFFormat; import org.eclipse.rdf4j.rio.Rio; import junit.framework.TestCase; /** * Class RdfCloudTripleStoreConnectionTest * Date: Mar 3, 2011 * Time: 12:03:29 PM */ public class RdfCloudTripleStoreConnectionTest extends TestCase { private Repository repository; private static final SimpleValueFactory VF = SimpleValueFactory.getInstance(); private InferenceEngine internalInferenceEngine; static String litdupsNS = "urn:test:litdups#"; IRI cpu = VF.createIRI(litdupsNS, "cpu"); protected RdfCloudTripleStore store; @Override public void setUp() throws Exception { super.setUp(); store = new MockRdfCloudStore(); // store.setDisplayQueryPlan(true); // store.setInferencing(false); NamespaceManager nm = new NamespaceManager(store.getRyaDAO(), store.getConf()); store.setNamespaceManager(nm); repository = new RyaSailRepository(store); repository.initialize(); } @Override public void tearDown() throws Exception { super.tearDown(); repository.shutDown(); } public void testAddStatement() throws Exception { RepositoryConnection conn = repository.getConnection(); IRI loadPerc = VF.createIRI(litdupsNS, "loadPerc"); IRI uri1 = VF.createIRI(litdupsNS, "uri1"); conn.add(cpu, loadPerc, uri1); conn.commit(); RepositoryResult<Statement> result = conn.getStatements(cpu, loadPerc, null, true); int count = 0; while (result.hasNext()) { count++; result.next(); } result.close(); assertEquals(1, count); //clean up conn.remove(cpu, loadPerc, uri1); // 
//test removal result = conn.getStatements(cpu, loadPerc, null, true); count = 0; while (result.hasNext()) { count++; result.next(); } result.close(); assertEquals(0, count); conn.close(); } // public void testAddAuth() throws Exception { // RepositoryConnection conn = repository.getConnection(); // IRI cpu = vf.createIRI(litdupsNS, "cpu"); // IRI loadPerc = vf.createIRI(litdupsNS, "loadPerc"); // IRI uri1 = vf.createIRI(litdupsNS, "uri1"); // IRI uri2 = vf.createIRI(litdupsNS, "uri2"); // IRI uri3 = vf.createIRI(litdupsNS, "uri3"); // IRI auth1 = vf.createIRI(RdfCloudTripleStoreConstants.AUTH_NAMESPACE, "1"); // IRI auth2 = vf.createIRI(RdfCloudTripleStoreConstants.AUTH_NAMESPACE, "2"); // IRI auth3 = vf.createIRI(RdfCloudTripleStoreConstants.AUTH_NAMESPACE, "3"); // conn.add(cpu, loadPerc, uri1, auth1, auth2, auth3); // conn.add(cpu, loadPerc, uri2, auth2, auth3); // conn.add(cpu, loadPerc, uri3, auth3); // conn.commit(); // // //query with no auth // RepositoryResult<Statement> result = conn.getStatements(cpu, loadPerc, null, true); // int count = 0; // while (result.hasNext()) { // count++; // result.next(); // } // assertEquals(0, count); // result.close(); // // String query = "select * where {" + // "<" + cpu.toString() + "> ?p ?o1." + // "}"; // TupleQuery tupleQuery = conn.prepareTupleQuery(QueryLanguage.SPARQL, query); // tupleQuery.setBinding(RdfCloudTripleStoreConfiguration.CONF_QUERY_AUTH, vf.createLiteral("2")); // CountTupleHandler cth = new CountTupleHandler(); // tupleQuery.evaluate(cth); // assertEquals(2, cth.getCount()); // // conn.close(); // } public void testEvaluate() throws Exception { RepositoryConnection conn = repository.getConnection(); IRI loadPerc = VF.createIRI(litdupsNS, "loadPerc"); IRI uri1 = VF.createIRI(litdupsNS, "uri1"); conn.add(cpu, loadPerc, uri1); conn.commit(); String query = "select * where {" + "?x <" + loadPerc.stringValue() + "> ?o1." 
+ "}"; TupleQuery tupleQuery = conn.prepareTupleQuery(QueryLanguage.SPARQL, query); CountTupleHandler cth = new CountTupleHandler(); tupleQuery.evaluate(cth); assertEquals(cth.getCount(), 1); conn.close(); } public void testEvaluateMultiLine() throws Exception { RepositoryConnection conn = repository.getConnection(); IRI loadPerc = VF.createIRI(litdupsNS, "loadPerc"); IRI uri1 = VF.createIRI(litdupsNS, "uri1"); IRI pred2 = VF.createIRI(litdupsNS, "pred2"); IRI uri2 = VF.createIRI(litdupsNS, "uri2"); conn.add(cpu, loadPerc, uri1); conn.add(cpu, pred2, uri2); conn.commit(); String query = "select * where {" + "?x <" + loadPerc.stringValue() + "> ?o1." + "?x <" + pred2.stringValue() + "> ?o2." + "}"; TupleQuery tupleQuery = conn.prepareTupleQuery(QueryLanguage.SPARQL, query); tupleQuery.setBinding(RdfCloudTripleStoreConfiguration.CONF_QUERYPLAN_FLAG, RdfCloudTripleStoreConstants.VALUE_FACTORY.createLiteral(true)); CountTupleHandler cth = new CountTupleHandler(); tupleQuery.evaluate(cth); conn.close(); assertEquals(cth.getCount(), 1); } public void testPOObjRange() throws Exception { RepositoryConnection conn = repository.getConnection(); IRI loadPerc = VF.createIRI(litdupsNS, "loadPerc"); Literal six = VF.createLiteral("6"); Literal sev = VF.createLiteral("7"); Literal ten = VF.createLiteral("10"); conn.add(cpu, loadPerc, six); conn.add(cpu, loadPerc, sev); conn.add(cpu, loadPerc, ten); conn.commit(); String query = "PREFIX org.apache: <" + NAMESPACE + ">\n" + "select * where {" + "?x <" + loadPerc.stringValue() + "> ?o.\n" + "FILTER(org.apache:range(?o, '6', '8'))." 
+ "}"; TupleQuery tupleQuery = conn.prepareTupleQuery(QueryLanguage.SPARQL, query); CountTupleHandler cth = new CountTupleHandler(); tupleQuery.evaluate(cth); conn.close(); assertEquals(2, cth.getCount()); } public void testPOPredRange() throws Exception { RepositoryConnection conn = repository.getConnection(); IRI loadPerc = VF.createIRI(litdupsNS, "loadPerc1"); IRI loadPerc2 = VF.createIRI(litdupsNS, "loadPerc2"); IRI loadPerc3 = VF.createIRI(litdupsNS, "loadPerc3"); IRI loadPerc4 = VF.createIRI(litdupsNS, "loadPerc4"); Literal six = VF.createLiteral("6"); Literal sev = VF.createLiteral("7"); Literal ten = VF.createLiteral("10"); conn.add(cpu, loadPerc, six); conn.add(cpu, loadPerc2, sev); conn.add(cpu, loadPerc4, ten); conn.commit(); String query = "PREFIX org.apache: <" + NAMESPACE + ">\n" + "select * where {" + "?x ?p ?o.\n" + "FILTER(org.apache:range(?p, <" + loadPerc.stringValue() + ">, <" + loadPerc3.stringValue() + ">))." + "}"; TupleQuery tupleQuery = conn.prepareTupleQuery(QueryLanguage.SPARQL, query); CountTupleHandler cth = new CountTupleHandler(); tupleQuery.evaluate(cth); conn.close(); assertEquals(cth.getCount(), 2); } public void testSPOPredRange() throws Exception { RepositoryConnection conn = repository.getConnection(); IRI loadPerc = VF.createIRI(litdupsNS, "loadPerc1"); IRI loadPerc2 = VF.createIRI(litdupsNS, "loadPerc2"); IRI loadPerc3 = VF.createIRI(litdupsNS, "loadPerc3"); IRI loadPerc4 = VF.createIRI(litdupsNS, "loadPerc4"); Literal six = VF.createLiteral("6"); Literal sev = VF.createLiteral("7"); Literal ten = VF.createLiteral("10"); conn.add(cpu, loadPerc, six); conn.add(cpu, loadPerc2, sev); conn.add(cpu, loadPerc4, ten); conn.commit(); String query = "PREFIX org.apache: <" + NAMESPACE + ">\n" + "select * where {" + "<" + cpu.stringValue() + "> ?p ?o.\n" + "FILTER(org.apache:range(?p, <" + loadPerc.stringValue() + ">, <" + loadPerc3.stringValue() + ">))." 
+ "}"; TupleQuery tupleQuery = conn.prepareTupleQuery(QueryLanguage.SPARQL, query); CountTupleHandler cth = new CountTupleHandler(); tupleQuery.evaluate(cth); conn.close(); assertEquals(2, cth.getCount()); } public void testSPOSubjRange() throws Exception { RepositoryConnection conn = repository.getConnection(); IRI cpu2 = VF.createIRI(litdupsNS, "cpu2"); IRI cpu3 = VF.createIRI(litdupsNS, "cpu3"); IRI loadPerc = VF.createIRI(litdupsNS, "loadPerc"); Literal six = VF.createLiteral("6"); Literal sev = VF.createLiteral("7"); Literal ten = VF.createLiteral("10"); conn.add(cpu, loadPerc, six); conn.add(cpu2, loadPerc, sev); conn.add(cpu3, loadPerc, ten); conn.commit(); String query = "PREFIX org.apache: <" + NAMESPACE + ">\n" + "select * where {" + "?s ?p ?o.\n" + "FILTER(org.apache:range(?s, <" + cpu.stringValue() + ">, <" + cpu2.stringValue() + ">))." + "}"; TupleQuery tupleQuery = conn.prepareTupleQuery(QueryLanguage.SPARQL, query); CountTupleHandler cth = new CountTupleHandler(); tupleQuery.evaluate(cth); conn.close(); assertEquals(cth.getCount(), 2); } public void testSPOObjRange() throws Exception { RepositoryConnection conn = repository.getConnection(); IRI loadPerc = VF.createIRI(litdupsNS, "loadPerc"); Literal six = VF.createLiteral("6"); Literal sev = VF.createLiteral("7"); Literal ten = VF.createLiteral("10"); conn.add(cpu, loadPerc, six); conn.add(cpu, loadPerc, sev); conn.add(cpu, loadPerc, ten); conn.commit(); String query = "PREFIX org.apache: <" + NAMESPACE + ">\n" + "select * where {" + "<" + cpu.stringValue() + "> <" + loadPerc.stringValue() + "> ?o.\n" + "FILTER(org.apache:range(?o, '6', '8'))." 
+ "}"; TupleQuery tupleQuery = conn.prepareTupleQuery(QueryLanguage.SPARQL, query); CountTupleHandler cth = new CountTupleHandler(); tupleQuery.evaluate(cth); conn.close(); assertEquals(cth.getCount(), 2); } public void testOSPObjRange() throws Exception { RepositoryConnection conn = repository.getConnection(); IRI loadPerc = VF.createIRI(litdupsNS, "loadPerc"); Literal six = VF.createLiteral("6"); Literal sev = VF.createLiteral("7"); Literal ten = VF.createLiteral("10"); conn.add(cpu, loadPerc, six); conn.add(cpu, loadPerc, sev); conn.add(cpu, loadPerc, ten); conn.commit(); String query = "PREFIX org.apache: <" + NAMESPACE + ">\n" + "select * where {" + "?s ?p ?o.\n" + "FILTER(org.apache:range(?o, '6', '8'))." + "}"; TupleQuery tupleQuery = conn.prepareTupleQuery(QueryLanguage.SPARQL, query); CountTupleHandler cth = new CountTupleHandler(); tupleQuery.evaluate(cth); conn.close(); assertEquals(cth.getCount(), 2); } public void testRegexFilter() throws Exception { RepositoryConnection conn = repository.getConnection(); IRI loadPerc = VF.createIRI(litdupsNS, "loadPerc"); IRI testClass = VF.createIRI(litdupsNS, "test"); Literal six = VF.createLiteral("6"); Literal sev = VF.createLiteral("7"); Literal ten = VF.createLiteral("10"); conn.add(cpu, loadPerc, six); conn.add(cpu, loadPerc, sev); conn.add(cpu, loadPerc, ten); conn.add(cpu, RDF.TYPE, testClass); conn.commit(); String query = "PREFIX org.apache: <" + NAMESPACE + ">\n" + "select * where {" + String.format("<%s> ?p ?o.\n", cpu.stringValue()) + "FILTER(regex(?o, '^1'))." 
+ "}";
        // NOTE(review): the expression above is the tail of a SPARQL query string begun
        // before this chunk; the statements below finish that test (evaluate + count check).
        TupleQuery tupleQuery = conn.prepareTupleQuery(QueryLanguage.SPARQL, query);
        CountTupleHandler cth = new CountTupleHandler();
        tupleQuery.evaluate(cth);
        conn.close();
        assertEquals(cth.getCount(), 1);
    }

    // Regression test: a statement with a IRI object must be returned by a
    // subject/predicate getStatements scan (iterator must terminate cleanly).
    public void testMMRTS152() throws Exception {
        RepositoryConnection conn = repository.getConnection();
        IRI loadPerc = VF.createIRI(litdupsNS, "testPred");
        IRI uri1 = VF.createIRI(litdupsNS, "uri1");
        conn.add(cpu, loadPerc, uri1);
        conn.commit();
        RepositoryResult<Statement> result = conn.getStatements(cpu, loadPerc, null, false);
//        RdfCloudTripleStoreCollectionStatementsIterator iterator = new RdfCloudTripleStoreCollectionStatementsIterator(
//                cpu, loadPerc, null, store.connector,
//                vf, new Configuration(), null);
        while (result.hasNext()) {
            assertTrue(result.hasNext());
            assertNotNull(result.next());
        }
        conn.close();
    }

    // Adding the same literal value three times must yield a single stored statement.
    public void testDuplicateLiterals() throws Exception {
        RepositoryConnection conn = repository.getConnection();
        IRI loadPerc = VF.createIRI(litdupsNS, "loadPerc");
        Literal lit1 = VF.createLiteral(0.0);
        Literal lit2 = VF.createLiteral(0.0);
        Literal lit3 = VF.createLiteral(0.0);
        conn.add(cpu, loadPerc, lit1);
        conn.add(cpu, loadPerc, lit2);
        conn.add(cpu, loadPerc, lit3);
        conn.commit();
        RepositoryResult<Statement> result = conn.getStatements(cpu, loadPerc, null, true);
        int count = 0;
        while (result.hasNext()) {
            count++;
            result.next();
        }
        result.close();
        assertEquals(1, count);
        //clean up
        conn.remove(cpu, loadPerc, lit1);
        conn.close();
    }

    // Adding three IRI objects with identical lexical value must also dedupe to one statement.
    public void testNotDuplicateUris() throws Exception {
        RepositoryConnection conn = repository.getConnection();
        IRI loadPerc = VF.createIRI(litdupsNS, "loadPerc");
        IRI uri1 = VF.createIRI(litdupsNS, "uri1");
        IRI uri2 = VF.createIRI(litdupsNS, "uri1");
        IRI uri3 = VF.createIRI(litdupsNS, "uri1");
        conn.add(cpu, loadPerc, uri1);
        conn.add(cpu, loadPerc, uri2);
        conn.add(cpu, loadPerc, uri3);
        conn.commit();
        RepositoryResult<Statement> result = conn.getStatements(cpu, loadPerc, null, true);
        int count = 0;
        while (result.hasNext()) {
            count++;
            result.next();
        }
        result.close();
        assertEquals(1, count);
        //clean up
        conn.remove(cpu, loadPerc, uri1);
        conn.close();
    }

    // A namespace prefix registered on the connection must be usable in a SPARQL query,
    // and the bound object must round-trip to the original IRI.
    public void testNamespaceUsage() throws Exception {
        RepositoryConnection conn = repository.getConnection();
        conn.setNamespace("lit", litdupsNS);
        IRI loadPerc = VF.createIRI(litdupsNS, "loadPerc");
        final IRI uri1 = VF.createIRI(litdupsNS, "uri1");
        conn.add(cpu, loadPerc, uri1);
        conn.commit();
        String query = "PREFIX lit: <" + litdupsNS + ">\n" + "select * where {lit:cpu lit:loadPerc ?o.}";
        TupleQuery tupleQuery = conn.prepareTupleQuery(QueryLanguage.SPARQL, query);
        tupleQuery.evaluate(new TupleQueryResultHandler() {
            @Override
            public void startQueryResult(List<String> strings) throws TupleQueryResultHandlerException {
            }

            @Override
            public void endQueryResult() throws TupleQueryResultHandlerException {
            }

            @Override
            public void handleSolution(BindingSet bindingSet) throws TupleQueryResultHandlerException {
                // each solution's ?o must be the IRI that was inserted
                assertTrue(uri1.toString().equals(bindingSet.getBinding("o").getValue().stringValue()));
            }

            @Override
            public void handleBoolean(boolean paramBoolean) throws QueryResultHandlerException {
            }

            @Override
            public void handleLinks(List<String> paramList) throws QueryResultHandlerException {
            }
        });
        conn.close();
    }

    // rdfs:subPropertyOf inference: querying a super-property must also match statements
    // asserted with any of its sub-properties (chain: undergrad/gradDegreeFrom -> degreeFrom -> memberOf -> associatedWith).
    public void testSubPropertyOf() throws Exception {
        if(internalInferenceEngine == null) {
            return; //infer not supported;
        }
        RepositoryConnection conn = repository.getConnection();
        conn.add(VF.createStatement(VF.createIRI(litdupsNS, "undergradDegreeFrom"), RDFS.SUBPROPERTYOF, VF.createIRI(litdupsNS, "degreeFrom")));
        conn.add(VF.createStatement(VF.createIRI(litdupsNS, "gradDegreeFrom"), RDFS.SUBPROPERTYOF, VF.createIRI(litdupsNS, "degreeFrom")));
        conn.add(VF.createStatement(VF.createIRI(litdupsNS, "degreeFrom"), RDFS.SUBPROPERTYOF, VF.createIRI(litdupsNS, "memberOf")));
        conn.add(VF.createStatement(VF.createIRI(litdupsNS, "memberOf"), RDFS.SUBPROPERTYOF, VF.createIRI(litdupsNS, "associatedWith")));
        conn.add(VF.createStatement(VF.createIRI(litdupsNS, "UgradA"), VF.createIRI(litdupsNS, "undergradDegreeFrom"), VF.createIRI(litdupsNS, "Harvard")));
        conn.add(VF.createStatement(VF.createIRI(litdupsNS, "GradB"), VF.createIRI(litdupsNS, "gradDegreeFrom"), VF.createIRI(litdupsNS, "Yale")));
        conn.add(VF.createStatement(VF.createIRI(litdupsNS, "ProfessorC"), VF.createIRI(litdupsNS, "memberOf"), VF.createIRI(litdupsNS, "Harvard")));
        conn.commit();
        conn.close();

        // force the inference engine to pick up the new schema statements
        internalInferenceEngine.refreshGraph();

        conn = repository.getConnection();

        String query = "PREFIX rdfs: <" + RDFS.NAMESPACE + ">\n" + "PREFIX rdf: <" + RDF.NAMESPACE + ">\n" + "PREFIX lit: <" + litdupsNS + ">\n" + "select * where {?s lit:degreeFrom lit:Harvard.}";
        TupleQuery tupleQuery = conn.prepareTupleQuery(QueryLanguage.SPARQL, query);
        CountTupleHandler tupleHandler = new CountTupleHandler();
        tupleQuery.evaluate(tupleHandler);
        assertEquals(1, tupleHandler.getCount());

        query = "PREFIX rdfs: <" + RDFS.NAMESPACE + ">\n" + "PREFIX rdf: <" + RDF.NAMESPACE + ">\n" + "PREFIX lit: <" + litdupsNS + ">\n" + "select * where {?s lit:memberOf lit:Harvard.}";
        tupleQuery = conn.prepareTupleQuery(QueryLanguage.SPARQL, query);
        tupleHandler = new CountTupleHandler();
        tupleQuery.evaluate(tupleHandler);
        assertEquals(2, tupleHandler.getCount());

        query = "PREFIX rdfs: <" + RDFS.NAMESPACE + ">\n" + "PREFIX rdf: <" + RDF.NAMESPACE + ">\n" + "PREFIX lit: <" + litdupsNS + ">\n" + "select * where {?s lit:associatedWith ?o.}";
        tupleQuery = conn.prepareTupleQuery(QueryLanguage.SPARQL, query);
        tupleHandler = new CountTupleHandler();
        tupleQuery.evaluate(tupleHandler);
        assertEquals(3, tupleHandler.getCount());

        query = "PREFIX rdfs: <" + RDFS.NAMESPACE + ">\n" + "PREFIX rdf: <" + RDF.NAMESPACE + ">\n" + "PREFIX lit: <" + litdupsNS + ">\n" + "select * where {?s lit:gradDegreeFrom lit:Yale.}";
        tupleQuery = conn.prepareTupleQuery(QueryLanguage.SPARQL, query);
        tupleHandler = new CountTupleHandler();
        tupleQuery.evaluate(tupleHandler);
        assertEquals(1, tupleHandler.getCount());

        conn.close();
    }

    // owl:equivalentProperty inference: statements asserted with either equivalent
    // predicate must match a query on one of them (GradC uses a different, non-equivalent predicate).
    public void testEquivPropOf() throws Exception {
        if(internalInferenceEngine == null) {
            return; //infer not supported;
        }
        RepositoryConnection conn = repository.getConnection();
        conn.add(VF.createStatement(VF.createIRI(litdupsNS, "undergradDegreeFrom"), OWL.EQUIVALENTPROPERTY, VF.createIRI(litdupsNS, "ugradDegreeFrom")));
        conn.add(VF.createStatement(VF.createIRI(litdupsNS, "UgradA"), VF.createIRI(litdupsNS, "undergradDegreeFrom"), VF.createIRI(litdupsNS, "Harvard")));
        conn.add(VF.createStatement(VF.createIRI(litdupsNS, "GradB"), VF.createIRI(litdupsNS, "ugradDegreeFrom"), VF.createIRI(litdupsNS, "Harvard")));
        conn.add(VF.createStatement(VF.createIRI(litdupsNS, "GradC"), VF.createIRI(litdupsNS, "ugraduateDegreeFrom"), VF.createIRI(litdupsNS, "Harvard")));
        conn.commit();
        conn.close();

        internalInferenceEngine.refreshGraph();

        conn = repository.getConnection();

        String query = "PREFIX rdfs: <" + RDFS.NAMESPACE + ">\n" + "PREFIX rdf: <" + RDF.NAMESPACE + ">\n" + "PREFIX lit: <" + litdupsNS + ">\n" + "select * where {?s lit:ugradDegreeFrom lit:Harvard.}";
        TupleQuery tupleQuery = conn.prepareTupleQuery(QueryLanguage.SPARQL, query);
        CountTupleHandler tupleHandler = new CountTupleHandler();
        tupleQuery.evaluate(tupleHandler);
        assertEquals(2, tupleHandler.getCount());

        conn.close();
    }

    // owl:SymmetricProperty inference: friendOf(X,Y) implies friendOf(Y,X),
    // so querying for friends of Bob/James/Jeff must include the inverse direction.
    public void testSymmPropOf() throws Exception {
        if(internalInferenceEngine == null) {
            return; //infer not supported;
        }
        RepositoryConnection conn = repository.getConnection();
        conn.add(VF.createStatement(VF.createIRI(litdupsNS, "friendOf"), RDF.TYPE, OWL.SYMMETRICPROPERTY));
        conn.add(VF.createStatement(VF.createIRI(litdupsNS, "Bob"), VF.createIRI(litdupsNS, "friendOf"), VF.createIRI(litdupsNS, "Jeff")));
        conn.add(VF.createStatement(VF.createIRI(litdupsNS, "James"), VF.createIRI(litdupsNS, "friendOf"), VF.createIRI(litdupsNS, "Jeff")));
        conn.commit();
        conn.close();

        internalInferenceEngine.refreshGraph();

        conn = repository.getConnection();

        String query = "PREFIX rdfs: <" + RDFS.NAMESPACE + ">\n" + "PREFIX rdf: <" + RDF.NAMESPACE + ">\n" + "PREFIX lit: <" + litdupsNS + ">\n" + "select * where {?s lit:friendOf lit:Bob.}";
        TupleQuery tupleQuery = conn.prepareTupleQuery(QueryLanguage.SPARQL, query);
        CountTupleHandler tupleHandler = new CountTupleHandler();
        tupleQuery.evaluate(tupleHandler);
        assertEquals(1, tupleHandler.getCount());

        query = "PREFIX rdfs: <" + RDFS.NAMESPACE + ">\n" + "PREFIX rdf: <" + RDF.NAMESPACE + ">\n" + "PREFIX lit: <" + litdupsNS + ">\n" + "select * where {?s lit:friendOf lit:James.}";
        tupleQuery = conn.prepareTupleQuery(QueryLanguage.SPARQL, query);
        tupleHandler = new CountTupleHandler();
        tupleQuery.evaluate(tupleHandler);
        assertEquals(1, tupleHandler.getCount());

        query = "PREFIX rdfs: <" + RDFS.NAMESPACE + ">\n" + "PREFIX rdf: <" + RDF.NAMESPACE + ">\n" + "PREFIX lit: <" + litdupsNS + ">\n" + "select * where {?s lit:friendOf lit:Jeff.}";
        tupleQuery = conn.prepareTupleQuery(QueryLanguage.SPARQL, query);
        tupleHandler = new CountTupleHandler();
        tupleQuery.evaluate(tupleHandler);
        assertEquals(2, tupleHandler.getCount());

        conn.close();
    }

    // owl:TransitiveProperty inference over the region chain
    // Queens -> NYC -> NY -> US -> NorthAmerica -> World; counts include transitive closures.
    public void testTransitiveProp() throws Exception {
        if(internalInferenceEngine == null) {
            return; //infer not supported;
        }
        RepositoryConnection conn = repository.getConnection();
        conn.add(VF.createStatement(VF.createIRI(litdupsNS, "subRegionOf"), RDF.TYPE, OWL.TRANSITIVEPROPERTY));
        conn.add(VF.createStatement(VF.createIRI(litdupsNS, "Queens"), VF.createIRI(litdupsNS, "subRegionOf"), VF.createIRI(litdupsNS, "NYC")));
        conn.add(VF.createStatement(VF.createIRI(litdupsNS, "NYC"), VF.createIRI(litdupsNS, "subRegionOf"), VF.createIRI(litdupsNS, "NY")));
        conn.add(VF.createStatement(VF.createIRI(litdupsNS, "NY"), VF.createIRI(litdupsNS, "subRegionOf"), VF.createIRI(litdupsNS, "US")));
        conn.add(VF.createStatement(VF.createIRI(litdupsNS, "US"), VF.createIRI(litdupsNS, "subRegionOf"), VF.createIRI(litdupsNS, "NorthAmerica")));
        conn.add(VF.createStatement(VF.createIRI(litdupsNS, "NorthAmerica"), VF.createIRI(litdupsNS, "subRegionOf"), VF.createIRI(litdupsNS, "World")));
        conn.commit();
        conn.close();

        internalInferenceEngine.refreshGraph();

        conn = repository.getConnection();

        String query = "PREFIX rdfs: <" + RDFS.NAMESPACE + ">\n" + "PREFIX rdf: <" + RDF.NAMESPACE + ">\n" + "PREFIX lit: <" + litdupsNS + ">\n" + "select * where {?s lit:subRegionOf lit:NorthAmerica.}";
        TupleQuery tupleQuery = conn.prepareTupleQuery(QueryLanguage.SPARQL, query);
        CountTupleHandler tupleHandler = new CountTupleHandler();
        tupleQuery.evaluate(tupleHandler);
        assertEquals(4, tupleHandler.getCount());

        query = "PREFIX rdfs: <" + RDFS.NAMESPACE + ">\n" + "PREFIX rdf: <" + RDF.NAMESPACE + ">\n" + "PREFIX lit: <" + litdupsNS + ">\n" + "select * where {?s lit:subRegionOf lit:NY.}";
        tupleQuery = conn.prepareTupleQuery(QueryLanguage.SPARQL, query);
        tupleHandler = new CountTupleHandler();
        tupleQuery.evaluate(tupleHandler);
        assertEquals(2, tupleHandler.getCount());

        query = "PREFIX rdfs: <" + RDFS.NAMESPACE + ">\n" + "PREFIX rdf: <" + RDF.NAMESPACE + ">\n" + "PREFIX lit: <" + litdupsNS + ">\n" + "select * where {lit:Queens lit:subRegionOf ?s.}";
        tupleQuery = conn.prepareTupleQuery(QueryLanguage.SPARQL, query);
        tupleHandler = new CountTupleHandler();
        tupleQuery.evaluate(tupleHandler);
        assertEquals(5, tupleHandler.getCount());

        query = "PREFIX rdfs: <" + RDFS.NAMESPACE + ">\n" + "PREFIX rdf: <" + RDF.NAMESPACE + ">\n" + "PREFIX lit: <" + litdupsNS + ">\n" + "select * where {lit:NY lit:subRegionOf ?s.}";
        tupleQuery = conn.prepareTupleQuery(QueryLanguage.SPARQL, query);
        tupleHandler = new CountTupleHandler();
        tupleQuery.evaluate(tupleHandler);
        assertEquals(3, tupleHandler.getCount());

        conn.close();
    }

    // owl:inverseOf inference: degreeFrom(X, Harvard) implies hasAlumnus(Harvard, X)
    // and vice versa; both query directions must see the union of asserted + inferred.
    public void testInverseOf() throws Exception {
        if(internalInferenceEngine == null) {
            return; //infer not supported;
        }
        RepositoryConnection conn = repository.getConnection();
        conn.add(VF.createStatement(VF.createIRI(litdupsNS, "degreeFrom"), OWL.INVERSEOF, VF.createIRI(litdupsNS, "hasAlumnus")));
        conn.add(VF.createStatement(VF.createIRI(litdupsNS, "UgradA"), VF.createIRI(litdupsNS, "degreeFrom"), VF.createIRI(litdupsNS, "Harvard")));
        conn.add(VF.createStatement(VF.createIRI(litdupsNS, "GradB"), VF.createIRI(litdupsNS, "degreeFrom"), VF.createIRI(litdupsNS, "Harvard")));
        conn.add(VF.createStatement(VF.createIRI(litdupsNS, "Harvard"), VF.createIRI(litdupsNS, "hasAlumnus"), VF.createIRI(litdupsNS, "AlumC")));
        conn.commit();
        conn.close();

        internalInferenceEngine.refreshGraph();

        conn = repository.getConnection();

        String query = "PREFIX rdfs: <" + RDFS.NAMESPACE + ">\n" + "PREFIX rdf: <" + RDF.NAMESPACE + ">\n" + "PREFIX lit: <" + litdupsNS + ">\n" + "select * where {lit:Harvard lit:hasAlumnus ?s.}";
        TupleQuery tupleQuery = conn.prepareTupleQuery(QueryLanguage.SPARQL, query);
        CountTupleHandler tupleHandler = new CountTupleHandler();
        tupleQuery.evaluate(tupleHandler);
        assertEquals(3, tupleHandler.getCount());

        query = "PREFIX rdfs: <" + RDFS.NAMESPACE + ">\n" + "PREFIX rdf: <" + RDF.NAMESPACE + ">\n" + "PREFIX lit: <" + litdupsNS + ">\n" + "select * where {?s lit:degreeFrom lit:Harvard.}";
        tupleQuery = conn.prepareTupleQuery(QueryLanguage.SPARQL, query);
        tupleHandler = new CountTupleHandler();
        tupleQuery.evaluate(tupleHandler);
        assertEquals(3, tupleHandler.getCount());

        conn.close();
    }

    // rdfs:subClassOf inference (UndergraduateStudent < Student < Person); checked both
    // through the getStatements API and through SPARQL type queries.
    public void testSubClassOf() throws Exception {
        if(internalInferenceEngine == null) {
            return; //infer not supported;
        }
        RepositoryConnection conn = repository.getConnection();
        conn.add(VF.createStatement(VF.createIRI(litdupsNS, "UndergraduateStudent"), RDFS.SUBCLASSOF, VF.createIRI(litdupsNS, "Student")));
        conn.add(VF.createStatement(VF.createIRI(litdupsNS, "Student"), RDFS.SUBCLASSOF, VF.createIRI(litdupsNS, "Person")));
        conn.add(VF.createStatement(VF.createIRI(litdupsNS, "UgradA"), RDF.TYPE, VF.createIRI(litdupsNS, "UndergraduateStudent")));
        conn.add(VF.createStatement(VF.createIRI(litdupsNS, "StudentB"), RDF.TYPE, VF.createIRI(litdupsNS, "Student")));
        conn.add(VF.createStatement(VF.createIRI(litdupsNS, "PersonC"), RDF.TYPE, VF.createIRI(litdupsNS, "Person")));
        conn.commit();
        conn.close();

        internalInferenceEngine.refreshGraph();

        conn = repository.getConnection();

        //simple api first
        RepositoryResult<Statement> person = conn.getStatements(null, RDF.TYPE, VF.createIRI(litdupsNS, "Person"), true);
        int count = 0;
        while (person.hasNext()) {
            count++;
            person.next();
        }
        person.close();
        assertEquals(3, count);

        String query = "PREFIX rdfs: <" + RDFS.NAMESPACE + ">\n" + "PREFIX rdf: <" + RDF.NAMESPACE + ">\n" + "PREFIX lit: <" + litdupsNS + ">\n" + "select * where {?s rdf:type lit:Person.}";
        TupleQuery tupleQuery = conn.prepareTupleQuery(QueryLanguage.SPARQL, query);
        CountTupleHandler tupleHandler = new CountTupleHandler();
        tupleQuery.evaluate(tupleHandler);
        assertEquals(3, tupleHandler.getCount());

        query = "PREFIX rdfs: <" + RDFS.NAMESPACE + ">\n" + "PREFIX rdf: <" + RDF.NAMESPACE + ">\n" + "PREFIX lit: <" + litdupsNS + ">\n" + "select * where {?s rdf:type lit:Student.}";
        tupleQuery = conn.prepareTupleQuery(QueryLanguage.SPARQL, query);
        tupleHandler = new CountTupleHandler();
        tupleQuery.evaluate(tupleHandler);
        assertEquals(2, tupleHandler.getCount());

        query = "PREFIX rdfs: <" + RDFS.NAMESPACE + ">\n" + "PREFIX rdf: <" + RDF.NAMESPACE + ">\n" + "PREFIX lit: <" + litdupsNS + ">\n" + "select * where {?s rdf:type lit:UndergraduateStudent.}";
        tupleQuery = conn.prepareTupleQuery(QueryLanguage.SPARQL, query);
        tupleHandler = new CountTupleHandler();
        tupleQuery.evaluate(tupleHandler);
        assertEquals(1, tupleHandler.getCount());

        conn.close();
    }

    // owl:sameAs inference across two chains (StudentA1=A2=A3, StudentB1=B2=B3);
    // each query fixes a different combination of subject/predicate/object to
    // exercise sameAs expansion in every position.
    public void testSameAs() throws Exception {
        if(internalInferenceEngine == null) {
            return; //infer not supported;
        }
        RepositoryConnection conn = repository.getConnection();
        conn.add(VF.createStatement(VF.createIRI(litdupsNS, "StudentA1"), OWL.SAMEAS, VF.createIRI(litdupsNS, "StudentA2")));
        conn.add(VF.createStatement(VF.createIRI(litdupsNS, "StudentA2"), OWL.SAMEAS, VF.createIRI(litdupsNS, "StudentA3")));
        conn.add(VF.createStatement(VF.createIRI(litdupsNS, "StudentB1"), OWL.SAMEAS, VF.createIRI(litdupsNS, "StudentB2")));
        conn.add(VF.createStatement(VF.createIRI(litdupsNS, "StudentB2"), OWL.SAMEAS, VF.createIRI(litdupsNS, "StudentB3")));
        conn.add(VF.createStatement(VF.createIRI(litdupsNS, "StudentA1"), VF.createIRI(litdupsNS, "pred1"), VF.createIRI(litdupsNS, "StudentB3")));
        conn.add(VF.createStatement(VF.createIRI(litdupsNS, "StudentB1"), VF.createIRI(litdupsNS, "pred2"), VF.createIRI(litdupsNS, "StudentA3")));
        conn.commit();
        conn.close();

        internalInferenceEngine.refreshGraph();

        conn = repository.getConnection();

        // query where finds sameAs for obj, pred specified
        String query = "PREFIX rdfs: <" + RDFS.NAMESPACE + ">\n" + "PREFIX rdf: <" + RDF.NAMESPACE + ">\n" + "PREFIX lit: <" + litdupsNS + ">\n" + "select ?s where {?s lit:pred1 lit:StudentB2.}";
        TupleQuery tupleQuery = conn.prepareTupleQuery(QueryLanguage.SPARQL, query);
        CountTupleHandler tupleHandler = new CountTupleHandler();
        tupleQuery.evaluate(tupleHandler);
        assertEquals(1, tupleHandler.getCount());

        // query where finds sameAs for obj only specified
        query = "PREFIX rdfs: <" + RDFS.NAMESPACE + ">\n" + "PREFIX rdf: <" + RDF.NAMESPACE + ">\n" + "PREFIX lit: <" + litdupsNS + ">\n" + "select ?s where {?s ?p lit:StudentB2.}";
        tupleQuery = conn.prepareTupleQuery(QueryLanguage.SPARQL, query);
        tupleHandler = new CountTupleHandler();
        tupleQuery.evaluate(tupleHandler);
        assertEquals(3, tupleHandler.getCount()); // including sameAs assertions

        // query where finds sameAs for subj, pred specified
        query = "PREFIX rdfs: <" + RDFS.NAMESPACE + ">\n" + "PREFIX rdf: <" + RDF.NAMESPACE + ">\n" + "PREFIX lit: <" + litdupsNS + ">\n" + "select ?s where {lit:StudentB2 lit:pred2 ?s.}";
        tupleQuery = conn.prepareTupleQuery(QueryLanguage.SPARQL, query);
        tupleHandler = new CountTupleHandler();
        tupleQuery.evaluate(tupleHandler);
        assertEquals(1, tupleHandler.getCount()); // including sameAs assertions

        // query where finds sameAs for subj only specified
        query = "PREFIX rdfs: <" + RDFS.NAMESPACE + ">\n" + "PREFIX rdf: <" + RDF.NAMESPACE + ">\n" + "PREFIX lit: <" + litdupsNS + ">\n" + "select ?s where {lit:StudentB2 ?p ?s.}";
        tupleQuery = conn.prepareTupleQuery(QueryLanguage.SPARQL, query);
        tupleHandler = new CountTupleHandler();
        tupleQuery.evaluate(tupleHandler);
        assertEquals(3, tupleHandler.getCount()); // including sameAs assertions

        // query where finds sameAs for subj, obj specified
        query = "PREFIX rdfs: <" + RDFS.NAMESPACE + ">\n" + "PREFIX rdf: <" + RDF.NAMESPACE + ">\n" + "PREFIX lit: <" + litdupsNS + ">\n" + "select ?s where {lit:StudentB2 ?s lit:StudentA2.}";
        tupleQuery = conn.prepareTupleQuery(QueryLanguage.SPARQL, query);
        tupleHandler = new CountTupleHandler();
        tupleQuery.evaluate(tupleHandler);
        assertEquals(1, tupleHandler.getCount());

        conn.close();
    }

    // Loads the namedgraphs.trig fixture and checks that GRAPH-scoped patterns
    // only match statements in their named graph (G1/G2/G3 from the fixture).
    public void testNamedGraphLoad() throws Exception {
        InputStream stream = Thread.currentThread().getContextClassLoader().getResourceAsStream("namedgraphs.trig");
        assertNotNull(stream);
        RepositoryConnection conn = repository.getConnection();
        conn.add(stream, "", RDFFormat.TRIG);
        conn.commit();

        String query = "PREFIX ex: <http://www.example.org/exampleDocument#>\n" +
                "PREFIX voc: <http://www.example.org/vocabulary#>\n" +
                "PREFIX foaf: <http://xmlns.com/foaf/0.1/>\n" +
                "PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>\n" +
                "\n" +
                "SELECT * \n" +
//                "FROM NAMED <http://www.example.org/exampleDocument#G1>\n" +
                "WHERE\n" +
                "{\n" +
                " GRAPH ex:G1\n" +
                " {\n" +
                " ?m voc:name ?name ;\n" +
                " voc:homepage ?hp .\n" +
                " } .\n" +
                " GRAPH ex:G2\n" +
                " {\n" +
                " ?m voc:hasSkill ?skill .\n" +
                " } .\n" +
                "}";
        TupleQuery tupleQuery = conn.prepareTupleQuery(QueryLanguage.SPARQL, query);
        CountTupleHandler tupleHandler = new CountTupleHandler();
        tupleQuery.evaluate(tupleHandler);
//        tupleQuery.evaluate(new PrintTupleHandler());
        assertEquals(1, tupleHandler.getCount());

        query = "PREFIX ex: <http://www.example.org/exampleDocument#>\n" +
                "PREFIX voc: <http://www.example.org/vocabulary#>\n" +
                "PREFIX swp: <http://www.w3.org/2004/03/trix/swp-1/>\n" +
                "PREFIX foaf: <http://xmlns.com/foaf/0.1/>\n" +
                "PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>\n" +
                "\n" +
                "SELECT * \n" +
                "WHERE\n" +
                "{\n" +
                " GRAPH ex:G3\n" +
                " {\n" +
                " ?g swp:assertedBy ?w .\n" +
                " ?w swp:authority ex:Tom .\n" +
                " } .\n" +
                " GRAPH ?g\n" +
                " {\n" +
                " ?m voc:name ?name .\n" +
                " } .\n" +
                "}";
        tupleQuery = conn.prepareTupleQuery(QueryLanguage.SPARQL, query);
        tupleHandler = new CountTupleHandler();
        tupleQuery.evaluate(tupleHandler);
        assertEquals(1, tupleHandler.getCount());

        query = "PREFIX ex: <http://www.example.org/exampleDocument#>\n" +
                "PREFIX voc: <http://www.example.org/vocabulary#>\n" +
                "PREFIX swp: <http://www.w3.org/2004/03/trix/swp-1/>\n" +
                "PREFIX foaf: <http://xmlns.com/foaf/0.1/>\n" +
                "PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>\n" +
                "\n" +
                "SELECT * \n" +
                "WHERE\n" +
                "{\n" +
                " GRAPH ?g\n" +
                " {\n" +
                " ?m voc:name ?name .\n" +
                " } .\n" +
                "}";
        tupleQuery = conn.prepareTupleQuery(QueryLanguage.SPARQL, query);
//        tupleQuery.setBinding(BINDING_DISP_QUERYPLAN, VALUE_FACTORY.createLiteral(true));
        tupleHandler = new CountTupleHandler();
        tupleQuery.evaluate(tupleHandler);
        assertEquals(2, tupleHandler.getCount());

        conn.close();
    }

    // Same fixture as above, but checks named-graph scoping through the
    // getStatements API by passing the G1 context explicitly.
    public void testNamedGraphLoad2() throws Exception {
        InputStream stream = Thread.currentThread().getContextClassLoader().getResourceAsStream("namedgraphs.trig");
        assertNotNull(stream);
        RepositoryConnection conn = repository.getConnection();
        conn.add(stream, "", RDFFormat.TRIG);
        conn.commit();

        RepositoryResult<Statement> statements = conn.getStatements(null, VF.createIRI("http://www.example.org/vocabulary#name"), null, true, VF.createIRI("http://www.example.org/exampleDocument#G1"));
        int count = 0;
        while (statements.hasNext()) {
            statements.next();
            count++;
        }
        statements.close();
        assertEquals(1, count);
        conn.close();
    }

    // Disabled test: named-graph load with an inline authorization on the add call.
//    public void testNamedGraphLoadWInlineAuth() throws Exception {
//        InputStream stream = Thread.currentThread().getContextClassLoader().getResourceAsStream("namedgraphs.trig");
//        assertNotNull(stream);
//        IRI auth1 = vf.createIRI(RdfCloudTripleStoreConstants.AUTH_NAMESPACE, "1");
//        RepositoryConnection conn = repository.getConnection();
//        conn.add(stream, "", RDFFormat.TRIG, auth1);
//        conn.commit();
//
//        String query = "PREFIX ex: <http://www.example.org/exampleDocument#>\n" +
//                "PREFIX voc: <http://www.example.org/vocabulary#>\n" +
//                "PREFIX foaf: <http://xmlns.com/foaf/0.1/>\n" +
//                "PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>\n" +
//                "\n" +
//                "SELECT * \n" +
//                "WHERE\n" +
//                "{\n" +
//                " GRAPH ex:G1\n" +
//                " {\n" +
//                " ?m voc:name ?name ;\n" +
//                " voc:homepage ?hp .\n" +
//                " } .\n" +
//                " GRAPH ex:G2\n" +
//                " {\n" +
//                " ?m voc:hasSkill ?skill .\n" +
//                " } .\n" +
//                "}";
//        TupleQuery tupleQuery = conn.prepareTupleQuery(QueryLanguage.SPARQL, query);
//        tupleQuery.setBinding(RdfCloudTripleStoreConfiguration.CONF_QUERY_AUTH, vf.createLiteral("1"));
//        CountTupleHandler tupleHandler = new CountTupleHandler();
//        tupleQuery.evaluate(tupleHandler);
//        assertEquals(1, tupleHandler.getCount());
//
//        query = "PREFIX ex: <http://www.example.org/exampleDocument#>\n" +
//                "PREFIX voc: <http://www.example.org/vocabulary#>\n" +
//                "PREFIX swp: <http://www.w3.org/2004/03/trix/swp-1/>\n" +
//                "PREFIX foaf: <http://xmlns.com/foaf/0.1/>\n" +
//                "PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>\n" +
//                "\n" +
//                "SELECT * \n" +
//                "WHERE\n" +
//                "{\n" +
//                " GRAPH ex:G3\n" +
//                " {\n" +
//                " ?g swp:assertedBy ?w .\n" +
//                " ?w swp:authority ex:Tom .\n" +
//                " } .\n" +
//                " GRAPH ?g\n" +
//                " {\n" +
//                " ?m voc:name ?name .\n" +
//                " } .\n" +
//                "}";
//
//        tupleQuery = conn.prepareTupleQuery(QueryLanguage.SPARQL, query);
//        tupleHandler = new CountTupleHandler();
//        tupleQuery.evaluate(tupleHandler);
//        assertEquals(0, tupleHandler.getCount());
//
//        conn.close();
//    }

    // Renders a Value for embedding in a SPARQL update string: IRIs get angle
    // brackets, everything else uses its toString form.
    private static String escape(Value r) {
        if (r instanceof IRI) {
            return "<" + r.toString() + ">";
        }
        return r.toString();
    }

    // Builds a SPARQL INSERT DATA update equivalent to the namedgraphs.trig
    // fixture, wrapping context-bearing statements in graph blocks.
    private static String getSparqlUpdate() throws Exception {
        InputStream stream = Thread.currentThread().getContextClassLoader().getResourceAsStream("namedgraphs.trig");
        assertNotNull(stream);
        Model m = Rio.parse(stream, "", RDFFormat.TRIG);

        StringBuffer updateStr = new StringBuffer();
        updateStr.append("INSERT DATA {\n");
        for (Statement s : m){
            if (s.getContext() != null) {
                updateStr.append("graph ");
                updateStr.append(escape(s.getContext()));
                updateStr.append("{ ");
            }
            updateStr.append(escape(s.getSubject()));
            updateStr.append(" ");
            updateStr.append(escape(s.getPredicate()));
            updateStr.append(" ");
            updateStr.append(escape(s.getObject()));
            if (s.getContext() != null){
                updateStr.append("}");
            }
            updateStr.append(" . \n");
        }
        updateStr.append("}");
        return updateStr.toString();
    }

    // Set the persistence visibilities on the config: data written through a SPARQL
    // update picks up the DAO's column visibility ("1|2"), so the query only matches
    // when the matching auth ("2") is bound and returns nothing without it.
    public void testUpdateWAuthOnConfig() throws Exception {
        String sparqlUpdate = getSparqlUpdate();

        RdfCloudTripleStore tstore = new MockRdfCloudStore();
        NamespaceManager nm = new NamespaceManager(tstore.getRyaDAO(), tstore.getConf());
        tstore.setNamespaceManager(nm);
        SailRepository repo = new SailRepository(tstore);
        tstore.getRyaDAO().getConf().setCv("1|2");
        repo.initialize();

        RepositoryConnection conn = repo.getConnection();
        Update u = conn.prepareUpdate(QueryLanguage.SPARQL, sparqlUpdate);
        u.execute();

        String query = "PREFIX ex: <http://www.example.org/exampleDocument#>\n" +
                "PREFIX voc: <http://www.example.org/vocabulary#>\n" +
                "PREFIX foaf: <http://xmlns.com/foaf/0.1/>\n" +
                "PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>\n" +
                "\n" +
                "SELECT * \n" +
//                "FROM NAMED <http://www.example.org/exampleDocument#G1>\n" +
                "WHERE\n" +
                "{\n" +
                " GRAPH ex:G1\n" +
                " {\n" +
                " ?m voc:name ?name ;\n" +
                " voc:homepage ?hp .\n" +
                " } .\n" +
                " GRAPH ex:G2\n" +
                " {\n" +
                " ?m voc:hasSkill ?skill .\n" +
                " } .\n" +
                "}";
        TupleQuery tupleQuery = conn.prepareTupleQuery(QueryLanguage.SPARQL, query);
        tupleQuery.setBinding(RdfCloudTripleStoreConfiguration.CONF_QUERY_AUTH, VF.createLiteral("2"));
        CountTupleHandler tupleHandler = new CountTupleHandler();
        tupleQuery.evaluate(tupleHandler);
        assertEquals(1, tupleHandler.getCount());

        tupleQuery = conn.prepareTupleQuery(QueryLanguage.SPARQL, query); //no auth
        tupleHandler = new CountTupleHandler();
        tupleQuery.evaluate(tupleHandler);
        assertEquals(0, tupleHandler.getCount());

        conn.close();

        repo.shutDown();
    }

    // Same visibility check as testUpdateWAuthOnConfig, but the data is loaded
    // from the TriG fixture through conn.add instead of a SPARQL update.
    public void testNamedGraphLoadWAuth() throws Exception {
        InputStream stream = Thread.currentThread().getContextClassLoader().getResourceAsStream("namedgraphs.trig");
        assertNotNull(stream);

        RdfCloudTripleStore tstore = new MockRdfCloudStore();
        NamespaceManager nm = new NamespaceManager(tstore.getRyaDAO(), tstore.getConf());
        tstore.setNamespaceManager(nm);
        SailRepository repo = new SailRepository(tstore);
        tstore.getRyaDAO().getConf().setCv("1|2");
        repo.initialize();

        RepositoryConnection conn = repo.getConnection();
        conn.add(stream, "", RDFFormat.TRIG);
        conn.commit();

        String query = "PREFIX ex: <http://www.example.org/exampleDocument#>\n" +
                "PREFIX voc: <http://www.example.org/vocabulary#>\n" +
                "PREFIX foaf: <http://xmlns.com/foaf/0.1/>\n" +
                "PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>\n" +
                "\n" +
                "SELECT * \n" +
//                "FROM NAMED <http://www.example.org/exampleDocument#G1>\n" +
                "WHERE\n" +
                "{\n" +
                " GRAPH ex:G1\n" +
                " {\n" +
                " ?m voc:name ?name ;\n" +
                " voc:homepage ?hp .\n" +
                " } .\n" +
                " GRAPH ex:G2\n" +
                " {\n" +
                " ?m voc:hasSkill ?skill .\n" +
                " } .\n" +
                "}";
        TupleQuery tupleQuery = conn.prepareTupleQuery(QueryLanguage.SPARQL, query);
        tupleQuery.setBinding(RdfCloudTripleStoreConfiguration.CONF_QUERY_AUTH, VF.createLiteral("2"));
        CountTupleHandler tupleHandler = new CountTupleHandler();
        tupleQuery.evaluate(tupleHandler);
        assertEquals(1, tupleHandler.getCount());

        tupleQuery = conn.prepareTupleQuery(QueryLanguage.SPARQL, query); //no auth
        tupleHandler = new CountTupleHandler();
        tupleQuery.evaluate(tupleHandler);
        assertEquals(0, tupleHandler.getCount());

        conn.close();

        repo.shutDown();
    }

    // SPARQL INSERT DATA followed by DELETE DATA: the two book3 triples must be
    // visible after the insert and gone after the delete.
    public void testInsertDeleteData() throws Exception {
        RepositoryConnection conn = repository.getConnection();

        String insert = "PREFIX dc: <http://purl.org/dc/elements/1.1/>\n" +
                "INSERT DATA\n" +
                "{ <http://example/book3> dc:title \"A new book\" ;\n" +
                " dc:creator \"A.N.Other\" .\n" +
                "}";
        Update update = conn.prepareUpdate(QueryLanguage.SPARQL, insert);
        update.execute();

        String query = "PREFIX dc: <http://purl.org/dc/elements/1.1/>\n" +
                "select * where { <http://example/book3> ?p ?o. }";
        TupleQuery tupleQuery = conn.prepareTupleQuery(QueryLanguage.SPARQL, query);
        CountTupleHandler tupleHandler = new CountTupleHandler();
        tupleQuery.evaluate(tupleHandler);
        assertEquals(2, tupleHandler.getCount());

        String delete = "PREFIX dc: <http://purl.org/dc/elements/1.1/>\n" +
                "\n" +
                "DELETE DATA\n" +
                "{ <http://example/book3> dc:title \"A new book\" ;\n" +
                " dc:creator \"A.N.Other\" .\n" +
                "}";
        update = conn.prepareUpdate(QueryLanguage.SPARQL, delete);
        update.execute();

        query = "PREFIX dc: <http://purl.org/dc/elements/1.1/>\n" +
                "select * where { <http://example/book3> ?p ?o. }";
        tupleQuery = conn.prepareTupleQuery(QueryLanguage.SPARQL, query);
        tupleHandler = new CountTupleHandler();
        tupleQuery.evaluate(tupleHandler);
        assertEquals(0, tupleHandler.getCount());

        conn.close();
    }

    // SPARQL DELETE/INSERT WHERE against a named graph: the title is replaced and
    // an extra dc:add triple is introduced, so the graph holds 3 triples for book3.
    public void testUpdateData() throws Exception {
        RepositoryConnection conn = repository.getConnection();

        String insert = "PREFIX dc: <http://purl.org/dc/elements/1.1/>\n" +
                "PREFIX ex: <http://example/addresses#>\n" +
                "INSERT DATA\n" +
                "{ GRAPH ex:G1 {\n" +
                "<http://example/book3> dc:title \"A new book\" ;\n" +
                " dc:creator \"A.N.Other\" .\n" +
                "}\n" +
                "}";
        Update update = conn.prepareUpdate(QueryLanguage.SPARQL, insert);
        update.execute();

        String query = "PREFIX dc: <http://purl.org/dc/elements/1.1/>\n" +
                "select * where { <http://example/book3> ?p ?o. }";
        TupleQuery tupleQuery = conn.prepareTupleQuery(QueryLanguage.SPARQL, query);
        CountTupleHandler tupleHandler = new CountTupleHandler();
        tupleQuery.evaluate(tupleHandler);
        assertEquals(2, tupleHandler.getCount());

        String insdel = "PREFIX dc: <http://purl.org/dc/elements/1.1/>\n" +
                "\n" +
                "WITH <http://example/addresses#G1>\n" +
                "DELETE { ?book dc:title ?title }\n" +
                "INSERT { ?book dc:title \"A newer book\"." +
                " ?book dc:add \"Additional Info\" }\n" +
                "WHERE\n" +
                " { ?book dc:creator \"A.N.Other\" ;\n" +
                " dc:title ?title .\n" +
                " }";
        update = conn.prepareUpdate(QueryLanguage.SPARQL, insdel);
        update.execute();

        query = "PREFIX dc: <http://purl.org/dc/elements/1.1/>\n" +
                "PREFIX ex: <http://example/addresses#>\n" +
                "select * where { GRAPH ex:G1 {<http://example/book3> ?p ?o. } }";
        tupleQuery = conn.prepareTupleQuery(QueryLanguage.SPARQL, query);
        tupleHandler = new CountTupleHandler();
        tupleQuery.evaluate(tupleHandler);
        assertEquals(3, tupleHandler.getCount());

        conn.close();
    }

    // conn.clear(context) must remove only the given named graph: 4 triples drop to
    // 2 after clearing G2 and to 0 after clearing G1.
    public void testClearGraph() throws Exception {
        RepositoryConnection conn = repository.getConnection();

        String insert = "PREFIX dc: <http://purl.org/dc/elements/1.1/>\n" +
                "PREFIX ex: <http://example/addresses#>\n" +
                "INSERT DATA\n" +
                "{ GRAPH ex:G1 {\n" +
                "<http://example/book3> dc:title \"A new book\" ;\n" +
                " dc:creator \"A.N.Other\" .\n" +
                "}\n" +
                "}";
        Update update = conn.prepareUpdate(QueryLanguage.SPARQL, insert);
        update.execute();

        insert = "PREFIX dc: <http://purl.org/dc/elements/1.1/>\n" +
                "PREFIX ex: <http://example/addresses#>\n" +
                "INSERT DATA\n" +
                "{ GRAPH ex:G2 {\n" +
                "<http://example/book3> dc:title \"A new book\" ;\n" +
                " dc:creator \"A.N.Other\" .\n" +
                "}\n" +
                "}";
        update = conn.prepareUpdate(QueryLanguage.SPARQL, insert);
        update.execute();

        String query = "PREFIX dc: <http://purl.org/dc/elements/1.1/>\n" +
                "select * where { <http://example/book3> ?p ?o. }";
        TupleQuery tupleQuery = conn.prepareTupleQuery(QueryLanguage.SPARQL, query);
        CountTupleHandler tupleHandler = new CountTupleHandler();
        tupleQuery.evaluate(tupleHandler);
        assertEquals(4, tupleHandler.getCount());

        tupleHandler = new CountTupleHandler();
        conn.clear(VF.createIRI("http://example/addresses#G2"));
        tupleQuery.evaluate(tupleHandler);
        assertEquals(2, tupleHandler.getCount());

        tupleHandler = new CountTupleHandler();
        conn.clear(VF.createIRI("http://example/addresses#G1"));
        tupleQuery.evaluate(tupleHandler);
        assertEquals(0, tupleHandler.getCount());

        conn.close();
    }

    // conn.clear() with no context must remove all statements across every graph.
    public void testClearAllGraph() throws Exception {
        RepositoryConnection conn = repository.getConnection();

        String insert = "PREFIX dc: <http://purl.org/dc/elements/1.1/>\n" +
                "PREFIX ex: <http://example/addresses#>\n" +
                "INSERT DATA\n" +
                "{ GRAPH ex:G1 {\n" +
                "<http://example/book3> dc:title \"A new book\" ;\n" +
                " dc:creator \"A.N.Other\" .\n" +
                "}\n" +
                "}";
        Update update = conn.prepareUpdate(QueryLanguage.SPARQL, insert);
        update.execute();

        insert = "PREFIX dc: <http://purl.org/dc/elements/1.1/>\n" +
                "PREFIX ex: <http://example/addresses#>\n" +
                "INSERT DATA\n" +
                "{ GRAPH ex:G2 {\n" +
                "<http://example/book3> dc:title \"A new book\" ;\n" +
                " dc:creator \"A.N.Other\" .\n" +
                "}\n" +
                "}";
        update = conn.prepareUpdate(QueryLanguage.SPARQL, insert);
        update.execute();

        String query = "PREFIX dc: <http://purl.org/dc/elements/1.1/>\n" +
                "select * where { <http://example/book3> ?p ?o. }";
        TupleQuery tupleQuery = conn.prepareTupleQuery(QueryLanguage.SPARQL, query);
        CountTupleHandler tupleHandler = new CountTupleHandler();
        tupleQuery.evaluate(tupleHandler);
        assertEquals(4, tupleHandler.getCount());

        tupleHandler = new CountTupleHandler();
        conn.clear();
        tupleQuery.evaluate(tupleHandler);
        assertEquals(0, tupleHandler.getCount());

        conn.close();
    }

    // SPARQL DROP GRAPH must behave like clear(context): dropping G2 then G1
    // takes the result count from 4 to 2 to 0.
    public void testDropGraph() throws Exception {
        RepositoryConnection conn = repository.getConnection();

        String insert = "PREFIX dc: <http://purl.org/dc/elements/1.1/>\n" +
                "PREFIX ex: <http://example/addresses#>\n" +
                "INSERT DATA\n" +
                "{ GRAPH ex:G1 {\n" +
                "<http://example/book3> dc:title \"A new book\" ;\n" +
                " dc:creator \"A.N.Other\" .\n" +
                "}\n" +
                "}";
        Update update = conn.prepareUpdate(QueryLanguage.SPARQL, insert);
        update.execute();

        insert = "PREFIX dc: <http://purl.org/dc/elements/1.1/>\n" +
                "PREFIX ex: <http://example/addresses#>\n" +
                "INSERT DATA\n" +
                "{ GRAPH ex:G2 {\n" +
                "<http://example/book3> dc:title \"A new book\" ;\n" +
                " dc:creator \"A.N.Other\" .\n" +
                "}\n" +
                "}";
        update = conn.prepareUpdate(QueryLanguage.SPARQL, insert);
        update.execute();

        String query = "PREFIX dc: <http://purl.org/dc/elements/1.1/>\n" +
                "select * where { <http://example/book3> ?p ?o. }";
        TupleQuery tupleQuery = conn.prepareTupleQuery(QueryLanguage.SPARQL, query);
        CountTupleHandler tupleHandler = new CountTupleHandler();
        tupleQuery.evaluate(tupleHandler);
        assertEquals(4, tupleHandler.getCount());

        tupleHandler = new CountTupleHandler();
        String drop = "PREFIX ex: <http://example/addresses#>\n" +
                "DROP GRAPH ex:G2 ";
        update = conn.prepareUpdate(QueryLanguage.SPARQL, drop);
        update.execute();
        tupleQuery.evaluate(tupleHandler);
        assertEquals(2, tupleHandler.getCount());

        tupleHandler = new CountTupleHandler();
        drop = "PREFIX ex: <http://example/addresses#>\n" +
                "DROP GRAPH ex:G1 ";
        update = conn.prepareUpdate(QueryLanguage.SPARQL, drop);
        update.execute();
        tupleQuery.evaluate(tupleHandler);
        assertEquals(0, tupleHandler.getCount());

        conn.close();
    }

    // Result handler that counts solutions; used throughout these tests to assert
    // how many rows a query produced.
    public static class CountTupleHandler implements TupleQueryResultHandler {

        // number of solutions seen so far
        int count = 0;

        @Override
        public void startQueryResult(List<String> strings) throws TupleQueryResultHandlerException {
        }

        @Override
        public void endQueryResult() throws TupleQueryResultHandlerException {
        }

        @Override
        public void handleSolution(BindingSet bindingSet) throws TupleQueryResultHandlerException {
            count++;
        }

        public int getCount() {
            return count;
        }

        @Override
        public void handleBoolean(boolean arg0) throws QueryResultHandlerException {
        }

        @Override
        public void handleLinks(List<String> arg0) throws QueryResultHandlerException {
        }
    }

    // Debug-only handler that prints each solution to stdout (referenced from
    // commented-out code above).
    private static class PrintTupleHandler implements TupleQueryResultHandler {

        @Override
        public void startQueryResult(List<String> strings) throws TupleQueryResultHandlerException {
        }

        @Override
        public void endQueryResult() throws TupleQueryResultHandlerException {
        }

        @Override
        public void handleSolution(BindingSet bindingSet) throws TupleQueryResultHandlerException {
            System.out.println(bindingSet);
        }

        @Override
        public void handleBoolean(boolean arg0) throws QueryResultHandlerException {
        }

        @Override
        public void handleLinks(List<String> arg0) throws QueryResultHandlerException {
        }
    }

    // Test double: a RdfCloudTripleStore backed by an in-memory Accumulo
    // MockInstance, with inference enabled; also publishes the engine via the
    // enclosing class's internalInferenceEngine field for refreshGraph() calls.
    public class MockRdfCloudStore extends RdfCloudTripleStore {

        public MockRdfCloudStore() {
            super();
            Instance instance = new MockInstance();
            try {
                AccumuloRdfConfiguration conf = new AccumuloRdfConfiguration();
                conf.setInfer(true);
                setConf(conf);
                Connector connector = instance.getConnector("", "");
                AccumuloRyaDAO cdao = new AccumuloRyaDAO();
                cdao.setConf(conf);
                cdao.setConnector(connector);
                setRyaDAO(cdao);
                inferenceEngine = new InferenceEngine();
                inferenceEngine.setRyaDAO(cdao);
                inferenceEngine.setRefreshGraphSchedule(5000); //every 5 sec
                inferenceEngine.setConf(conf);
                setInferenceEngine(inferenceEngine);
                internalInferenceEngine = inferenceEngine;
            } catch (Exception e) {
                e.printStackTrace();
            }
        }
    }
}
targeter21/drools
kie-pmml-new/kie-pmml-models/kie-pmml-models-regression/kie-pmml-models-regression-compiler/src/test/resources/KiePMMLRegressionTableRegression1.java
/*
 * Copyright 2020 Red Hat, Inc. and/or its affiliates.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.kie.pmml.models.regression.evaluator;

import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.concurrent.atomic.AtomicReference;

import org.kie.pmml.models.regression.model.KiePMMLRegressionTable;

/**
 * Test-resource fixture: a hand-written example of the source that the
 * regression-model code generator is expected to emit for one regression
 * table. The constructor registers one evaluation function per numeric
 * predictor, categorical predictor and predictor term under the field
 * names used by the model (maps are inherited from
 * {@link KiePMMLRegressionTable}).
 *
 * NOTE(review): since this file is compared against generated output,
 * its oddities (duplicated unreachable branches, identical method bodies)
 * must not be "cleaned up" without changing the generator as well.
 */
public class KiePMMLRegressionTableRegression1 extends KiePMMLRegressionTable {

    public KiePMMLRegressionTableRegression1() {
        // Inherited fields: the regression intercept and the target field name.
        intercept = 3.5;
        targetField = "targetField";
        // Field name -> evaluation function; note the mapping is deliberately
        // not in index order (NumPred-2 -> predictor 4, etc.).
        numericFunctionMap.put("NumPred-2", this::evaluateNumericPredictor4);
        numericFunctionMap.put("NumPred-3", this::evaluateNumericPredictor1);
        numericFunctionMap.put("NumPred-0", this::evaluateNumericPredictor2);
        numericFunctionMap.put("NumPred-1", this::evaluateNumericPredictor3);
        categoricalFunctionMap.put("CatPred-2", this::evaluateCategoricalPredictor1);
        categoricalFunctionMap.put("CatPred-1", this::evaluateCategoricalPredictor2);
        categoricalFunctionMap.put("CatPred-0", this::evaluateCategoricalPredictor3);
        predictorTermsFunctionMap.put("PredTerm-2", this::evaluatePredictorTerm3);
        predictorTermsFunctionMap.put("PredTerm-0", this::evaluatePredictorTerm1);
        predictorTermsFunctionMap.put("PredTerm-1", this::evaluatePredictorTerm2);
    }

    /** Fixed target category for this table. */
    @Override
    public Object getTargetCategory() {
        return "professional";
    }

    /**
     * Normalizes the accumulated result in place.
     * The formula 0.5 + (1/pi) * atan(y) matches the PMML "cauchit"
     * normalization method — presumably the model's normalizationMethod;
     * confirm against the originating PMML document.
     */
    @Override
    protected void updateResult(final AtomicReference<Double> toUpdate) {
        toUpdate.updateAndGet(y -> 0.5 + (1 / Math.PI) * Math.atan(y));
    }

    /** Linear term: coefficient * input (exponent 1 is elided). */
    private double evaluateNumericPredictor1(double input) {
        double coefficient = 32.55;
        // Ignoring exponent because it is 1
        return input * coefficient;
    }

    /** Power term: coefficient * input^exponent. */
    private double evaluateNumericPredictor2(double input) {
        double coefficient = 13.11;
        double exponent = 2.0;
        // Considering exponent because it is != 1
        return Math.pow(input, exponent) * coefficient;
    }

    /** Power term: coefficient * input^exponent (same constants as predictor 2). */
    private double evaluateNumericPredictor3(double input) {
        double coefficient = 13.11;
        double exponent = 2.0;
        // Considering exponent because it is != 1
        return Math.pow(input, exponent) * coefficient;
    }

    /** Power term: coefficient * input^exponent (same constants as predictor 2). */
    private double evaluateNumericPredictor4(double input) {
        double coefficient = 13.11;
        double exponent = 2.0;
        // Considering exponent because it is != 1
        return Math.pow(input, exponent) * coefficient;
    }

    /**
     * Categorical lookup: 3.46 when the input equals 27.12, else 0.0.
     * NOTE(review): the else-if repeats the same condition and is
     * unreachable — kept verbatim because this fixture must mirror the
     * generator's output; confirm against the codegen template.
     */
    private double evaluateCategoricalPredictor1(Object input) {
        if (Objects.equals(27.12, input))
            return 3.46;
        else if (Objects.equals(27.12, input))
            return 3.46;
        else
            return 0.0;
    }

    /** Same lookup as predictor 1 (duplicate branch kept verbatim, see above). */
    private double evaluateCategoricalPredictor2(Object input) {
        if (Objects.equals(27.12, input))
            return 3.46;
        else if (Objects.equals(27.12, input))
            return 3.46;
        else
            return 0.0;
    }

    /** Same lookup as predictor 1 (duplicate branch kept verbatim, see above). */
    private double evaluateCategoricalPredictor3(Object input) {
        if (Objects.equals(27.12, input))
            return 3.46;
        else if (Objects.equals(27.12, input))
            return 3.46;
        else
            return 0.0;
    }

    /**
     * Interaction term: coefficient times the product of the referenced
     * fields' already-computed values; fields absent from resultMap are
     * skipped (i.e. treated as factor 1.0).
     */
    private double evaluatePredictorTerm1(Map<String, Object> resultMap) {
        final AtomicReference<Double> result = new AtomicReference<>(1.0);
        List<String> fieldRefs = Arrays.asList("CatPred-0", "NumPred-3");
        fieldRefs.forEach(fldRef -> {
            if (resultMap.containsKey(fldRef)) {
                result.set(result.get() * (Double) resultMap.get(fldRef));
            }
        });
        double coefficient = 32.29;
        return result.get() * coefficient;
    }

    /** Identical to predictor term 1 (fixture mirrors generator output). */
    private double evaluatePredictorTerm2(Map<String, Object> resultMap) {
        final AtomicReference<Double> result = new AtomicReference<>(1.0);
        List<String> fieldRefs = Arrays.asList("CatPred-0", "NumPred-3");
        fieldRefs.forEach(fldRef -> {
            if (resultMap.containsKey(fldRef)) {
                result.set(result.get() * (Double) resultMap.get(fldRef));
            }
        });
        double coefficient = 32.29;
        return result.get() * coefficient;
    }

    /** Identical to predictor term 1 (fixture mirrors generator output). */
    private double evaluatePredictorTerm3(Map<String, Object> resultMap) {
        final AtomicReference<Double> result = new AtomicReference<>(1.0);
        List<String> fieldRefs = Arrays.asList("CatPred-0", "NumPred-3");
        fieldRefs.forEach(fldRef -> {
            if (resultMap.containsKey(fldRef)) {
                result.set(result.get() * (Double) resultMap.get(fldRef));
            }
        });
        double coefficient = 32.29;
        return result.get() * coefficient;
    }
}
ranji1221/uekcloud
volador/src/main/java/org/ranji/lemon/volador/persist/personal/impl/SignInDapImpl.java
package org.ranji.lemon.volador.persist.personal.impl;

import java.util.List;

import org.ranji.lemon.core.persist.impl.GenericDaoImpl;
import org.ranji.lemon.volador.model.personal.SignIn;
import org.ranji.lemon.volador.persist.personal.prototype.ISignInDao;
import org.springframework.stereotype.Repository;

/**
 * MyBatis-backed DAO for {@link SignIn} records, extending the generic
 * CRUD base {@link GenericDaoImpl} with a per-user lookup.
 *
 * NOTE(review): the class name "SignInDapImpl" is presumably a typo for
 * "SignInDaoImpl"; it is not renamed here because the type name and the
 * bean name "VoladorSignInDaoImpl" may be referenced elsewhere — confirm
 * before refactoring.
 */
@Repository("VoladorSignInDaoImpl")
public class SignInDapImpl extends GenericDaoImpl<SignIn, Integer> implements ISignInDao {

    /**
     * Loads all sign-in records for the given user.
     *
     * @param userId id of the user whose sign-ins are requested
     * @return the rows returned by the mapper statement
     *         "&lt;namespace&gt;.findSignInByUserId" — presumably one per
     *         sign-in event; verify against the mapper XML
     */
    @Override
    public List<SignIn> findSignInByUserId(int userId) {
        // sqlSessionTemplate and typeNameSpace are inherited from GenericDaoImpl.
        return sqlSessionTemplate.selectList(typeNameSpace + ".findSignInByUserId", userId);
    }
}
pfistfl/openml-defaults
examples/plot/plot_symbolic_defaults.py
import argparse
import matplotlib.pyplot as plt
import logging
import seaborn as sns
import openmldefaults
import os
import pandas as pd
import pickle


def parse_args():
    """Build the command-line interface for this script."""
    parser = argparse.ArgumentParser(description='Creates an ARFF file')
    parser.add_argument('--results_directory', type=str,
                        default=os.path.expanduser('~/experiments/openml-defaults/symbolic_defaults/svc/'))
    parser.add_argument('--scoring', type=str, default='predictive_accuracy')
    parser.add_argument('--n_defaults_in_file', type=int, default=32)
    return parser.parse_args()


def run(args):
    """Scan each pickled result file and pick its best symbolic default.

    NOTE(review): `best` and the parsed `task_id` are computed but never
    used afterwards — the plotting part of this script appears unfinished;
    confirm against the project history before relying on it.
    """
    root_logger = logging.getLogger()
    root_logger.setLevel(logging.INFO)

    for result_file in os.listdir(args.results_directory):
        result_path = os.path.join(args.results_directory, result_file)
        stem = os.path.splitext(result_file)[0]
        task_id = stem.split('_')[-1]

        with open(result_path, 'rb') as handle:
            results = pickle.load(handle)

        # Highest average performance wins; the first candidate wins ties,
        # and `best` stays None when the list is empty.
        best = None
        for candidate in results['symbolic_defaults']:
            if best is None or candidate['avg_performance'] > best['avg_performance']:
                best = candidate

        if task_id != 'all':
            task_id = int(task_id)  # throws an error in case of an unexpected file


if __name__ == '__main__':
    pd.options.mode.chained_assignment = 'raise'
    run(parse_args())
workshopper/levelmeup
lib/exercise.js
<reponame>workshopper/levelmeup var path = require('path') var os = require('os') var level = require('level') var rimraf = require('rimraf') var deepDiff = require('deep-diff') var execModule = require('exec-module') var formatDiff = require('./formatDiff') function setupDb (name) { // eslint-disable-next-line node/no-deprecated-api var dir = path.join(os.tmpdir ? os.tmpdir() : os.tmpDir(), '~levelmeup_' + name + '_' + process.pid) rimraf.sync(dir) return dir } function exec (opts, file, callback) { var dbDir = setupDb('1') execModule(file, { setUp: function (file, opt, callback) { var db = level(dbDir) opts.prepare(db, function (err) { if (err) { return callback(err) } db.close(callback) }) }, exec: function (file, opt, mod, callback) { opts.exec(dbDir, mod, callback) }, timeout: 5000, tearDown: function (file, opt, err, data, callback) { var db = level(dbDir, function (err) { if (err && err.type === 'OpenError') { return callback(new Error(file + '\n\n{error.db.not_closed}')) } else if (err) { return callback(new Error(file + '\n\n{error.mod.unexpected}:\n\n```\n' + ((err && err.stack) || err) + '\n```')) } db.close(function () { try { rimraf.sync(dbDir) } catch (e) { // eat rimraf errors } callback(null, data) }) }) } }, callback) } module.exports = function (opt) { return { problem: { file: path.join(opt.dir, 'problem.{lang}.md') }, solution: { file: path.join(opt.dir, 'solution.js'), type: 'js' }, verify: function (args, callback) { var cmd = opt.init() var finish = function (err) { if (err) { callback(null, false, err) } callback(null, true) } exec(cmd, path.join(opt.dir, 'solution.js'), function (err, data) { if (err) { return finish('Error in this workshopper!\n\n```\n' + err.stack + '\n```') } exec(cmd, path.resolve(process.cwd(), args[0]), function (err, data2) { if (err) { return finish('```\n' + err.stack + '\n```') } var diffs = deepDiff(data, data2) if (diffs === undefined) { return finish() } try { finish('{error.not_same}:\n' + formatDiff(diffs)) } 
catch (e) { console.log(e) } }) }) }, run: function (args, callback) { var cmd = opt.init() exec(cmd, path.resolve(process.cwd(), args[0]), function (err, data) { if (err) { return callback(err, false) } console.log(JSON.stringify(data, null, 2)) callback(null, true) }) } } }
TheEpicBlock/Flywheel
src/main/java/com/jozufozu/flywheel/core/shader/ExtensibleGlProgram.java
<gh_stars>0 package com.jozufozu.flywheel.core.shader; import java.util.ArrayList; import java.util.List; import javax.annotation.Nonnull; import com.jozufozu.flywheel.backend.ShaderContext; import com.jozufozu.flywheel.backend.gl.shader.GlProgram; import com.jozufozu.flywheel.core.shader.extension.IExtensionInstance; import net.minecraft.resources.ResourceLocation; /** * A shader program that be arbitrarily "extended". This class can take in any number of program extensions, and * will initialize them and then call their {@link IExtensionInstance#bind() bind} function every subsequent time this * program is bound. An "extension" is something that interacts with the shader program in a way that is invisible to * the caller using the program. This is used by some programs to implement the different fog modes. Other uses might * include binding extra textures to allow for blocks to have normal maps, for example. As the extensions are * per-program, this also allows for same extra specialization within a * {@link ShaderContext ShaderContext}. */ public class ExtensibleGlProgram extends GlProgram { protected final List<IExtensionInstance> extensions = new ArrayList<>(); public ExtensibleGlProgram(ResourceLocation name, int handle) { super(name, handle); } @Override public void bind() { super.bind(); extensions.forEach(IExtensionInstance::bind); } @Override public String toString() { StringBuilder builder = new StringBuilder(); builder.append("program ") .append(name) .append('['); for (IExtensionInstance extension : extensions) { builder.append(extension) .append('+'); } builder.append(']'); return builder.toString(); } /** * A factory interface to create {@link GlProgram}s parameterized by a list of extensions. This doesn't necessarily * have to return an {@link ExtensibleGlProgram} if implementors want more flexibility for whatever reason. */ public interface Factory<P extends GlProgram> { @Nonnull P create(ResourceLocation name, int handle); } }
alpha-asp/Alpha
alpha-api/src/main/java/at/ac/tuwien/kr/alpha/api/programs/analysis/DependencyGraph.java
<filename>alpha-api/src/main/java/at/ac/tuwien/kr/alpha/api/programs/analysis/DependencyGraph.java
package at.ac.tuwien.kr.alpha.api.programs.analysis;

import at.ac.tuwien.kr.alpha.api.programs.Predicate;

import java.util.List;
import java.util.Map;

/**
 * The predicate-level dependency graph of an ASP program.
 * Each node of the dependency graph represents one {@link Predicate}.
 * An edge from node A to node B indicates that predicate B depends on predicate A. Edges also store a "sign" (true or false)
 * indicating whether a dependency is positive or negative (i.e. the predicate A occurs negated in the rule body deriving B).
 *
 * Copyright (c) 2021, the Alpha Team.
 */
public interface DependencyGraph {

	/**
	 * Looks up the graph node representing the given predicate.
	 *
	 * @param p the predicate to look up
	 * @return the corresponding node; behaviour for predicates not in the
	 *         graph is implementation-defined — presumably {@code null},
	 *         confirm against implementations
	 */
	Node getNodeForPredicate(Predicate p);

	/**
	 * Returns the adjacency structure of the graph: for each node, the list
	 * of its outgoing edges.
	 *
	 * NOTE(review): "Adjancency" is a spelling mistake for "Adjacency", but
	 * the name is part of the public API and cannot be changed here without
	 * breaking implementers and callers.
	 */
	Map<Node, List<Edge>> getAdjancencyMap();

	/** A directed, signed edge of the dependency graph. */
	interface Edge {

		/** The node this edge points to (the dependent predicate). */
		Node getTarget();

		/** True for a positive dependency, false for a negative one. */
		boolean getSign();
	}

	/** A node of the dependency graph, wrapping one predicate. */
	interface Node {

		/** The predicate this node represents. */
		Predicate getPredicate();

		/** A human-readable label for this node. */
		String getLabel();
	}
}
heaths/azure-sdk-for-go
sdk/resourcemanager/avs/armavs/zz_generated_response_types.go
//go:build go1.16 // +build go1.16 // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See License.txt in the project root for license information. // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. package armavs import ( "context" armruntime "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime" "net/http" "time" ) // AddonsClientCreateOrUpdatePollerResponse contains the response from method AddonsClient.CreateOrUpdate. type AddonsClientCreateOrUpdatePollerResponse struct { // Poller contains an initialized poller. Poller *AddonsClientCreateOrUpdatePoller // RawResponse contains the underlying HTTP response. RawResponse *http.Response } // PollUntilDone will poll the service endpoint until a terminal state is reached or an error is received. // freq: the time to wait between intervals in absence of a Retry-After header. Allowed minimum is one second. // A good starting value is 30 seconds. Note that some resources might benefit from a different value. func (l AddonsClientCreateOrUpdatePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (AddonsClientCreateOrUpdateResponse, error) { respType := AddonsClientCreateOrUpdateResponse{} resp, err := l.Poller.pt.PollUntilDone(ctx, freq, &respType.Addon) if err != nil { return respType, err } respType.RawResponse = resp return respType, nil } // Resume rehydrates a AddonsClientCreateOrUpdatePollerResponse from the provided client and resume token. 
func (l *AddonsClientCreateOrUpdatePollerResponse) Resume(ctx context.Context, client *AddonsClient, token string) error { pt, err := armruntime.NewPollerFromResumeToken("AddonsClient.CreateOrUpdate", token, client.pl) if err != nil { return err } poller := &AddonsClientCreateOrUpdatePoller{ pt: pt, } resp, err := poller.Poll(ctx) if err != nil { return err } l.Poller = poller l.RawResponse = resp return nil } // AddonsClientCreateOrUpdateResponse contains the response from method AddonsClient.CreateOrUpdate. type AddonsClientCreateOrUpdateResponse struct { AddonsClientCreateOrUpdateResult // RawResponse contains the underlying HTTP response. RawResponse *http.Response } // AddonsClientCreateOrUpdateResult contains the result from method AddonsClient.CreateOrUpdate. type AddonsClientCreateOrUpdateResult struct { Addon } // AddonsClientDeletePollerResponse contains the response from method AddonsClient.Delete. type AddonsClientDeletePollerResponse struct { // Poller contains an initialized poller. Poller *AddonsClientDeletePoller // RawResponse contains the underlying HTTP response. RawResponse *http.Response } // PollUntilDone will poll the service endpoint until a terminal state is reached or an error is received. // freq: the time to wait between intervals in absence of a Retry-After header. Allowed minimum is one second. // A good starting value is 30 seconds. Note that some resources might benefit from a different value. func (l AddonsClientDeletePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (AddonsClientDeleteResponse, error) { respType := AddonsClientDeleteResponse{} resp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil) if err != nil { return respType, err } respType.RawResponse = resp return respType, nil } // Resume rehydrates a AddonsClientDeletePollerResponse from the provided client and resume token. 
func (l *AddonsClientDeletePollerResponse) Resume(ctx context.Context, client *AddonsClient, token string) error { pt, err := armruntime.NewPollerFromResumeToken("AddonsClient.Delete", token, client.pl) if err != nil { return err } poller := &AddonsClientDeletePoller{ pt: pt, } resp, err := poller.Poll(ctx) if err != nil { return err } l.Poller = poller l.RawResponse = resp return nil } // AddonsClientDeleteResponse contains the response from method AddonsClient.Delete. type AddonsClientDeleteResponse struct { // RawResponse contains the underlying HTTP response. RawResponse *http.Response } // AddonsClientGetResponse contains the response from method AddonsClient.Get. type AddonsClientGetResponse struct { AddonsClientGetResult // RawResponse contains the underlying HTTP response. RawResponse *http.Response } // AddonsClientGetResult contains the result from method AddonsClient.Get. type AddonsClientGetResult struct { Addon } // AddonsClientListResponse contains the response from method AddonsClient.List. type AddonsClientListResponse struct { AddonsClientListResult // RawResponse contains the underlying HTTP response. RawResponse *http.Response } // AddonsClientListResult contains the result from method AddonsClient.List. type AddonsClientListResult struct { AddonList } // AuthorizationsClientCreateOrUpdatePollerResponse contains the response from method AuthorizationsClient.CreateOrUpdate. type AuthorizationsClientCreateOrUpdatePollerResponse struct { // Poller contains an initialized poller. Poller *AuthorizationsClientCreateOrUpdatePoller // RawResponse contains the underlying HTTP response. RawResponse *http.Response } // PollUntilDone will poll the service endpoint until a terminal state is reached or an error is received. // freq: the time to wait between intervals in absence of a Retry-After header. Allowed minimum is one second. // A good starting value is 30 seconds. Note that some resources might benefit from a different value. 
func (l AuthorizationsClientCreateOrUpdatePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (AuthorizationsClientCreateOrUpdateResponse, error) { respType := AuthorizationsClientCreateOrUpdateResponse{} resp, err := l.Poller.pt.PollUntilDone(ctx, freq, &respType.ExpressRouteAuthorization) if err != nil { return respType, err } respType.RawResponse = resp return respType, nil } // Resume rehydrates a AuthorizationsClientCreateOrUpdatePollerResponse from the provided client and resume token. func (l *AuthorizationsClientCreateOrUpdatePollerResponse) Resume(ctx context.Context, client *AuthorizationsClient, token string) error { pt, err := armruntime.NewPollerFromResumeToken("AuthorizationsClient.CreateOrUpdate", token, client.pl) if err != nil { return err } poller := &AuthorizationsClientCreateOrUpdatePoller{ pt: pt, } resp, err := poller.Poll(ctx) if err != nil { return err } l.Poller = poller l.RawResponse = resp return nil } // AuthorizationsClientCreateOrUpdateResponse contains the response from method AuthorizationsClient.CreateOrUpdate. type AuthorizationsClientCreateOrUpdateResponse struct { AuthorizationsClientCreateOrUpdateResult // RawResponse contains the underlying HTTP response. RawResponse *http.Response } // AuthorizationsClientCreateOrUpdateResult contains the result from method AuthorizationsClient.CreateOrUpdate. type AuthorizationsClientCreateOrUpdateResult struct { ExpressRouteAuthorization } // AuthorizationsClientDeletePollerResponse contains the response from method AuthorizationsClient.Delete. type AuthorizationsClientDeletePollerResponse struct { // Poller contains an initialized poller. Poller *AuthorizationsClientDeletePoller // RawResponse contains the underlying HTTP response. RawResponse *http.Response } // PollUntilDone will poll the service endpoint until a terminal state is reached or an error is received. // freq: the time to wait between intervals in absence of a Retry-After header. 
Allowed minimum is one second. // A good starting value is 30 seconds. Note that some resources might benefit from a different value. func (l AuthorizationsClientDeletePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (AuthorizationsClientDeleteResponse, error) { respType := AuthorizationsClientDeleteResponse{} resp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil) if err != nil { return respType, err } respType.RawResponse = resp return respType, nil } // Resume rehydrates a AuthorizationsClientDeletePollerResponse from the provided client and resume token. func (l *AuthorizationsClientDeletePollerResponse) Resume(ctx context.Context, client *AuthorizationsClient, token string) error { pt, err := armruntime.NewPollerFromResumeToken("AuthorizationsClient.Delete", token, client.pl) if err != nil { return err } poller := &AuthorizationsClientDeletePoller{ pt: pt, } resp, err := poller.Poll(ctx) if err != nil { return err } l.Poller = poller l.RawResponse = resp return nil } // AuthorizationsClientDeleteResponse contains the response from method AuthorizationsClient.Delete. type AuthorizationsClientDeleteResponse struct { // RawResponse contains the underlying HTTP response. RawResponse *http.Response } // AuthorizationsClientGetResponse contains the response from method AuthorizationsClient.Get. type AuthorizationsClientGetResponse struct { AuthorizationsClientGetResult // RawResponse contains the underlying HTTP response. RawResponse *http.Response } // AuthorizationsClientGetResult contains the result from method AuthorizationsClient.Get. type AuthorizationsClientGetResult struct { ExpressRouteAuthorization } // AuthorizationsClientListResponse contains the response from method AuthorizationsClient.List. type AuthorizationsClientListResponse struct { AuthorizationsClientListResult // RawResponse contains the underlying HTTP response. 
RawResponse *http.Response } // AuthorizationsClientListResult contains the result from method AuthorizationsClient.List. type AuthorizationsClientListResult struct { ExpressRouteAuthorizationList } // CloudLinksClientCreateOrUpdatePollerResponse contains the response from method CloudLinksClient.CreateOrUpdate. type CloudLinksClientCreateOrUpdatePollerResponse struct { // Poller contains an initialized poller. Poller *CloudLinksClientCreateOrUpdatePoller // RawResponse contains the underlying HTTP response. RawResponse *http.Response } // PollUntilDone will poll the service endpoint until a terminal state is reached or an error is received. // freq: the time to wait between intervals in absence of a Retry-After header. Allowed minimum is one second. // A good starting value is 30 seconds. Note that some resources might benefit from a different value. func (l CloudLinksClientCreateOrUpdatePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (CloudLinksClientCreateOrUpdateResponse, error) { respType := CloudLinksClientCreateOrUpdateResponse{} resp, err := l.Poller.pt.PollUntilDone(ctx, freq, &respType.CloudLink) if err != nil { return respType, err } respType.RawResponse = resp return respType, nil } // Resume rehydrates a CloudLinksClientCreateOrUpdatePollerResponse from the provided client and resume token. func (l *CloudLinksClientCreateOrUpdatePollerResponse) Resume(ctx context.Context, client *CloudLinksClient, token string) error { pt, err := armruntime.NewPollerFromResumeToken("CloudLinksClient.CreateOrUpdate", token, client.pl) if err != nil { return err } poller := &CloudLinksClientCreateOrUpdatePoller{ pt: pt, } resp, err := poller.Poll(ctx) if err != nil { return err } l.Poller = poller l.RawResponse = resp return nil } // CloudLinksClientCreateOrUpdateResponse contains the response from method CloudLinksClient.CreateOrUpdate. 
type CloudLinksClientCreateOrUpdateResponse struct { CloudLinksClientCreateOrUpdateResult // RawResponse contains the underlying HTTP response. RawResponse *http.Response } // CloudLinksClientCreateOrUpdateResult contains the result from method CloudLinksClient.CreateOrUpdate. type CloudLinksClientCreateOrUpdateResult struct { CloudLink } // CloudLinksClientDeletePollerResponse contains the response from method CloudLinksClient.Delete. type CloudLinksClientDeletePollerResponse struct { // Poller contains an initialized poller. Poller *CloudLinksClientDeletePoller // RawResponse contains the underlying HTTP response. RawResponse *http.Response } // PollUntilDone will poll the service endpoint until a terminal state is reached or an error is received. // freq: the time to wait between intervals in absence of a Retry-After header. Allowed minimum is one second. // A good starting value is 30 seconds. Note that some resources might benefit from a different value. func (l CloudLinksClientDeletePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (CloudLinksClientDeleteResponse, error) { respType := CloudLinksClientDeleteResponse{} resp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil) if err != nil { return respType, err } respType.RawResponse = resp return respType, nil } // Resume rehydrates a CloudLinksClientDeletePollerResponse from the provided client and resume token. func (l *CloudLinksClientDeletePollerResponse) Resume(ctx context.Context, client *CloudLinksClient, token string) error { pt, err := armruntime.NewPollerFromResumeToken("CloudLinksClient.Delete", token, client.pl) if err != nil { return err } poller := &CloudLinksClientDeletePoller{ pt: pt, } resp, err := poller.Poll(ctx) if err != nil { return err } l.Poller = poller l.RawResponse = resp return nil } // CloudLinksClientDeleteResponse contains the response from method CloudLinksClient.Delete. 
type CloudLinksClientDeleteResponse struct { // RawResponse contains the underlying HTTP response. RawResponse *http.Response } // CloudLinksClientGetResponse contains the response from method CloudLinksClient.Get. type CloudLinksClientGetResponse struct { CloudLinksClientGetResult // RawResponse contains the underlying HTTP response. RawResponse *http.Response } // CloudLinksClientGetResult contains the result from method CloudLinksClient.Get. type CloudLinksClientGetResult struct { CloudLink } // CloudLinksClientListResponse contains the response from method CloudLinksClient.List. type CloudLinksClientListResponse struct { CloudLinksClientListResult // RawResponse contains the underlying HTTP response. RawResponse *http.Response } // CloudLinksClientListResult contains the result from method CloudLinksClient.List. type CloudLinksClientListResult struct { CloudLinkList } // ClustersClientCreateOrUpdatePollerResponse contains the response from method ClustersClient.CreateOrUpdate. type ClustersClientCreateOrUpdatePollerResponse struct { // Poller contains an initialized poller. Poller *ClustersClientCreateOrUpdatePoller // RawResponse contains the underlying HTTP response. RawResponse *http.Response } // PollUntilDone will poll the service endpoint until a terminal state is reached or an error is received. // freq: the time to wait between intervals in absence of a Retry-After header. Allowed minimum is one second. // A good starting value is 30 seconds. Note that some resources might benefit from a different value. 
func (l ClustersClientCreateOrUpdatePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (ClustersClientCreateOrUpdateResponse, error) { respType := ClustersClientCreateOrUpdateResponse{} resp, err := l.Poller.pt.PollUntilDone(ctx, freq, &respType.Cluster) if err != nil { return respType, err } respType.RawResponse = resp return respType, nil } // Resume rehydrates a ClustersClientCreateOrUpdatePollerResponse from the provided client and resume token. func (l *ClustersClientCreateOrUpdatePollerResponse) Resume(ctx context.Context, client *ClustersClient, token string) error { pt, err := armruntime.NewPollerFromResumeToken("ClustersClient.CreateOrUpdate", token, client.pl) if err != nil { return err } poller := &ClustersClientCreateOrUpdatePoller{ pt: pt, } resp, err := poller.Poll(ctx) if err != nil { return err } l.Poller = poller l.RawResponse = resp return nil } // ClustersClientCreateOrUpdateResponse contains the response from method ClustersClient.CreateOrUpdate. type ClustersClientCreateOrUpdateResponse struct { ClustersClientCreateOrUpdateResult // RawResponse contains the underlying HTTP response. RawResponse *http.Response } // ClustersClientCreateOrUpdateResult contains the result from method ClustersClient.CreateOrUpdate. type ClustersClientCreateOrUpdateResult struct { Cluster } // ClustersClientDeletePollerResponse contains the response from method ClustersClient.Delete. type ClustersClientDeletePollerResponse struct { // Poller contains an initialized poller. Poller *ClustersClientDeletePoller // RawResponse contains the underlying HTTP response. RawResponse *http.Response } // PollUntilDone will poll the service endpoint until a terminal state is reached or an error is received. // freq: the time to wait between intervals in absence of a Retry-After header. Allowed minimum is one second. // A good starting value is 30 seconds. Note that some resources might benefit from a different value. 
func (l ClustersClientDeletePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (ClustersClientDeleteResponse, error) {
	respType := ClustersClientDeleteResponse{}
	// Delete returns no payload, so nil is passed as the unmarshal target.
	resp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)
	if err != nil {
		return respType, err
	}
	respType.RawResponse = resp
	return respType, nil
}

// Resume rehydrates a ClustersClientDeletePollerResponse from the provided client and resume token.
func (l *ClustersClientDeletePollerResponse) Resume(ctx context.Context, client *ClustersClient, token string) error {
	pt, err := armruntime.NewPollerFromResumeToken("ClustersClient.Delete", token, client.pl)
	if err != nil {
		return err
	}
	poller := &ClustersClientDeletePoller{
		pt: pt,
	}
	resp, err := poller.Poll(ctx)
	if err != nil {
		return err
	}
	l.Poller = poller
	l.RawResponse = resp
	return nil
}

// ClustersClientDeleteResponse contains the response from method ClustersClient.Delete.
type ClustersClientDeleteResponse struct {
	// RawResponse contains the underlying HTTP response.
	RawResponse *http.Response
}

// ClustersClientGetResponse contains the response from method ClustersClient.Get.
type ClustersClientGetResponse struct {
	ClustersClientGetResult
	// RawResponse contains the underlying HTTP response.
	RawResponse *http.Response
}

// ClustersClientGetResult contains the result from method ClustersClient.Get.
type ClustersClientGetResult struct {
	Cluster
}

// ClustersClientListResponse contains the response from method ClustersClient.List.
type ClustersClientListResponse struct {
	ClustersClientListResult
	// RawResponse contains the underlying HTTP response.
	RawResponse *http.Response
}

// ClustersClientListResult contains the result from method ClustersClient.List.
type ClustersClientListResult struct {
	ClusterList
}

// ClustersClientUpdatePollerResponse contains the response from method ClustersClient.Update.
type ClustersClientUpdatePollerResponse struct {
	// Poller contains an initialized poller.
	Poller *ClustersClientUpdatePoller

	// RawResponse contains the underlying HTTP response.
	RawResponse *http.Response
}

// PollUntilDone will poll the service endpoint until a terminal state is reached or an error is received.
// freq: the time to wait between intervals in absence of a Retry-After header. Allowed minimum is one second.
// A good starting value is 30 seconds. Note that some resources might benefit from a different value.
func (l ClustersClientUpdatePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (ClustersClientUpdateResponse, error) {
	respType := ClustersClientUpdateResponse{}
	resp, err := l.Poller.pt.PollUntilDone(ctx, freq, &respType.Cluster)
	if err != nil {
		return respType, err
	}
	respType.RawResponse = resp
	return respType, nil
}

// Resume rehydrates a ClustersClientUpdatePollerResponse from the provided client and resume token.
func (l *ClustersClientUpdatePollerResponse) Resume(ctx context.Context, client *ClustersClient, token string) error {
	pt, err := armruntime.NewPollerFromResumeToken("ClustersClient.Update", token, client.pl)
	if err != nil {
		return err
	}
	poller := &ClustersClientUpdatePoller{
		pt: pt,
	}
	resp, err := poller.Poll(ctx)
	if err != nil {
		return err
	}
	l.Poller = poller
	l.RawResponse = resp
	return nil
}

// ClustersClientUpdateResponse contains the response from method ClustersClient.Update.
type ClustersClientUpdateResponse struct {
	ClustersClientUpdateResult
	// RawResponse contains the underlying HTTP response.
	RawResponse *http.Response
}

// ClustersClientUpdateResult contains the result from method ClustersClient.Update.
type ClustersClientUpdateResult struct {
	Cluster
}

// DatastoresClientCreateOrUpdatePollerResponse contains the response from method DatastoresClient.CreateOrUpdate.
type DatastoresClientCreateOrUpdatePollerResponse struct {
	// Poller contains an initialized poller.
	Poller *DatastoresClientCreateOrUpdatePoller

	// RawResponse contains the underlying HTTP response.
	RawResponse *http.Response
}

// PollUntilDone will poll the service endpoint until a terminal state is reached or an error is received.
// freq: the time to wait between intervals in absence of a Retry-After header. Allowed minimum is one second.
// A good starting value is 30 seconds. Note that some resources might benefit from a different value.
func (l DatastoresClientCreateOrUpdatePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (DatastoresClientCreateOrUpdateResponse, error) {
	respType := DatastoresClientCreateOrUpdateResponse{}
	resp, err := l.Poller.pt.PollUntilDone(ctx, freq, &respType.Datastore)
	if err != nil {
		return respType, err
	}
	respType.RawResponse = resp
	return respType, nil
}

// Resume rehydrates a DatastoresClientCreateOrUpdatePollerResponse from the provided client and resume token.
func (l *DatastoresClientCreateOrUpdatePollerResponse) Resume(ctx context.Context, client *DatastoresClient, token string) error {
	pt, err := armruntime.NewPollerFromResumeToken("DatastoresClient.CreateOrUpdate", token, client.pl)
	if err != nil {
		return err
	}
	poller := &DatastoresClientCreateOrUpdatePoller{
		pt: pt,
	}
	resp, err := poller.Poll(ctx)
	if err != nil {
		return err
	}
	l.Poller = poller
	l.RawResponse = resp
	return nil
}

// DatastoresClientCreateOrUpdateResponse contains the response from method DatastoresClient.CreateOrUpdate.
type DatastoresClientCreateOrUpdateResponse struct {
	DatastoresClientCreateOrUpdateResult
	// RawResponse contains the underlying HTTP response.
	RawResponse *http.Response
}

// DatastoresClientCreateOrUpdateResult contains the result from method DatastoresClient.CreateOrUpdate.
type DatastoresClientCreateOrUpdateResult struct {
	Datastore
}

// DatastoresClientDeletePollerResponse contains the response from method DatastoresClient.Delete.
type DatastoresClientDeletePollerResponse struct {
	// Poller contains an initialized poller.
	Poller *DatastoresClientDeletePoller

	// RawResponse contains the underlying HTTP response.
	RawResponse *http.Response
}

// PollUntilDone will poll the service endpoint until a terminal state is reached or an error is received.
// freq: the time to wait between intervals in absence of a Retry-After header. Allowed minimum is one second.
// A good starting value is 30 seconds. Note that some resources might benefit from a different value.
func (l DatastoresClientDeletePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (DatastoresClientDeleteResponse, error) {
	respType := DatastoresClientDeleteResponse{}
	// Delete returns no payload, so nil is passed as the unmarshal target.
	resp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)
	if err != nil {
		return respType, err
	}
	respType.RawResponse = resp
	return respType, nil
}

// Resume rehydrates a DatastoresClientDeletePollerResponse from the provided client and resume token.
func (l *DatastoresClientDeletePollerResponse) Resume(ctx context.Context, client *DatastoresClient, token string) error {
	pt, err := armruntime.NewPollerFromResumeToken("DatastoresClient.Delete", token, client.pl)
	if err != nil {
		return err
	}
	poller := &DatastoresClientDeletePoller{
		pt: pt,
	}
	resp, err := poller.Poll(ctx)
	if err != nil {
		return err
	}
	l.Poller = poller
	l.RawResponse = resp
	return nil
}

// DatastoresClientDeleteResponse contains the response from method DatastoresClient.Delete.
type DatastoresClientDeleteResponse struct {
	// RawResponse contains the underlying HTTP response.
	RawResponse *http.Response
}

// DatastoresClientGetResponse contains the response from method DatastoresClient.Get.
type DatastoresClientGetResponse struct {
	DatastoresClientGetResult
	// RawResponse contains the underlying HTTP response.
	RawResponse *http.Response
}

// DatastoresClientGetResult contains the result from method DatastoresClient.Get.
type DatastoresClientGetResult struct {
	Datastore
}

// DatastoresClientListResponse contains the response from method DatastoresClient.List.
type DatastoresClientListResponse struct {
	DatastoresClientListResult
	// RawResponse contains the underlying HTTP response.
	RawResponse *http.Response
}

// DatastoresClientListResult contains the result from method DatastoresClient.List.
type DatastoresClientListResult struct {
	DatastoreList
}

// GlobalReachConnectionsClientCreateOrUpdatePollerResponse contains the response from method GlobalReachConnectionsClient.CreateOrUpdate.
type GlobalReachConnectionsClientCreateOrUpdatePollerResponse struct {
	// Poller contains an initialized poller.
	Poller *GlobalReachConnectionsClientCreateOrUpdatePoller

	// RawResponse contains the underlying HTTP response.
	RawResponse *http.Response
}

// PollUntilDone will poll the service endpoint until a terminal state is reached or an error is received.
// freq: the time to wait between intervals in absence of a Retry-After header. Allowed minimum is one second.
// A good starting value is 30 seconds. Note that some resources might benefit from a different value.
func (l GlobalReachConnectionsClientCreateOrUpdatePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (GlobalReachConnectionsClientCreateOrUpdateResponse, error) {
	respType := GlobalReachConnectionsClientCreateOrUpdateResponse{}
	resp, err := l.Poller.pt.PollUntilDone(ctx, freq, &respType.GlobalReachConnection)
	if err != nil {
		return respType, err
	}
	respType.RawResponse = resp
	return respType, nil
}

// Resume rehydrates a GlobalReachConnectionsClientCreateOrUpdatePollerResponse from the provided client and resume token.
func (l *GlobalReachConnectionsClientCreateOrUpdatePollerResponse) Resume(ctx context.Context, client *GlobalReachConnectionsClient, token string) error {
	pt, err := armruntime.NewPollerFromResumeToken("GlobalReachConnectionsClient.CreateOrUpdate", token, client.pl)
	if err != nil {
		return err
	}
	poller := &GlobalReachConnectionsClientCreateOrUpdatePoller{
		pt: pt,
	}
	// Issue one poll immediately so the rehydrated response carries a fresh HTTP response.
	resp, err := poller.Poll(ctx)
	if err != nil {
		return err
	}
	l.Poller = poller
	l.RawResponse = resp
	return nil
}

// GlobalReachConnectionsClientCreateOrUpdateResponse contains the response from method GlobalReachConnectionsClient.CreateOrUpdate.
type GlobalReachConnectionsClientCreateOrUpdateResponse struct {
	GlobalReachConnectionsClientCreateOrUpdateResult
	// RawResponse contains the underlying HTTP response.
	RawResponse *http.Response
}

// GlobalReachConnectionsClientCreateOrUpdateResult contains the result from method GlobalReachConnectionsClient.CreateOrUpdate.
type GlobalReachConnectionsClientCreateOrUpdateResult struct {
	GlobalReachConnection
}

// GlobalReachConnectionsClientDeletePollerResponse contains the response from method GlobalReachConnectionsClient.Delete.
type GlobalReachConnectionsClientDeletePollerResponse struct {
	// Poller contains an initialized poller.
	Poller *GlobalReachConnectionsClientDeletePoller

	// RawResponse contains the underlying HTTP response.
	RawResponse *http.Response
}

// PollUntilDone will poll the service endpoint until a terminal state is reached or an error is received.
// freq: the time to wait between intervals in absence of a Retry-After header. Allowed minimum is one second.
// A good starting value is 30 seconds. Note that some resources might benefit from a different value.
func (l GlobalReachConnectionsClientDeletePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (GlobalReachConnectionsClientDeleteResponse, error) {
	respType := GlobalReachConnectionsClientDeleteResponse{}
	// Delete returns no payload, so nil is passed as the unmarshal target.
	resp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)
	if err != nil {
		return respType, err
	}
	respType.RawResponse = resp
	return respType, nil
}

// Resume rehydrates a GlobalReachConnectionsClientDeletePollerResponse from the provided client and resume token.
func (l *GlobalReachConnectionsClientDeletePollerResponse) Resume(ctx context.Context, client *GlobalReachConnectionsClient, token string) error {
	pt, err := armruntime.NewPollerFromResumeToken("GlobalReachConnectionsClient.Delete", token, client.pl)
	if err != nil {
		return err
	}
	poller := &GlobalReachConnectionsClientDeletePoller{
		pt: pt,
	}
	resp, err := poller.Poll(ctx)
	if err != nil {
		return err
	}
	l.Poller = poller
	l.RawResponse = resp
	return nil
}

// GlobalReachConnectionsClientDeleteResponse contains the response from method GlobalReachConnectionsClient.Delete.
type GlobalReachConnectionsClientDeleteResponse struct {
	// RawResponse contains the underlying HTTP response.
	RawResponse *http.Response
}

// GlobalReachConnectionsClientGetResponse contains the response from method GlobalReachConnectionsClient.Get.
type GlobalReachConnectionsClientGetResponse struct {
	GlobalReachConnectionsClientGetResult
	// RawResponse contains the underlying HTTP response.
	RawResponse *http.Response
}

// GlobalReachConnectionsClientGetResult contains the result from method GlobalReachConnectionsClient.Get.
type GlobalReachConnectionsClientGetResult struct {
	GlobalReachConnection
}

// GlobalReachConnectionsClientListResponse contains the response from method GlobalReachConnectionsClient.List.
type GlobalReachConnectionsClientListResponse struct {
	GlobalReachConnectionsClientListResult
	// RawResponse contains the underlying HTTP response.
	RawResponse *http.Response
}

// GlobalReachConnectionsClientListResult contains the result from method GlobalReachConnectionsClient.List.
type GlobalReachConnectionsClientListResult struct {
	GlobalReachConnectionList
}

// HcxEnterpriseSitesClientCreateOrUpdateResponse contains the response from method HcxEnterpriseSitesClient.CreateOrUpdate.
// Note: HcxEnterpriseSites CreateOrUpdate/Delete are synchronous — no poller response types are generated for them.
type HcxEnterpriseSitesClientCreateOrUpdateResponse struct {
	HcxEnterpriseSitesClientCreateOrUpdateResult
	// RawResponse contains the underlying HTTP response.
	RawResponse *http.Response
}

// HcxEnterpriseSitesClientCreateOrUpdateResult contains the result from method HcxEnterpriseSitesClient.CreateOrUpdate.
type HcxEnterpriseSitesClientCreateOrUpdateResult struct {
	HcxEnterpriseSite
}

// HcxEnterpriseSitesClientDeleteResponse contains the response from method HcxEnterpriseSitesClient.Delete.
type HcxEnterpriseSitesClientDeleteResponse struct {
	// RawResponse contains the underlying HTTP response.
	RawResponse *http.Response
}

// HcxEnterpriseSitesClientGetResponse contains the response from method HcxEnterpriseSitesClient.Get.
type HcxEnterpriseSitesClientGetResponse struct {
	HcxEnterpriseSitesClientGetResult
	// RawResponse contains the underlying HTTP response.
	RawResponse *http.Response
}

// HcxEnterpriseSitesClientGetResult contains the result from method HcxEnterpriseSitesClient.Get.
type HcxEnterpriseSitesClientGetResult struct {
	HcxEnterpriseSite
}

// HcxEnterpriseSitesClientListResponse contains the response from method HcxEnterpriseSitesClient.List.
type HcxEnterpriseSitesClientListResponse struct {
	HcxEnterpriseSitesClientListResult
	// RawResponse contains the underlying HTTP response.
	RawResponse *http.Response
}

// HcxEnterpriseSitesClientListResult contains the result from method HcxEnterpriseSitesClient.List.
type HcxEnterpriseSitesClientListResult struct {
	HcxEnterpriseSiteList
}

// LocationsClientCheckQuotaAvailabilityResponse contains the response from method LocationsClient.CheckQuotaAvailability.
type LocationsClientCheckQuotaAvailabilityResponse struct {
	LocationsClientCheckQuotaAvailabilityResult
	// RawResponse contains the underlying HTTP response.
	RawResponse *http.Response
}

// LocationsClientCheckQuotaAvailabilityResult contains the result from method LocationsClient.CheckQuotaAvailability.
type LocationsClientCheckQuotaAvailabilityResult struct {
	Quota
}

// LocationsClientCheckTrialAvailabilityResponse contains the response from method LocationsClient.CheckTrialAvailability.
type LocationsClientCheckTrialAvailabilityResponse struct {
	LocationsClientCheckTrialAvailabilityResult
	// RawResponse contains the underlying HTTP response.
	RawResponse *http.Response
}

// LocationsClientCheckTrialAvailabilityResult contains the result from method LocationsClient.CheckTrialAvailability.
type LocationsClientCheckTrialAvailabilityResult struct {
	Trial
}

// OperationsClientListResponse contains the response from method OperationsClient.List.
type OperationsClientListResponse struct {
	OperationsClientListResult
	// RawResponse contains the underlying HTTP response.
	RawResponse *http.Response
}

// OperationsClientListResult contains the result from method OperationsClient.List.
type OperationsClientListResult struct {
	OperationList
}

// PlacementPoliciesClientCreateOrUpdatePollerResponse contains the response from method PlacementPoliciesClient.CreateOrUpdate.
type PlacementPoliciesClientCreateOrUpdatePollerResponse struct {
	// Poller contains an initialized poller.
	Poller *PlacementPoliciesClientCreateOrUpdatePoller

	// RawResponse contains the underlying HTTP response.
	RawResponse *http.Response
}

// PollUntilDone will poll the service endpoint until a terminal state is reached or an error is received.
// freq: the time to wait between intervals in absence of a Retry-After header. Allowed minimum is one second.
// A good starting value is 30 seconds. Note that some resources might benefit from a different value.
func (l PlacementPoliciesClientCreateOrUpdatePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (PlacementPoliciesClientCreateOrUpdateResponse, error) {
	respType := PlacementPoliciesClientCreateOrUpdateResponse{}
	// The terminal payload is unmarshaled directly into the embedded PlacementPolicy.
	resp, err := l.Poller.pt.PollUntilDone(ctx, freq, &respType.PlacementPolicy)
	if err != nil {
		return respType, err
	}
	respType.RawResponse = resp
	return respType, nil
}

// Resume rehydrates a PlacementPoliciesClientCreateOrUpdatePollerResponse from the provided client and resume token.
func (l *PlacementPoliciesClientCreateOrUpdatePollerResponse) Resume(ctx context.Context, client *PlacementPoliciesClient, token string) error {
	pt, err := armruntime.NewPollerFromResumeToken("PlacementPoliciesClient.CreateOrUpdate", token, client.pl)
	if err != nil {
		return err
	}
	poller := &PlacementPoliciesClientCreateOrUpdatePoller{
		pt: pt,
	}
	resp, err := poller.Poll(ctx)
	if err != nil {
		return err
	}
	l.Poller = poller
	l.RawResponse = resp
	return nil
}

// PlacementPoliciesClientCreateOrUpdateResponse contains the response from method PlacementPoliciesClient.CreateOrUpdate.
type PlacementPoliciesClientCreateOrUpdateResponse struct {
	PlacementPoliciesClientCreateOrUpdateResult
	// RawResponse contains the underlying HTTP response.
	RawResponse *http.Response
}

// PlacementPoliciesClientCreateOrUpdateResult contains the result from method PlacementPoliciesClient.CreateOrUpdate.
type PlacementPoliciesClientCreateOrUpdateResult struct {
	PlacementPolicy
}

// PlacementPoliciesClientDeletePollerResponse contains the response from method PlacementPoliciesClient.Delete.
type PlacementPoliciesClientDeletePollerResponse struct {
	// Poller contains an initialized poller.
	Poller *PlacementPoliciesClientDeletePoller

	// RawResponse contains the underlying HTTP response.
	RawResponse *http.Response
}

// PollUntilDone will poll the service endpoint until a terminal state is reached or an error is received.
// freq: the time to wait between intervals in absence of a Retry-After header. Allowed minimum is one second.
// A good starting value is 30 seconds. Note that some resources might benefit from a different value.
func (l PlacementPoliciesClientDeletePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (PlacementPoliciesClientDeleteResponse, error) {
	respType := PlacementPoliciesClientDeleteResponse{}
	// Delete returns no payload, so nil is passed as the unmarshal target.
	resp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)
	if err != nil {
		return respType, err
	}
	respType.RawResponse = resp
	return respType, nil
}

// Resume rehydrates a PlacementPoliciesClientDeletePollerResponse from the provided client and resume token.
func (l *PlacementPoliciesClientDeletePollerResponse) Resume(ctx context.Context, client *PlacementPoliciesClient, token string) error {
	pt, err := armruntime.NewPollerFromResumeToken("PlacementPoliciesClient.Delete", token, client.pl)
	if err != nil {
		return err
	}
	poller := &PlacementPoliciesClientDeletePoller{
		pt: pt,
	}
	resp, err := poller.Poll(ctx)
	if err != nil {
		return err
	}
	l.Poller = poller
	l.RawResponse = resp
	return nil
}

// PlacementPoliciesClientDeleteResponse contains the response from method PlacementPoliciesClient.Delete.
type PlacementPoliciesClientDeleteResponse struct {
	// RawResponse contains the underlying HTTP response.
	RawResponse *http.Response
}

// PlacementPoliciesClientGetResponse contains the response from method PlacementPoliciesClient.Get.
type PlacementPoliciesClientGetResponse struct {
	PlacementPoliciesClientGetResult
	// RawResponse contains the underlying HTTP response.
	RawResponse *http.Response
}

// PlacementPoliciesClientGetResult contains the result from method PlacementPoliciesClient.Get.
type PlacementPoliciesClientGetResult struct {
	PlacementPolicy
}

// PlacementPoliciesClientListResponse contains the response from method PlacementPoliciesClient.List.
type PlacementPoliciesClientListResponse struct {
	PlacementPoliciesClientListResult
	// RawResponse contains the underlying HTTP response.
	RawResponse *http.Response
}

// PlacementPoliciesClientListResult contains the result from method PlacementPoliciesClient.List.
type PlacementPoliciesClientListResult struct {
	PlacementPoliciesList
}

// PlacementPoliciesClientUpdatePollerResponse contains the response from method PlacementPoliciesClient.Update.
type PlacementPoliciesClientUpdatePollerResponse struct {
	// Poller contains an initialized poller.
	Poller *PlacementPoliciesClientUpdatePoller

	// RawResponse contains the underlying HTTP response.
	RawResponse *http.Response
}

// PollUntilDone will poll the service endpoint until a terminal state is reached or an error is received.
// freq: the time to wait between intervals in absence of a Retry-After header. Allowed minimum is one second.
// A good starting value is 30 seconds. Note that some resources might benefit from a different value.
func (l PlacementPoliciesClientUpdatePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (PlacementPoliciesClientUpdateResponse, error) {
	respType := PlacementPoliciesClientUpdateResponse{}
	resp, err := l.Poller.pt.PollUntilDone(ctx, freq, &respType.PlacementPolicy)
	if err != nil {
		return respType, err
	}
	respType.RawResponse = resp
	return respType, nil
}

// Resume rehydrates a PlacementPoliciesClientUpdatePollerResponse from the provided client and resume token.
func (l *PlacementPoliciesClientUpdatePollerResponse) Resume(ctx context.Context, client *PlacementPoliciesClient, token string) error {
	pt, err := armruntime.NewPollerFromResumeToken("PlacementPoliciesClient.Update", token, client.pl)
	if err != nil {
		return err
	}
	poller := &PlacementPoliciesClientUpdatePoller{
		pt: pt,
	}
	// Issue one poll immediately so the rehydrated response carries a fresh HTTP response.
	resp, err := poller.Poll(ctx)
	if err != nil {
		return err
	}
	l.Poller = poller
	l.RawResponse = resp
	return nil
}

// PlacementPoliciesClientUpdateResponse contains the response from method PlacementPoliciesClient.Update.
type PlacementPoliciesClientUpdateResponse struct {
	PlacementPoliciesClientUpdateResult
	// RawResponse contains the underlying HTTP response.
	RawResponse *http.Response
}

// PlacementPoliciesClientUpdateResult contains the result from method PlacementPoliciesClient.Update.
type PlacementPoliciesClientUpdateResult struct {
	PlacementPolicy
}

// PrivateCloudsClientCreateOrUpdatePollerResponse contains the response from method PrivateCloudsClient.CreateOrUpdate.
type PrivateCloudsClientCreateOrUpdatePollerResponse struct {
	// Poller contains an initialized poller.
	Poller *PrivateCloudsClientCreateOrUpdatePoller

	// RawResponse contains the underlying HTTP response.
	RawResponse *http.Response
}

// PollUntilDone will poll the service endpoint until a terminal state is reached or an error is received.
// freq: the time to wait between intervals in absence of a Retry-After header. Allowed minimum is one second.
// A good starting value is 30 seconds. Note that some resources might benefit from a different value.
func (l PrivateCloudsClientCreateOrUpdatePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (PrivateCloudsClientCreateOrUpdateResponse, error) {
	respType := PrivateCloudsClientCreateOrUpdateResponse{}
	// The terminal payload is unmarshaled directly into the embedded PrivateCloud.
	resp, err := l.Poller.pt.PollUntilDone(ctx, freq, &respType.PrivateCloud)
	if err != nil {
		return respType, err
	}
	respType.RawResponse = resp
	return respType, nil
}

// Resume rehydrates a PrivateCloudsClientCreateOrUpdatePollerResponse from the provided client and resume token.
func (l *PrivateCloudsClientCreateOrUpdatePollerResponse) Resume(ctx context.Context, client *PrivateCloudsClient, token string) error {
	pt, err := armruntime.NewPollerFromResumeToken("PrivateCloudsClient.CreateOrUpdate", token, client.pl)
	if err != nil {
		return err
	}
	poller := &PrivateCloudsClientCreateOrUpdatePoller{
		pt: pt,
	}
	resp, err := poller.Poll(ctx)
	if err != nil {
		return err
	}
	l.Poller = poller
	l.RawResponse = resp
	return nil
}

// PrivateCloudsClientCreateOrUpdateResponse contains the response from method PrivateCloudsClient.CreateOrUpdate.
type PrivateCloudsClientCreateOrUpdateResponse struct {
	PrivateCloudsClientCreateOrUpdateResult
	// RawResponse contains the underlying HTTP response.
	RawResponse *http.Response
}

// PrivateCloudsClientCreateOrUpdateResult contains the result from method PrivateCloudsClient.CreateOrUpdate.
type PrivateCloudsClientCreateOrUpdateResult struct {
	PrivateCloud
}

// PrivateCloudsClientDeletePollerResponse contains the response from method PrivateCloudsClient.Delete.
type PrivateCloudsClientDeletePollerResponse struct {
	// Poller contains an initialized poller.
	Poller *PrivateCloudsClientDeletePoller

	// RawResponse contains the underlying HTTP response.
	RawResponse *http.Response
}

// PollUntilDone will poll the service endpoint until a terminal state is reached or an error is received.
// freq: the time to wait between intervals in absence of a Retry-After header. Allowed minimum is one second.
// A good starting value is 30 seconds. Note that some resources might benefit from a different value.
func (l PrivateCloudsClientDeletePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (PrivateCloudsClientDeleteResponse, error) {
	respType := PrivateCloudsClientDeleteResponse{}
	// Delete returns no payload, so nil is passed as the unmarshal target.
	resp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)
	if err != nil {
		return respType, err
	}
	respType.RawResponse = resp
	return respType, nil
}

// Resume rehydrates a PrivateCloudsClientDeletePollerResponse from the provided client and resume token.
func (l *PrivateCloudsClientDeletePollerResponse) Resume(ctx context.Context, client *PrivateCloudsClient, token string) error {
	pt, err := armruntime.NewPollerFromResumeToken("PrivateCloudsClient.Delete", token, client.pl)
	if err != nil {
		return err
	}
	poller := &PrivateCloudsClientDeletePoller{
		pt: pt,
	}
	resp, err := poller.Poll(ctx)
	if err != nil {
		return err
	}
	l.Poller = poller
	l.RawResponse = resp
	return nil
}

// PrivateCloudsClientDeleteResponse contains the response from method PrivateCloudsClient.Delete.
type PrivateCloudsClientDeleteResponse struct {
	// RawResponse contains the underlying HTTP response.
	RawResponse *http.Response
}

// PrivateCloudsClientGetResponse contains the response from method PrivateCloudsClient.Get.
type PrivateCloudsClientGetResponse struct {
	PrivateCloudsClientGetResult
	// RawResponse contains the underlying HTTP response.
	RawResponse *http.Response
}

// PrivateCloudsClientGetResult contains the result from method PrivateCloudsClient.Get.
type PrivateCloudsClientGetResult struct {
	PrivateCloud
}

// PrivateCloudsClientListAdminCredentialsResponse contains the response from method PrivateCloudsClient.ListAdminCredentials.
type PrivateCloudsClientListAdminCredentialsResponse struct {
	PrivateCloudsClientListAdminCredentialsResult
	// RawResponse contains the underlying HTTP response.
	RawResponse *http.Response
}

// PrivateCloudsClientListAdminCredentialsResult contains the result from method PrivateCloudsClient.ListAdminCredentials.
type PrivateCloudsClientListAdminCredentialsResult struct {
	AdminCredentials
}

// PrivateCloudsClientListInSubscriptionResponse contains the response from method PrivateCloudsClient.ListInSubscription.
type PrivateCloudsClientListInSubscriptionResponse struct {
	PrivateCloudsClientListInSubscriptionResult
	// RawResponse contains the underlying HTTP response.
	RawResponse *http.Response
}

// PrivateCloudsClientListInSubscriptionResult contains the result from method PrivateCloudsClient.ListInSubscription.
type PrivateCloudsClientListInSubscriptionResult struct {
	PrivateCloudList
}

// PrivateCloudsClientListResponse contains the response from method PrivateCloudsClient.List.
type PrivateCloudsClientListResponse struct {
	PrivateCloudsClientListResult
	// RawResponse contains the underlying HTTP response.
	RawResponse *http.Response
}

// PrivateCloudsClientListResult contains the result from method PrivateCloudsClient.List.
type PrivateCloudsClientListResult struct {
	PrivateCloudList
}

// PrivateCloudsClientRotateNsxtPasswordPollerResponse contains the response from method PrivateCloudsClient.RotateNsxtPassword.
type PrivateCloudsClientRotateNsxtPasswordPollerResponse struct {
	// Poller contains an initialized poller.
	Poller *PrivateCloudsClientRotateNsxtPasswordPoller

	// RawResponse contains the underlying HTTP response.
	RawResponse *http.Response
}

// PollUntilDone will poll the service endpoint until a terminal state is reached or an error is received.
// freq: the time to wait between intervals in absence of a Retry-After header. Allowed minimum is one second.
// A good starting value is 30 seconds. Note that some resources might benefit from a different value.
func (l PrivateCloudsClientRotateNsxtPasswordPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (PrivateCloudsClientRotateNsxtPasswordResponse, error) {
	respType := PrivateCloudsClientRotateNsxtPasswordResponse{}
	// RotateNsxtPassword returns no payload, so nil is passed as the unmarshal target.
	resp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)
	if err != nil {
		return respType, err
	}
	respType.RawResponse = resp
	return respType, nil
}

// Resume rehydrates a PrivateCloudsClientRotateNsxtPasswordPollerResponse from the provided client and resume token.
func (l *PrivateCloudsClientRotateNsxtPasswordPollerResponse) Resume(ctx context.Context, client *PrivateCloudsClient, token string) error {
	pt, err := armruntime.NewPollerFromResumeToken("PrivateCloudsClient.RotateNsxtPassword", token, client.pl)
	if err != nil {
		return err
	}
	poller := &PrivateCloudsClientRotateNsxtPasswordPoller{
		pt: pt,
	}
	resp, err := poller.Poll(ctx)
	if err != nil {
		return err
	}
	l.Poller = poller
	l.RawResponse = resp
	return nil
}

// PrivateCloudsClientRotateNsxtPasswordResponse contains the response from method PrivateCloudsClient.RotateNsxtPassword.
type PrivateCloudsClientRotateNsxtPasswordResponse struct {
	// RawResponse contains the underlying HTTP response.
	RawResponse *http.Response
}

// PrivateCloudsClientRotateVcenterPasswordPollerResponse contains the response from method PrivateCloudsClient.RotateVcenterPassword.
type PrivateCloudsClientRotateVcenterPasswordPollerResponse struct {
	// Poller contains an initialized poller.
	Poller *PrivateCloudsClientRotateVcenterPasswordPoller

	// RawResponse contains the underlying HTTP response.
	RawResponse *http.Response
}

// PollUntilDone will poll the service endpoint until a terminal state is reached or an error is received.
// freq: the time to wait between intervals in absence of a Retry-After header. Allowed minimum is one second.
// A good starting value is 30 seconds. Note that some resources might benefit from a different value.
func (l PrivateCloudsClientRotateVcenterPasswordPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (PrivateCloudsClientRotateVcenterPasswordResponse, error) {
	respType := PrivateCloudsClientRotateVcenterPasswordResponse{}
	// RotateVcenterPassword returns no payload, so nil is passed as the unmarshal target.
	resp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)
	if err != nil {
		return respType, err
	}
	respType.RawResponse = resp
	return respType, nil
}

// Resume rehydrates a PrivateCloudsClientRotateVcenterPasswordPollerResponse from the provided client and resume token.
func (l *PrivateCloudsClientRotateVcenterPasswordPollerResponse) Resume(ctx context.Context, client *PrivateCloudsClient, token string) error {
	pt, err := armruntime.NewPollerFromResumeToken("PrivateCloudsClient.RotateVcenterPassword", token, client.pl)
	if err != nil {
		return err
	}
	poller := &PrivateCloudsClientRotateVcenterPasswordPoller{
		pt: pt,
	}
	resp, err := poller.Poll(ctx)
	if err != nil {
		return err
	}
	l.Poller = poller
	l.RawResponse = resp
	return nil
}

// PrivateCloudsClientRotateVcenterPasswordResponse contains the response from method PrivateCloudsClient.RotateVcenterPassword.
type PrivateCloudsClientRotateVcenterPasswordResponse struct {
	// RawResponse contains the underlying HTTP response.
	RawResponse *http.Response
}

// PrivateCloudsClientUpdatePollerResponse contains the response from method PrivateCloudsClient.Update.
type PrivateCloudsClientUpdatePollerResponse struct {
	// Poller contains an initialized poller.
	Poller *PrivateCloudsClientUpdatePoller

	// RawResponse contains the underlying HTTP response.
	RawResponse *http.Response
}

// PollUntilDone will poll the service endpoint until a terminal state is reached or an error is received.
// freq: the time to wait between intervals in absence of a Retry-After header. Allowed minimum is one second.
// A good starting value is 30 seconds. Note that some resources might benefit from a different value.
func (l PrivateCloudsClientUpdatePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (PrivateCloudsClientUpdateResponse, error) { respType := PrivateCloudsClientUpdateResponse{} resp, err := l.Poller.pt.PollUntilDone(ctx, freq, &respType.PrivateCloud) if err != nil { return respType, err } respType.RawResponse = resp return respType, nil } // Resume rehydrates a PrivateCloudsClientUpdatePollerResponse from the provided client and resume token. func (l *PrivateCloudsClientUpdatePollerResponse) Resume(ctx context.Context, client *PrivateCloudsClient, token string) error { pt, err := armruntime.NewPollerFromResumeToken("PrivateCloudsClient.Update", token, client.pl) if err != nil { return err } poller := &PrivateCloudsClientUpdatePoller{ pt: pt, } resp, err := poller.Poll(ctx) if err != nil { return err } l.Poller = poller l.RawResponse = resp return nil } // PrivateCloudsClientUpdateResponse contains the response from method PrivateCloudsClient.Update. type PrivateCloudsClientUpdateResponse struct { PrivateCloudsClientUpdateResult // RawResponse contains the underlying HTTP response. RawResponse *http.Response } // PrivateCloudsClientUpdateResult contains the result from method PrivateCloudsClient.Update. type PrivateCloudsClientUpdateResult struct { PrivateCloud } // ScriptCmdletsClientGetResponse contains the response from method ScriptCmdletsClient.Get. type ScriptCmdletsClientGetResponse struct { ScriptCmdletsClientGetResult // RawResponse contains the underlying HTTP response. RawResponse *http.Response } // ScriptCmdletsClientGetResult contains the result from method ScriptCmdletsClient.Get. type ScriptCmdletsClientGetResult struct { ScriptCmdlet } // ScriptCmdletsClientListResponse contains the response from method ScriptCmdletsClient.List. type ScriptCmdletsClientListResponse struct { ScriptCmdletsClientListResult // RawResponse contains the underlying HTTP response. 
RawResponse *http.Response } // ScriptCmdletsClientListResult contains the result from method ScriptCmdletsClient.List. type ScriptCmdletsClientListResult struct { ScriptCmdletsList } // ScriptExecutionsClientCreateOrUpdatePollerResponse contains the response from method ScriptExecutionsClient.CreateOrUpdate. type ScriptExecutionsClientCreateOrUpdatePollerResponse struct { // Poller contains an initialized poller. Poller *ScriptExecutionsClientCreateOrUpdatePoller // RawResponse contains the underlying HTTP response. RawResponse *http.Response } // PollUntilDone will poll the service endpoint until a terminal state is reached or an error is received. // freq: the time to wait between intervals in absence of a Retry-After header. Allowed minimum is one second. // A good starting value is 30 seconds. Note that some resources might benefit from a different value. func (l ScriptExecutionsClientCreateOrUpdatePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (ScriptExecutionsClientCreateOrUpdateResponse, error) { respType := ScriptExecutionsClientCreateOrUpdateResponse{} resp, err := l.Poller.pt.PollUntilDone(ctx, freq, &respType.ScriptExecution) if err != nil { return respType, err } respType.RawResponse = resp return respType, nil } // Resume rehydrates a ScriptExecutionsClientCreateOrUpdatePollerResponse from the provided client and resume token. func (l *ScriptExecutionsClientCreateOrUpdatePollerResponse) Resume(ctx context.Context, client *ScriptExecutionsClient, token string) error { pt, err := armruntime.NewPollerFromResumeToken("ScriptExecutionsClient.CreateOrUpdate", token, client.pl) if err != nil { return err } poller := &ScriptExecutionsClientCreateOrUpdatePoller{ pt: pt, } resp, err := poller.Poll(ctx) if err != nil { return err } l.Poller = poller l.RawResponse = resp return nil } // ScriptExecutionsClientCreateOrUpdateResponse contains the response from method ScriptExecutionsClient.CreateOrUpdate. 
type ScriptExecutionsClientCreateOrUpdateResponse struct { ScriptExecutionsClientCreateOrUpdateResult // RawResponse contains the underlying HTTP response. RawResponse *http.Response } // ScriptExecutionsClientCreateOrUpdateResult contains the result from method ScriptExecutionsClient.CreateOrUpdate. type ScriptExecutionsClientCreateOrUpdateResult struct { ScriptExecution } // ScriptExecutionsClientDeletePollerResponse contains the response from method ScriptExecutionsClient.Delete. type ScriptExecutionsClientDeletePollerResponse struct { // Poller contains an initialized poller. Poller *ScriptExecutionsClientDeletePoller // RawResponse contains the underlying HTTP response. RawResponse *http.Response } // PollUntilDone will poll the service endpoint until a terminal state is reached or an error is received. // freq: the time to wait between intervals in absence of a Retry-After header. Allowed minimum is one second. // A good starting value is 30 seconds. Note that some resources might benefit from a different value. func (l ScriptExecutionsClientDeletePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (ScriptExecutionsClientDeleteResponse, error) { respType := ScriptExecutionsClientDeleteResponse{} resp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil) if err != nil { return respType, err } respType.RawResponse = resp return respType, nil } // Resume rehydrates a ScriptExecutionsClientDeletePollerResponse from the provided client and resume token. 
func (l *ScriptExecutionsClientDeletePollerResponse) Resume(ctx context.Context, client *ScriptExecutionsClient, token string) error { pt, err := armruntime.NewPollerFromResumeToken("ScriptExecutionsClient.Delete", token, client.pl) if err != nil { return err } poller := &ScriptExecutionsClientDeletePoller{ pt: pt, } resp, err := poller.Poll(ctx) if err != nil { return err } l.Poller = poller l.RawResponse = resp return nil } // ScriptExecutionsClientDeleteResponse contains the response from method ScriptExecutionsClient.Delete. type ScriptExecutionsClientDeleteResponse struct { // RawResponse contains the underlying HTTP response. RawResponse *http.Response } // ScriptExecutionsClientGetExecutionLogsResponse contains the response from method ScriptExecutionsClient.GetExecutionLogs. type ScriptExecutionsClientGetExecutionLogsResponse struct { ScriptExecutionsClientGetExecutionLogsResult // RawResponse contains the underlying HTTP response. RawResponse *http.Response } // ScriptExecutionsClientGetExecutionLogsResult contains the result from method ScriptExecutionsClient.GetExecutionLogs. type ScriptExecutionsClientGetExecutionLogsResult struct { ScriptExecution } // ScriptExecutionsClientGetResponse contains the response from method ScriptExecutionsClient.Get. type ScriptExecutionsClientGetResponse struct { ScriptExecutionsClientGetResult // RawResponse contains the underlying HTTP response. RawResponse *http.Response } // ScriptExecutionsClientGetResult contains the result from method ScriptExecutionsClient.Get. type ScriptExecutionsClientGetResult struct { ScriptExecution } // ScriptExecutionsClientListResponse contains the response from method ScriptExecutionsClient.List. type ScriptExecutionsClientListResponse struct { ScriptExecutionsClientListResult // RawResponse contains the underlying HTTP response. RawResponse *http.Response } // ScriptExecutionsClientListResult contains the result from method ScriptExecutionsClient.List. 
type ScriptExecutionsClientListResult struct { ScriptExecutionsList } // ScriptPackagesClientGetResponse contains the response from method ScriptPackagesClient.Get. type ScriptPackagesClientGetResponse struct { ScriptPackagesClientGetResult // RawResponse contains the underlying HTTP response. RawResponse *http.Response } // ScriptPackagesClientGetResult contains the result from method ScriptPackagesClient.Get. type ScriptPackagesClientGetResult struct { ScriptPackage } // ScriptPackagesClientListResponse contains the response from method ScriptPackagesClient.List. type ScriptPackagesClientListResponse struct { ScriptPackagesClientListResult // RawResponse contains the underlying HTTP response. RawResponse *http.Response } // ScriptPackagesClientListResult contains the result from method ScriptPackagesClient.List. type ScriptPackagesClientListResult struct { ScriptPackagesList } // VirtualMachinesClientGetResponse contains the response from method VirtualMachinesClient.Get. type VirtualMachinesClientGetResponse struct { VirtualMachinesClientGetResult // RawResponse contains the underlying HTTP response. RawResponse *http.Response } // VirtualMachinesClientGetResult contains the result from method VirtualMachinesClient.Get. type VirtualMachinesClientGetResult struct { VirtualMachine } // VirtualMachinesClientListResponse contains the response from method VirtualMachinesClient.List. type VirtualMachinesClientListResponse struct { VirtualMachinesClientListResult // RawResponse contains the underlying HTTP response. RawResponse *http.Response } // VirtualMachinesClientListResult contains the result from method VirtualMachinesClient.List. type VirtualMachinesClientListResult struct { VirtualMachinesList } // VirtualMachinesClientRestrictMovementPollerResponse contains the response from method VirtualMachinesClient.RestrictMovement. type VirtualMachinesClientRestrictMovementPollerResponse struct { // Poller contains an initialized poller. 
Poller *VirtualMachinesClientRestrictMovementPoller // RawResponse contains the underlying HTTP response. RawResponse *http.Response } // PollUntilDone will poll the service endpoint until a terminal state is reached or an error is received. // freq: the time to wait between intervals in absence of a Retry-After header. Allowed minimum is one second. // A good starting value is 30 seconds. Note that some resources might benefit from a different value. func (l VirtualMachinesClientRestrictMovementPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (VirtualMachinesClientRestrictMovementResponse, error) { respType := VirtualMachinesClientRestrictMovementResponse{} resp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil) if err != nil { return respType, err } respType.RawResponse = resp return respType, nil } // Resume rehydrates a VirtualMachinesClientRestrictMovementPollerResponse from the provided client and resume token. func (l *VirtualMachinesClientRestrictMovementPollerResponse) Resume(ctx context.Context, client *VirtualMachinesClient, token string) error { pt, err := armruntime.NewPollerFromResumeToken("VirtualMachinesClient.RestrictMovement", token, client.pl) if err != nil { return err } poller := &VirtualMachinesClientRestrictMovementPoller{ pt: pt, } resp, err := poller.Poll(ctx) if err != nil { return err } l.Poller = poller l.RawResponse = resp return nil } // VirtualMachinesClientRestrictMovementResponse contains the response from method VirtualMachinesClient.RestrictMovement. type VirtualMachinesClientRestrictMovementResponse struct { // RawResponse contains the underlying HTTP response. RawResponse *http.Response } // WorkloadNetworksClientCreateDNSServicePollerResponse contains the response from method WorkloadNetworksClient.CreateDNSService. type WorkloadNetworksClientCreateDNSServicePollerResponse struct { // Poller contains an initialized poller. 
Poller *WorkloadNetworksClientCreateDNSServicePoller // RawResponse contains the underlying HTTP response. RawResponse *http.Response } // PollUntilDone will poll the service endpoint until a terminal state is reached or an error is received. // freq: the time to wait between intervals in absence of a Retry-After header. Allowed minimum is one second. // A good starting value is 30 seconds. Note that some resources might benefit from a different value. func (l WorkloadNetworksClientCreateDNSServicePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (WorkloadNetworksClientCreateDNSServiceResponse, error) { respType := WorkloadNetworksClientCreateDNSServiceResponse{} resp, err := l.Poller.pt.PollUntilDone(ctx, freq, &respType.WorkloadNetworkDNSService) if err != nil { return respType, err } respType.RawResponse = resp return respType, nil } // Resume rehydrates a WorkloadNetworksClientCreateDNSServicePollerResponse from the provided client and resume token. func (l *WorkloadNetworksClientCreateDNSServicePollerResponse) Resume(ctx context.Context, client *WorkloadNetworksClient, token string) error { pt, err := armruntime.NewPollerFromResumeToken("WorkloadNetworksClient.CreateDNSService", token, client.pl) if err != nil { return err } poller := &WorkloadNetworksClientCreateDNSServicePoller{ pt: pt, } resp, err := poller.Poll(ctx) if err != nil { return err } l.Poller = poller l.RawResponse = resp return nil } // WorkloadNetworksClientCreateDNSServiceResponse contains the response from method WorkloadNetworksClient.CreateDNSService. type WorkloadNetworksClientCreateDNSServiceResponse struct { WorkloadNetworksClientCreateDNSServiceResult // RawResponse contains the underlying HTTP response. RawResponse *http.Response } // WorkloadNetworksClientCreateDNSServiceResult contains the result from method WorkloadNetworksClient.CreateDNSService. 
type WorkloadNetworksClientCreateDNSServiceResult struct { WorkloadNetworkDNSService } // WorkloadNetworksClientCreateDNSZonePollerResponse contains the response from method WorkloadNetworksClient.CreateDNSZone. type WorkloadNetworksClientCreateDNSZonePollerResponse struct { // Poller contains an initialized poller. Poller *WorkloadNetworksClientCreateDNSZonePoller // RawResponse contains the underlying HTTP response. RawResponse *http.Response } // PollUntilDone will poll the service endpoint until a terminal state is reached or an error is received. // freq: the time to wait between intervals in absence of a Retry-After header. Allowed minimum is one second. // A good starting value is 30 seconds. Note that some resources might benefit from a different value. func (l WorkloadNetworksClientCreateDNSZonePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (WorkloadNetworksClientCreateDNSZoneResponse, error) { respType := WorkloadNetworksClientCreateDNSZoneResponse{} resp, err := l.Poller.pt.PollUntilDone(ctx, freq, &respType.WorkloadNetworkDNSZone) if err != nil { return respType, err } respType.RawResponse = resp return respType, nil } // Resume rehydrates a WorkloadNetworksClientCreateDNSZonePollerResponse from the provided client and resume token. func (l *WorkloadNetworksClientCreateDNSZonePollerResponse) Resume(ctx context.Context, client *WorkloadNetworksClient, token string) error { pt, err := armruntime.NewPollerFromResumeToken("WorkloadNetworksClient.CreateDNSZone", token, client.pl) if err != nil { return err } poller := &WorkloadNetworksClientCreateDNSZonePoller{ pt: pt, } resp, err := poller.Poll(ctx) if err != nil { return err } l.Poller = poller l.RawResponse = resp return nil } // WorkloadNetworksClientCreateDNSZoneResponse contains the response from method WorkloadNetworksClient.CreateDNSZone. 
type WorkloadNetworksClientCreateDNSZoneResponse struct { WorkloadNetworksClientCreateDNSZoneResult // RawResponse contains the underlying HTTP response. RawResponse *http.Response } // WorkloadNetworksClientCreateDNSZoneResult contains the result from method WorkloadNetworksClient.CreateDNSZone. type WorkloadNetworksClientCreateDNSZoneResult struct { WorkloadNetworkDNSZone } // WorkloadNetworksClientCreateDhcpPollerResponse contains the response from method WorkloadNetworksClient.CreateDhcp. type WorkloadNetworksClientCreateDhcpPollerResponse struct { // Poller contains an initialized poller. Poller *WorkloadNetworksClientCreateDhcpPoller // RawResponse contains the underlying HTTP response. RawResponse *http.Response } // PollUntilDone will poll the service endpoint until a terminal state is reached or an error is received. // freq: the time to wait between intervals in absence of a Retry-After header. Allowed minimum is one second. // A good starting value is 30 seconds. Note that some resources might benefit from a different value. func (l WorkloadNetworksClientCreateDhcpPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (WorkloadNetworksClientCreateDhcpResponse, error) { respType := WorkloadNetworksClientCreateDhcpResponse{} resp, err := l.Poller.pt.PollUntilDone(ctx, freq, &respType.WorkloadNetworkDhcp) if err != nil { return respType, err } respType.RawResponse = resp return respType, nil } // Resume rehydrates a WorkloadNetworksClientCreateDhcpPollerResponse from the provided client and resume token. 
func (l *WorkloadNetworksClientCreateDhcpPollerResponse) Resume(ctx context.Context, client *WorkloadNetworksClient, token string) error { pt, err := armruntime.NewPollerFromResumeToken("WorkloadNetworksClient.CreateDhcp", token, client.pl) if err != nil { return err } poller := &WorkloadNetworksClientCreateDhcpPoller{ pt: pt, } resp, err := poller.Poll(ctx) if err != nil { return err } l.Poller = poller l.RawResponse = resp return nil } // WorkloadNetworksClientCreateDhcpResponse contains the response from method WorkloadNetworksClient.CreateDhcp. type WorkloadNetworksClientCreateDhcpResponse struct { WorkloadNetworksClientCreateDhcpResult // RawResponse contains the underlying HTTP response. RawResponse *http.Response } // WorkloadNetworksClientCreateDhcpResult contains the result from method WorkloadNetworksClient.CreateDhcp. type WorkloadNetworksClientCreateDhcpResult struct { WorkloadNetworkDhcp } // WorkloadNetworksClientCreatePortMirroringPollerResponse contains the response from method WorkloadNetworksClient.CreatePortMirroring. type WorkloadNetworksClientCreatePortMirroringPollerResponse struct { // Poller contains an initialized poller. Poller *WorkloadNetworksClientCreatePortMirroringPoller // RawResponse contains the underlying HTTP response. RawResponse *http.Response } // PollUntilDone will poll the service endpoint until a terminal state is reached or an error is received. // freq: the time to wait between intervals in absence of a Retry-After header. Allowed minimum is one second. // A good starting value is 30 seconds. Note that some resources might benefit from a different value. 
func (l WorkloadNetworksClientCreatePortMirroringPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (WorkloadNetworksClientCreatePortMirroringResponse, error) { respType := WorkloadNetworksClientCreatePortMirroringResponse{} resp, err := l.Poller.pt.PollUntilDone(ctx, freq, &respType.WorkloadNetworkPortMirroring) if err != nil { return respType, err } respType.RawResponse = resp return respType, nil } // Resume rehydrates a WorkloadNetworksClientCreatePortMirroringPollerResponse from the provided client and resume token. func (l *WorkloadNetworksClientCreatePortMirroringPollerResponse) Resume(ctx context.Context, client *WorkloadNetworksClient, token string) error { pt, err := armruntime.NewPollerFromResumeToken("WorkloadNetworksClient.CreatePortMirroring", token, client.pl) if err != nil { return err } poller := &WorkloadNetworksClientCreatePortMirroringPoller{ pt: pt, } resp, err := poller.Poll(ctx) if err != nil { return err } l.Poller = poller l.RawResponse = resp return nil } // WorkloadNetworksClientCreatePortMirroringResponse contains the response from method WorkloadNetworksClient.CreatePortMirroring. type WorkloadNetworksClientCreatePortMirroringResponse struct { WorkloadNetworksClientCreatePortMirroringResult // RawResponse contains the underlying HTTP response. RawResponse *http.Response } // WorkloadNetworksClientCreatePortMirroringResult contains the result from method WorkloadNetworksClient.CreatePortMirroring. type WorkloadNetworksClientCreatePortMirroringResult struct { WorkloadNetworkPortMirroring } // WorkloadNetworksClientCreatePublicIPPollerResponse contains the response from method WorkloadNetworksClient.CreatePublicIP. type WorkloadNetworksClientCreatePublicIPPollerResponse struct { // Poller contains an initialized poller. Poller *WorkloadNetworksClientCreatePublicIPPoller // RawResponse contains the underlying HTTP response. 
RawResponse *http.Response } // PollUntilDone will poll the service endpoint until a terminal state is reached or an error is received. // freq: the time to wait between intervals in absence of a Retry-After header. Allowed minimum is one second. // A good starting value is 30 seconds. Note that some resources might benefit from a different value. func (l WorkloadNetworksClientCreatePublicIPPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (WorkloadNetworksClientCreatePublicIPResponse, error) { respType := WorkloadNetworksClientCreatePublicIPResponse{} resp, err := l.Poller.pt.PollUntilDone(ctx, freq, &respType.WorkloadNetworkPublicIP) if err != nil { return respType, err } respType.RawResponse = resp return respType, nil } // Resume rehydrates a WorkloadNetworksClientCreatePublicIPPollerResponse from the provided client and resume token. func (l *WorkloadNetworksClientCreatePublicIPPollerResponse) Resume(ctx context.Context, client *WorkloadNetworksClient, token string) error { pt, err := armruntime.NewPollerFromResumeToken("WorkloadNetworksClient.CreatePublicIP", token, client.pl) if err != nil { return err } poller := &WorkloadNetworksClientCreatePublicIPPoller{ pt: pt, } resp, err := poller.Poll(ctx) if err != nil { return err } l.Poller = poller l.RawResponse = resp return nil } // WorkloadNetworksClientCreatePublicIPResponse contains the response from method WorkloadNetworksClient.CreatePublicIP. type WorkloadNetworksClientCreatePublicIPResponse struct { WorkloadNetworksClientCreatePublicIPResult // RawResponse contains the underlying HTTP response. RawResponse *http.Response } // WorkloadNetworksClientCreatePublicIPResult contains the result from method WorkloadNetworksClient.CreatePublicIP. type WorkloadNetworksClientCreatePublicIPResult struct { WorkloadNetworkPublicIP } // WorkloadNetworksClientCreateSegmentsPollerResponse contains the response from method WorkloadNetworksClient.CreateSegments. 
type WorkloadNetworksClientCreateSegmentsPollerResponse struct { // Poller contains an initialized poller. Poller *WorkloadNetworksClientCreateSegmentsPoller // RawResponse contains the underlying HTTP response. RawResponse *http.Response } // PollUntilDone will poll the service endpoint until a terminal state is reached or an error is received. // freq: the time to wait between intervals in absence of a Retry-After header. Allowed minimum is one second. // A good starting value is 30 seconds. Note that some resources might benefit from a different value. func (l WorkloadNetworksClientCreateSegmentsPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (WorkloadNetworksClientCreateSegmentsResponse, error) { respType := WorkloadNetworksClientCreateSegmentsResponse{} resp, err := l.Poller.pt.PollUntilDone(ctx, freq, &respType.WorkloadNetworkSegment) if err != nil { return respType, err } respType.RawResponse = resp return respType, nil } // Resume rehydrates a WorkloadNetworksClientCreateSegmentsPollerResponse from the provided client and resume token. func (l *WorkloadNetworksClientCreateSegmentsPollerResponse) Resume(ctx context.Context, client *WorkloadNetworksClient, token string) error { pt, err := armruntime.NewPollerFromResumeToken("WorkloadNetworksClient.CreateSegments", token, client.pl) if err != nil { return err } poller := &WorkloadNetworksClientCreateSegmentsPoller{ pt: pt, } resp, err := poller.Poll(ctx) if err != nil { return err } l.Poller = poller l.RawResponse = resp return nil } // WorkloadNetworksClientCreateSegmentsResponse contains the response from method WorkloadNetworksClient.CreateSegments. type WorkloadNetworksClientCreateSegmentsResponse struct { WorkloadNetworksClientCreateSegmentsResult // RawResponse contains the underlying HTTP response. RawResponse *http.Response } // WorkloadNetworksClientCreateSegmentsResult contains the result from method WorkloadNetworksClient.CreateSegments. 
type WorkloadNetworksClientCreateSegmentsResult struct { WorkloadNetworkSegment } // WorkloadNetworksClientCreateVMGroupPollerResponse contains the response from method WorkloadNetworksClient.CreateVMGroup. type WorkloadNetworksClientCreateVMGroupPollerResponse struct { // Poller contains an initialized poller. Poller *WorkloadNetworksClientCreateVMGroupPoller // RawResponse contains the underlying HTTP response. RawResponse *http.Response } // PollUntilDone will poll the service endpoint until a terminal state is reached or an error is received. // freq: the time to wait between intervals in absence of a Retry-After header. Allowed minimum is one second. // A good starting value is 30 seconds. Note that some resources might benefit from a different value. func (l WorkloadNetworksClientCreateVMGroupPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (WorkloadNetworksClientCreateVMGroupResponse, error) { respType := WorkloadNetworksClientCreateVMGroupResponse{} resp, err := l.Poller.pt.PollUntilDone(ctx, freq, &respType.WorkloadNetworkVMGroup) if err != nil { return respType, err } respType.RawResponse = resp return respType, nil } // Resume rehydrates a WorkloadNetworksClientCreateVMGroupPollerResponse from the provided client and resume token. func (l *WorkloadNetworksClientCreateVMGroupPollerResponse) Resume(ctx context.Context, client *WorkloadNetworksClient, token string) error { pt, err := armruntime.NewPollerFromResumeToken("WorkloadNetworksClient.CreateVMGroup", token, client.pl) if err != nil { return err } poller := &WorkloadNetworksClientCreateVMGroupPoller{ pt: pt, } resp, err := poller.Poll(ctx) if err != nil { return err } l.Poller = poller l.RawResponse = resp return nil } // WorkloadNetworksClientCreateVMGroupResponse contains the response from method WorkloadNetworksClient.CreateVMGroup. 
type WorkloadNetworksClientCreateVMGroupResponse struct { WorkloadNetworksClientCreateVMGroupResult // RawResponse contains the underlying HTTP response. RawResponse *http.Response } // WorkloadNetworksClientCreateVMGroupResult contains the result from method WorkloadNetworksClient.CreateVMGroup. type WorkloadNetworksClientCreateVMGroupResult struct { WorkloadNetworkVMGroup } // WorkloadNetworksClientDeleteDNSServicePollerResponse contains the response from method WorkloadNetworksClient.DeleteDNSService. type WorkloadNetworksClientDeleteDNSServicePollerResponse struct { // Poller contains an initialized poller. Poller *WorkloadNetworksClientDeleteDNSServicePoller // RawResponse contains the underlying HTTP response. RawResponse *http.Response } // PollUntilDone will poll the service endpoint until a terminal state is reached or an error is received. // freq: the time to wait between intervals in absence of a Retry-After header. Allowed minimum is one second. // A good starting value is 30 seconds. Note that some resources might benefit from a different value. func (l WorkloadNetworksClientDeleteDNSServicePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (WorkloadNetworksClientDeleteDNSServiceResponse, error) { respType := WorkloadNetworksClientDeleteDNSServiceResponse{} resp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil) if err != nil { return respType, err } respType.RawResponse = resp return respType, nil } // Resume rehydrates a WorkloadNetworksClientDeleteDNSServicePollerResponse from the provided client and resume token. 
func (l *WorkloadNetworksClientDeleteDNSServicePollerResponse) Resume(ctx context.Context, client *WorkloadNetworksClient, token string) error { pt, err := armruntime.NewPollerFromResumeToken("WorkloadNetworksClient.DeleteDNSService", token, client.pl) if err != nil { return err } poller := &WorkloadNetworksClientDeleteDNSServicePoller{ pt: pt, } resp, err := poller.Poll(ctx) if err != nil { return err } l.Poller = poller l.RawResponse = resp return nil } // WorkloadNetworksClientDeleteDNSServiceResponse contains the response from method WorkloadNetworksClient.DeleteDNSService. type WorkloadNetworksClientDeleteDNSServiceResponse struct { // RawResponse contains the underlying HTTP response. RawResponse *http.Response } // WorkloadNetworksClientDeleteDNSZonePollerResponse contains the response from method WorkloadNetworksClient.DeleteDNSZone. type WorkloadNetworksClientDeleteDNSZonePollerResponse struct { // Poller contains an initialized poller. Poller *WorkloadNetworksClientDeleteDNSZonePoller // RawResponse contains the underlying HTTP response. RawResponse *http.Response } // PollUntilDone will poll the service endpoint until a terminal state is reached or an error is received. // freq: the time to wait between intervals in absence of a Retry-After header. Allowed minimum is one second. // A good starting value is 30 seconds. Note that some resources might benefit from a different value. func (l WorkloadNetworksClientDeleteDNSZonePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (WorkloadNetworksClientDeleteDNSZoneResponse, error) { respType := WorkloadNetworksClientDeleteDNSZoneResponse{} resp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil) if err != nil { return respType, err } respType.RawResponse = resp return respType, nil } // Resume rehydrates a WorkloadNetworksClientDeleteDNSZonePollerResponse from the provided client and resume token. 
func (l *WorkloadNetworksClientDeleteDNSZonePollerResponse) Resume(ctx context.Context, client *WorkloadNetworksClient, token string) error { pt, err := armruntime.NewPollerFromResumeToken("WorkloadNetworksClient.DeleteDNSZone", token, client.pl) if err != nil { return err } poller := &WorkloadNetworksClientDeleteDNSZonePoller{ pt: pt, } resp, err := poller.Poll(ctx) if err != nil { return err } l.Poller = poller l.RawResponse = resp return nil } // WorkloadNetworksClientDeleteDNSZoneResponse contains the response from method WorkloadNetworksClient.DeleteDNSZone. type WorkloadNetworksClientDeleteDNSZoneResponse struct { // RawResponse contains the underlying HTTP response. RawResponse *http.Response } // WorkloadNetworksClientDeleteDhcpPollerResponse contains the response from method WorkloadNetworksClient.DeleteDhcp. type WorkloadNetworksClientDeleteDhcpPollerResponse struct { // Poller contains an initialized poller. Poller *WorkloadNetworksClientDeleteDhcpPoller // RawResponse contains the underlying HTTP response. RawResponse *http.Response } // PollUntilDone will poll the service endpoint until a terminal state is reached or an error is received. // freq: the time to wait between intervals in absence of a Retry-After header. Allowed minimum is one second. // A good starting value is 30 seconds. Note that some resources might benefit from a different value. func (l WorkloadNetworksClientDeleteDhcpPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (WorkloadNetworksClientDeleteDhcpResponse, error) { respType := WorkloadNetworksClientDeleteDhcpResponse{} resp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil) if err != nil { return respType, err } respType.RawResponse = resp return respType, nil } // Resume rehydrates a WorkloadNetworksClientDeleteDhcpPollerResponse from the provided client and resume token. 
func (l *WorkloadNetworksClientDeleteDhcpPollerResponse) Resume(ctx context.Context, client *WorkloadNetworksClient, token string) error { pt, err := armruntime.NewPollerFromResumeToken("WorkloadNetworksClient.DeleteDhcp", token, client.pl) if err != nil { return err } poller := &WorkloadNetworksClientDeleteDhcpPoller{ pt: pt, } resp, err := poller.Poll(ctx) if err != nil { return err } l.Poller = poller l.RawResponse = resp return nil } // WorkloadNetworksClientDeleteDhcpResponse contains the response from method WorkloadNetworksClient.DeleteDhcp. type WorkloadNetworksClientDeleteDhcpResponse struct { // RawResponse contains the underlying HTTP response. RawResponse *http.Response } // WorkloadNetworksClientDeletePortMirroringPollerResponse contains the response from method WorkloadNetworksClient.DeletePortMirroring. type WorkloadNetworksClientDeletePortMirroringPollerResponse struct { // Poller contains an initialized poller. Poller *WorkloadNetworksClientDeletePortMirroringPoller // RawResponse contains the underlying HTTP response. RawResponse *http.Response } // PollUntilDone will poll the service endpoint until a terminal state is reached or an error is received. // freq: the time to wait between intervals in absence of a Retry-After header. Allowed minimum is one second. // A good starting value is 30 seconds. Note that some resources might benefit from a different value. func (l WorkloadNetworksClientDeletePortMirroringPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (WorkloadNetworksClientDeletePortMirroringResponse, error) { respType := WorkloadNetworksClientDeletePortMirroringResponse{} resp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil) if err != nil { return respType, err } respType.RawResponse = resp return respType, nil } // Resume rehydrates a WorkloadNetworksClientDeletePortMirroringPollerResponse from the provided client and resume token. 
func (l *WorkloadNetworksClientDeletePortMirroringPollerResponse) Resume(ctx context.Context, client *WorkloadNetworksClient, token string) error { pt, err := armruntime.NewPollerFromResumeToken("WorkloadNetworksClient.DeletePortMirroring", token, client.pl) if err != nil { return err } poller := &WorkloadNetworksClientDeletePortMirroringPoller{ pt: pt, } resp, err := poller.Poll(ctx) if err != nil { return err } l.Poller = poller l.RawResponse = resp return nil } // WorkloadNetworksClientDeletePortMirroringResponse contains the response from method WorkloadNetworksClient.DeletePortMirroring. type WorkloadNetworksClientDeletePortMirroringResponse struct { // RawResponse contains the underlying HTTP response. RawResponse *http.Response } // WorkloadNetworksClientDeletePublicIPPollerResponse contains the response from method WorkloadNetworksClient.DeletePublicIP. type WorkloadNetworksClientDeletePublicIPPollerResponse struct { // Poller contains an initialized poller. Poller *WorkloadNetworksClientDeletePublicIPPoller // RawResponse contains the underlying HTTP response. RawResponse *http.Response } // PollUntilDone will poll the service endpoint until a terminal state is reached or an error is received. // freq: the time to wait between intervals in absence of a Retry-After header. Allowed minimum is one second. // A good starting value is 30 seconds. Note that some resources might benefit from a different value. func (l WorkloadNetworksClientDeletePublicIPPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (WorkloadNetworksClientDeletePublicIPResponse, error) { respType := WorkloadNetworksClientDeletePublicIPResponse{} resp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil) if err != nil { return respType, err } respType.RawResponse = resp return respType, nil } // Resume rehydrates a WorkloadNetworksClientDeletePublicIPPollerResponse from the provided client and resume token. 
func (l *WorkloadNetworksClientDeletePublicIPPollerResponse) Resume(ctx context.Context, client *WorkloadNetworksClient, token string) error { pt, err := armruntime.NewPollerFromResumeToken("WorkloadNetworksClient.DeletePublicIP", token, client.pl) if err != nil { return err } poller := &WorkloadNetworksClientDeletePublicIPPoller{ pt: pt, } resp, err := poller.Poll(ctx) if err != nil { return err } l.Poller = poller l.RawResponse = resp return nil } // WorkloadNetworksClientDeletePublicIPResponse contains the response from method WorkloadNetworksClient.DeletePublicIP. type WorkloadNetworksClientDeletePublicIPResponse struct { // RawResponse contains the underlying HTTP response. RawResponse *http.Response } // WorkloadNetworksClientDeleteSegmentPollerResponse contains the response from method WorkloadNetworksClient.DeleteSegment. type WorkloadNetworksClientDeleteSegmentPollerResponse struct { // Poller contains an initialized poller. Poller *WorkloadNetworksClientDeleteSegmentPoller // RawResponse contains the underlying HTTP response. RawResponse *http.Response } // PollUntilDone will poll the service endpoint until a terminal state is reached or an error is received. // freq: the time to wait between intervals in absence of a Retry-After header. Allowed minimum is one second. // A good starting value is 30 seconds. Note that some resources might benefit from a different value. func (l WorkloadNetworksClientDeleteSegmentPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (WorkloadNetworksClientDeleteSegmentResponse, error) { respType := WorkloadNetworksClientDeleteSegmentResponse{} resp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil) if err != nil { return respType, err } respType.RawResponse = resp return respType, nil } // Resume rehydrates a WorkloadNetworksClientDeleteSegmentPollerResponse from the provided client and resume token. 
func (l *WorkloadNetworksClientDeleteSegmentPollerResponse) Resume(ctx context.Context, client *WorkloadNetworksClient, token string) error { pt, err := armruntime.NewPollerFromResumeToken("WorkloadNetworksClient.DeleteSegment", token, client.pl) if err != nil { return err } poller := &WorkloadNetworksClientDeleteSegmentPoller{ pt: pt, } resp, err := poller.Poll(ctx) if err != nil { return err } l.Poller = poller l.RawResponse = resp return nil } // WorkloadNetworksClientDeleteSegmentResponse contains the response from method WorkloadNetworksClient.DeleteSegment. type WorkloadNetworksClientDeleteSegmentResponse struct { // RawResponse contains the underlying HTTP response. RawResponse *http.Response } // WorkloadNetworksClientDeleteVMGroupPollerResponse contains the response from method WorkloadNetworksClient.DeleteVMGroup. type WorkloadNetworksClientDeleteVMGroupPollerResponse struct { // Poller contains an initialized poller. Poller *WorkloadNetworksClientDeleteVMGroupPoller // RawResponse contains the underlying HTTP response. RawResponse *http.Response } // PollUntilDone will poll the service endpoint until a terminal state is reached or an error is received. // freq: the time to wait between intervals in absence of a Retry-After header. Allowed minimum is one second. // A good starting value is 30 seconds. Note that some resources might benefit from a different value. func (l WorkloadNetworksClientDeleteVMGroupPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (WorkloadNetworksClientDeleteVMGroupResponse, error) { respType := WorkloadNetworksClientDeleteVMGroupResponse{} resp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil) if err != nil { return respType, err } respType.RawResponse = resp return respType, nil } // Resume rehydrates a WorkloadNetworksClientDeleteVMGroupPollerResponse from the provided client and resume token. 
func (l *WorkloadNetworksClientDeleteVMGroupPollerResponse) Resume(ctx context.Context, client *WorkloadNetworksClient, token string) error { pt, err := armruntime.NewPollerFromResumeToken("WorkloadNetworksClient.DeleteVMGroup", token, client.pl) if err != nil { return err } poller := &WorkloadNetworksClientDeleteVMGroupPoller{ pt: pt, } resp, err := poller.Poll(ctx) if err != nil { return err } l.Poller = poller l.RawResponse = resp return nil } // WorkloadNetworksClientDeleteVMGroupResponse contains the response from method WorkloadNetworksClient.DeleteVMGroup. type WorkloadNetworksClientDeleteVMGroupResponse struct { // RawResponse contains the underlying HTTP response. RawResponse *http.Response } // WorkloadNetworksClientGetDNSServiceResponse contains the response from method WorkloadNetworksClient.GetDNSService. type WorkloadNetworksClientGetDNSServiceResponse struct { WorkloadNetworksClientGetDNSServiceResult // RawResponse contains the underlying HTTP response. RawResponse *http.Response } // WorkloadNetworksClientGetDNSServiceResult contains the result from method WorkloadNetworksClient.GetDNSService. type WorkloadNetworksClientGetDNSServiceResult struct { WorkloadNetworkDNSService } // WorkloadNetworksClientGetDNSZoneResponse contains the response from method WorkloadNetworksClient.GetDNSZone. type WorkloadNetworksClientGetDNSZoneResponse struct { WorkloadNetworksClientGetDNSZoneResult // RawResponse contains the underlying HTTP response. RawResponse *http.Response } // WorkloadNetworksClientGetDNSZoneResult contains the result from method WorkloadNetworksClient.GetDNSZone. type WorkloadNetworksClientGetDNSZoneResult struct { WorkloadNetworkDNSZone } // WorkloadNetworksClientGetDhcpResponse contains the response from method WorkloadNetworksClient.GetDhcp. type WorkloadNetworksClientGetDhcpResponse struct { WorkloadNetworksClientGetDhcpResult // RawResponse contains the underlying HTTP response. 
RawResponse *http.Response } // WorkloadNetworksClientGetDhcpResult contains the result from method WorkloadNetworksClient.GetDhcp. type WorkloadNetworksClientGetDhcpResult struct { WorkloadNetworkDhcp } // WorkloadNetworksClientGetGatewayResponse contains the response from method WorkloadNetworksClient.GetGateway. type WorkloadNetworksClientGetGatewayResponse struct { WorkloadNetworksClientGetGatewayResult // RawResponse contains the underlying HTTP response. RawResponse *http.Response } // WorkloadNetworksClientGetGatewayResult contains the result from method WorkloadNetworksClient.GetGateway. type WorkloadNetworksClientGetGatewayResult struct { WorkloadNetworkGateway } // WorkloadNetworksClientGetPortMirroringResponse contains the response from method WorkloadNetworksClient.GetPortMirroring. type WorkloadNetworksClientGetPortMirroringResponse struct { WorkloadNetworksClientGetPortMirroringResult // RawResponse contains the underlying HTTP response. RawResponse *http.Response } // WorkloadNetworksClientGetPortMirroringResult contains the result from method WorkloadNetworksClient.GetPortMirroring. type WorkloadNetworksClientGetPortMirroringResult struct { WorkloadNetworkPortMirroring } // WorkloadNetworksClientGetPublicIPResponse contains the response from method WorkloadNetworksClient.GetPublicIP. type WorkloadNetworksClientGetPublicIPResponse struct { WorkloadNetworksClientGetPublicIPResult // RawResponse contains the underlying HTTP response. RawResponse *http.Response } // WorkloadNetworksClientGetPublicIPResult contains the result from method WorkloadNetworksClient.GetPublicIP. type WorkloadNetworksClientGetPublicIPResult struct { WorkloadNetworkPublicIP } // WorkloadNetworksClientGetSegmentResponse contains the response from method WorkloadNetworksClient.GetSegment. type WorkloadNetworksClientGetSegmentResponse struct { WorkloadNetworksClientGetSegmentResult // RawResponse contains the underlying HTTP response. 
RawResponse *http.Response } // WorkloadNetworksClientGetSegmentResult contains the result from method WorkloadNetworksClient.GetSegment. type WorkloadNetworksClientGetSegmentResult struct { WorkloadNetworkSegment } // WorkloadNetworksClientGetVMGroupResponse contains the response from method WorkloadNetworksClient.GetVMGroup. type WorkloadNetworksClientGetVMGroupResponse struct { WorkloadNetworksClientGetVMGroupResult // RawResponse contains the underlying HTTP response. RawResponse *http.Response } // WorkloadNetworksClientGetVMGroupResult contains the result from method WorkloadNetworksClient.GetVMGroup. type WorkloadNetworksClientGetVMGroupResult struct { WorkloadNetworkVMGroup } // WorkloadNetworksClientGetVirtualMachineResponse contains the response from method WorkloadNetworksClient.GetVirtualMachine. type WorkloadNetworksClientGetVirtualMachineResponse struct { WorkloadNetworksClientGetVirtualMachineResult // RawResponse contains the underlying HTTP response. RawResponse *http.Response } // WorkloadNetworksClientGetVirtualMachineResult contains the result from method WorkloadNetworksClient.GetVirtualMachine. type WorkloadNetworksClientGetVirtualMachineResult struct { WorkloadNetworkVirtualMachine } // WorkloadNetworksClientListDNSServicesResponse contains the response from method WorkloadNetworksClient.ListDNSServices. type WorkloadNetworksClientListDNSServicesResponse struct { WorkloadNetworksClientListDNSServicesResult // RawResponse contains the underlying HTTP response. RawResponse *http.Response } // WorkloadNetworksClientListDNSServicesResult contains the result from method WorkloadNetworksClient.ListDNSServices. type WorkloadNetworksClientListDNSServicesResult struct { WorkloadNetworkDNSServicesList } // WorkloadNetworksClientListDNSZonesResponse contains the response from method WorkloadNetworksClient.ListDNSZones. 
type WorkloadNetworksClientListDNSZonesResponse struct { WorkloadNetworksClientListDNSZonesResult // RawResponse contains the underlying HTTP response. RawResponse *http.Response } // WorkloadNetworksClientListDNSZonesResult contains the result from method WorkloadNetworksClient.ListDNSZones. type WorkloadNetworksClientListDNSZonesResult struct { WorkloadNetworkDNSZonesList } // WorkloadNetworksClientListDhcpResponse contains the response from method WorkloadNetworksClient.ListDhcp. type WorkloadNetworksClientListDhcpResponse struct { WorkloadNetworksClientListDhcpResult // RawResponse contains the underlying HTTP response. RawResponse *http.Response } // WorkloadNetworksClientListDhcpResult contains the result from method WorkloadNetworksClient.ListDhcp. type WorkloadNetworksClientListDhcpResult struct { WorkloadNetworkDhcpList } // WorkloadNetworksClientListGatewaysResponse contains the response from method WorkloadNetworksClient.ListGateways. type WorkloadNetworksClientListGatewaysResponse struct { WorkloadNetworksClientListGatewaysResult // RawResponse contains the underlying HTTP response. RawResponse *http.Response } // WorkloadNetworksClientListGatewaysResult contains the result from method WorkloadNetworksClient.ListGateways. type WorkloadNetworksClientListGatewaysResult struct { WorkloadNetworkGatewayList } // WorkloadNetworksClientListPortMirroringResponse contains the response from method WorkloadNetworksClient.ListPortMirroring. type WorkloadNetworksClientListPortMirroringResponse struct { WorkloadNetworksClientListPortMirroringResult // RawResponse contains the underlying HTTP response. RawResponse *http.Response } // WorkloadNetworksClientListPortMirroringResult contains the result from method WorkloadNetworksClient.ListPortMirroring. type WorkloadNetworksClientListPortMirroringResult struct { WorkloadNetworkPortMirroringList } // WorkloadNetworksClientListPublicIPsResponse contains the response from method WorkloadNetworksClient.ListPublicIPs. 
type WorkloadNetworksClientListPublicIPsResponse struct { WorkloadNetworksClientListPublicIPsResult // RawResponse contains the underlying HTTP response. RawResponse *http.Response } // WorkloadNetworksClientListPublicIPsResult contains the result from method WorkloadNetworksClient.ListPublicIPs. type WorkloadNetworksClientListPublicIPsResult struct { WorkloadNetworkPublicIPsList } // WorkloadNetworksClientListSegmentsResponse contains the response from method WorkloadNetworksClient.ListSegments. type WorkloadNetworksClientListSegmentsResponse struct { WorkloadNetworksClientListSegmentsResult // RawResponse contains the underlying HTTP response. RawResponse *http.Response } // WorkloadNetworksClientListSegmentsResult contains the result from method WorkloadNetworksClient.ListSegments. type WorkloadNetworksClientListSegmentsResult struct { WorkloadNetworkSegmentsList } // WorkloadNetworksClientListVMGroupsResponse contains the response from method WorkloadNetworksClient.ListVMGroups. type WorkloadNetworksClientListVMGroupsResponse struct { WorkloadNetworksClientListVMGroupsResult // RawResponse contains the underlying HTTP response. RawResponse *http.Response } // WorkloadNetworksClientListVMGroupsResult contains the result from method WorkloadNetworksClient.ListVMGroups. type WorkloadNetworksClientListVMGroupsResult struct { WorkloadNetworkVMGroupsList } // WorkloadNetworksClientListVirtualMachinesResponse contains the response from method WorkloadNetworksClient.ListVirtualMachines. type WorkloadNetworksClientListVirtualMachinesResponse struct { WorkloadNetworksClientListVirtualMachinesResult // RawResponse contains the underlying HTTP response. RawResponse *http.Response } // WorkloadNetworksClientListVirtualMachinesResult contains the result from method WorkloadNetworksClient.ListVirtualMachines. 
type WorkloadNetworksClientListVirtualMachinesResult struct { WorkloadNetworkVirtualMachinesList } // WorkloadNetworksClientUpdateDNSServicePollerResponse contains the response from method WorkloadNetworksClient.UpdateDNSService. type WorkloadNetworksClientUpdateDNSServicePollerResponse struct { // Poller contains an initialized poller. Poller *WorkloadNetworksClientUpdateDNSServicePoller // RawResponse contains the underlying HTTP response. RawResponse *http.Response } // PollUntilDone will poll the service endpoint until a terminal state is reached or an error is received. // freq: the time to wait between intervals in absence of a Retry-After header. Allowed minimum is one second. // A good starting value is 30 seconds. Note that some resources might benefit from a different value. func (l WorkloadNetworksClientUpdateDNSServicePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (WorkloadNetworksClientUpdateDNSServiceResponse, error) { respType := WorkloadNetworksClientUpdateDNSServiceResponse{} resp, err := l.Poller.pt.PollUntilDone(ctx, freq, &respType.WorkloadNetworkDNSService) if err != nil { return respType, err } respType.RawResponse = resp return respType, nil } // Resume rehydrates a WorkloadNetworksClientUpdateDNSServicePollerResponse from the provided client and resume token. func (l *WorkloadNetworksClientUpdateDNSServicePollerResponse) Resume(ctx context.Context, client *WorkloadNetworksClient, token string) error { pt, err := armruntime.NewPollerFromResumeToken("WorkloadNetworksClient.UpdateDNSService", token, client.pl) if err != nil { return err } poller := &WorkloadNetworksClientUpdateDNSServicePoller{ pt: pt, } resp, err := poller.Poll(ctx) if err != nil { return err } l.Poller = poller l.RawResponse = resp return nil } // WorkloadNetworksClientUpdateDNSServiceResponse contains the response from method WorkloadNetworksClient.UpdateDNSService. 
type WorkloadNetworksClientUpdateDNSServiceResponse struct { WorkloadNetworksClientUpdateDNSServiceResult // RawResponse contains the underlying HTTP response. RawResponse *http.Response } // WorkloadNetworksClientUpdateDNSServiceResult contains the result from method WorkloadNetworksClient.UpdateDNSService. type WorkloadNetworksClientUpdateDNSServiceResult struct { WorkloadNetworkDNSService } // WorkloadNetworksClientUpdateDNSZonePollerResponse contains the response from method WorkloadNetworksClient.UpdateDNSZone. type WorkloadNetworksClientUpdateDNSZonePollerResponse struct { // Poller contains an initialized poller. Poller *WorkloadNetworksClientUpdateDNSZonePoller // RawResponse contains the underlying HTTP response. RawResponse *http.Response } // PollUntilDone will poll the service endpoint until a terminal state is reached or an error is received. // freq: the time to wait between intervals in absence of a Retry-After header. Allowed minimum is one second. // A good starting value is 30 seconds. Note that some resources might benefit from a different value. func (l WorkloadNetworksClientUpdateDNSZonePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (WorkloadNetworksClientUpdateDNSZoneResponse, error) { respType := WorkloadNetworksClientUpdateDNSZoneResponse{} resp, err := l.Poller.pt.PollUntilDone(ctx, freq, &respType.WorkloadNetworkDNSZone) if err != nil { return respType, err } respType.RawResponse = resp return respType, nil } // Resume rehydrates a WorkloadNetworksClientUpdateDNSZonePollerResponse from the provided client and resume token. 
func (l *WorkloadNetworksClientUpdateDNSZonePollerResponse) Resume(ctx context.Context, client *WorkloadNetworksClient, token string) error { pt, err := armruntime.NewPollerFromResumeToken("WorkloadNetworksClient.UpdateDNSZone", token, client.pl) if err != nil { return err } poller := &WorkloadNetworksClientUpdateDNSZonePoller{ pt: pt, } resp, err := poller.Poll(ctx) if err != nil { return err } l.Poller = poller l.RawResponse = resp return nil } // WorkloadNetworksClientUpdateDNSZoneResponse contains the response from method WorkloadNetworksClient.UpdateDNSZone. type WorkloadNetworksClientUpdateDNSZoneResponse struct { WorkloadNetworksClientUpdateDNSZoneResult // RawResponse contains the underlying HTTP response. RawResponse *http.Response } // WorkloadNetworksClientUpdateDNSZoneResult contains the result from method WorkloadNetworksClient.UpdateDNSZone. type WorkloadNetworksClientUpdateDNSZoneResult struct { WorkloadNetworkDNSZone } // WorkloadNetworksClientUpdateDhcpPollerResponse contains the response from method WorkloadNetworksClient.UpdateDhcp. type WorkloadNetworksClientUpdateDhcpPollerResponse struct { // Poller contains an initialized poller. Poller *WorkloadNetworksClientUpdateDhcpPoller // RawResponse contains the underlying HTTP response. RawResponse *http.Response } // PollUntilDone will poll the service endpoint until a terminal state is reached or an error is received. // freq: the time to wait between intervals in absence of a Retry-After header. Allowed minimum is one second. // A good starting value is 30 seconds. Note that some resources might benefit from a different value. 
func (l WorkloadNetworksClientUpdateDhcpPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (WorkloadNetworksClientUpdateDhcpResponse, error) { respType := WorkloadNetworksClientUpdateDhcpResponse{} resp, err := l.Poller.pt.PollUntilDone(ctx, freq, &respType.WorkloadNetworkDhcp) if err != nil { return respType, err } respType.RawResponse = resp return respType, nil } // Resume rehydrates a WorkloadNetworksClientUpdateDhcpPollerResponse from the provided client and resume token. func (l *WorkloadNetworksClientUpdateDhcpPollerResponse) Resume(ctx context.Context, client *WorkloadNetworksClient, token string) error { pt, err := armruntime.NewPollerFromResumeToken("WorkloadNetworksClient.UpdateDhcp", token, client.pl) if err != nil { return err } poller := &WorkloadNetworksClientUpdateDhcpPoller{ pt: pt, } resp, err := poller.Poll(ctx) if err != nil { return err } l.Poller = poller l.RawResponse = resp return nil } // WorkloadNetworksClientUpdateDhcpResponse contains the response from method WorkloadNetworksClient.UpdateDhcp. type WorkloadNetworksClientUpdateDhcpResponse struct { WorkloadNetworksClientUpdateDhcpResult // RawResponse contains the underlying HTTP response. RawResponse *http.Response } // WorkloadNetworksClientUpdateDhcpResult contains the result from method WorkloadNetworksClient.UpdateDhcp. type WorkloadNetworksClientUpdateDhcpResult struct { WorkloadNetworkDhcp } // WorkloadNetworksClientUpdatePortMirroringPollerResponse contains the response from method WorkloadNetworksClient.UpdatePortMirroring. type WorkloadNetworksClientUpdatePortMirroringPollerResponse struct { // Poller contains an initialized poller. Poller *WorkloadNetworksClientUpdatePortMirroringPoller // RawResponse contains the underlying HTTP response. RawResponse *http.Response } // PollUntilDone will poll the service endpoint until a terminal state is reached or an error is received. // freq: the time to wait between intervals in absence of a Retry-After header. 
Allowed minimum is one second. // A good starting value is 30 seconds. Note that some resources might benefit from a different value. func (l WorkloadNetworksClientUpdatePortMirroringPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (WorkloadNetworksClientUpdatePortMirroringResponse, error) { respType := WorkloadNetworksClientUpdatePortMirroringResponse{} resp, err := l.Poller.pt.PollUntilDone(ctx, freq, &respType.WorkloadNetworkPortMirroring) if err != nil { return respType, err } respType.RawResponse = resp return respType, nil } // Resume rehydrates a WorkloadNetworksClientUpdatePortMirroringPollerResponse from the provided client and resume token. func (l *WorkloadNetworksClientUpdatePortMirroringPollerResponse) Resume(ctx context.Context, client *WorkloadNetworksClient, token string) error { pt, err := armruntime.NewPollerFromResumeToken("WorkloadNetworksClient.UpdatePortMirroring", token, client.pl) if err != nil { return err } poller := &WorkloadNetworksClientUpdatePortMirroringPoller{ pt: pt, } resp, err := poller.Poll(ctx) if err != nil { return err } l.Poller = poller l.RawResponse = resp return nil } // WorkloadNetworksClientUpdatePortMirroringResponse contains the response from method WorkloadNetworksClient.UpdatePortMirroring. type WorkloadNetworksClientUpdatePortMirroringResponse struct { WorkloadNetworksClientUpdatePortMirroringResult // RawResponse contains the underlying HTTP response. RawResponse *http.Response } // WorkloadNetworksClientUpdatePortMirroringResult contains the result from method WorkloadNetworksClient.UpdatePortMirroring. type WorkloadNetworksClientUpdatePortMirroringResult struct { WorkloadNetworkPortMirroring } // WorkloadNetworksClientUpdateSegmentsPollerResponse contains the response from method WorkloadNetworksClient.UpdateSegments. type WorkloadNetworksClientUpdateSegmentsPollerResponse struct { // Poller contains an initialized poller. 
Poller *WorkloadNetworksClientUpdateSegmentsPoller // RawResponse contains the underlying HTTP response. RawResponse *http.Response } // PollUntilDone will poll the service endpoint until a terminal state is reached or an error is received. // freq: the time to wait between intervals in absence of a Retry-After header. Allowed minimum is one second. // A good starting value is 30 seconds. Note that some resources might benefit from a different value. func (l WorkloadNetworksClientUpdateSegmentsPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (WorkloadNetworksClientUpdateSegmentsResponse, error) { respType := WorkloadNetworksClientUpdateSegmentsResponse{} resp, err := l.Poller.pt.PollUntilDone(ctx, freq, &respType.WorkloadNetworkSegment) if err != nil { return respType, err } respType.RawResponse = resp return respType, nil } // Resume rehydrates a WorkloadNetworksClientUpdateSegmentsPollerResponse from the provided client and resume token. func (l *WorkloadNetworksClientUpdateSegmentsPollerResponse) Resume(ctx context.Context, client *WorkloadNetworksClient, token string) error { pt, err := armruntime.NewPollerFromResumeToken("WorkloadNetworksClient.UpdateSegments", token, client.pl) if err != nil { return err } poller := &WorkloadNetworksClientUpdateSegmentsPoller{ pt: pt, } resp, err := poller.Poll(ctx) if err != nil { return err } l.Poller = poller l.RawResponse = resp return nil } // WorkloadNetworksClientUpdateSegmentsResponse contains the response from method WorkloadNetworksClient.UpdateSegments. type WorkloadNetworksClientUpdateSegmentsResponse struct { WorkloadNetworksClientUpdateSegmentsResult // RawResponse contains the underlying HTTP response. RawResponse *http.Response } // WorkloadNetworksClientUpdateSegmentsResult contains the result from method WorkloadNetworksClient.UpdateSegments. 
type WorkloadNetworksClientUpdateSegmentsResult struct { WorkloadNetworkSegment } // WorkloadNetworksClientUpdateVMGroupPollerResponse contains the response from method WorkloadNetworksClient.UpdateVMGroup. type WorkloadNetworksClientUpdateVMGroupPollerResponse struct { // Poller contains an initialized poller. Poller *WorkloadNetworksClientUpdateVMGroupPoller // RawResponse contains the underlying HTTP response. RawResponse *http.Response } // PollUntilDone will poll the service endpoint until a terminal state is reached or an error is received. // freq: the time to wait between intervals in absence of a Retry-After header. Allowed minimum is one second. // A good starting value is 30 seconds. Note that some resources might benefit from a different value. func (l WorkloadNetworksClientUpdateVMGroupPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (WorkloadNetworksClientUpdateVMGroupResponse, error) { respType := WorkloadNetworksClientUpdateVMGroupResponse{} resp, err := l.Poller.pt.PollUntilDone(ctx, freq, &respType.WorkloadNetworkVMGroup) if err != nil { return respType, err } respType.RawResponse = resp return respType, nil } // Resume rehydrates a WorkloadNetworksClientUpdateVMGroupPollerResponse from the provided client and resume token. func (l *WorkloadNetworksClientUpdateVMGroupPollerResponse) Resume(ctx context.Context, client *WorkloadNetworksClient, token string) error { pt, err := armruntime.NewPollerFromResumeToken("WorkloadNetworksClient.UpdateVMGroup", token, client.pl) if err != nil { return err } poller := &WorkloadNetworksClientUpdateVMGroupPoller{ pt: pt, } resp, err := poller.Poll(ctx) if err != nil { return err } l.Poller = poller l.RawResponse = resp return nil } // WorkloadNetworksClientUpdateVMGroupResponse contains the response from method WorkloadNetworksClient.UpdateVMGroup. 
type WorkloadNetworksClientUpdateVMGroupResponse struct { WorkloadNetworksClientUpdateVMGroupResult // RawResponse contains the underlying HTTP response. RawResponse *http.Response } // WorkloadNetworksClientUpdateVMGroupResult contains the result from method WorkloadNetworksClient.UpdateVMGroup. type WorkloadNetworksClientUpdateVMGroupResult struct { WorkloadNetworkVMGroup }
Tedros-Box/tedros-apps
app-extensions/app-extensions-ejb/src/main/java/com/tedros/extension/server/base/bo/TExtensionBO.java
/**
 * Base business object for the Tedros extension module.
 */
package com.tedros.extension.server.base.bo;

import javax.enterprise.context.Dependent;
import javax.inject.Inject;

import com.tedros.ejb.base.bo.TGenericBO;
import com.tedros.ejb.base.eao.ITGenericEAO;
import com.tedros.ejb.base.entity.ITEntity;
import com.tedros.extension.server.base.eao.TExtensionEAO;

/**
 * Generic business object for extension entities.
 *
 * Extends {@link TGenericBO} and supplies it with a CDI-injected
 * {@link TExtensionEAO} so inherited CRUD operations run against the
 * extension persistence unit.
 *
 * @param <E> the entity type this BO manages
 *
 * @author <NAME>
 */
@Dependent
public class TExtensionBO<E extends ITEntity> extends TGenericBO<E> {

	// Data-access object injected by the CDI container; never null after construction.
	@Inject
	private TExtensionEAO<E> eao;

	/**
	 * @return the injected EAO used by the inherited generic operations
	 */
	@Override
	public ITGenericEAO<E> getEao() {
		return eao;
	}
}
insad-video/Ant-Media-Server
src/main/java/io/antmedia/console/rest/SupportRequest.java
<reponame>insad-video/Ant-Media-Server package io.antmedia.console.rest; public class SupportRequest { private String name; private String email; private String title; private String description; private boolean sendSystemInfo; public boolean isSendSystemInfo() { return sendSystemInfo; } public void setSendSystemInfo(boolean sendSystemInfo) { this.sendSystemInfo = sendSystemInfo; } public String getDescription() { return description; } public void setDescription(String description) { this.description = description; } public String getName() { return name; } public void setName(String name) { this.name = name; } public String getEmail() { return email; } public void setEmail(String email) { this.email = email; } public String getTitle() { return title; } public void setTitle(String title) { this.title = title; } }
wjiec/leetcode-solution
leetcode-java/src/daily/d210628p815busroutes/Solution.java
package daily.d210628p815busroutes;

import java.util.*;

/**
 * 815. Bus Routes
 *
 * https://leetcode-cn.com/problems/bus-routes/
 *
 * You are given an array routes representing bus routes where routes[i]
 * is a bus route that the ith bus repeats forever.
 *
 * For example, if routes[0] = [1, 5, 7], this means that the 0th bus travels
 * in the sequence 1 -> 5 -> 7 -> 1 -> 5 -> 7 -> 1 -> ... forever.
 *
 * You will start at the bus stop source (You are not on any bus initially),
 * and you want to go to the bus stop target. You can travel between bus stops by buses only.
 *
 * Return the least number of buses you must take to travel from source to target. Return -1 if it is not possible.
 */
public class Solution {

    /**
     * Level-order BFS over bus stops where each BFS level corresponds to
     * boarding one more bus. Every bus is boarded at most once and every
     * stop is enqueued at most once, so the whole search is linear in the
     * total number of (bus, stop) pairs.
     *
     * @param routes routes[i] lists the stops served by bus i
     * @param source starting stop (we are not on any bus initially)
     * @param target destination stop
     * @return minimum number of buses to ride, or -1 if target is unreachable
     */
    public int numBusesToDestination(int[][] routes, int source, int target) {
        if (source == target) return 0;

        // stop -> indices of the buses that serve it
        Map<Integer, List<Integer>> stopToBuses = new HashMap<>();
        for (int bus = 0; bus < routes.length; bus++) {
            for (int stop : routes[bus]) {
                stopToBuses.computeIfAbsent(stop, k -> new ArrayList<>()).add(bus);
            }
        }
        // If either endpoint is served by no bus, no trip can exist.
        if (!stopToBuses.containsKey(source) || !stopToBuses.containsKey(target)) return -1;

        boolean[] busSeen = new boolean[routes.length]; // buses already boarded
        Set<Integer> stopSeen = new HashSet<>();        // stops already expanded
        Queue<Integer> frontier = new ArrayDeque<>();
        stopSeen.add(source);
        frontier.add(source);

        // rides == number of buses taken to reach any stop produced at this level
        for (int rides = 1; !frontier.isEmpty(); rides++) {
            for (int levelSize = frontier.size(); levelSize > 0; levelSize--) {
                int stop = frontier.remove();
                for (int bus : stopToBuses.get(stop)) {
                    if (busSeen[bus]) continue; // this bus was already ridden
                    busSeen[bus] = true;
                    for (int next : routes[bus]) {
                        if (next == target) return rides;
                        if (stopSeen.add(next)) frontier.add(next);
                    }
                }
            }
        }
        return -1;
    }

    public static void main(String[] args) {
        assert new Solution().numBusesToDestination(new int[][]{{1,2,7}, {3,6,7}}, 1, 1) == 0;
        assert new Solution().numBusesToDestination(new int[][]{{1,2,7}, {3,6,7}}, 1, 6) == 2;
        assert new Solution().numBusesToDestination(new int[][]{{7,12}, {4,5,15}, {6}, {15,19}, {9,12,13}}, 15, 12) == -1;
    }
}
mpangas/DynamicCore-MineCraft
src/main/java/dev/me/bombies/dynamiccore/commands/commands/misc/skills/guis/SkillsGUICommand.java
package dev.me.bombies.dynamiccore.commands.commands.misc.skills.guis;

import dev.me.bombies.dynamiccore.constants.Config;
import dev.me.bombies.dynamiccore.constants.GUIs;
import dev.me.bombies.dynamiccore.utils.guibuilder.GUIBuilder;
import org.bukkit.Bukkit;
import org.bukkit.Material;
import org.bukkit.command.Command;
import org.bukkit.command.CommandExecutor;
import org.bukkit.command.CommandSender;
import org.bukkit.enchantments.Enchantment;
import org.bukkit.entity.Player;
import org.bukkit.inventory.Inventory;
import org.bukkit.inventory.ItemFlag;
import org.bukkit.inventory.ItemStack;
import org.bukkit.inventory.meta.ItemMeta;

import java.util.List;

/**
 * Command executor that opens the main "skills" GUI for a player.
 *
 * All layout (inventory size, button materials, names, lore, slots) is read
 * from the plugin {@link Config}; invalid configuration fails fast with an
 * exception so misconfiguration is visible in the server log.
 */
public class SkillsGUICommand implements CommandExecutor {

    @Override
    public boolean onCommand(CommandSender sender, Command command, String label, String[] args) {
        // GUI commands are only meaningful for in-game players.
        if (!(sender instanceof Player player)) {
            System.out.println("This command cannot be executed from console!");
            return true;
        }

        // Bukkit inventories must be a positive multiple of 9 slots.
        int invSize = Config.getInt(Config.SKILLS_GUI_SIZE);
        if (invSize % 9 != 0 || invSize <= 0)
            throw new IllegalArgumentException(invSize + " is an invalid GUI size");

        GUIBuilder builder = new GUIBuilder(player, GUIs.SKILLS_MAIN.toString(), invSize, true);

        Material miningButtonMaterial = Config.getMaterial(Config.SKILLS_MINING_MATERIAL);
        Material grindingButtonMaterial = Config.getMaterial(Config.SKILLS_GRINDING_MATERIAL);
        Material farmingButtonMaterial = Config.getMaterial(Config.SKILLS_FARMING_MATERIAL);

        // Each message names the button whose material failed to parse
        // (previously all three said "mining button" — copy-paste error).
        if (miningButtonMaterial == null)
            throw new NullPointerException("Material '"+Config.getString(Config.SKILLS_MINING_MATERIAL)+"' isn't a valid material for the mining button!");
        if (farmingButtonMaterial == null)
            throw new NullPointerException("Material '"+Config.getString(Config.SKILLS_FARMING_MATERIAL)+"' isn't a valid material for the farming button!");
        if (grindingButtonMaterial == null)
            throw new NullPointerException("Material '"+Config.getString(Config.SKILLS_GRINDING_MATERIAL)+"' isn't a valid material for the grinding button!");

        // Exactly three slots are required: mining, grinding, farming (in order).
        List<Integer> slots = Config.getIntList(Config.SKILLS_ITEM_SLOTS);
        if (slots.isEmpty())
            throw new NullPointerException("There were no item slots provided!");
        else if (slots.size() != 3)
            throw new IllegalArgumentException("Too much or not enough item slots were provided!");

        builder.setItem(
                miningButtonMaterial,
                Config.getColouredString(Config.SKILLS_MINING_NAME),
                Config.getLore(Config.SKILLS_MINING_LORE),
                slots.get(0),
                true,
                true
        );

        builder.setItem(
                grindingButtonMaterial,
                Config.getColouredString(Config.SKILLS_GRINDING_NAME),
                Config.getLore(Config.SKILLS_GRINDING_LORE),
                slots.get(1),
                true,
                true
        );

        builder.setItem(
                farmingButtonMaterial,
                Config.getColouredString(Config.SKILLS_FARMING_NAME),
                Config.getLore(Config.SKILLS_FARMING_LORE),
                slots.get(2),
                true,
                true
        );

        player.openInventory(builder.build());
        return true;
    }

    /**
     * Maps a skill level to its 0-based 21-level "series" index.
     * Level 0 maps to series 0; exact multiples of 21 belong to the
     * previous series (level 21 -> 0, level 42 -> 1, ...).
     */
    protected static int getSeriesIndex(int level) {
        if (level == 0)
            return 0;

        if (level % 21 == 0)
            return (level/21)-1;

        return level/21;
    }
}
pasmuss/cmssw
CondCore/Utilities/interface/PayloadToXML.h
<filename>CondCore/Utilities/interface/PayloadToXML.h #include <iostream> #include <string> #include <memory> #include <boost/python/class.hpp> #include <boost/python/module.hpp> #include <boost/python/init.hpp> #include <boost/python/def.hpp> #include <iostream> #include <string> #include <sstream> #include "boost/archive/xml_oarchive.hpp" #include "CondFormats/Serialization/interface/Serializable.h" #include "CondFormats/Serialization/interface/Archive.h" namespace cond { template<typename T> std::string convertToXML( const std::string &payloadData, const std::string &payloadType ) { std::unique_ptr< T > payload; std::stringbuf sdataBuf; sdataBuf.pubsetbuf( const_cast<char *> ( payloadData.c_str() ), payloadData.size() ); std::istream inBuffer( &sdataBuf ); eos::portable_iarchive ia( inBuffer ); payload.reset( new T ); ia >> (*payload); // now we have the object in memory, convert it to xml in a string and return it std::ostringstream outBuffer; boost::archive::xml_oarchive xmlResult( outBuffer ); xmlResult << boost::serialization::make_nvp( "cmsCondPayload", *payload ); return outBuffer.str(); } } // end namespace cond
jpmieville/sir
book_examples/program_2_7.py
#!/usr/bin/env python
"""
Although the SIR and SEIR model paradigms are a good approximation to the
epidemiological characteristics of many infectious diseases, such as measles or
influenza, other infections have a more complex natural history. As an example of
how such complexities can be accommodated in the model, will we consider
infections such as hepatitis B, herpes, or chickenpox, where a proportion of
infected individuals may become chronic carriers, transmitting infection at a low
rate for many years.

For diseases with carrier states, susceptible individuals can be infected by
either carriers or acutely infectious individuals. It is generally assumed that
the progress of infection within an individual is independent of their source of
infection; that is, those infected by acutely infectious individuals and those
infected by carriers are indistinguishable. A recently infected individual is
acutely (highly) infectious for a given period and then either recovers
completely or moves into the carrier class. Such dynamics lead to the following
model:

Equations

Parameters
μ is the per capita death rate, and the population level birth rate.
β is the transmission rate and incorporates the encounter rate between
susceptible and infectious individuals together with the probability of
transmission.
γ is called the removal or recovery rate, though often we are more interested
in its reciprocal (1/γ) which determines the average infectious period.
ε is the proportion reduction in transmission from carriers compared to
standard infectious individuals
q is the proportion of infected individuals that become carriers rather than
fully recover
Γ is the recovery rate associated with carriers; hence the reciprocal (1/Γ) is
the average time an individual is in the carrier class
S(0) is the initial proportion of the population that are susceptible.
I(0) is the initial proportion of the population that are infectious
C(0) is the initial proportion of the population that are carriers
All rates are specified in days.
Requirements. All parameters must be positive, and S(0)+I(0)+C(0) ≤ 1.
"""

####################################################################
###    This is the PYTHON version of program 2.7 from page 44 of   #
### "Modeling Infectious Disease in humans and animals"            #
### by Keeling & Rohani.                                           #
###                                                                #
### It is the SICR which includes a carrier class.                 #
####################################################################

###################################
### Written by <NAME>            #
### <EMAIL> (work)                #
### <EMAIL>                       #
###################################

import scipy.integrate as spi
import numpy as np
import pylab as pl

# Model parameters (rates per day)
beta = 0.2        # transmission rate
epsilon = 0.1     # relative infectiousness of carriers
gamma = 0.01      # recovery rate of acutely infectious individuals
Gamma = 0.001     # recovery rate of carriers
mu = 1 / (50 * 365.0)  # per-capita birth/death rate (50-year lifespan)
q = 0.4           # proportion of infections that become carriers
S0 = 0.1          # initial susceptible fraction
I0 = 1e-4         # initial infectious fraction
C0 = 1e-3         # initial carrier fraction
ND = 60 * 365     # simulation length in days (60 years)
TS = 1.0          # output time step (days)
INPUT = (S0, I0, C0)


def diff_eqs(INP, t):
    """The main set of equations"""
    Y = np.zeros((3))
    V = INP
    # dS/dt: births minus infection (by I and, reduced by epsilon, by C) minus deaths
    Y[0] = mu - beta * V[0] * (V[1] + epsilon * V[2]) - mu * V[0]
    # dI/dt: new infections minus recovery and death
    Y[1] = beta * V[0] * (V[1] + epsilon * V[2]) - gamma * V[1] - mu * V[1]
    # dC/dt: fraction q of recoveries become carriers; carriers recover at Gamma
    Y[2] = q * gamma * V[1] - Gamma * V[2] - mu * V[2]
    return Y  # For odeint


t_start = 0.0
t_end = ND
t_inc = TS
t_range = np.arange(t_start, t_end + t_inc, t_inc)
RES = spi.odeint(diff_eqs, INPUT, t_range)
# Recovered fraction follows from conservation: R = 1 - S - I - C
Rec = 1.0 - (RES[:, 0] + RES[:, 1] + RES[:, 2])

print(RES)

# Ploting
pl.subplot(311)
pl.plot(RES[:, 0], "-g", label="Susceptibles")
pl.title("Program_2_7.py")
pl.xlabel("Time")
pl.ylabel("Susceptibles")
pl.subplot(312)
pl.plot(RES[:, 1], "-r", label="Infectious")
pl.xlabel("Time")
pl.ylabel("Infected")
pl.subplot(313)
# BUG FIX: the carrier panel previously plotted RES[:, 1] (the infectious
# class); column 2 is the carrier class C.
pl.plot(RES[:, 2], "-m", label="Carriers")
pl.xlabel("Time")
pl.ylabel("Carriers")
pl.show()
GavinRay97/teiid
engine/src/main/java/org/teiid/query/sql/symbol/GroupSymbol.java
/*
 * Copyright Red Hat, Inc. and/or its affiliates
 * and other contributors as indicated by the @author tags and
 * the COPYRIGHT.txt file distributed with this work.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.teiid.query.sql.symbol;

import org.teiid.core.util.EquivalenceUtil;
import org.teiid.core.util.HashCodeUtil;
import org.teiid.query.QueryPlugin;
import org.teiid.query.metadata.TempMetadataAdapter;
import org.teiid.query.metadata.TempMetadataID;
import org.teiid.query.sql.LanguageVisitor;

/**
 * <p>This is the server's representation of a metadata group symbol.  The group
 * symbol has a name, an optional definition, and a reference to a real
 * metadata ID.  Typically, a GroupSymbol will be created only from a name and
 * possibly a definition if the group has an alias.  The metadata ID is
 * discovered only when resolving the query.
 *
 * <p>For example, if the original string contained a FROM clause such as
 * "FROM Group1 AS G, Group2", there would be two GroupSymbols created.  The
 * first would have name=G, definition=Group1 and the second would have
 * name=Group2, definition=null.
 */
public class GroupSymbol extends Symbol implements Comparable<GroupSymbol> {

    public static final String TEMP_GROUP_PREFIX = "#"; //$NON-NLS-1$

    /** Definition of the symbol, may be null */
    private String definition;

    /** Actual metadata ID */
    private Object metadataID;

    private boolean isTempTable;
    private boolean isGlobalTable;
    private boolean isProcedure;

    private String outputDefinition;

    //possible qualifier, not included with the short name
    //due do legacy choice this is ambiguous with schema and may be part of the name
    //TODO: refactor to be a proper schema reference
    private String qualifier;

    private Object checkMatViewStatus;

    /**
     * Construct a symbol with a name.
     * @param name Name of the symbol
     * @throws IllegalArgumentException If name is null
     */
    public GroupSymbol(String name) {
        super(name);
    }

    /**
     * Construct a symbol with a name.
     * @param name Name of the symbol
     * @param definition Definition of the symbol, may be null
     * @throws IllegalArgumentException If name is null
     */
    public GroupSymbol(String name, String definition) {
        super(name);
        setDefinition(definition);
    }

    // Used by clone() to avoid re-parsing the qualifier out of a full name.
    private GroupSymbol(String schema, String shortName, String definition) {
        this.qualifier = schema;
        this.setShortName(shortName);
        this.setDefinition(definition);
    }

    // Returns the model of a temp-metadata group, or null when the symbol is
    // unresolved or its metadata ID is not a TempMetadataID.
    public Object getModelMetadataId() {
        if (getMetadataID() instanceof TempMetadataID) {
            return ((TempMetadataID)getMetadataID()).getTableData().getModel();
        }
        return null;
    }

    // The underlying (non-alias) name: the definition when aliased, else the name.
    public String getNonCorrelationName() {
        if (this.definition == null) {
            return this.getName();
        }
        return this.getDefinition();
    }

    /**
     * Get the definition for the group symbol, which may be null
     * @return Group definition, may be null
     */
    public String getDefinition() {
        return definition;
    }

    /**
     * Set the definition for the group symbol, which may be null
     * @param definition Definition
     */
    public void setDefinition(String definition) {
        this.definition = definition;
        // keep the output definition in sync until explicitly overridden
        this.outputDefinition = definition;
    }

    /**
     * Get the metadata ID that this group symbol resolves to.  If
     * the group symbol has not been resolved yet, this will be null.
     * If the symbol has been resolved, this will never be null.
     * @return Metadata ID object
     */
    public Object getMetadataID() {
        return metadataID;
    }

    public void acceptVisitor(LanguageVisitor visitor) {
        visitor.visit(this);
    }

    /**
     * Set the metadata ID that this group symbol resolves to.  It cannot
     * be null.
     * @param metadataID Metadata ID object
     * @throws IllegalArgumentException If metadataID is null
     */
    public void setMetadataID(Object metadataID) {
        if(metadataID == null) {
            throw new IllegalArgumentException(QueryPlugin.Util.getString("ERR.015.010.0016")); //$NON-NLS-1$
        }
        // a "#"-prefixed name marks the symbol as a temp table once resolved
        if (this.isImplicitTempGroupSymbol()) {
            this.isTempTable = true;
        }
        this.metadataID = metadataID;
    }

    /**
     * Returns true if this symbol has been completely resolved with respect
     * to actual runtime metadata.  A resolved symbol has been validated that
     * it refers to actual metadata and will have references to the real metadata
     * IDs if necessary.  Different types of symbols determine their resolution
     * in different ways, so this method is abstract and must be implemented
     * by subclasses.
     * @return True if resolved with runtime metadata
     */
    public boolean isResolved() {
        return (metadataID != null);
    }

    /**
     * Returns true if this is a symbol for a temporary (implicit or explicit) group
     * May return false for explicit temp tables prior to resolving.
     * see {@link #isTempTable()}
     * @return
     * @since 5.5
     */
    public boolean isTempGroupSymbol() {
        return isTempTable || (metadataID == null && isImplicitTempGroupSymbol());
    }

    // Implicit temp groups are recognized purely by the "#" name prefix.
    public boolean isImplicitTempGroupSymbol() {
        return isTempGroupName(getNonCorrelationName());
    }

    /**
     * Compare two groups and give an ordering.
     * @param o Other group
     * @return -1, 0, or 1 depending on how this compares to group
     */
    public int compareTo(GroupSymbol o) {
        return getName().compareTo(o.getName());
    }

    /**
     * Return a deep copy of this object.
     * @return Deep copy of the object
     */
    // NOTE(review): does not call super.clone(); all state is copied field by
    // field via the private constructor — confirm Symbol has no additional
    // mutable state beyond outputName/shortName handled here.
    public GroupSymbol clone() {
        GroupSymbol copy = new GroupSymbol(qualifier, getShortName(), getDefinition());
        copy.metadataID = this.metadataID;
        copy.setIsTempTable(isTempTable);
        copy.setProcedure(isProcedure);
        copy.outputDefinition = this.outputDefinition;
        copy.outputName = this.outputName;
        copy.isGlobalTable = isGlobalTable;
        copy.checkMatViewStatus = checkMatViewStatus;
        return copy;
    }

    /**
     * Compare group symbols
     * @param obj Other object to compare
     * @return True if equivalent
     */
    // NOTE(review): when either qualifier is null this compares full names,
    // while hashCode() hashes (qualifier, shortName) when qualifier is set —
    // a qualified and an unqualified symbol with the same full name can be
    // equal yet hash differently; confirm against Symbol.hashCode semantics.
    public boolean equals(Object obj) {
        if(this == obj) {
            return true;
        }

        if(!(obj instanceof GroupSymbol)) {
            return false;
        }
        GroupSymbol other = (GroupSymbol) obj;

        if (this.qualifier == null || other.qualifier == null) {
            return this.getName().equals(other.getName());
        }
        return EquivalenceUtil.areEqual(this.qualifier, other.qualifier)
        && this.getShortName().equals(other.getShortName());
    }

    public boolean hasAlias() {
        return getDefinition() != null;
    }

    public void setIsTempTable(boolean isTempTable) {
        this.isTempTable = isTempTable;
    }

    public static boolean isTempGroupName(String name) {
        if (name == null)
            return false;
        return name.startsWith(TEMP_GROUP_PREFIX);
    }

    /**
     * Returns if this is a Temp Table
     * Set after resolving.
     * @return
     */
    public boolean isTempTable() {
        return this.isTempTable;
    }

    /**
     * Returns if this is a pushed Common Table
     * Set after resolving and initial common table planning
     * @return
     */
    public boolean isPushedCommonTable() {
        // pushed when the temp metadata id is not an adapter wrapper
        return isTempTable && TempMetadataAdapter.getActualMetadataId(metadataID) == metadataID;
    }

    public boolean isProcedure() {
        return this.isProcedure;
    }

    public void setProcedure(boolean isProcedure) {
        this.isProcedure = isProcedure;
    }

    public String getOutputDefinition() {
        return this.outputDefinition == null?this.getDefinition():this.outputDefinition;
    }

    public void setOutputDefinition(String outputDefinition) {
        this.outputDefinition = outputDefinition;
    }

    public boolean isGlobalTable() {
        return isGlobalTable;
    }

    public void setGlobalTable(boolean isGlobalTable) {
        this.isGlobalTable = isGlobalTable;
    }

    @Override
    public String getName() {
        // full name re-joins the qualifier with the short name
        if (this.qualifier != null) {
            return this.qualifier + Symbol.SEPARATOR + this.getShortName();
        }
        return super.getName();
    }

    @Override
    public int hashCode() {
        if (this.qualifier != null) {
            return HashCodeUtil.hashCode(this.qualifier.hashCode(), this.getShortName().hashCode());
        }
        return super.hashCode();
    }

    public void setName(String name) {
        // split a dotted name into qualifier + short name on the FIRST '.'
        int index = name.indexOf('.');
        if (index > 0) {
            this.qualifier = new String(name.substring(0, index));
            name = new String(name.substring(index + 1));
        } else {
            this.qualifier = null;
        }
        super.setShortName(name);
    }

    public void setCheckMatStatus(Object viewMatadataId) {
        this.checkMatViewStatus = viewMatadataId;
    }

    public Object getCheckMatViewStatus() {
        return this.checkMatViewStatus;
    }

}
straceX/Ectoplasm
linux-1.2.0/linux/include/asm-mips/string.h
<gh_stars>0
/*
 * include/asm-mips/string.h
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 1994, 1995  Waldorf Electronics
 * written by <NAME>
 */
#ifndef __ASM_MIPS_STRING_H
#define __ASM_MIPS_STRING_H

#include <asm/mipsregs.h>

/*
 * All routines below are hand-written MIPS assembly; .set noreorder/noat
 * means branch delay slots are filled manually and register $1 ($at) is
 * used as a scratch register.
 */

/* Copy the NUL-terminated string src to dest; returns dest. */
extern __inline__ char * strcpy(char * dest, const char *src)
{
  char *xdest = dest;

  __asm__ __volatile__(
	".set\tnoreorder\n\t"
	".set\tnoat\n"
	"1:\tlbu\t$1,(%1)\n\t"
	"addiu\t%1,%1,1\n\t"
	"sb\t$1,(%0)\n\t"
	"bnez\t$1,1b\n\t"
	"addiu\t%0,%0,1\n\t"
	".set\tat\n\t"
	".set\treorder"
	: "=r" (dest), "=r" (src)
	: "0" (dest), "1" (src)
	: "$1","memory");

  return xdest;
}

/* Copy at most n bytes of src to dest; returns dest.  Stops early at NUL. */
extern __inline__ char * strncpy(char *dest, const char *src, size_t n)
{
  char *xdest = dest;

  if (n == 0)
    return xdest;

  __asm__ __volatile__(
	".set\tnoreorder\n\t"
	".set\tnoat\n"
	"1:\tlbu\t$1,(%1)\n\t"
	"subu\t%2,%2,1\n\t"
	"sb\t$1,(%0)\n\t"
	"beqz\t$1,2f\n\t"
	"addiu\t%0,%0,1\n\t"
	"bnez\t%2,1b\n\t"
	"addiu\t%1,%1,1\n"
	"2:\n\t"
	".set\tat\n\t"
	".set\treorder\n\t"
	: "=r" (dest), "=r" (src), "=r" (n)
	: "0" (dest), "1" (src), "2" (n)
	: "$1","memory");

  return dest;
}

/* Lexicographic comparison of two NUL-terminated strings. */
extern __inline__ int strcmp(const char * cs, const char * ct)
{
  int __res;

  __asm__ __volatile__(
	".set\tnoreorder\n\t"
	".set\tnoat\n\t"
	"lbu\t%2,(%0)\n"
	"1:\tlbu\t$1,(%1)\n\t"
	"addiu\t%0,%0,1\n\t"
	"bne\t$1,%2,2f\n\t"
	"addiu\t%1,%1,1\n\t"
	"bnez\t%2,1b\n\t"
	"lbu\t%2,(%0)\n\t"
	STR(FILL_LDS) "\n\t"
	"move\t%2,$1\n"
	"2:\tsub\t%2,%2,$1\n"
	"3:\t.set\tat\n\t"
	".set\treorder\n\t"
	: "=d" (cs), "=d" (ct), "=d" (__res)
	: "0" (cs), "1" (ct)
	: "$1");

  return __res;
}

/* Compare at most count bytes of two strings. */
/* NOTE(review): __res is declared char but the function returns int —
 * the comparison result is truncated/sign-extended through a char;
 * confirm this matches callers' expectations on this architecture. */
extern __inline__ int strncmp(const char * cs, const char * ct, size_t count)
{
  char __res;

  __asm__ __volatile__(
	".set\tnoreorder\n\t"
	".set\tnoat\n"
	"1:\tlbu\t%3,(%0)\n\t"
	"beqz\t%2,2f\n\t"
	"lbu\t$1,(%1)\n\t"
	"addiu\t%2,%2,-1\n\t"
	"bne\t$1,%3,3f\n\t"
	"addiu\t%0,%0,1\n\t"
	"bnez\t%3,1b\n\t"
	"addiu\t%1,%1,1\n"
	"2:\tmove\t%3,$1\n"
	"3:\tsub\t%3,%3,$1\n\t"
	".set\tat\n\t"
	".set\treorder"
	: "=d" (cs), "=d" (ct), "=d" (count), "=d" (__res)
	: "0" (cs), "1" (ct), "2" (count)
	: "$1");

  return __res;
}

/* Fill count bytes at s with the byte value c; returns s. */
extern __inline__ void * memset(void * s, int c, size_t count)
{
  void *xs = s;

  if (!count)
    return xs;

  __asm__ __volatile__(
	".set\tnoreorder\n"
	"1:\tsb\t%3,(%0)\n\t"
	"bne\t%0,%1,1b\n\t"
	"addiu\t%0,%0,1\n\t"
	".set\treorder"
	: "=r" (s), "=r" (count)
	: "0" (s), "r" (c), "1" (s + count - 1)
	: "memory");

  return xs;
}

/* Copy n bytes from from to to; regions must not overlap.  Returns to. */
extern __inline__ void * memcpy(void * to, const void * from, size_t n)
{
  void *xto = to;

  if (!n)
    return xto;

  __asm__ __volatile__(
	".set\tnoreorder\n\t"
	".set\tnoat\n"
	"1:\tlbu\t$1,(%1)\n\t"
	"addiu\t%1,%1,1\n\t"
	"sb\t$1,(%0)\n\t"
	"subu\t%2,%2,1\n\t"
	"bnez\t%2,1b\n\t"
	"addiu\t%0,%0,1\n\t"
	".set\tat\n\t"
	".set\treorder"
	: "=r" (to), "=r" (from), "=r" (n)
	: "0" (to), "1" (from), "2" (n)
	: "$1","memory" );

  return xto;
}

/* Copy n bytes handling overlap: copies forward when dest < src,
 * backward otherwise.  Returns dest. */
extern __inline__ void * memmove(void * dest,const void * src, size_t n)
{
  void *xdest = dest;

  if (!n)
    return xdest;

  if (dest < src)
    __asm__ __volatile__(
	".set\tnoreorder\n\t"
	".set\tnoat\n"
	"1:\tlbu\t$1,(%1)\n\t"
	"addiu\t%1,%1,1\n\t"
	"sb\t$1,(%0)\n\t"
	"subu\t%2,%2,1\n\t"
	"bnez\t%2,1b\n\t"
	"addiu\t%0,%0,1\n\t"
	".set\tat\n\t"
	".set\treorder"
	: "=r" (dest), "=r" (src), "=r" (n)
	: "0" (dest), "1" (src), "2" (n)
	: "$1","memory" );
  else
    __asm__ __volatile__(
	".set\tnoreorder\n\t"
	".set\tnoat\n"
	"1:\tlbu\t$1,-1(%1)\n\t"
	"subu\t%1,%1,1\n\t"
	"sb\t$1,-1(%0)\n\t"
	"subu\t%2,%2,1\n\t"
	"bnez\t%2,1b\n\t"
	"subu\t%0,%0,1\n\t"
	".set\tat\n\t"
	".set\treorder"
	: "=r" (dest), "=r" (src), "=r" (n)
	: "0" (dest+n), "1" (src+n), "2" (n)
	: "$1","memory" );

  return xdest;
}

/* Scan up to size bytes at addr for the byte c; returns a pointer to the
 * match or to addr+size when not found. */
extern __inline__ void * memscan(void * addr, int c, size_t size)
{
  if (!size)
    return addr;

  __asm__(".set\tnoreorder\n\t"
	".set\tnoat\n"
	"1:\tbeq\t$0,%1,2f\n\t"
	"lbu\t$1,(%0)\n\t"
	"subu\t%1,%1,1\n\t"
	"bnez\t%1,1b\n\t"
	"addiu\t%0,%0,1\n\t"
	".set\tat\n\t"
	".set\treorder\n"
	"2:"
	: "=r" (addr), "=r" (size)
	: "0" (addr), "1" (size), "r" (c)
	: "$1");

  return addr;
}

#endif /* __ASM_MIPS_STRING_H */
grissomlab/sigpy-rf
sigpy/learn/util.py
<reponame>grissomlab/sigpy-rf<filename>sigpy/learn/util.py # -*- coding: utf-8 -*- """Machine learning utilities. """ import numpy as np import sigpy as sp __all__ = ['labels_to_scores', 'scores_to_labels'] def labels_to_scores(labels): """Convert labels to scores. Args: labels (array): One-dimensional label array. Returns: array: Score array of shape (len(labels), max(labels) + 1). """ device = sp.get_device(labels) xp = device.xp with device: num_classes = labels.max() + 1 scores = xp.zeros([len(labels), num_classes], dtype=np.float32) scores[xp.arange(len(labels)), labels] = 1 return scores def scores_to_labels(scores): """Convert scores to labels, by setting peak index to label. Args: scores (array): Two-dimensional score array. Returns: array: Label array of lengths scores.shape[0]. """ device = sp.get_device(scores) xp = device.xp with device: return xp.argmax(scores, axis=1)
nr17/platform
launcher/src/main/java/com/proofpoint/launcher/Processes.java
/*
 * Copyright 2012 Proofpoint, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.proofpoint.launcher;

import com.google.common.collect.ImmutableList;
import jnr.constants.platform.Errno;
import jnr.posix.POSIX;
import jnr.posix.POSIXFactory;
import jnr.posix.POSIXHandler;
import sun.misc.Signal;

import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.io.PrintStream;
import java.lang.ProcessBuilder.Redirect;
import java.util.Arrays;
import java.util.IllegalFormatException;
import java.util.List;
import java.util.Map;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.logging.Level;
import java.util.logging.Logger;

/**
 * Static process-management helpers (pid, detach, kill, liveness) backed by
 * jnr-posix on Unix-like systems, with Windows fallbacks where possible.
 */
class Processes
{
    private static final OurPOSIXHandler posixHandler = new OurPOSIXHandler();
    // Platform null device, chosen once in the static initializer below.
    public static final File NULL_FILE;

    private Processes()
    {
    }

    static {
        if (System.getProperty("os.name").startsWith("Windows")) {
            NULL_FILE = new File("NUL");
        }
        else {
            NULL_FILE = new File("/dev/null");
        }
    }

    /** Returns the current process id via the native getpid(2). */
    static int getpid()
    {
        return getPosix().getpid();
    }

    /**
     * Detaches from the controlling terminal by starting a new session
     * (setsid(2)). No-op on Windows.
     */
    static void detach()
    {
        if (!System.getProperty("os.name").startsWith("Windows")) {
            getPosix().setsid();
        }
    }

    /**
     * Kills the given process.  On Unix, graceful sends SIGTERM and
     * non-graceful SIGKILL.
     * NOTE(review): on Windows this always runs "taskkill /f" — the graceful
     * flag is ignored there; failures of taskkill are silently swallowed.
     */
    static void kill(int pid, boolean graceful)
    {
        if (System.getProperty("os.name").startsWith("Windows")) {
            List<String> args = ImmutableList.of("taskkill", "/f", "/pid", Integer.toString(pid));
            try {
                new ProcessBuilder(args)
                        .redirectInput(Redirect.from(NULL_FILE))
                        .redirectOutput(Redirect.INHERIT)
                        .redirectError(Redirect.INHERIT)
                        .start()
                        .waitFor();
            }
            catch (IOException | InterruptedException ignored) {
            }
        }
        else {
            int signal = new Signal(graceful ? "TERM" : "KILL").getNumber();
            getPosix().kill(pid, signal);
        }
    }

    /**
     * Returns whether the process exists, probed with kill(pid, 0)
     * (signal 0 performs error checking only, no signal is sent).
     * NOTE(review): always returns false on Windows — existence is not
     * implemented there.
     */
    public static boolean exists(int pid)
    {
        if (System.getProperty("os.name").startsWith("Windows")) {
            return false;
        }
        else {
            return getPosix().kill(pid, 0) == 0;
        }
    }

    public static void setVerbose(boolean verbose)
    {
        posixHandler.setVerbose(verbose);
    }

    private static POSIX getPosix()
    {
        return PosixSingletonHolder.instance;
    }

    // Lazy-initialization holder idiom: the native POSIX instance is created
    // on first use of getPosix().
    private static class PosixSingletonHolder
    {
        private static final POSIX instance = POSIXFactory.getNativePOSIX(posixHandler);
    }

    /**
     * Callback handler required by jnr-posix: turns native errors into
     * runtime exceptions and wires warnings into java.util.logging.
     */
    private static final class OurPOSIXHandler
            implements POSIXHandler
    {
        private final AtomicBoolean verbose = new AtomicBoolean(false);

        void setVerbose(boolean verbose)
        {
            this.verbose.set(verbose);
        }

        @Override
        public void error(jnr.constants.platform.Errno error, String extraData)
        {
            throw new RuntimeException("native error " + error.description() + " " + extraData);
        }

        @Override
        public void error(Errno errno, String extraData1, String extraData2)
        {
            throw new RuntimeException("native error " + errno.description() + " " + extraData1 + " " + extraData2);
        }

        @Override
        public void unimplementedError(String methodName)
        {
            throw new IllegalStateException(methodName + " is not implemented in jnr-posix");
        }

        @Override
        public void warn(WARNING_ID id, String message, Object... data)
        {
            String msg;
            try {
                msg = String.format(message, data);
            }
            catch (IllegalFormatException e) {
                // fall back to a raw dump if the message is not a valid format string
                msg = message + " " + Arrays.toString(data);
            }
            Logger.getLogger("jnr-posix").log(Level.WARNING, msg);
        }

        @Override
        public boolean isVerbose()
        {
            return verbose.get();
        }

        @Override
        public File getCurrentWorkingDirectory()
        {
            return new File(".");
        }

        @Override
        public String[] getEnv()
        {
            // flatten the environment map into "KEY=VALUE" entries
            String[] envp = new String[System.getenv().size()];
            int i = 0;
            for (Map.Entry<String, String> pair : System.getenv().entrySet()) {
                envp[i++] = pair.getKey() + "=" + pair.getValue();
            }
            return envp;
        }

        @Override
        public InputStream getInputStream()
        {
            return System.in;
        }

        @Override
        public PrintStream getOutputStream()
        {
            return System.out;
        }

        @Override
        public int getPID()
        {
            throw new IllegalStateException("getPID is not implemented in jnr-posix");
        }

        @Override
        public PrintStream getErrorStream()
        {
            return System.err;
        }
    }
}
uk-gov-mirror/ministryofjustice.correspondence_tool_staff
app/state_machines/configurable_state_machine/invalid_event_error.rb
module ConfigurableStateMachine class InvalidEventError < RuntimeError def initialize(role:, kase:, user:, event:, message: nil) description = <<~EOS Invalid event: type: #{kase.type_abbreviation} workflow: #{kase.workflow} role: #{role} state: #{kase.current_state} event: #{event} kase_id: #{kase.id} user_id: #{user.id} EOS if message description += " message: #{message}\n" end super(description) end end end
esalesky/NMTGMinor
onmt/metrics/sbleu.py
import sys
import math

# Smoothed sentence-level BLEU with up-to-4-gram precision.
ngramLength = 4;

# Additive smoothing applied to each n-gram precision term.
smoothingConstant=0.1
# Smoothing added to the reference length inside the brevity penalty.
bpSmoothingConstant=1.5


def getCounts(words):
    """Count every n-gram (n = 1..ngramLength) of `words`, keyed by the
    space-joined n-gram string."""
    counts = {}
    for start in range(len(words)):
        for n in range(1, ngramLength + 1):
            if start + n > len(words):
                break
            key = " ".join(words[start:start + n])
            counts[key] = counts.get(key, 0) + 1
    return counts


def getRefCounts(ref):
    """Return the reference's n-gram counts together with its token length."""
    return getCounts(ref), len(ref)


def countMatches(hyp, ref):
    """Clipped n-gram matching: matches[j] is the number of (j+1)-grams of
    `hyp` found in the reference count dict `ref`, each reference n-gram
    being creditable at most as many times as it occurs in the reference."""
    matches = [0] * ngramLength
    used = {}
    for start in range(len(hyp)):
        for n in range(1, ngramLength + 1):
            if start + n > len(hyp):
                break
            key = " ".join(hyp[start:start + n])
            if key in ref and used.get(key, 0) < ref[key]:
                matches[n - 1] += 1
                used[key] = used.get(key, 0) + 1
    return matches


def calcBLEU(counts, length, referenceLength):
    """Combine smoothed n-gram precisions (geometric mean) with a smoothed
    brevity penalty."""
    score = 1
    for i in range(ngramLength):
        if length - i > 0:  # cannot calculate 4-gram precision for sentence length 3
            score *= 1.0 * (counts[i] + smoothingConstant) / (length - i + smoothingConstant)
    score = pow(score, 1.0 / ngramLength)
    if length > referenceLength:
        return score
    # Brevity penalty; guard the division for empty hypotheses.
    if length == 0:
        return math.exp(1.0 - (referenceLength + bpSmoothingConstant) / 1) * score
    return math.exp(1.0 - (referenceLength + bpSmoothingConstant) / length) * score


def calc(refCounts, refLength, hyp):
    """Score a hypothesis token list against precomputed reference counts."""
    return calcBLEU(countMatches(hyp, refCounts), len(hyp), refLength)


# inputs are lists of words
def sentence_bleu(ref, hyp):
    """Smoothed sentence BLEU of `hyp` against `ref`; returns a 1-tuple."""
    refCounts, refLength = getRefCounts(ref)
    return (calc(refCounts, refLength, hyp),)
isabella232/aistreams
third_party/gst-plugins-base/tests/examples/seek/stepping.c
/* GStreamer
 *
 * stepping.c: stepping sample application
 *
 * Copyright (C) 2009 <NAME> <<EMAIL>>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Library General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Library General Public License for more details.
 *
 * You should have received a copy of the GNU Library General Public
 * License along with this library; if not, write to the
 * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
 * Boston, MA 02110-1301, USA.
 */

#include <stdlib.h>
#include <math.h>

#include <gst/gst.h>

static GMainLoop *loop;
/* phase of the sinusoid that modulates each step's duration */
static gdouble period = 0.0;

/* One-shot timeout callback: sends a step event whose duration varies
 * sinusoidally between roughly 10 ms and 210 ms.  Returns FALSE so the
 * timeout is not rescheduled here (a new one is added on ASYNC_DONE). */
static gboolean
do_step (GstElement * bin)
{
  gdouble length;

  length = sin (period);
  period += G_PI / 40;

  /* map sin() from [-1,1] into (0.1, 2.1] and scale to nanoseconds */
  length += 1.1;
  length *= 100 * GST_MSECOND;

  gst_element_send_event (bin,
      gst_event_new_step (GST_FORMAT_TIME, length, 1.0, TRUE, FALSE));

  return FALSE;
}

/* Bus watch: quits on EOS/error, and schedules the next step 40 ms after
 * each ASYNC_DONE (posted when the pipeline finishes prerolling — and,
 * presumably, after each completed step; verify against the GstStep docs). */
static gboolean
handle_message (GstBus * bus, GstMessage * message, gpointer data)
{
  GstElement *bin = GST_ELEMENT_CAST (data);

  switch (message->type) {
    case GST_MESSAGE_EOS:
      g_message ("got EOS");
      g_main_loop_quit (loop);
      break;
    case GST_MESSAGE_WARNING:
    case GST_MESSAGE_ERROR:
    {
      GError *gerror;
      gchar *debug;

      if (message->type == GST_MESSAGE_ERROR)
        gst_message_parse_error (message, &gerror, &debug);
      else
        gst_message_parse_warning (message, &gerror, &debug);
      gst_object_default_error (GST_MESSAGE_SRC (message), gerror, debug);
      g_error_free (gerror);
      g_free (debug);
      g_main_loop_quit (loop);
      break;
    }
    case GST_MESSAGE_ASYNC_DONE:
      g_timeout_add (40, (GSourceFunc) do_step, bin);
      break;
    default:
      break;
  }
  return TRUE;
}

int
main (int argc, char *argv[])
{
  GstElement *bin;
  GstBus *bus;

  gst_init (&argc, &argv);

  if (argc < 2) {
    g_print ("usage: %s <uri>\n", argv[0]);
    return -1;
  }

  /* create a new bin to hold the elements */
  bin = gst_element_factory_make ("playbin", "bin");
  g_assert (bin);
  g_object_set (bin, "uri", argv[1], NULL);

  bus = gst_pipeline_get_bus (GST_PIPELINE (bin));
  gst_bus_add_watch (bus, handle_message, bin);

  /* go to the PAUSED state and wait for preroll */
  g_message ("prerolling first frame");
  gst_element_set_state (bin, GST_STATE_PAUSED);
  /* block until the async state change completes */
  gst_element_get_state (bin, NULL, NULL, -1);

  loop = g_main_loop_new (NULL, TRUE);
  g_main_loop_run (loop);

  g_message ("finished");

  /* stop the bin */
  gst_element_set_state (bin, GST_STATE_NULL);
  g_main_loop_unref (loop);
  gst_object_unref (bus);

  exit (0);
}
pritamghanghas/lighttpd-1.x
src/iosocket.h
<filename>src/iosocket.h #ifndef _IOSOCKET_H_ #define _IOSOCKET_H_ /** * make sure we know about OPENSSL all the time * * if we don't include config.h here we run into different sizes * for the iosocket-struct depending on config.h include before * iosocket.h or not */ #ifdef HAVE_CONFIG_H # include "config.h" #endif #if defined HAVE_LIBSSL && defined HAVE_OPENSSL_SSL_H # define USE_OPENSSL # include <openssl/ssl.h> #endif #include "settings.h" #include "buffer.h" typedef enum { IOSOCKET_TYPE_UNSET, IOSOCKET_TYPE_SOCKET, IOSOCKET_TYPE_PIPE } iosocket_t; /** * a non-blocking fd */ typedef struct { int fd; int fde_ndx; #ifdef USE_OPENSSL SSL *ssl; #ifndef OPENSSL_NO_TLSEXT buffer *tlsext_server_name; #endif #endif iosocket_t type; /**< sendfile on solaris doesn't work on pipes */ } iosocket; LI_API iosocket * iosocket_init(void); LI_API void iosocket_free(iosocket *sock); #endif
danielSoler93/msm_pele
msm_pele/AdaptivePELE/analysis/writeNetworkFiles.py
import os
import sys
import argparse
from AdaptivePELE.utilities import utilities
import matplotlib.pyplot as plt
try:
    # Best-effort styling: "ggplot" might be missing for older versions of
    # matplotlib (e.g. in life cluster)
    plt.style.use("ggplot")
except Exception:
    # Narrowed from a bare "except:" so KeyboardInterrupt/SystemExit still
    # propagate; a missing style is the only failure we want to ignore.
    pass


def parseArguments():
    """Parse the command line of the conformation-network writer.

    Returns a tuple ``(clusteringObject, suffix, metricCol, outputPath,
    cond, bindEn)``:

    * clusteringObject -- path to the pickled clustering object
    * suffix -- string appended to every output file name
    * metricCol -- report column holding the metric of interest
    * outputPath -- destination directory (``None`` means current directory)
    * cond -- metric optimality condition, "max" or "min" (default "min")
    * bindEn -- report column of the binding energy, or ``None``
    """
    desc = "Write the information related to the conformation network to file\n"
    parser = argparse.ArgumentParser(description=desc)
    parser.add_argument("clusteringObject", type=str, help="Path to the clustering object")
    parser.add_argument("suffix", type=str, help="Suffix to append to file names")
    parser.add_argument("metricCol", type=int, help="Column of the metric of interest")
    parser.add_argument("-o", type=str, default=None, help="Output path where to write the files")
    parser.add_argument("-c", "--cond", type=str, default="min", help="Condition on the metric optimality, options are max or min")
    parser.add_argument("-b", "--bindEn", type=int, default=None, help="Column of the binding energy in the report file")
    args = parser.parse_args()
    return args.clusteringObject, args.suffix, args.metricCol, args.o, args.cond, args.bindEn


if __name__ == "__main__":
    clusteringObject, suffix, metricCol, outputPath, metricOptimization, bindingEnergy = parseArguments()
    if outputPath is not None:
        # Normalise to a trailing separator so plain concatenation below
        # produces paths inside the directory.
        outputPath = os.path.join(outputPath, "")
        if not os.path.exists(outputPath):
            os.makedirs(outputPath)
    else:
        outputPath = ""
    sys.stderr.write("Reading clustering object...\n")
    cl = utilities.readClusteringObject(clusteringObject)
    if cl.conformationNetwork is None:
        sys.exit("Clustering object loaded has no conformation network!!")
    conf = cl.conformationNetwork
    # Locate the best cluster according to the chosen metric and build the
    # pathway leading to it through the conformation network.
    optimalCluster = cl.getOptimalMetric(metricCol, simulationType=metricOptimization)
    pathway = conf.createPathwayToCluster(optimalCluster)
    # Each artefact is only (re)written if it does not exist yet, so the
    # script can be re-run cheaply after partial failures.
    if not os.path.exists(outputPath+"conformationNetwork%s.edgelist" % suffix):
        sys.stderr.write("Writing conformation network...\n")
        conf.writeConformationNetwork(outputPath+"conformationNetwork%s.edgelist" % suffix)
    if not os.path.exists(outputPath+"FDT%s.edgelist" % suffix):
        sys.stderr.write("Writing FDT...\n")
        conf.writeFDT(outputPath+"FDT%s.edgelist" % suffix)
    if not os.path.exists(outputPath+"pathwayFDT%s.pdb" % suffix):
        sys.stderr.write("Writing pathway to optimal cluster...\n")
        # cl.writePathwayOptimalCluster(outputPath+"pathwayFDT%s.pdb" % suffix)
        cl.writePathwayTrajectory(pathway, outputPath+"pathwayFDT%s.pdb" % suffix)
    if not os.path.exists(outputPath+"nodesPopulation%s.txt" % suffix):
        sys.stderr.write("Writing nodes population...\n")
        cl.writeConformationNodePopulation(outputPath+"nodesPopulation%s.txt" % suffix)
    if not os.path.exists(outputPath+"nodesMetric%s.txt" % suffix):
        sys.stderr.write("Writing nodes metrics...\n")
        cl.writeClusterMetric(outputPath+"nodesMetric%s.txt" % suffix, metricCol)
    if bindingEnergy is not None:
        # Binding energy along the pathway (only when the column was given).
        plt.figure()
        plt.plot(pathway, [cl.clusters.clusters[i].getMetricFromColumn(bindingEnergy) for i in pathway])
        plt.xlabel("Cluster number")
        plt.ylabel("Binding energy(kcal/mol)")
        plt.savefig(outputPath+"bindingEnergy_%s.png" % suffix)
    # Contacts ratio and total energy (column 3) along the pathway.
    plt.figure()
    plt.plot(pathway, [cl.clusters.clusters[i].contacts for i in pathway])
    plt.xlabel("Cluster number")
    plt.ylabel("Contacts ratio")
    plt.savefig(outputPath+"contacts_%s.png" % suffix)
    plt.figure()
    plt.plot(pathway, [cl.clusters.clusters[i].getMetricFromColumn(3) for i in pathway])
    plt.xlabel("Cluster number")
    plt.ylabel("Energy(kcal/mol)")
    plt.savefig(outputPath+"totalEnergy_%s.png" % suffix)
    plt.show()
wajaja/sf-opi
web/app/routes/user/resettings/Resetting.js
<reponame>wajaja/sf-opi<gh_stars>0 import React from 'react' import { findDOMNode } from 'react-dom' import createReactClass from 'create-react-class' import { connect } from 'react-redux' import { withRouter, Switch, Route, Redirect } from 'react-router-dom' import { Request, SendEmail, Reset, CheckEmail } from './components' // import { // Setting as SettingActions, // } from '../../../actions' import '../../../styles/user/resetting.scss' const Resetting = createReactClass( { getInitialState() { return { } }, // shouldComponentUpdate(nextProps) { // return this.props.location !== nextProps.location // }, render() { const { screenWidth, } = this.state const { dispatch, user } = this.props return ( <Switch> <Route exact path='/resetting/request' children={() => <Request {...this.props} />} /> <Route exact path='/resetting/check-email' children={() => <CheckEmail {...this.props} />} /> <Route exact path="/resetting/reset/:token" children={() => <Reset {...this.props} /> } /> <Redirect to='/resetting/request' /> </Switch> ) } }) ////// export default withRouter(connect(state =>({ user: state.User.user, }), null)(Resetting))
jin-ku-git/Tool
app/src/main/java/com/youwu/tool/utils_view/CountDownView.java
package com.youwu.tool.utils_view;

import android.content.Context;
import android.graphics.Color;
import android.os.Handler;
import android.os.Message;
import android.util.AttributeSet;
import android.util.Log;
import android.view.LayoutInflater;
import android.view.View;
import android.widget.RelativeLayout;
import android.widget.TextView;

import com.youwu.tool.R;

import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.Timer;
import java.util.TimerTask;

/**
 * Countdown widget: an HH:MM:SS display that can either count down towards
 * a configured end time or count up from it, driven by a {@link Timer}
 * ticking once per second and a {@link Handler} updating the UI thread.
 *
 * NOTE(review): the anonymous {@code timeHandler} holds an implicit
 * reference to this view; if the Timer keeps running after the view is
 * detached it can leak the view — callers should invoke {@link #cancel()}.
 * TODO confirm lifecycle handling at the call sites.
 */
public class CountDownView extends RelativeLayout {
    private static final String TAG = "CountDownView";
    private Context context;
    private TextView tv_hours;
    private TextView tv_colon1;
    private TextView tv_minutes;
    private TextView tv_colon2;
    private TextView tv_seconds;
    private long mDay;    // days
    private long mHour;   // hours
    private long mMin;    // minutes
    private long mSecond; // seconds
    int type = 1; // 1 = count down, 2 = count up
    private Timer mTimer;
    private Date endDate = null;
    private long endTime;
    // NOTE(review): assigned via setCountDownEndListener() but never
    // invoked anywhere in this class — the active end callback is
    // OnTimeEnd/mtime_end below. Confirm whether this is dead code.
    private CountDownView.CountDownEndListener countDownEndListener;

    public CountDownView(Context context) {
        this(context, null);
    }

    public CountDownView(Context context, AttributeSet attrs) {
        this(context, attrs, 0);
    }

    public CountDownView(Context context, AttributeSet attrs, int defStyleAttr) {
        super(context, attrs, defStyleAttr);
        this.context = context;
        initView(context);
    }

    // Inflate the HH:MM:SS layout and grab the text views; also creates
    // the (not yet scheduled) Timer.
    private void initView(Context context) {
        View rootView = LayoutInflater.from(context).inflate(R.layout.count_down, this, true);
        this.tv_hours = (TextView) rootView.findViewById(R.id.tv_hours);
        this.tv_colon1 = (TextView) rootView.findViewById(R.id.tv_colon1);
        this.tv_minutes = (TextView) rootView.findViewById(R.id.tv_minutes);
        this.tv_colon2 = (TextView) rootView.findViewById(R.id.tv_colon2);
        this.tv_seconds = (TextView) rootView.findViewById(R.id.tv_seconds);
        mTimer = new Timer();
    }

    /**
     * Set the background drawable of the hour/minute/second fields.
     * @param backgroundRes drawable resource id
     */
    public CountDownView setTimeBackGroundResource(int backgroundRes){
        tv_hours.setBackgroundResource(backgroundRes);
        tv_minutes.setBackgroundResource(backgroundRes);
        tv_seconds.setBackgroundResource(backgroundRes);
        return this;
    }

    /**
     * Set the hours text.
     * @param hour text to display
     */
    public CountDownView setTvHourText(String hour){
        tv_hours.setText(hour);
        return this;
    }

    /**
     * Set the minutes text.
     * @param minute text to display
     */
    public CountDownView setTvMinuteText(String minute){
        tv_minutes.setText(minute);
        return this;
    }

    /**
     * Set the seconds text.
     * @param second text to display
     */
    public CountDownView setTvSecondText(String second){
        tv_seconds.setText(second);
        return this;
    }

    /**
     * Set the font size of the hour/minute/second fields.
     * @param textSize size in the unit TextView.setTextSize uses (sp)
     */
    public CountDownView setTimeTextSize(float textSize){
        tv_hours.setTextSize(textSize);
        tv_minutes.setTextSize(textSize);
        tv_seconds.setTextSize(textSize);
        return this;
    }

    /**
     * Set the font color of the hour/minute/second fields.
     * @param colorHex color string parseable by Color.parseColor, e.g. "#FF0000"
     */
    public CountDownView setTimeTextColor(String colorHex){
        int color = Color.parseColor(colorHex);
        tv_hours.setTextColor(color);
        tv_minutes.setTextColor(color);
        tv_seconds.setTextColor(color);
        return this;
    }

    /**
     * Set the font size of the ":" separators.
     * @param textSize size in sp
     */
    public CountDownView setColonTextSize(float textSize){
        tv_colon1.setTextSize(textSize);
        tv_colon2.setTextSize(textSize);
        return this;
    }

    /**
     * Set the font color of the ":" separators.
     * @param colorHex color string parseable by Color.parseColor
     */
    public CountDownView setColonTextColor(String colorHex){
        int color = Color.parseColor(colorHex);
        tv_colon1.setTextColor(color);
        tv_colon2.setTextColor(color);
        return this;
    }

    // Receives one tick per second from MyTimerTask (on the main thread):
    // what == 1 -> count down; what == 2 -> count up.
    private Handler timeHandler = new Handler() {
        @Override
        public void handleMessage(Message msg) {
            super.handleMessage(msg);
            if (msg.what == 1) {
                computeTime();
                setTvHourText(getTv(mHour))
                        .setTvMinuteText(getTv(mMin))
                        .setTvSecondText(getTv(mSecond));
                if (mSecond == 0 && mDay == 0 && mHour == 0 && mMin == 0 ) {
                    mTimer.cancel();
                    /**
                     * Countdown-finished callback.
                     */
                    if (mtime_end != null) {
                        Log.i("倒计时结束的回调","");
                        // NOTE(review): method name "onTimeEndt" looks like a
                        // typo for "onTimeEnd" — public interface, so left as is.
                        mtime_end.onTimeEndt();
                    }
                }
            }else if (msg.what==2){
                computeTime_zheng();
                setTvHourText(getTv(mHour))
                        .setTvMinuteText(getTv(mMin))
                        .setTvSecondText(getTv(mSecond));
            }
        }
    };

    /**
     * Cancel the countdown.
     */
    public synchronized final void cancel() {
        mTimer.cancel();
    }

    // Format a time component as two digits (left-pad with "0" below 10).
    private String getTv(long l){
        if(l>=10){
            return l+"";
        }else{
            return "0"+l; // below 10: pad with a leading "0"
        }
    }

    /**
     * Start the timer (count down or count up).
     *
     * Timer.schedule reference (translated from the original notes):
     *  - schedule(task, time): run once at Date `time`.
     *  - schedule(task, firstTime, period): from `firstTime`, repeat every `period` ms.
     *  - schedule(task, delay): run once after `delay` ms.
     *  - schedule(task, delay, period): after `delay` ms, repeat every `period` ms.
     */
    private MyTimerTask mTimerTask;
    public CountDownView startRun(int type) {
        this.type=type;
        if (mTimer != null){
            if (mTimerTask != null){
                mTimerTask.cancel();  // remove the previous task from the queue
            }
            mTimer.cancel();
            mTimer = new Timer();
            mTimerTask = new MyTimerTask(); // create a fresh task
            mTimer.schedule(mTimerTask,0,1000);
        }
        return this;
    }

    // One tick: forward `type` (1 or 2) to the UI-thread handler.
    class MyTimerTask extends TimerTask {
        @Override
        public void run() {
            Message message = Message.obtain();
            message.what = type;
            timeHandler.sendMessage(message);
        }
    }

    /**
     * Countdown step: decrement one second, borrowing from
     * minutes/hours/days; clamps everything to zero when time runs out.
     */
    private void computeTime() {
        mSecond--;
        if (mSecond < 0) {
            mMin--;
            mSecond = 59;
            if (mMin < 0) {
                mMin = 59;
                mHour--;
                if (mHour < 0) {
                    // countdown finished
                    mHour = 23;
                    mDay--;
                    if(mDay < 0){
                        // countdown finished
                        mDay = 0;
                        mHour= 0;
                        mMin = 0;
                        mSecond = 0;
                    }
                }
            }
        }
    }

    // Callback fired when the countdown reaches zero.
    public interface OnTimeEnd {
        void onTimeEndt();
    }

    public void setOnTimeEnd(OnTimeEnd listener) {
        mtime_end = listener;
    }

    private OnTimeEnd mtime_end;

    /**
     * Count-up step: increment one second, carrying into
     * minutes/hours/days.
     */
    private void computeTime_zheng() {
        mSecond++;
        if (mSecond > 59) {
            mMin++;
            mSecond = 0;
            if (mMin > 59) {
                mMin = 0;
                mHour++;
                if (mHour > 23) {
                    mHour = 0;
                    mDay++;
                }
            }
        }
    }

    /**
     * Initialise the countdown end time from a "yyyy-MM-dd HH:mm:ss" string.
     * Parse failures are logged (printStackTrace) and leave endTime unchanged.
     */
    public CountDownView initEndTime(String endTimeStr){
        SimpleDateFormat format=new SimpleDateFormat("yyyy-MM-dd HH:mm:ss"); // expected input format
        try {
            endDate = format.parse(endTimeStr);
            Log.i(TAG,"endDate="+endDate);
            endTime = endDate.getTime();
            Log.i(TAG,"endtime="+endTime);
        } catch (ParseException e) {
            e.printStackTrace();
        }
        return this;
    }

    /*
     * Convert a Unix timestamp (seconds) to a formatted date string.
     */
    public static String stampToDate(long time){
        SimpleDateFormat format = new SimpleDateFormat("yyyy年MM月dd日 HH:mm:ss");
        String times = format.format(new Date(time * 1000L));
        // System.out.println("日期格式---->" + times);
        return times;
    }

    /**
     * Compute the remaining time until the end timestamp and split it
     * into days / hours / minutes / seconds.
     */
    public CountDownView calcTime(){
        // 1. current system time
        Date nowDate = new Date();
        long nowTime = nowDate.getTime();
        // 2. remaining time in seconds
        long allTime = (endTime - nowTime) / 1000;
        // 3. split into days, hours, minutes, seconds
        mDay = allTime / 3600 / 24;
        mHour = allTime / 3600 % 24;
        mMin = allTime / 60 % 60;
        mSecond = allTime % 60;
        //Log.i(TAG,"mDay="+mDay + "mHour=" + mHour + "mMin=" + mMin + "mSecond=" + mSecond);
        return this;
    }

    /**
     * Compute the elapsed time since the end timestamp (for count-up mode)
     * and split it into days / hours / minutes / seconds.
     */
    public CountDownView calcTime_two(){
        // 1. current system time
        Date nowDate = new Date();
        long nowTime = nowDate.getTime();
        String sss=stampToDate(nowTime/1000);
        System.out.println("系统当前时间:" + sss);
        System.out.println("endTime:" + endTime);
        // 2. elapsed time in seconds (note: now minus end, unlike calcTime)
        long allTime = (nowTime - endTime) / 1000;
        // 3. split into days, hours, minutes, seconds
        mDay = allTime / 3600 / 24;
        mHour = allTime / 3600 % 24;
        mMin = allTime / 60 % 60;
        mSecond = allTime % 60;
        //Log.i(TAG,"mDay="+mDay + "mHour=" + mHour + "mMin=" + mMin + "mSecond=" + mSecond);
        return this;
    }

    // See NOTE(review) on the countDownEndListener field: never invoked here.
    public interface CountDownEndListener {
        void onCountDownEnd();
    }

    public CountDownView setCountDownEndListener(CountDownEndListener countDownEndListener) {
        this.countDownEndListener = countDownEndListener;
        return this;
    }

    /**
     * Get the current day count.
     * @return days component of the last computed time
     */
    public long getmDay() {
        return mDay;
    }
}
qtoggle/qui
js/forms/common-fields/choice-buttons-field.js
import JQueryUIField from './jquery-ui-field.js'

/**
 * A field backed by choice buttons. The value data type can be anything.
 * @alias qui.forms.commonfields.ChoiceButtonsField
 * @extends qui.forms.commonfields.JQueryUIField
 */
class ChoiceButtonsField extends JQueryUIField {

    static WIDGET_CLASS = 'choicebuttons'


    /**
     * @constructs
     * @param {Object[]|Object[][]} choices choices or groups of choices (pairs/arrays of pairs of `label` and `value`)
     * @param {String} [onClass] the CSS class to add to buttons in *on* state (defaults to `on`)
     * @param {...*} args parent class parameters
     */
    constructor({choices, onClass = 'on', ...args}) {
        /* Bundle the widget-specific options first, then hand everything
         * over to the jQuery-UI-backed base field. */
        const widgetAttrs = {choices, onClass}
        super({widgetAttrs, ...args})
    }

    // TODO add setters and getters for choices

}

export default ChoiceButtonsField
StephanZaaijer/IPASS
docs/html/search/all_a.js
/* Auto-generated search index for the HTML documentation (Doxygen-style
 * output under docs/html/search). Presumably regenerated together with the
 * docs — do not edit by hand; TODO confirm the generator before touching. */
var searchData=
[
  ['magenta_62',['magenta',['../class_i_p_a_s_s_1_1_a_p_a102.html#a012c28e4910852f6da5386969e1ddc12',1,'IPASS::APA102']]],
  ['mask_63',['Mask',['../struct_i_p_a_s_s_1_1_r_f24_l01_1_1_s_e_t_t_i_n_g_1_1_setting.html#a77f1a60c4561c1cdfcff513920fab706',1,'IPASS::RF24L01::SETTING::Setting']]],
  ['mask_5fmax_5frt_64',['Mask_MAX_RT',['../class_i_p_a_s_s_1_1_r_f24_l01_1_1_s_e_t_t_i_n_g.html#aa43fb8ae2864b86073c972765b6d6b38',1,'IPASS::RF24L01::SETTING']]],
  ['mask_5frx_5fdr_65',['Mask_RX_DR',['../class_i_p_a_s_s_1_1_r_f24_l01_1_1_s_e_t_t_i_n_g.html#a6d5cd1ef00ef29bc7eb59fd8ac4a39b5',1,'IPASS::RF24L01::SETTING']]],
  ['mask_5ftx_5fds_66',['Mask_TX_DS',['../class_i_p_a_s_s_1_1_r_f24_l01_1_1_s_e_t_t_i_n_g.html#af4c1a09f89aac37e28a2a0dc0023e727',1,'IPASS::RF24L01::SETTING']]],
  ['max_5frt_67',['MAX_RT',['../class_i_p_a_s_s_1_1_r_f24_l01_1_1_s_e_t_t_i_n_g.html#a23640e49e3b99c69eeaef75a7aab04e5',1,'IPASS::RF24L01::SETTING']]],
  ['minion_5fselect_68',['minion_select',['../class_i_p_a_s_s_1_1_r_f24_l01.html#a9d7fc55429c536ec93d442539c5c95cb',1,'IPASS::RF24L01']]]
];
HewlettPackard/oneview-sdk-java
oneview-sdk-java-lib/src/main/java/com/hp/ov/sdk/dto/servers/serverprofiletemplate/LogicalDriveTemplate.java
/*
 * (C) Copyright 2016 Hewlett Packard Enterprise Development LP
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * You may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.hp.ov.sdk.dto.servers.serverprofiletemplate;

import java.io.Serializable;

import org.apache.commons.lang3.builder.EqualsBuilder;
import org.apache.commons.lang3.builder.HashCodeBuilder;
import org.apache.commons.lang3.builder.ToStringBuilder;

import com.google.gson.annotations.Since;
import com.google.gson.annotations.Until;

/**
 * Data-transfer object describing a logical drive inside a server profile
 * template. Plain bean serialized via Gson; the {@code @Since}/{@code @Until}
 * annotations select fields by OneView API version ({@code driveName} up to
 * v299, {@code name} and {@code sasLogicalJBODId} from v300 on).
 * equals/hashCode/toString are reflection-based (commons-lang3).
 */
public class LogicalDriveTemplate implements Serializable {

    private static final long serialVersionUID = 1L;

    private Boolean bootable;
    @Until(299)
    private String driveName;
    private String driveTechnology;
    @Since(300)
    private String name;
    private Integer numPhysicalDrives;
    private String raidLevel;
    @Since(300)
    private Integer sasLogicalJBODId;

    /**
     * @return the bootable
     */
    public Boolean getBootable() {
        return bootable;
    }

    /**
     * @param bootable the bootable to set
     */
    public void setBootable(Boolean bootable) {
        this.bootable = bootable;
    }

    /**
     * @return the driveName
     */
    public String getDriveName() {
        return driveName;
    }

    /**
     * @param driveName the driveName to set
     */
    public void setDriveName(String driveName) {
        this.driveName = driveName;
    }

    /**
     * @return the driveTechnology
     */
    public String getDriveTechnology() {
        return driveTechnology;
    }

    /**
     * @param driveTechnology the driveTechnology to set
     */
    public void setDriveTechnology(String driveTechnology) {
        this.driveTechnology = driveTechnology;
    }

    /**
     * @return the name
     */
    public String getName() {
        return name;
    }

    /**
     * @param name the name to set
     */
    public void setName(String name) {
        this.name = name;
    }

    /**
     * @return the numPhysicalDrives
     */
    public Integer getNumPhysicalDrives() {
        return numPhysicalDrives;
    }

    /**
     * @param numPhysicalDrives the numPhysicalDrives to set
     */
    public void setNumPhysicalDrives(Integer numPhysicalDrives) {
        this.numPhysicalDrives = numPhysicalDrives;
    }

    /**
     * @return the raidLevel
     */
    public String getRaidLevel() {
        return raidLevel;
    }

    /**
     * @param raidLevel the raidLevel to set
     */
    public void setRaidLevel(String raidLevel) {
        this.raidLevel = raidLevel;
    }

    /**
     * @return the sasLogicalJBODId
     */
    public Integer getSasLogicalJBODId() {
        return sasLogicalJBODId;
    }

    /**
     * @param sasLogicalJBODId the sasLogicalJBODId to set
     */
    public void setSasLogicalJBODId(Integer sasLogicalJBODId) {
        this.sasLogicalJBODId = sasLogicalJBODId;
    }

    @Override
    public String toString() {
        return ToStringBuilder.reflectionToString(this);
    }

    @Override
    public int hashCode() {
        return HashCodeBuilder.reflectionHashCode(this);
    }

    @Override
    public boolean equals(Object obj) {
        return EqualsBuilder.reflectionEquals(this, obj);
    }

}
mc-ireiser/dev_challenges
backend_node/src/routes/user-route.js
const express = require("express"); const router = express.Router(); // Controller const UserController = require("../controllers/user-controller"); // Routes router.post("", UserController.createUser); router.post("/login", UserController.loginUser); // Exports module.exports = router;
garypwn/mage
Mage.Sets/src/mage/cards/s/SphinxsDecree.java
<filename>Mage.Sets/src/mage/cards/s/SphinxsDecree.java<gh_stars>1-10 package mage.cards.s; import java.util.UUID; import mage.MageObject; import mage.abilities.Ability; import mage.abilities.effects.ContinuousEffect; import mage.abilities.effects.ContinuousRuleModifyingEffectImpl; import mage.abilities.effects.OneShotEffect; import mage.cards.CardImpl; import mage.cards.CardSetInfo; import mage.constants.CardType; import mage.constants.Duration; import mage.constants.Outcome; import mage.game.Game; import mage.game.events.GameEvent; import mage.target.targetpointer.FixedTarget; /** * * @author LevelX2 */ public final class SphinxsDecree extends CardImpl { public SphinxsDecree(UUID ownerId, CardSetInfo setInfo) { super(ownerId, setInfo, new CardType[]{CardType.SORCERY}, "{1}{W}"); // Each opponent can't cast instant or sorcery spells during that player's next turn. this.getSpellAbility().addEffect(new SphinxsDecreeEffect()); } public SphinxsDecree(final SphinxsDecree card) { super(card); } @Override public SphinxsDecree copy() { return new SphinxsDecree(this); } } class SphinxsDecreeEffect extends OneShotEffect { public SphinxsDecreeEffect() { super(Outcome.Benefit); this.staticText = "Each opponent can't cast instant or sorcery spells during that player's next turn"; } public SphinxsDecreeEffect(final SphinxsDecreeEffect effect) { super(effect); } @Override public SphinxsDecreeEffect copy() { return new SphinxsDecreeEffect(this); } @Override public boolean apply(Game game, Ability source) { for (UUID opponentId : game.getOpponents(source.getControllerId())) { ContinuousEffect effect = new SphinxsDecreeCantCastEffect(); effect.setTargetPointer(new FixedTarget(opponentId)); game.addEffect(effect, source); } return true; } } class SphinxsDecreeCantCastEffect extends ContinuousRuleModifyingEffectImpl { int playersNextTurn; public SphinxsDecreeCantCastEffect() { super(Duration.Custom, Outcome.Detriment); staticText = "You can't cast instant or sorcery spells during 
this turn"; playersNextTurn = 0; } public SphinxsDecreeCantCastEffect(final SphinxsDecreeCantCastEffect effect) { super(effect); this.playersNextTurn = effect.playersNextTurn; } @Override public SphinxsDecreeCantCastEffect copy() { return new SphinxsDecreeCantCastEffect(this); } @Override public boolean apply(Game game, Ability source) { return true; } @Override public String getInfoMessage(Ability source, GameEvent event, Game game) { MageObject mageObject = game.getObject(source.getSourceId()); if (mageObject != null) { return "You can't cast instant or sorcery spells this turn (" + mageObject.getIdName() + ")."; } return null; } @Override public boolean applies(GameEvent event, Ability source, Game game) { UUID opponentId = getTargetPointer().getFirst(game, source); if (game.isActivePlayer(opponentId)) { if (playersNextTurn == 0) { playersNextTurn = game.getTurnNum(); } if (playersNextTurn == game.getTurnNum()) { if (opponentId.equals(event.getPlayerId())) { MageObject object = game.getObject(event.getSourceId()); if (event.getType() == GameEvent.EventType.CAST_SPELL) { if (object.isInstant() || object.isSorcery()) { return true; } } } } else { discard(); } } else if (playersNextTurn > 0) { discard(); } return false; } }
xSparkleMan/ftc_skystone
TeamCode/src/main/java/org/firstinspires/ftc/teamcode/opmodes/unitTest/ServoBuildPlateUnitTest.java
package org.firstinspires.ftc.teamcode.opmodes.unitTest;

import com.qualcomm.robotcore.eventloop.opmode.OpMode;
import com.qualcomm.robotcore.eventloop.opmode.TeleOp;
import com.qualcomm.robotcore.hardware.Servo;
import com.qualcomm.robotcore.util.Range;

import org.firstinspires.ftc.teamcode.apis.testing.Trigger;

/**
 * Manual test op-mode for the build-plate servos.
 *
 * D-pad up/down adjusts a [0,1] servo value in 0.05 steps (debounced via
 * Trigger), which is shown on telemetry as an angle.
 *
 * NOTE(review): the adjusted servoValue is NOT applied to the servos — the
 * positions are hard-coded to 0.3 / 0.7 below. Confirm whether the
 * hard-coding is a deliberate hold position or a leftover from debugging.
 */
@HxUnitTest
@TeleOp(name = "Servo Build Plate Unit Test", group = "unitTest")
public class ServoBuildPlateUnitTest extends OpMode {

    Servo servoTavaFront, servoTavaRear;
    // Debounce helper: blocks repeated d-pad input for a short window.
    Trigger valueChangerTrigger;

    @Override
    public void init() {
        valueChangerTrigger = new Trigger(0);
        servoTavaFront = hardwareMap.get(Servo.class, "servoTavaFront");
        servoTavaRear = hardwareMap.get(Servo.class, "servoTavaRear");
    }

    // Current test value in [0,1]; 0.05 per d-pad press.
    double servoValue = 1;

    @Override
    public void loop() {
        if(valueChangerTrigger.getState() && (gamepad1.dpad_up || gamepad1.dpad_down)){
            // Re-arm the debounce window (200 ms) on each accepted press.
            valueChangerTrigger = new Trigger(200);
            if(gamepad1.dpad_up)servoValue += 0.05;
            if(gamepad1.dpad_down)servoValue -= 0.05;
        }
        servoValue = Range.clip(servoValue, 0, 1);

        // See class NOTE(review): positions are fixed, servoValue unused here.
        servoTavaFront.setPosition(0.3);
        servoTavaRear.setPosition(0.7);

        telemetry.addData("Angle", servoValue * 180);
    }
}
freedywu/gophercloud
acceptance/openstack/containerinfra/v1/nodegroups_test.go
//go:build acceptance || containerinfra
// +build acceptance containerinfra

package v1

import (
	"fmt"
	"testing"
	"time"

	"github.com/gophercloud/gophercloud"
	"github.com/gophercloud/gophercloud/acceptance/clients"
	"github.com/gophercloud/gophercloud/acceptance/tools"
	"github.com/gophercloud/gophercloud/openstack/containerinfra/v1/nodegroups"
	th "github.com/gophercloud/gophercloud/testhelper"
)

// TestNodeGroupsCRUD exercises the Magnum node-group API end to end against
// a live cloud: list the default groups, get one, create a new group, wait
// for it to build, update its min/max node counts, and delete it.
// Sub-tests are order-dependent (create must precede update/delete).
func TestNodeGroupsCRUD(t *testing.T) {
	// API not available until Magnum train
	clients.SkipRelease(t, "stable/mitaka")
	clients.SkipRelease(t, "stable/newton")
	clients.SkipRelease(t, "stable/ocata")
	clients.SkipRelease(t, "stable/pike")
	clients.SkipRelease(t, "stable/queens")
	clients.SkipRelease(t, "stable/rocky")
	clients.SkipRelease(t, "stable/stein")

	client, err := clients.NewContainerInfraV1Client()
	th.AssertNoErr(t, err)

	// Microversion 1.9 is required for the node-group endpoints.
	client.Microversion = "1.9"

	clusterTemplate, err := CreateKubernetesClusterTemplate(t, client)
	th.AssertNoErr(t, err)
	defer DeleteClusterTemplate(t, client, clusterTemplate.UUID)

	clusterID, err := CreateKubernetesCluster(t, client, clusterTemplate.UUID)
	th.AssertNoErr(t, err)
	defer DeleteCluster(t, client, clusterID)

	var nodeGroupID string

	t.Run("list", func(t *testing.T) { testNodeGroupsList(t, client, clusterID) })
	t.Run("listone-get", func(t *testing.T) { testNodeGroupGet(t, client, clusterID) })
	t.Run("create", func(t *testing.T) { nodeGroupID = testNodeGroupCreate(t, client, clusterID) })

	t.Logf("Created nodegroup: %s", nodeGroupID)

	// Wait for the node group to finish creating
	err = tools.WaitForTimeout(func() (bool, error) {
		ng, err := nodegroups.Get(client, clusterID, nodeGroupID).Extract()
		if err != nil {
			return false, fmt.Errorf("error waiting for node group to create: %v", err)
		}
		return (ng.Status == "CREATE_COMPLETE"), nil
	}, 900*time.Second)
	th.AssertNoErr(t, err)

	t.Run("update", func(t *testing.T) { testNodeGroupUpdate(t, client, clusterID, nodeGroupID) })
	t.Run("delete", func(t *testing.T) { testNodeGroupDelete(t, client, clusterID, nodeGroupID) })
}

// testNodeGroupsList verifies the unfiltered listing returns the two
// default node groups a fresh cluster is created with.
func testNodeGroupsList(t *testing.T, client *gophercloud.ServiceClient, clusterID string) {
	allPages, err := nodegroups.List(client, clusterID, nil).AllPages()
	th.AssertNoErr(t, err)

	allNodeGroups, err := nodegroups.ExtractNodeGroups(allPages)
	th.AssertNoErr(t, err)

	// By default two node groups should be created
	th.AssertEquals(t, 2, len(allNodeGroups))
}

// testNodeGroupGet filters the list to the single "worker" group, then
// fetches it by UUID and checks the two views agree.
func testNodeGroupGet(t *testing.T, client *gophercloud.ServiceClient, clusterID string) {
	listOpts := nodegroups.ListOpts{
		Role: "worker",
	}
	allPages, err := nodegroups.List(client, clusterID, listOpts).AllPages()
	th.AssertNoErr(t, err)

	allNodeGroups, err := nodegroups.ExtractNodeGroups(allPages)
	th.AssertNoErr(t, err)

	// Should be one worker node group
	th.AssertEquals(t, 1, len(allNodeGroups))

	ngID := allNodeGroups[0].UUID

	ng, err := nodegroups.Get(client, clusterID, ngID).Extract()
	th.AssertNoErr(t, err)

	// Should have got the same node group as from the list
	th.AssertEquals(t, ngID, ng.UUID)
	th.AssertEquals(t, "worker", ng.Role)
}

// testNodeGroupCreate creates a randomly-named two-node group and returns
// its UUID for the later update/delete steps.
func testNodeGroupCreate(t *testing.T, client *gophercloud.ServiceClient, clusterID string) string {
	name := tools.RandomString("test-ng-", 8)

	// have to create two nodes for the Update test (can't set minimum above actual node count)
	two := 2
	createOpts := nodegroups.CreateOpts{
		Name:      name,
		NodeCount: &two,
	}

	ng, err := nodegroups.Create(client, clusterID, createOpts).Extract()
	th.AssertNoErr(t, err)

	th.AssertEquals(t, name, ng.Name)

	return ng.UUID
}

// testNodeGroupUpdate patches min/max node counts in three steps (min only,
// max only, then both) and checks the returned resource each time.
func testNodeGroupUpdate(t *testing.T, client *gophercloud.ServiceClient, clusterID, nodeGroupID string) {
	// Node group starts with min=1, max=unset
	// Set min, then set max, then set both
	updateOpts := []nodegroups.UpdateOptsBuilder{
		nodegroups.UpdateOpts{
			Op:    nodegroups.ReplaceOp,
			Path:  "/min_node_count",
			Value: 2,
		},
	}
	ng, err := nodegroups.Update(client, clusterID, nodeGroupID, updateOpts).Extract()
	th.AssertNoErr(t, err)
	th.AssertEquals(t, 2, ng.MinNodeCount)

	updateOpts = []nodegroups.UpdateOptsBuilder{
		nodegroups.UpdateOpts{
			Op:    nodegroups.ReplaceOp,
			Path:  "/max_node_count",
			Value: 5,
		},
	}
	ng, err = nodegroups.Update(client, clusterID, nodeGroupID, updateOpts).Extract()
	th.AssertNoErr(t, err)
	// MaxNodeCount is a *int: nil means "unset", so check presence first.
	th.AssertEquals(t, false, ng.MaxNodeCount == nil)
	th.AssertEquals(t, 5, *ng.MaxNodeCount)

	updateOpts = []nodegroups.UpdateOptsBuilder{
		nodegroups.UpdateOpts{
			Op:    nodegroups.ReplaceOp,
			Path:  "/min_node_count",
			Value: 1,
		},
		nodegroups.UpdateOpts{
			Op:    nodegroups.ReplaceOp,
			Path:  "/max_node_count",
			Value: 3,
		},
	}
	ng, err = nodegroups.Update(client, clusterID, nodeGroupID, updateOpts).Extract()
	th.AssertNoErr(t, err)
	th.AssertEquals(t, false, ng.MaxNodeCount == nil)
	th.AssertEquals(t, 1, ng.MinNodeCount)
	th.AssertEquals(t, 3, *ng.MaxNodeCount)
}

// testNodeGroupDelete deletes the group and polls until a Get returns 404.
func testNodeGroupDelete(t *testing.T, client *gophercloud.ServiceClient, clusterID, nodeGroupID string) {
	err := nodegroups.Delete(client, clusterID, nodeGroupID).ExtractErr()
	th.AssertNoErr(t, err)

	// Wait for the node group to be deleted
	err = tools.WaitFor(func() (bool, error) {
		_, err := nodegroups.Get(client, clusterID, nodeGroupID).Extract()
		if _, ok := err.(gophercloud.ErrDefault404); ok {
			return true, nil
		}
		// Any other outcome (still present, or transient error) keeps polling.
		return false, nil
	})
	th.AssertNoErr(t, err)
}
atif4461/FCS-GPU
athena/Simulation/ISF/ISF_FastCaloSim/ISF_FastCaloSimEvent/src/TFCS2DFunctionHistogram.cxx
/*
  Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
*/

#include "ISF_FastCaloSimEvent/TFCS2DFunctionHistogram.h"
#include <algorithm>
#include <iostream>
#include "TMath.h"
#include "TCanvas.h"
#include "TH2F.h"
#include "TRandom.h"
#include "TFile.h"

//=============================================
//======= TFCS2DFunctionHistogram =========
//=============================================

// Build the cumulative-distribution representation of a 2D histogram:
// m_HistoContents holds the row-major running integral (normalized to 1),
// m_HistoBorders/m_HistoBordersy the x/y bin edges. Negative bins are
// clamped to 0 (with a warning if non-negligible); a non-positive total
// integral empties all vectors, which rnd_to_fct treats as "no function".
void TFCS2DFunctionHistogram::Initialize(TH2* hist)
{
  Int_t nbinsx = hist->GetNbinsX();
  Int_t nbinsy = hist->GetNbinsY();
  Int_t nbins = nbinsx*nbinsy;

  float integral=0;
  m_HistoBorders.resize(nbinsx+1);
  m_HistoBordersy.resize(nbinsy+1);
  m_HistoContents.resize(nbins);
  int ibin=0;
  for (int iy=1; iy<=nbinsy; iy++){
    for (int ix=1; ix<=nbinsx; ix++){
      float binval=hist->GetBinContent(ix,iy);
      if(binval<0) {
        //Can't work if a bin is negative, forcing bins to 0 in this case
        double fraction=binval/hist->Integral();
        if(TMath::Abs(fraction)>1e-5) {
          std::cout<<"WARNING: bin content is negative in histogram "<<hist->GetName()<<" : "<<hist->GetTitle()<<" binval="<<binval<<" "<<fraction*100<<"% of integral="<<hist->Integral()<<". Forcing bin to 0."<<std::endl;
        }
        binval=0;
      }
      integral+=binval;
      m_HistoContents[ibin]=integral;
      ++ibin;
    }
  }
  if(integral<=0) {
    std::cout<<"ERROR: histogram "<<hist->GetName()<<" : "<<hist->GetTitle()<<" integral="<<integral<<" is <=0"<<std::endl;
    m_HistoBorders.resize(0);
    m_HistoBordersy.resize(0);
    m_HistoContents.resize(0);
    return;
  }

  for (int ix=1; ix<=nbinsx; ix++) m_HistoBorders[ix-1]=hist->GetXaxis()->GetBinLowEdge(ix);
  m_HistoBorders[nbinsx]=hist->GetXaxis()->GetXmax();

  for (int iy=1; iy<=nbinsy; iy++) m_HistoBordersy[iy-1]=hist->GetYaxis()->GetBinLowEdge(iy);
  m_HistoBordersy[nbinsy]=hist->GetYaxis()->GetXmax();

  // Normalize the running sum so the last entry is exactly 1.
  for(ibin=0;ibin<nbins;++ibin) m_HistoContents[ibin]/=integral;
}

// Inverse-transform sampling: map two uniform [0,1) randoms to an (x,y)
// point distributed according to the stored 2D histogram. rnd0 picks the
// bin via binary search on the CDF and interpolates linearly in x within
// it; rnd1 is used for uniform interpolation in y inside the selected row.
void TFCS2DFunctionHistogram::rnd_to_fct(float& valuex,float& valuey,float rnd0,float rnd1) const
{
  if(m_HistoContents.size()==0) {
    valuex=0;
    valuey=0;
    return;
  }
  auto it = std::upper_bound(m_HistoContents.begin(),m_HistoContents.end(),rnd0);
  int ibin=std::distance(m_HistoContents.begin(),it);
  if(ibin>=(int)m_HistoContents.size()) ibin=m_HistoContents.size()-1;
  Int_t nbinsx=m_HistoBorders.size()-1;
  Int_t biny = ibin/nbinsx;
  Int_t binx = ibin - nbinsx*biny;

  float basecont=0;
  if(ibin>0) basecont=m_HistoContents[ibin-1];

  float dcont=m_HistoContents[ibin]-basecont;
  if(dcont>0) {
    valuex = m_HistoBorders[binx] + (m_HistoBorders[binx+1]-m_HistoBorders[binx]) * (rnd0-basecont) / dcont;
  } else {
    // Empty bin: fall back to the bin center.
    valuex = m_HistoBorders[binx] + (m_HistoBorders[binx+1]-m_HistoBorders[binx]) / 2;
  }
  valuey = m_HistoBordersy[biny] + (m_HistoBordersy[biny+1]-m_HistoBordersy[biny]) * rnd1;
}

// Statistical self-test: sample the inversion 1e8 times, subtract the input
// histogram, and histogram the per-bin pulls (should be ~N(0,1)). Results
// are written to TFCS2DFunctionHistogram_unit_test.root; canvases are only
// drawn in the standalone build.
void TFCS2DFunctionHistogram::unit_test(TH2* hist)
{
  int nbinsx;
  int nbinsy;
  if(hist==nullptr) {
    // Default fixture: a 64x64 histogram with a smooth ramp, random noise
    // and ~10% randomly-zeroed bins.
//    hist=new TH2F("test2D","test2D",5,0,5,5,0,10);
    nbinsx=64;
    nbinsy=64;
    hist=new TH2F("test2D","test2D",nbinsx,0,1,nbinsy,0,1);
    hist->Sumw2();
    for(int ix=1;ix<=nbinsx;++ix) {
      for(int iy=1;iy<=nbinsy;++iy) {
        hist->SetBinContent(ix,iy,(0.5+gRandom->Rndm())*(nbinsx+ix)*(nbinsy*nbinsy/2+iy*iy));
        if(gRandom->Rndm()<0.1) hist->SetBinContent(ix,iy,0);
        hist->SetBinError(ix,iy,0);
      }
    }
  }
  TFCS2DFunctionHistogram rtof(hist);
  nbinsx=hist->GetNbinsX();
  nbinsy=hist->GetNbinsY();

  float value[2];
  float rnd[2];
  // NOTE(review): this calls an array-based rnd_to_fct(float*,float*)
  // overload that must be declared in the class header — confirm it exists
  // and forwards to the scalar version above.
  for(rnd[0]=0;rnd[0]<0.9999;rnd[0]+=0.25) {
    for(rnd[1]=0;rnd[1]<0.9999;rnd[1]+=0.25) {
      rtof.rnd_to_fct(value,rnd);
      std::cout<<"rnd0="<<rnd[0]<<" rnd1="<<rnd[1]<<" -> x="<<value[0]<<" y="<<value[1]<<std::endl;
    }
  }

//  TH2F* hist_val=new TH2F("val2D","val2D",16,hist->GetXaxis()->GetXmin(),hist->GetXaxis()->GetXmax(),
//                                          16,hist->GetYaxis()->GetXmin(),hist->GetYaxis()->GetXmax());
  TH2F* hist_val=(TH2F*)hist->Clone("hist_val");
  hist_val->Reset();
  int nrnd=100000000;
  // Weight each sample so hist_val integrates to the same total as hist.
  float weight=hist->Integral()/nrnd;
  hist_val->Sumw2();
  for(int i=0;i<nrnd;++i) {
    rnd[0]=gRandom->Rndm();
    rnd[1]=gRandom->Rndm();
    rtof.rnd_to_fct(value,rnd);
    hist_val->Fill(value[0],value[1],weight);
  }
  hist_val->Add(hist,-1);

  TH1F* hist_pull=new TH1F("pull","pull",80,-4,4);
  for(int ix=1;ix<=nbinsx;++ix) {
    for(int iy=1;iy<=nbinsy;++iy) {
      float val=hist_val->GetBinContent(ix,iy);
      float err=hist_val->GetBinError(ix,iy);
      if(err>0) hist_pull->Fill(val/err);
      std::cout<<"val="<<val<<" err="<<err<<std::endl;
    }
  }

  std::unique_ptr<TFile> outputfile(TFile::Open( "TFCS2DFunctionHistogram_unit_test.root", "RECREATE" ));

  if (outputfile != NULL) {
    hist->Write();
    hist_val->Write();
    hist_pull->Write();
    outputfile->ls();
    outputfile->Close();
  }

//Screen output in athena won't make sense and would require linking of additional libraries
#if defined(__FastCaloSimStandAlone__)
  new TCanvas("input","Input");
  hist->Draw("colz");

  new TCanvas("validation","Validation");
  hist_val->Draw("colz");

  new TCanvas("pull","Pull");
  hist_pull->Draw();
#endif
}
colinw7/CQGnuPlot
src/CGnuPlotStyleTestPalette.cpp
#include <CGnuPlotStyleTestPalette.h>
#include <CGnuPlotPlot.h>
#include <CGnuPlotGroup.h>
#include <CGnuPlotRenderer.h>
#include <CGnuPlotUtil.h>
#include <CGnuPlotColorBox.h>

// Plot style implementing gnuplot's "test palette" display: draws the
// red/green/blue/gray component curves of the group's current palette
// as the palette argument sweeps the [0,1] region.
CGnuPlotStyleTestPalette::
CGnuPlotStyleTestPalette() :
 CGnuPlotStyleBase(CGnuPlot::PlotStyle::TEST_PALETTE)
{
}

// Render the palette test into the renderer's [0,1]x[0,1] region.
// For pseudo renderers only the bounding rectangle is emitted.
void
CGnuPlotStyleTestPalette::
draw2D(CGnuPlotPlot *plot, CGnuPlotRenderer *renderer)
{
  // Pseudo renderers just need the extent, not the actual curves.
  if (renderer->isPseudo()) {
    renderer->drawRect(CBBox2D(0,0,1,1), CRGBA(0,0,0), 1);
    return;
  }

  //---

  CGnuPlotGroup *group = plot->group();

  //---

  // Work in a normalized [0,1]x[0,1] window.
  renderer->setRegion(CBBox2D(0, 0, 1, 1));

  double px1, py1, px2, py2;

  renderer->windowToPixel(0.0, 0.0, &px1, &py1);
  renderer->windowToPixel(1.0, 1.0, &px2, &py2);

  //double wx1, wy1, wx2, wy2;

  //renderer->pixelToWindow(0, py1 + 32, &wx1, &wy1);
  //renderer->pixelToWindow(0, py1 + 64, &wx2, &wy2);

  // Walk pixel-by-pixel across the region, sampling the palette at each
  // window x and connecting consecutive samples with line segments.
  // (x1,r1,g1,b1,m1) hold the previous sample; they are only read after
  // 'first' has been cleared, so they are never used uninitialized.
  bool first = true;

  double r1, g1, b1, m1, x1;

  for (double i = px1; i <= px2; i += 1.0) {
    double wx, wy;

    renderer->pixelToWindow(i, 0, &wx, &wy);

    CColor c = group->palette()->getColor(wx);

    CRGBA rgba = c.rgba();

    //renderer->drawLine(CPoint2D(wx, wy1), CPoint2D(wx, wy2), rgba, 0);

    //double x = (i - px1)/(px2 - px1);

    double x2 = wx;

    double r2 = rgba.getRed  ();
    double g2 = rgba.getGreen();
    double b2 = rgba.getBlue ();
    double m2 = rgba.getGray ();

    // Red, green, blue and gray component curves, each in its own color.
    if (! first) {
      renderer->drawLine(CPoint2D(x1, r1), CPoint2D(x2, r2), CRGBA(1,0,0), 0);
      renderer->drawLine(CPoint2D(x1, g1), CPoint2D(x2, g2), CRGBA(0,1,0), 0);
      renderer->drawLine(CPoint2D(x1, b1), CPoint2D(x2, b2), CRGBA(0,0,1), 0);
      renderer->drawLine(CPoint2D(x1, m1), CPoint2D(x2, m2), CRGBA(0,0,0), 0);
    }

    x1 = x2;
    r1 = r2;
    g1 = g2;
    b1 = b2;
    m1 = m2;

    first = false;
  }

  //renderer->drawRect(CBBox2D(0.0, wy1, 1.0, wy2), CRGBA(0,0,0), 1);

#if 0
  xaxis->drawAxis(0.0);
  yaxis->drawAxis(0.0);

  xaxis->drawGrid(0.0, 1.0);
  yaxis->drawGrid(0.0, 1.0);
#endif
}
lakshmiDRIP/NDRIP
src/main/java/org/drip/spaces/rxtor1/NormedR1ToNormedR1.java
// NOTE(review): whitespace-collapsed source reproduced verbatim below; only this
// header comment was added. NormedR1ToNormedR1 is the abstract base for
// f : validated normed R^1 -> validated normed R^1 function spaces. Visible
// behavior: the constructor rejects null input/output spaces (but allows a null
// function); sampleSupremumNorm returns max |f(x_i)| over a ValidatedR1 instance
// (note: it reads adblInstance[0] directly, so an empty instance array would throw
// ArrayIndexOutOfBoundsException rather than the module's usual Exception — TODO
// confirm whether empty instances are excluded upstream); sampleMetricNorm computes
// the p-norm of |f(x_i)|, delegating to the supremum norm when pNorm is
// Integer.MAX_VALUE; populationESS evaluates f at the input space's population mode.
package org.drip.spaces.rxtor1; /* * -*- mode: java; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */ /*! * Copyright (C) 2017 <NAME> * Copyright (C) 2016 <NAME> * Copyright (C) 2015 <NAME> * * This file is part of DRIP, a free-software/open-source library for buy/side financial/trading model * libraries targeting analysts and developers * https://lakshmidrip.github.io/DRIP/ * * DRIP is composed of four main libraries: * * - DRIP Fixed Income - https://lakshmidrip.github.io/DRIP-Fixed-Income/ * - DRIP Asset Allocation - https://lakshmidrip.github.io/DRIP-Asset-Allocation/ * - DRIP Numerical Optimizer - https://lakshmidrip.github.io/DRIP-Numerical-Optimizer/ * - DRIP Statistical Learning - https://lakshmidrip.github.io/DRIP-Statistical-Learning/ * * - DRIP Fixed Income: Library for Instrument/Trading Conventions, Treasury Futures/Options, * Funding/Forward/Overnight Curves, Multi-Curve Construction/Valuation, Collateral Valuation and XVA * Metric Generation, Calibration and Hedge Attributions, Statistical Curve Construction, Bond RV * Metrics, Stochastic Evolution and Option Pricing, Interest Rate Dynamics and Option Pricing, LMM * Extensions/Calibrations/Greeks, Algorithmic Differentiation, and Asset Backed Models and Analytics. * * - DRIP Asset Allocation: Library for model libraries for MPT framework, Black Litterman Strategy * Incorporator, Holdings Constraint, and Transaction Costs. * * - DRIP Numerical Optimizer: Library for Numerical Optimization and Spline Functionality. * * - DRIP Statistical Learning: Library for Statistical Evaluation and Machine Learning. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License.
* * You may obtain a copy of the License at * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * * See the License for the specific language governing permissions and * limitations under the License. */ /** * NormedR1ToNormedR1 is the Abstract Class underlying the f : Validated Normed R^1 To Validated Normed R^1 * Function Spaces. * * The Reference we've used is: * * - <NAME>., and <NAME> (1990): Entropy, Compactness, and Approximation of Operators, Cambridge * University Press, Cambridge UK. * * @author <NAME> */ public abstract class NormedR1ToNormedR1 extends org.drip.spaces.rxtor1.NormedRxToNormedR1 { private org.drip.spaces.metric.R1Normed _r1Input = null; private org.drip.spaces.metric.R1Normed _r1Output = null; private org.drip.function.definition.R1ToR1 _funcR1ToR1 = null; protected NormedR1ToNormedR1 ( final org.drip.spaces.metric.R1Normed r1Input, final org.drip.spaces.metric.R1Normed r1Output, final org.drip.function.definition.R1ToR1 funcR1ToR1) throws java.lang.Exception { if (null == (_r1Input = r1Input) || null == (_r1Output = r1Output)) throw new java.lang.Exception ("NormedR1ToNormedR1 ctr: Invalid Inputs"); _funcR1ToR1 = funcR1ToR1; } /** * Retrieve the Underlying R1ToR1 Function * * @return The Underlying R1ToR1 Function */ public org.drip.function.definition.R1ToR1 function() { return _funcR1ToR1; } @Override public double sampleSupremumNorm ( final org.drip.spaces.instance.GeneralizedValidatedVector gvvi) throws java.lang.Exception { if (null == _funcR1ToR1 || null == gvvi || !gvvi.tensorSpaceType().match (_r1Input)) throw new java.lang.Exception ("NormedR1ToNormedR1::sampleSupremumNorm => Invalid Input"); double[] adblInstance = ((org.drip.spaces.instance.ValidatedR1) gvvi).instance(); int iNumSample = adblInstance.length; double
dblSupremumNorm = java.lang.Math.abs (_funcR1ToR1.evaluate (adblInstance[0])); for (int i = 1; i < iNumSample; ++i) { double dblResponse = java.lang.Math.abs (_funcR1ToR1.evaluate (adblInstance[i])); if (dblResponse > dblSupremumNorm) dblSupremumNorm = dblResponse; } return dblSupremumNorm; } @Override public double sampleMetricNorm ( final org.drip.spaces.instance.GeneralizedValidatedVector gvvi) throws java.lang.Exception { int iPNorm = _r1Output.pNorm(); if (java.lang.Integer.MAX_VALUE == iPNorm) return sampleSupremumNorm (gvvi); if (null == _funcR1ToR1 || null == gvvi || !gvvi.tensorSpaceType().match (_r1Input)) throw new java.lang.Exception ("NormedR1ToNormedR1::sampleMetricNorm => Invalid Input"); double[] adblInstance = ((org.drip.spaces.instance.ValidatedR1) gvvi).instance(); double dblNorm = 0.; int iNumSample = adblInstance.length; for (int i = 0; i < iNumSample; ++i) dblNorm += java.lang.Math.pow (java.lang.Math.abs (_funcR1ToR1.evaluate (adblInstance[i])), iPNorm); return java.lang.Math.pow (dblNorm, 1. / iPNorm); } @Override public double populationESS() throws java.lang.Exception { if (null == _funcR1ToR1) throw new java.lang.Exception ("NormedR1ToNormedR1::populationESS => Invalid Input"); return _funcR1ToR1.evaluate (_r1Input.populationMode()); } @Override public org.drip.spaces.metric.R1Normed outputMetricVectorSpace() { return _r1Output; } @Override public org.drip.spaces.metric.R1Normed inputMetricVectorSpace() { return _r1Input; } }
joshje/times-components
packages/ad/__tests__/ios/ad-placeholder-with-style.ios.test.js
<gh_stars>0
// iOS entry point for the shared ad-placeholder-with-style test suite:
// the .native module exports a function that registers the tests, and
// calling it here runs them under the iOS platform resolution.
import shared from "../ad-placeholder-with-style.native";
shared();
adamkorynta/opendcs
src/main/java/decodes/decoder/PositionOperation.java
/*
 * $Id$
 *
 * $State$
 *
 * $Log$
 * Revision 1.2  2010/09/13 19:30:36  mmaloney
 * Scan should always jump to the specified label if the scan fails, even if the scan ran out of data.
 *
 * Revision 1.1  2008/04/04 18:21:01  cvs
 * Added legacy code to repository
 *
 * Revision 1.4  2007/12/11 01:05:17  mmaloney
 * javadoc cleanup
 *
 * Revision 1.3  2004/08/31 16:31:21  mjmaloney
 * javadoc
 *
 * Revision 1.2  2001/05/21 13:38:50  mike
 * dev
 *
 * Revision 1.1  2001/05/06 22:53:18  mike
 * Added
 *
 */
package decodes.decoder;

import ilex.util.Logger;

import java.io.*;
import java.util.Vector;

/**
 * PositionOperation implements the "nP" DECODES format operator, which moves
 * the raw-data cursor to the nth character on the current line.
 */
public class PositionOperation extends DecodesOperation
{
	/**
	 * Constructor.
	 * @param position the desired character position on the line.
	 */
	public PositionOperation(int position)
	{
		// Note 'position' stored as 'repetitions' in super class.
		super(position);
	}

	/** @return type code for this operation ('P'). */
	public char getType() { return 'P'; }

	/**
	 * Executes this operation using the context provided: repositions the
	 * raw-data cursor to the stored position. Nothing is decoded into msg.
	 * @param dd holds the raw data and context.
	 * @param msg store decoded values here.
	 * @throws DecoderException or subclass if error detected.
	 */
	public void execute(DataOperations dd, DecodedMessage msg)
		throws DecoderException
	{
		Logger.instance().log(Logger.E_DEBUG3, "Positioning to " + repetitions);

		dd.position(repetitions);
	}
}
codacy-badger/tp
src/main/java/seedu/internhunter/model/FilterableItemList.java
package seedu.internhunter.model;

import java.util.function.Predicate;

import javafx.collections.ObservableList;

import seedu.internhunter.model.item.Item;
import seedu.internhunter.model.item.ItemList;

/**
 * A list of {@link Item}s supporting CRUD operations together with a filtered,
 * observable view whose contents are controlled by a {@link Predicate}.
 *
 * @param <T> the concrete {@link Item} subtype stored in this list.
 */
public interface FilterableItemList<T extends Item> {

    /**
     * {@code Predicate} that always evaluates to true, i.e. shows every item.
     */
    Predicate<Item> PREDICATE_SHOW_ALL_ITEMS = unused -> true;

    /**
     * Replaces item list data with the data in {@code itemList}.
     */
    void setItemList(ItemList<T> itemList);

    /**
     * Returns the ItemList (the unfiltered backing list).
     */
    ItemList<T> getUnfilteredItemList();

    /**
     * Returns true if a Item with the same identity as {@code Item} exists in the item list.
     */
    boolean hasItem(T item);

    /**
     * Deletes the given Item.
     * The Item must exist in the item list.
     */
    void deleteItem(T target);

    /**
     * Deletes the given Item according to the weaker notion of equality.
     * The Item may not necessarily exist in the item list.
     */
    void deleteSameItem(T target);

    /**
     * Adds the given Item.
     * {@code Item} must not already exist in the item list.
     */
    void addItem(T item);

    /**
     * Replaces the given Item {@code target} with {@code editedItem}.
     * {@code target} must exist in the item list.
     * The Item identity of {@code editedItem} must not be the same as another existing Item in the item list.
     */
    void setItem(T target, T editedItem);

    /**
     * Returns an unmodifiable view of the filtered Item list
     */
    ObservableList<T> getFilteredItemList();

    /** Returns an Item from the filtered Item list at the given index. */
    T getItemFromFilteredItemList(int index);

    /** Returns the observable Item List (unfiltered). */
    ObservableList<T> getItemList();

    /**
     * Updates the filter of the filtered Item list to filter by the given {@code predicate}.
     *
     * @throws NullPointerException if {@code predicate} is null.
     */
    void updateFilteredItemList(Predicate<? super T> predicate);

    /** Retrieves the size of the list. */
    int getSize();
}
LikoGuan/code_guns
guns-admin/src/main/java/com/stylefeng/guns/modular/system/model/District.java
<reponame>LikoGuan/code_guns
// NOTE(review): whitespace-collapsed source reproduced verbatim below; only this
// header comment was added. District is a MyBatis-Plus ActiveRecord entity
// (extends Model<District>) mapped over an administrative-district table:
// auto-increment id, parent id (pid), names, level (1 = province per the field
// comment), sort order, create/update timestamps, longitude/latitude, and an
// activation flag. pkVal() returns 'id' as the primary key. Pure generated
// getters/setters/toString — no business logic.
package com.stylefeng.guns.modular.system.model; import com.baomidou.mybatisplus.enums.IdType; import java.math.BigDecimal; import java.util.Date; import com.baomidou.mybatisplus.annotations.TableId; import com.baomidou.mybatisplus.annotations.TableField; import com.baomidou.mybatisplus.activerecord.Model; import java.io.Serializable; /** * <p> * * </p> * * @author stylefeng * @since 2018-10-11 */ public class District extends Model<District> { private static final long serialVersionUID = 1L; /** * 主键id */ @TableId(value = "id", type = IdType.AUTO) private Integer id; /** * 父id */ private Integer pid; /** * 地区名称 */ @TableField("district_name") private String districtName; /** * 简称 */ @TableField("short_name") private String shortName; /** * 地区级别(省为1级,依次内推) */ private Integer level; /** * 排序顺序 */ @TableField("sort_order") private Integer sortOrder; /** * 创建时间 */ @TableField("create_time") private Date createTime; /** * 更新时间 */ @TableField("update_time") private Date updateTime; /** * 经度 */ private BigDecimal longitude; /** * 纬度 */ private BigDecimal latitude; /** * 激活状态(1 启用 0 未启用) */ @TableField("is_activated") private Integer isActivated; public Integer getId() { return id; } public void setId(Integer id) { this.id = id; } public Integer getPid() { return pid; } public void setPid(Integer pid) { this.pid = pid; } public String getDistrictName() { return districtName; } public void setDistrictName(String districtName) { this.districtName = districtName; } public String getShortName() { return shortName; } public void setShortName(String shortName) { this.shortName = shortName; } public Integer getLevel() { return level; } public void setLevel(Integer level) { this.level = level; } public Integer getSortOrder() { return sortOrder; } public void setSortOrder(Integer sortOrder) { this.sortOrder = sortOrder; } public Date getCreateTime() { return createTime; } public void setCreateTime(Date createTime) { this.createTime = createTime; }
public Date getUpdateTime() { return updateTime; } public void setUpdateTime(Date updateTime) { this.updateTime = updateTime; } public BigDecimal getLongitude() { return longitude; } public void setLongitude(BigDecimal longitude) { this.longitude = longitude; } public BigDecimal getLatitude() { return latitude; } public void setLatitude(BigDecimal latitude) { this.latitude = latitude; } public Integer getIsActivated() { return isActivated; } public void setIsActivated(Integer isActivated) { this.isActivated = isActivated; } @Override protected Serializable pkVal() { return this.id; } @Override public String toString() { return "District{" + "id=" + id + ", pid=" + pid + ", districtName=" + districtName + ", shortName=" + shortName + ", level=" + level + ", sortOrder=" + sortOrder + ", createTime=" + createTime + ", updateTime=" + updateTime + ", longitude=" + longitude + ", latitude=" + latitude + ", isActivated=" + isActivated + "}"; } }
RUB-Informatik-im-Bauwesen/XPlanung-JAXB-Binding
src/net/opengis/gml/_3/AbstractFeatureType.java
<gh_stars>0
// NOTE(review): JAXB-generated class (generator comments below are in German and
// are preserved verbatim — regeneration would discard manual edits). Summary:
// AbstractFeatureType is the JAXB binding of GML 3.2's AbstractFeatureType —
// it extends AbstractGMLType with a single optional, nillable 'boundedBy'
// element (BoundingShapeType) plus its getter/setter. @XmlSeeAlso enumerates
// the XPlanung 5.0 subtypes bound against it.
//
// Diese Datei wurde mit der JavaTM Architecture for XML Binding(JAXB) Reference Implementation, v2.2.8-b130911.1802 generiert // Siehe <a href="http://java.sun.com/xml/jaxb">http://java.sun.com/xml/jaxb</a> // Änderungen an dieser Datei gehen bei einer Neukompilierung des Quellschemas verloren. // Generiert: 2018.06.04 um 02:29:48 PM CEST // package net.opengis.gml._3; import javax.xml.bind.annotation.XmlAccessType; import javax.xml.bind.annotation.XmlAccessorType; import javax.xml.bind.annotation.XmlElement; import javax.xml.bind.annotation.XmlSeeAlso; import javax.xml.bind.annotation.XmlType; import de.xplanung.xplangml._5._0.RPLegendenobjektType; import de.xplanung.xplangml._5._0.XPAbstraktesPraesentationsobjektType; import de.xplanung.xplangml._5._0.XPBegruendungAbschnittType; import de.xplanung.xplangml._5._0.XPBereichType; import de.xplanung.xplangml._5._0.XPObjektType; import de.xplanung.xplangml._5._0.XPPlanType; import de.xplanung.xplangml._5._0.XPRasterdarstellungType; import de.xplanung.xplangml._5._0.XPTextAbschnittType; /** * <p>Java-Klasse für AbstractFeatureType complex type. * * <p>Das folgende Schemafragment gibt den erwarteten Content an, der in dieser Klasse enthalten ist.
* * <pre> * &lt;complexType name="AbstractFeatureType"> * &lt;complexContent> * &lt;extension base="{http://www.opengis.net/gml/3.2}AbstractGMLType"> * &lt;sequence> * &lt;element ref="{http://www.opengis.net/gml/3.2}boundedBy" minOccurs="0"/> * &lt;/sequence> * &lt;/extension> * &lt;/complexContent> * &lt;/complexType> * </pre> * * */ @XmlAccessorType(XmlAccessType.FIELD) @XmlType(name = "AbstractFeatureType", propOrder = { "boundedBy" }) @XmlSeeAlso({ XPRasterdarstellungType.class, XPTextAbschnittType.class, XPBereichType.class, XPBegruendungAbschnittType.class, XPPlanType.class, AbstractFeatureCollectionType.class, XPAbstraktesPraesentationsobjektType.class, RPLegendenobjektType.class, XPObjektType.class, AbstractCoverageType.class }) public abstract class AbstractFeatureType extends AbstractGMLType { @XmlElement(nillable = true) protected BoundingShapeType boundedBy; /** * Ruft den Wert der boundedBy-Eigenschaft ab. * * @return * possible object is * {@link BoundingShapeType } * */ public BoundingShapeType getBoundedBy() { return boundedBy; } /** * Legt den Wert der boundedBy-Eigenschaft fest. * * @param value * allowed object is * {@link BoundingShapeType } * */ public void setBoundedBy(BoundingShapeType value) { this.boundedBy = value; } }
Slynchy/March22-JS
src/engine/March22.js
<reponame>Slynchy/March22-JS<filename>src/engine/March22.js
const ScriptCompiler = require('./ScriptCompiler.js');
const BackgroundHandler = require('./handlers/BackgroundHandler.js');
const CharacterHandler = require('./handlers/CharacterHandler.js');
const InputHandler = require('./handlers/InputHandler.js');
const CustomFunctionHandler = require('./handlers/CustomFunctionHandler.js');
const SceneHandler = require('./handlers/SceneHandler.js');
const AssetHandler = require('./handlers/AssetHandler.js');
const ScriptHandler = require('./handlers/ScriptHandler.js');
const EventHandler = require('./handlers/EventHandler.js');

/**
 * Master class wiring together all M22 engine handlers.
 *
 * Note: despite the class comment in earlier revisions, this is NOT enforced
 * as a singleton here (the global-registration code at the bottom of the file
 * is commented out); each `new March22()` builds its own handler set, though
 * the constructor does overwrite `global.EventHandler`.
 */
class March22 {
    constructor() {
        // Each handler receives `this` so handlers can reach their siblings.
        this.ScriptCompiler = ScriptCompiler;
        this.BackgroundHandler = new BackgroundHandler(this);
        this.CharacterHandler = new CharacterHandler(this);
        this.InputHandler = new InputHandler(this);
        this.CustomFunctionHandler = new CustomFunctionHandler(this);
        this.SceneHandler = new SceneHandler(this);
        this.AssetHandler = new AssetHandler(this);
        this.ScriptHandler = new ScriptHandler(this);
        // Registered globally as well as on the instance; driven by the
        // SceneHandler's ticker.
        global.EventHandler = this.EventHandler = new EventHandler(this.SceneHandler.ticker);
        // Set once the renderer's element is attached to the document.
        this._domElement = null;
    }

    /**
     * The engine's DOM element: the attached element if
     * addViewToDocument() has run, otherwise the SceneHandler's element.
     */
    get domElement() {
        if (!this._domElement)
            return this.SceneHandler.domElement;
        else {
            return this._domElement;
        }
    }

    /** Attaches the renderer view to document.body and hooks window resize. */
    addViewToDocument() {
        this._domElement = document.body.appendChild(this.domElement);
        window.addEventListener('resize', this.SceneHandler.resize);
        this.SceneHandler.resize();
    }

    /**
     * Compiles the named script and loads it as the active script.
     * @param {String} scriptName script to compile
     * @param {Function} onSuccess called after the script is loaded
     * @param {Function} onFail passed through to the compiler
     * @private
     */
    _loadScriptToActive(scriptName, onSuccess, onFail) {
        return this.ScriptCompiler.CompileScript(
            scriptName,
            data => {
                this.ScriptHandler.LoadScript(data);
                if (onSuccess)
                    onSuccess();
            },
            onFail
        );
    }

    /**
     * Boots the engine: compiles/loads the entry script, loads its assets,
     * sets up textbox textures and starts the scene loop.
     * @param {String} entrypoint Script name (w/o extensions) to start the engine with;
     *                            defaults to 'START_SCRIPT'.
     */
    start(entrypoint) {
        if (!entrypoint) {
            entrypoint = 'START_SCRIPT';
        }

        this._loadScriptToActive(
            entrypoint,
            () => {
                this._loadAssetsForScript(
                    this.ScriptHandler.activeScript,
                    assets => {
                        // assets are loaded, start script
                        this.ScriptHandler.activeScript.addAssets(assets);
                        console.log(this.ScriptHandler.activeScript);
                        this.ScriptHandler.NextLine();
                        this.SceneHandler.textBox.setupTextboxTextures(
                            {
                                narrative: this.ScriptHandler.activeScript.getTextbox('narrative').texture,
                                dialogue: this.ScriptHandler.activeScript.getTextbox('dialogue').texture,
                                novel: this.ScriptHandler.activeScript.getTextbox('novel').texture
                            }
                        );
                        this.SceneHandler.startLoop();
                    },
                    err => {
                        throw new Error(err.reason);
                    }
                );
            },
            err => {
                throw new Error(err.reason);
            }
        );
    }

    /**
     * Delegates asset loading for a script to the AssetHandler.
     * (The unload-existing-assets step described below is not implemented
     * here — AssetHandler is assumed to handle it; TODO confirm.)
     * @param {M22Script} scriptObj
     * @param {Function} onSuccess receives the loaded assets
     * @param {Function} onFail
     * @private
     */
    _loadAssetsForScript(scriptObj, onSuccess, onFail) {
        // unload existing assets to clear memory
        // load new assets
        // run callback
        this.AssetHandler.loadAssetsFromScript(scriptObj, onSuccess, onFail);
    }
}

// if (!global.hasOwnProperty('M22')) {
//     global.M22 = new March22();
// }

module.exports = March22;
hoangvlbk61/HUST-Final-project-2019B-TRMS
TRMS-Frontend/pages/data-display/list.js
<gh_stars>0 import Demo from '../../demos/antd/list/demo'; const DemoPage = () => <Demo />; export default DemoPage;
aliyun/dingtalk-sdk
dingtalk/java/src/main/java/com/aliyun/dingtalkbadge_1_0/models/CreateBadgeCodeUserInstanceResponseBody.java
// This file is auto-generated, don't edit it. Thanks. package com.aliyun.dingtalkbadge_1_0.models; import com.aliyun.tea.*; public class CreateBadgeCodeUserInstanceResponseBody extends TeaModel { // 码ID @NameInMap("codeId") public String codeId; // 码详情跳转地址 @NameInMap("codeDetailUrl") public String codeDetailUrl; public static CreateBadgeCodeUserInstanceResponseBody build(java.util.Map<String, ?> map) throws Exception { CreateBadgeCodeUserInstanceResponseBody self = new CreateBadgeCodeUserInstanceResponseBody(); return TeaModel.build(map, self); } public CreateBadgeCodeUserInstanceResponseBody setCodeId(String codeId) { this.codeId = codeId; return this; } public String getCodeId() { return this.codeId; } public CreateBadgeCodeUserInstanceResponseBody setCodeDetailUrl(String codeDetailUrl) { this.codeDetailUrl = codeDetailUrl; return this; } public String getCodeDetailUrl() { return this.codeDetailUrl; } }
xiaofan2406/ekko
docs/components/Navigation.js
<gh_stars>0
// NOTE(review): source reproduced verbatim below (only this header added) — the
// emotion `css` template literal is runtime data whose exact contents must not
// be reflowed. Navigation renders a fixed bottom-left column of NavLinks built
// from the NAV_LINKS constant; the active route gets a colored right border.
// Exported both as named `Component` and as the default export.
/* @flow */ import React from 'react'; import { css } from 'react-emotion'; import { NavLink } from 'react-router-dom'; import { theme, spacing } from 'styles'; import { NAV_LINKS } from 'utils/constants'; const cssNavigation = css` position: fixed; left: ${spacing.unit}px; bottom: ${spacing.unit}px; display: flex; flex-direction: column; border: 1px solid ${theme.borderColor}; & > .link { text-decoration: none; padding: 0.5em; display: inline-block; &:hover { background-color: ${theme.borderColor}; } &.active { border-right: 2px solid ${theme.primaryColor}; } } `; const Navigation = () => ( <div className={cssNavigation}> {Object.keys(NAV_LINKS).map(routeName => ( <NavLink className="link" activeClassName="active" key={NAV_LINKS[routeName].to} exact={NAV_LINKS[routeName].exact} to={NAV_LINKS[routeName].to} > {NAV_LINKS[routeName].name} </NavLink> ))} </div> ); export { Navigation as Component }; export default Navigation;
lmokto/ilovec
punteros/example2.c
<filename>punteros/example2.c<gh_stars>0
/* Pointer-arithmetic teaching example (comments translated from Spanish). */
int main(void){
    // Let's define these variables:
    int x[100],b,*pa,*pb;
    //...
    x[50]=10;    // Assign the value 10 to array element #50.
    pa=&x[50];   // Store in pointer pa the memory address of x[50].

    // Now we show some possible operations:
    b = *pa+1;   // Take the value pa points at and add 1.
                 // Equivalent to: b = x[50] + 1;  => b == 11.
    b = *(pa+1); // First advance to the next memory address, then dereference.
                 // The result is: b = x[51];
    pb = &x[10]; // Pointer pb is assigned the address of x[10].
    *pb = 0;     // The value pb points at is set to 0.
                 // Same as saying: x[10] = 0.
    *pb += 2;    // The pointed-to value is incremented by two, i.e. x[10] = 2.
    (*pb)--;     // The pointed-to value is decremented by one (x[10] becomes 1).
    x[0] = *pb--; // x[0] receives the value of x[10], then pb moves to point at x[9].
                  // Remember: -- here is a post-decrement — assign first, then decrement the pointer.
    return 0;
}
rromanchuk/xptools
src/Utils/RoadNetUtils.h
<filename>src/Utils/RoadNetUtils.h
/*
 * Copyright (c) 2004, Laminar Research.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 *
 */
#ifndef ROADNETUTILS_H
#define ROADNETUTILS_H

#include "XNetworkDefs.h"
#include "XObjDefs.h"
#include "ExtrudeFunc.h"

/*
 * Loads road-network definitions from the named file into outDefs.
 * outDesiredObjs, if non-NULL, presumably receives the names of objects
 * the definitions request — NOTE(review): inferred from the parameter
 * name; confirm against the implementation. Returns true/false status
 * (presumably success/failure — TODO confirm).
 */
bool	LoadNetworkDefs(
			const char * 			inFileName,
			NetworkDef_t&			outDefs,
			vector<string> *		outDesiredObjs);

/*
 * Extrudes the given network data using the given definitions, emitting
 * geometry via extrudeFunc and objects via objReceiveFunc; inExtrudeRef
 * and inObjReceiveRef are opaque context pointers passed back to the
 * respective callbacks.
 */
void	ExtrudeNetwork(
			const NetworkDef_t&		inDefs,
			const NetworkData_t&	inRoads,
			ExtrudeFunc_f 			extrudeFunc,
			ReceiveObj_f			objReceiveFunc,
			void * 					inExtrudeRef,
			void *					inObjReceiveRef);

#endif
elimak/music-tag-extractor
node_modules/redux-devtools-themes/lib/index.js
<gh_stars>1-10
// NOTE(review): Babel-compiled (generated) module reproduced verbatim below;
// only this header comment was added. It re-exports everything from 'base16'
// (minus its default) onto this module's exports and adds the local
// 'nicinabox' theme as a named export. Do not hand-edit compiled output —
// change the ES-module source and rebuild instead.
'use strict'; exports.__esModule = true; function _interopRequire(obj) { return obj && obj.__esModule ? obj['default'] : obj; } function _interopExportWildcard(obj, defaults) { var newObj = defaults({}, obj); delete newObj['default']; return newObj; } function _defaults(obj, defaults) { var keys = Object.getOwnPropertyNames(defaults); for (var i = 0; i < keys.length; i++) { var key = keys[i]; var value = Object.getOwnPropertyDescriptor(defaults, key); if (value && value.configurable && obj[key] === undefined) { Object.defineProperty(obj, key, value); } } return obj; } var _base16 = require('base16'); _defaults(exports, _interopExportWildcard(_base16, _defaults)); var _nicinabox = require('./nicinabox'); exports.nicinabox = _interopRequire(_nicinabox);
dmgerman/zephyrd3
ext/hal/nordic/nrfx_config_nrf52810.h
<filename>ext/hal/nordic/nrfx_config_nrf52810.h DECL|NRFX_CLOCK_CONFIG_DEBUG_COLOR|macro|NRFX_CLOCK_CONFIG_DEBUG_COLOR DECL|NRFX_CLOCK_CONFIG_INFO_COLOR|macro|NRFX_CLOCK_CONFIG_INFO_COLOR DECL|NRFX_CLOCK_CONFIG_IRQ_PRIORITY|macro|NRFX_CLOCK_CONFIG_IRQ_PRIORITY DECL|NRFX_CLOCK_CONFIG_LF_SRC|macro|NRFX_CLOCK_CONFIG_LF_SRC DECL|NRFX_CLOCK_CONFIG_LOG_ENABLED|macro|NRFX_CLOCK_CONFIG_LOG_ENABLED DECL|NRFX_CLOCK_CONFIG_LOG_LEVEL|macro|NRFX_CLOCK_CONFIG_LOG_LEVEL DECL|NRFX_CLOCK_ENABLED|macro|NRFX_CLOCK_ENABLED DECL|NRFX_COMP_CONFIG_DEBUG_COLOR|macro|NRFX_COMP_CONFIG_DEBUG_COLOR DECL|NRFX_COMP_CONFIG_HYST|macro|NRFX_COMP_CONFIG_HYST DECL|NRFX_COMP_CONFIG_INFO_COLOR|macro|NRFX_COMP_CONFIG_INFO_COLOR DECL|NRFX_COMP_CONFIG_INPUT|macro|NRFX_COMP_CONFIG_INPUT DECL|NRFX_COMP_CONFIG_IRQ_PRIORITY|macro|NRFX_COMP_CONFIG_IRQ_PRIORITY DECL|NRFX_COMP_CONFIG_ISOURCE|macro|NRFX_COMP_CONFIG_ISOURCE DECL|NRFX_COMP_CONFIG_LOG_ENABLED|macro|NRFX_COMP_CONFIG_LOG_ENABLED DECL|NRFX_COMP_CONFIG_LOG_LEVEL|macro|NRFX_COMP_CONFIG_LOG_LEVEL DECL|NRFX_COMP_CONFIG_MAIN_MODE|macro|NRFX_COMP_CONFIG_MAIN_MODE DECL|NRFX_COMP_CONFIG_REF|macro|NRFX_COMP_CONFIG_REF DECL|NRFX_COMP_CONFIG_SPEED_MODE|macro|NRFX_COMP_CONFIG_SPEED_MODE DECL|NRFX_COMP_ENABLED|macro|NRFX_COMP_ENABLED DECL|NRFX_CONFIG_NRF52810_H__|macro|NRFX_CONFIG_NRF52810_H__ DECL|NRFX_EGU_ENABLED|macro|NRFX_EGU_ENABLED DECL|NRFX_GPIOTE_CONFIG_DEBUG_COLOR|macro|NRFX_GPIOTE_CONFIG_DEBUG_COLOR DECL|NRFX_GPIOTE_CONFIG_INFO_COLOR|macro|NRFX_GPIOTE_CONFIG_INFO_COLOR DECL|NRFX_GPIOTE_CONFIG_IRQ_PRIORITY|macro|NRFX_GPIOTE_CONFIG_IRQ_PRIORITY DECL|NRFX_GPIOTE_CONFIG_LOG_ENABLED|macro|NRFX_GPIOTE_CONFIG_LOG_ENABLED DECL|NRFX_GPIOTE_CONFIG_LOG_LEVEL|macro|NRFX_GPIOTE_CONFIG_LOG_LEVEL DECL|NRFX_GPIOTE_CONFIG_NUM_OF_LOW_POWER_EVENTS|macro|NRFX_GPIOTE_CONFIG_NUM_OF_LOW_POWER_EVENTS DECL|NRFX_GPIOTE_ENABLED|macro|NRFX_GPIOTE_ENABLED DECL|NRFX_PDM_CONFIG_CLOCK_FREQ|macro|NRFX_PDM_CONFIG_CLOCK_FREQ 
DECL|NRFX_PDM_CONFIG_DEBUG_COLOR|macro|NRFX_PDM_CONFIG_DEBUG_COLOR DECL|NRFX_PDM_CONFIG_EDGE|macro|NRFX_PDM_CONFIG_EDGE DECL|NRFX_PDM_CONFIG_INFO_COLOR|macro|NRFX_PDM_CONFIG_INFO_COLOR DECL|NRFX_PDM_CONFIG_IRQ_PRIORITY|macro|NRFX_PDM_CONFIG_IRQ_PRIORITY DECL|NRFX_PDM_CONFIG_LOG_ENABLED|macro|NRFX_PDM_CONFIG_LOG_ENABLED DECL|NRFX_PDM_CONFIG_LOG_LEVEL|macro|NRFX_PDM_CONFIG_LOG_LEVEL DECL|NRFX_PDM_CONFIG_MODE|macro|NRFX_PDM_CONFIG_MODE DECL|NRFX_PDM_ENABLED|macro|NRFX_PDM_ENABLED DECL|NRFX_POWER_CONFIG_DEFAULT_DCDCENHV|macro|NRFX_POWER_CONFIG_DEFAULT_DCDCENHV DECL|NRFX_POWER_CONFIG_DEFAULT_DCDCEN|macro|NRFX_POWER_CONFIG_DEFAULT_DCDCEN DECL|NRFX_POWER_CONFIG_IRQ_PRIORITY|macro|NRFX_POWER_CONFIG_IRQ_PRIORITY DECL|NRFX_POWER_ENABLED|macro|NRFX_POWER_ENABLED DECL|NRFX_PPI_CONFIG_DEBUG_COLOR|macro|NRFX_PPI_CONFIG_DEBUG_COLOR DECL|NRFX_PPI_CONFIG_INFO_COLOR|macro|NRFX_PPI_CONFIG_INFO_COLOR DECL|NRFX_PPI_CONFIG_LOG_ENABLED|macro|NRFX_PPI_CONFIG_LOG_ENABLED DECL|NRFX_PPI_CONFIG_LOG_LEVEL|macro|NRFX_PPI_CONFIG_LOG_LEVEL DECL|NRFX_PPI_ENABLED|macro|NRFX_PPI_ENABLED DECL|NRFX_PRS_BOX_0_ENABLED|macro|NRFX_PRS_BOX_0_ENABLED DECL|NRFX_PRS_BOX_1_ENABLED|macro|NRFX_PRS_BOX_1_ENABLED DECL|NRFX_PRS_BOX_2_ENABLED|macro|NRFX_PRS_BOX_2_ENABLED DECL|NRFX_PRS_BOX_3_ENABLED|macro|NRFX_PRS_BOX_3_ENABLED DECL|NRFX_PRS_BOX_4_ENABLED|macro|NRFX_PRS_BOX_4_ENABLED DECL|NRFX_PRS_CONFIG_DEBUG_COLOR|macro|NRFX_PRS_CONFIG_DEBUG_COLOR DECL|NRFX_PRS_CONFIG_INFO_COLOR|macro|NRFX_PRS_CONFIG_INFO_COLOR DECL|NRFX_PRS_CONFIG_LOG_ENABLED|macro|NRFX_PRS_CONFIG_LOG_ENABLED DECL|NRFX_PRS_CONFIG_LOG_LEVEL|macro|NRFX_PRS_CONFIG_LOG_LEVEL DECL|NRFX_PRS_ENABLED|macro|NRFX_PRS_ENABLED DECL|NRFX_PWM0_ENABLED|macro|NRFX_PWM0_ENABLED DECL|NRFX_PWM_CONFIG_DEBUG_COLOR|macro|NRFX_PWM_CONFIG_DEBUG_COLOR DECL|NRFX_PWM_CONFIG_INFO_COLOR|macro|NRFX_PWM_CONFIG_INFO_COLOR DECL|NRFX_PWM_CONFIG_LOG_ENABLED|macro|NRFX_PWM_CONFIG_LOG_ENABLED DECL|NRFX_PWM_CONFIG_LOG_LEVEL|macro|NRFX_PWM_CONFIG_LOG_LEVEL 
DECL|NRFX_PWM_DEFAULT_CONFIG_BASE_CLOCK|macro|NRFX_PWM_DEFAULT_CONFIG_BASE_CLOCK DECL|NRFX_PWM_DEFAULT_CONFIG_COUNT_MODE|macro|NRFX_PWM_DEFAULT_CONFIG_COUNT_MODE DECL|NRFX_PWM_DEFAULT_CONFIG_IRQ_PRIORITY|macro|NRFX_PWM_DEFAULT_CONFIG_IRQ_PRIORITY DECL|NRFX_PWM_DEFAULT_CONFIG_LOAD_MODE|macro|NRFX_PWM_DEFAULT_CONFIG_LOAD_MODE DECL|NRFX_PWM_DEFAULT_CONFIG_OUT0_PIN|macro|NRFX_PWM_DEFAULT_CONFIG_OUT0_PIN DECL|NRFX_PWM_DEFAULT_CONFIG_OUT1_PIN|macro|NRFX_PWM_DEFAULT_CONFIG_OUT1_PIN DECL|NRFX_PWM_DEFAULT_CONFIG_OUT2_PIN|macro|NRFX_PWM_DEFAULT_CONFIG_OUT2_PIN DECL|NRFX_PWM_DEFAULT_CONFIG_OUT3_PIN|macro|NRFX_PWM_DEFAULT_CONFIG_OUT3_PIN DECL|NRFX_PWM_DEFAULT_CONFIG_STEP_MODE|macro|NRFX_PWM_DEFAULT_CONFIG_STEP_MODE DECL|NRFX_PWM_DEFAULT_CONFIG_TOP_VALUE|macro|NRFX_PWM_DEFAULT_CONFIG_TOP_VALUE DECL|NRFX_PWM_ENABLED|macro|NRFX_PWM_ENABLED DECL|NRFX_QDEC_CONFIG_DBFEN|macro|NRFX_QDEC_CONFIG_DBFEN DECL|NRFX_QDEC_CONFIG_DEBUG_COLOR|macro|NRFX_QDEC_CONFIG_DEBUG_COLOR DECL|NRFX_QDEC_CONFIG_INFO_COLOR|macro|NRFX_QDEC_CONFIG_INFO_COLOR DECL|NRFX_QDEC_CONFIG_IRQ_PRIORITY|macro|NRFX_QDEC_CONFIG_IRQ_PRIORITY DECL|NRFX_QDEC_CONFIG_LEDPOL|macro|NRFX_QDEC_CONFIG_LEDPOL DECL|NRFX_QDEC_CONFIG_LEDPRE|macro|NRFX_QDEC_CONFIG_LEDPRE DECL|NRFX_QDEC_CONFIG_LOG_ENABLED|macro|NRFX_QDEC_CONFIG_LOG_ENABLED DECL|NRFX_QDEC_CONFIG_LOG_LEVEL|macro|NRFX_QDEC_CONFIG_LOG_LEVEL DECL|NRFX_QDEC_CONFIG_PIO_A|macro|NRFX_QDEC_CONFIG_PIO_A DECL|NRFX_QDEC_CONFIG_PIO_B|macro|NRFX_QDEC_CONFIG_PIO_B DECL|NRFX_QDEC_CONFIG_PIO_LED|macro|NRFX_QDEC_CONFIG_PIO_LED DECL|NRFX_QDEC_CONFIG_REPORTPER|macro|NRFX_QDEC_CONFIG_REPORTPER DECL|NRFX_QDEC_CONFIG_SAMPLEPER|macro|NRFX_QDEC_CONFIG_SAMPLEPER DECL|NRFX_QDEC_CONFIG_SAMPLE_INTEN|macro|NRFX_QDEC_CONFIG_SAMPLE_INTEN DECL|NRFX_QDEC_ENABLED|macro|NRFX_QDEC_ENABLED DECL|NRFX_RNG_CONFIG_DEBUG_COLOR|macro|NRFX_RNG_CONFIG_DEBUG_COLOR DECL|NRFX_RNG_CONFIG_ERROR_CORRECTION|macro|NRFX_RNG_CONFIG_ERROR_CORRECTION DECL|NRFX_RNG_CONFIG_INFO_COLOR|macro|NRFX_RNG_CONFIG_INFO_COLOR 
DECL|NRFX_RNG_CONFIG_IRQ_PRIORITY|macro|NRFX_RNG_CONFIG_IRQ_PRIORITY DECL|NRFX_RNG_CONFIG_LOG_ENABLED|macro|NRFX_RNG_CONFIG_LOG_ENABLED DECL|NRFX_RNG_CONFIG_LOG_LEVEL|macro|NRFX_RNG_CONFIG_LOG_LEVEL DECL|NRFX_RNG_ENABLED|macro|NRFX_RNG_ENABLED DECL|NRFX_RTC0_ENABLED|macro|NRFX_RTC0_ENABLED DECL|NRFX_RTC1_ENABLED|macro|NRFX_RTC1_ENABLED DECL|NRFX_RTC_CONFIG_DEBUG_COLOR|macro|NRFX_RTC_CONFIG_DEBUG_COLOR DECL|NRFX_RTC_CONFIG_INFO_COLOR|macro|NRFX_RTC_CONFIG_INFO_COLOR DECL|NRFX_RTC_CONFIG_LOG_ENABLED|macro|NRFX_RTC_CONFIG_LOG_ENABLED DECL|NRFX_RTC_CONFIG_LOG_LEVEL|macro|NRFX_RTC_CONFIG_LOG_LEVEL DECL|NRFX_RTC_DEFAULT_CONFIG_FREQUENCY|macro|NRFX_RTC_DEFAULT_CONFIG_FREQUENCY DECL|NRFX_RTC_DEFAULT_CONFIG_IRQ_PRIORITY|macro|NRFX_RTC_DEFAULT_CONFIG_IRQ_PRIORITY DECL|NRFX_RTC_DEFAULT_CONFIG_RELIABLE|macro|NRFX_RTC_DEFAULT_CONFIG_RELIABLE DECL|NRFX_RTC_ENABLED|macro|NRFX_RTC_ENABLED DECL|NRFX_RTC_MAXIMUM_LATENCY_US|macro|NRFX_RTC_MAXIMUM_LATENCY_US DECL|NRFX_SAADC_CONFIG_DEBUG_COLOR|macro|NRFX_SAADC_CONFIG_DEBUG_COLOR DECL|NRFX_SAADC_CONFIG_INFO_COLOR|macro|NRFX_SAADC_CONFIG_INFO_COLOR DECL|NRFX_SAADC_CONFIG_IRQ_PRIORITY|macro|NRFX_SAADC_CONFIG_IRQ_PRIORITY DECL|NRFX_SAADC_CONFIG_LOG_ENABLED|macro|NRFX_SAADC_CONFIG_LOG_ENABLED DECL|NRFX_SAADC_CONFIG_LOG_LEVEL|macro|NRFX_SAADC_CONFIG_LOG_LEVEL DECL|NRFX_SAADC_CONFIG_LP_MODE|macro|NRFX_SAADC_CONFIG_LP_MODE DECL|NRFX_SAADC_CONFIG_OVERSAMPLE|macro|NRFX_SAADC_CONFIG_OVERSAMPLE DECL|NRFX_SAADC_CONFIG_RESOLUTION|macro|NRFX_SAADC_CONFIG_RESOLUTION DECL|NRFX_SAADC_ENABLED|macro|NRFX_SAADC_ENABLED DECL|NRFX_SPIM0_ENABLED|macro|NRFX_SPIM0_ENABLED DECL|NRFX_SPIM_CONFIG_DEBUG_COLOR|macro|NRFX_SPIM_CONFIG_DEBUG_COLOR DECL|NRFX_SPIM_CONFIG_INFO_COLOR|macro|NRFX_SPIM_CONFIG_INFO_COLOR DECL|NRFX_SPIM_CONFIG_LOG_ENABLED|macro|NRFX_SPIM_CONFIG_LOG_ENABLED DECL|NRFX_SPIM_CONFIG_LOG_LEVEL|macro|NRFX_SPIM_CONFIG_LOG_LEVEL DECL|NRFX_SPIM_DEFAULT_CONFIG_IRQ_PRIORITY|macro|NRFX_SPIM_DEFAULT_CONFIG_IRQ_PRIORITY 
DECL|NRFX_SPIM_ENABLED|macro|NRFX_SPIM_ENABLED DECL|NRFX_SPIM_MISO_PULL_CFG|macro|NRFX_SPIM_MISO_PULL_CFG DECL|NRFX_SPIS0_ENABLED|macro|NRFX_SPIS0_ENABLED DECL|NRFX_SPIS_CONFIG_DEBUG_COLOR|macro|NRFX_SPIS_CONFIG_DEBUG_COLOR DECL|NRFX_SPIS_CONFIG_INFO_COLOR|macro|NRFX_SPIS_CONFIG_INFO_COLOR DECL|NRFX_SPIS_CONFIG_LOG_ENABLED|macro|NRFX_SPIS_CONFIG_LOG_ENABLED DECL|NRFX_SPIS_CONFIG_LOG_LEVEL|macro|NRFX_SPIS_CONFIG_LOG_LEVEL DECL|NRFX_SPIS_DEFAULT_CONFIG_IRQ_PRIORITY|macro|NRFX_SPIS_DEFAULT_CONFIG_IRQ_PRIORITY DECL|NRFX_SPIS_DEFAULT_DEF|macro|NRFX_SPIS_DEFAULT_DEF DECL|NRFX_SPIS_DEFAULT_ORC|macro|NRFX_SPIS_DEFAULT_ORC DECL|NRFX_SPIS_ENABLED|macro|NRFX_SPIS_ENABLED DECL|NRFX_SWI0_DISABLED|macro|NRFX_SWI0_DISABLED DECL|NRFX_SWI1_DISABLED|macro|NRFX_SWI1_DISABLED DECL|NRFX_SWI2_DISABLED|macro|NRFX_SWI2_DISABLED DECL|NRFX_SWI3_DISABLED|macro|NRFX_SWI3_DISABLED DECL|NRFX_SWI4_DISABLED|macro|NRFX_SWI4_DISABLED DECL|NRFX_SWI5_DISABLED|macro|NRFX_SWI5_DISABLED DECL|NRFX_SWI_CONFIG_DEBUG_COLOR|macro|NRFX_SWI_CONFIG_DEBUG_COLOR DECL|NRFX_SWI_CONFIG_INFO_COLOR|macro|NRFX_SWI_CONFIG_INFO_COLOR DECL|NRFX_SWI_CONFIG_LOG_ENABLED|macro|NRFX_SWI_CONFIG_LOG_ENABLED DECL|NRFX_SWI_CONFIG_LOG_LEVEL|macro|NRFX_SWI_CONFIG_LOG_LEVEL DECL|NRFX_SWI_ENABLED|macro|NRFX_SWI_ENABLED DECL|NRFX_SYSTICK_ENABLED|macro|NRFX_SYSTICK_ENABLED DECL|NRFX_TIMER0_ENABLED|macro|NRFX_TIMER0_ENABLED DECL|NRFX_TIMER1_ENABLED|macro|NRFX_TIMER1_ENABLED DECL|NRFX_TIMER2_ENABLED|macro|NRFX_TIMER2_ENABLED DECL|NRFX_TIMER_CONFIG_DEBUG_COLOR|macro|NRFX_TIMER_CONFIG_DEBUG_COLOR DECL|NRFX_TIMER_CONFIG_INFO_COLOR|macro|NRFX_TIMER_CONFIG_INFO_COLOR DECL|NRFX_TIMER_CONFIG_LOG_ENABLED|macro|NRFX_TIMER_CONFIG_LOG_ENABLED DECL|NRFX_TIMER_CONFIG_LOG_LEVEL|macro|NRFX_TIMER_CONFIG_LOG_LEVEL DECL|NRFX_TIMER_DEFAULT_CONFIG_BIT_WIDTH|macro|NRFX_TIMER_DEFAULT_CONFIG_BIT_WIDTH DECL|NRFX_TIMER_DEFAULT_CONFIG_FREQUENCY|macro|NRFX_TIMER_DEFAULT_CONFIG_FREQUENCY 
DECL|NRFX_TIMER_DEFAULT_CONFIG_IRQ_PRIORITY|macro|NRFX_TIMER_DEFAULT_CONFIG_IRQ_PRIORITY DECL|NRFX_TIMER_DEFAULT_CONFIG_MODE|macro|NRFX_TIMER_DEFAULT_CONFIG_MODE DECL|NRFX_TIMER_ENABLED|macro|NRFX_TIMER_ENABLED DECL|NRFX_TWIM0_ENABLED|macro|NRFX_TWIM0_ENABLED DECL|NRFX_TWIM_CONFIG_DEBUG_COLOR|macro|NRFX_TWIM_CONFIG_DEBUG_COLOR DECL|NRFX_TWIM_CONFIG_INFO_COLOR|macro|NRFX_TWIM_CONFIG_INFO_COLOR DECL|NRFX_TWIM_CONFIG_LOG_ENABLED|macro|NRFX_TWIM_CONFIG_LOG_ENABLED DECL|NRFX_TWIM_CONFIG_LOG_LEVEL|macro|NRFX_TWIM_CONFIG_LOG_LEVEL DECL|NRFX_TWIM_DEFAULT_CONFIG_FREQUENCY|macro|NRFX_TWIM_DEFAULT_CONFIG_FREQUENCY DECL|NRFX_TWIM_DEFAULT_CONFIG_HOLD_BUS_UNINIT|macro|NRFX_TWIM_DEFAULT_CONFIG_HOLD_BUS_UNINIT DECL|NRFX_TWIM_DEFAULT_CONFIG_IRQ_PRIORITY|macro|NRFX_TWIM_DEFAULT_CONFIG_IRQ_PRIORITY DECL|NRFX_TWIM_ENABLED|macro|NRFX_TWIM_ENABLED DECL|NRFX_TWIS0_ENABLED|macro|NRFX_TWIS0_ENABLED DECL|NRFX_TWIS_ASSUME_INIT_AFTER_RESET_ONLY|macro|NRFX_TWIS_ASSUME_INIT_AFTER_RESET_ONLY DECL|NRFX_TWIS_CONFIG_DEBUG_COLOR|macro|NRFX_TWIS_CONFIG_DEBUG_COLOR DECL|NRFX_TWIS_CONFIG_INFO_COLOR|macro|NRFX_TWIS_CONFIG_INFO_COLOR DECL|NRFX_TWIS_CONFIG_LOG_ENABLED|macro|NRFX_TWIS_CONFIG_LOG_ENABLED DECL|NRFX_TWIS_CONFIG_LOG_LEVEL|macro|NRFX_TWIS_CONFIG_LOG_LEVEL DECL|NRFX_TWIS_DEFAULT_CONFIG_ADDR0|macro|NRFX_TWIS_DEFAULT_CONFIG_ADDR0 DECL|NRFX_TWIS_DEFAULT_CONFIG_ADDR1|macro|NRFX_TWIS_DEFAULT_CONFIG_ADDR1 DECL|NRFX_TWIS_DEFAULT_CONFIG_IRQ_PRIORITY|macro|NRFX_TWIS_DEFAULT_CONFIG_IRQ_PRIORITY DECL|NRFX_TWIS_DEFAULT_CONFIG_SCL_PULL|macro|NRFX_TWIS_DEFAULT_CONFIG_SCL_PULL DECL|NRFX_TWIS_DEFAULT_CONFIG_SDA_PULL|macro|NRFX_TWIS_DEFAULT_CONFIG_SDA_PULL DECL|NRFX_TWIS_ENABLED|macro|NRFX_TWIS_ENABLED DECL|NRFX_TWIS_NO_SYNC_MODE|macro|NRFX_TWIS_NO_SYNC_MODE DECL|NRFX_UARTE0_ENABLED|macro|NRFX_UARTE0_ENABLED DECL|NRFX_UARTE_CONFIG_DEBUG_COLOR|macro|NRFX_UARTE_CONFIG_DEBUG_COLOR DECL|NRFX_UARTE_CONFIG_INFO_COLOR|macro|NRFX_UARTE_CONFIG_INFO_COLOR 
DECL|NRFX_UARTE_CONFIG_LOG_ENABLED|macro|NRFX_UARTE_CONFIG_LOG_ENABLED DECL|NRFX_UARTE_CONFIG_LOG_LEVEL|macro|NRFX_UARTE_CONFIG_LOG_LEVEL DECL|NRFX_UARTE_DEFAULT_CONFIG_BAUDRATE|macro|NRFX_UARTE_DEFAULT_CONFIG_BAUDRATE DECL|NRFX_UARTE_DEFAULT_CONFIG_HWFC|macro|NRFX_UARTE_DEFAULT_CONFIG_HWFC DECL|NRFX_UARTE_DEFAULT_CONFIG_IRQ_PRIORITY|macro|NRFX_UARTE_DEFAULT_CONFIG_IRQ_PRIORITY DECL|NRFX_UARTE_DEFAULT_CONFIG_PARITY|macro|NRFX_UARTE_DEFAULT_CONFIG_PARITY DECL|NRFX_UARTE_ENABLED|macro|NRFX_UARTE_ENABLED DECL|NRFX_WDT_CONFIG_BEHAVIOUR|macro|NRFX_WDT_CONFIG_BEHAVIOUR DECL|NRFX_WDT_CONFIG_DEBUG_COLOR|macro|NRFX_WDT_CONFIG_DEBUG_COLOR DECL|NRFX_WDT_CONFIG_INFO_COLOR|macro|NRFX_WDT_CONFIG_INFO_COLOR DECL|NRFX_WDT_CONFIG_IRQ_PRIORITY|macro|NRFX_WDT_CONFIG_IRQ_PRIORITY DECL|NRFX_WDT_CONFIG_LOG_ENABLED|macro|NRFX_WDT_CONFIG_LOG_ENABLED DECL|NRFX_WDT_CONFIG_LOG_LEVEL|macro|NRFX_WDT_CONFIG_LOG_LEVEL DECL|NRFX_WDT_CONFIG_RELOAD_VALUE|macro|NRFX_WDT_CONFIG_RELOAD_VALUE DECL|NRFX_WDT_ENABLED|macro|NRFX_WDT_ENABLED
aafulei/leetcode
c++/0876-middle-of-the-linked-list.cpp
<filename>c++/0876-middle-of-the-linked-list.cpp // 22/03/16 = Wed // 876. Middle of the Linked List [Easy] // Given the head of a singly linked list, return the middle node of the linked // list. // If there are two middle nodes, return the second middle node. // Example 1: // Input: head = [1,2,3,4,5] // Output: [3,4,5] // Explanation: The middle node of the list is node 3. // Example 2: // Input: head = [1,2,3,4,5,6] // Output: [4,5,6] // Explanation: Since the list has two middle nodes with values 3 and 4, we // return the second one. // Constraints: // The number of nodes in the list is in the range [1, 100]. // 1 <= Node.val <= 100 // Related Topics: // [Linked List] [Two Pointers] class Solution { public: ListNode *middleNode(ListNode *head) { ListNode *slow = head; for (ListNode *fast = head; fast; slow = slow->next, fast = fast->next) { if (!(fast = fast->next)) { break; } } return slow; } };
IdrisDose/Nucleus
src/main/java/io/github/nucleuspowered/nucleus/modules/message/commands/ReplyCommand.java
/*
 * This file is part of Nucleus, licensed under the MIT License (MIT). See the LICENSE.txt file
 * at the root of this project for more details.
 */
package io.github.nucleuspowered.nucleus.modules.message.commands;

import com.google.inject.Inject;
import io.github.nucleuspowered.nucleus.Util;
import io.github.nucleuspowered.nucleus.internal.annotations.ConfigCommandAlias;
import io.github.nucleuspowered.nucleus.internal.annotations.Permissions;
import io.github.nucleuspowered.nucleus.internal.annotations.RegisterCommand;
import io.github.nucleuspowered.nucleus.internal.annotations.RunAsync;
import io.github.nucleuspowered.nucleus.internal.permissions.SuggestedLevel;
import io.github.nucleuspowered.nucleus.modules.message.handlers.MessageHandler;
import org.spongepowered.api.command.CommandResult;
import org.spongepowered.api.command.CommandSource;
import org.spongepowered.api.command.args.CommandContext;
import org.spongepowered.api.command.args.CommandElement;
import org.spongepowered.api.command.args.GenericArguments;
import org.spongepowered.api.entity.living.player.Player;
import org.spongepowered.api.text.Text;

/**
 * Replies to the last player who sent a message.
 *
 * <p>Registered as {@code /reply} (alias {@code /r}); permissions are shared
 * with the {@code message} command via {@code mainOverride}, and the command
 * runs off the main server thread ({@code @RunAsync}).</p>
 */
@Permissions(mainOverride = "message", suggestedLevel = SuggestedLevel.USER)
@RunAsync
@ConfigCommandAlias(value = "message", generate = false)
@RegisterCommand({"reply", "r"})
public class ReplyCommand extends io.github.nucleuspowered.nucleus.internal.command.AbstractCommand<CommandSource> {

    // Key under which the joined message text is stored in the CommandContext.
    private final String message = "message";

    @Inject private MessageHandler handler;

    @Override
    public CommandElement[] getArguments() {
        // Single argument: the rest of the command line joined into one string.
        return new CommandElement[] {GenericArguments.onlyOne(GenericArguments.remainingJoinedStrings(Text.of(message)))};
    }

    @Override
    public CommandResult executeCommand(CommandSource src, CommandContext args) throws Exception {
        // If configured, warn the sender when the reply target is AFK.
        // NOTE(review): alertOnAfk / isAfk / sendAfkMessage are presumably
        // inherited from AbstractCommand — confirm in the base class.
        if (alertOnAfk()) {
            handler.getPlayerToReplyTo(Util.getUUID(src)).ifPresent(x -> {
                if (x instanceof Player && isAfk((Player)x)) {
                    sendAfkMessage(src, (Player)x);
                }
            });
        }

        // Delegate delivery to the MessageHandler; success/failure maps to
        // the command result so the caller gets proper feedback.
        boolean b = handler.replyMessage(src, args.<String>getOne(message).get());
        return b ? CommandResult.success() : CommandResult.empty();
    }
}
javg15/sirh_backend
app/models/user.model.js
// Sequelize model definition for the `usuarios` (users) table in the
// `public` schema. Timestamp columns are mapped to the snake_case
// `created_at` / `updated_at` columns.
module.exports = (sequelize, DataTypes) => {
    return sequelize.define('usuarios', {
        id: {
            autoIncrement: true,
            type: DataTypes.INTEGER,
            allowNull: false,
            primaryKey: true
        },
        // Login name, max 10 characters.
        username: {
            type: DataTypes.STRING(10),
            allowNull: false
        },
        // Password field. NOTE(review): presumably stores a hash (100 chars);
        // confirm it is never populated with plaintext.
        pass: {
            type: DataTypes.STRING(100),
            allowNull: true
        },
        u_passenc: {
            type: DataTypes.STRING(100),
            allowNull: true
        },
        // NOTE(review): DOUBLE for a profile identifier looks unusual —
        // verify against the actual column type before changing.
        perfil: {
            type: DataTypes.DOUBLE,
            allowNull: true
        },
        // Display name of the user.
        nombre: {
            type: DataTypes.STRING(100),
            allowNull: true
        },
        // Employee number, stored as a 6-char string.
        numemp: {
            type: DataTypes.STRING(6),
            allowNull: true
        },
        created_at: {
            type: DataTypes.DATE,
            allowNull: true
        },
        updated_at: {
            type: DataTypes.DATE,
            allowNull: true
        },
        // FK to the permission-group table (presumably permgrupos).
        id_permgrupos: {
            type: DataTypes.INTEGER,
            allowNull: true
        },
        id_usuarios_r: {
            type: DataTypes.INTEGER,
            allowNull: true
        },
        // Row state flag; defaults to "A" (presumably "Activo").
        state: {
            type: DataTypes.CHAR(1),
            allowNull: true,
            defaultValue: "A"
        },
        email: {
            type: DataTypes.STRING,
            allowNull: true
        },
        // FK to the stored avatar file record.
        id_archivos_avatar: {
            type: DataTypes.INTEGER,
            allowNull: true
        },
    }, {
        sequelize,
        tableName: 'usuarios',
        schema: 'public',
        //timestamps: false
        createdAt: 'created_at',
        updatedAt: 'updated_at',
    });
};
PetroTsapei/Student-Reminder
student-reminder-web/src/stores/GroupStore.js
import { observable, action } from 'mobx';
import GroupApi from '../api/groups';
import { authStore } from '../stores/AuthStore';
import handleError from '../helpers/handleError';

/**
 * MobX store holding the list of student groups and providing CRUD actions
 * backed by GroupApi. All requests authenticate with the token from
 * authStore; API errors are routed through handleError.
 */
export class GroupStore {
  // Groups currently loaded from the backend.
  @observable groupList = [];
  // Signal flag observed by modal components. NOTE(review): create/update
  // set this to true on success and back to false in `finally` — observers
  // presumably react to the true transition to dismiss the modal. Confirm
  // before simplifying.
  @observable closeModal = false;

  // Load the full group list for the authenticated user.
  @action async getAll() {
    try {
      this.groupList = await GroupApi.getAll(authStore.token);
    } catch (error) {
      handleError(error);
    } finally {

    }
  }

  // Create a group and append the server's group_info to the local list.
  @action async create(data) {
    try {
      const result = await GroupApi.create({
        token: authStore.token,
        ...data
      });

      this.closeModal = true;

      this.groupList.push(result.group_info);
    } catch (error) {
      handleError(error);
    } finally {
      this.closeModal = false;
    }
  }

  // Update a group by id and replace the matching element in the list.
  @action async update(groupId, data) {
    try {
      const result = await GroupApi.updateById(authStore.token, groupId, data);

      this.closeModal = true;

      this.groupList = this.groupList.map(el => el._id === groupId ? result : el);
    } catch (error) {
      handleError(error);
    } finally {
      this.closeModal = false;
    }
  }

  // Delete a group by id and drop it from the local list.
  @action async delete(id) {
    try {
      await GroupApi.delete(authStore.token, id);

      this.groupList = this.groupList.filter(e => e._id !== id);
    } catch (error) {
      handleError(error);
    }
  }

  // Reset the list (e.g. on logout).
  @action setListToInitState() {
    this.groupList = [];
  }
}

export const groupStore = new GroupStore();
Oneflow-Inc/Oneflow-Model-Compression
model_compress/distil/examples/teacher_bert/task_teacher.py
"""
Copyright 2020 The OneFlow Authors. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Fine-tunes a teacher BERT on a GLUE task with OneFlow and evaluates it;
# the best checkpoint (by task metric) is saved as 'snapshot_best'.
import os
import math
import numpy as np
import oneflow as flow
import sys

curPath = os.path.abspath(os.path.dirname(__file__))
rootPath = os.path.split(curPath)[0]
sys.path.append(os.path.abspath(os.path.join(os.getcwd(), "./src")))
from classifier import GlueBERT
from util import Snapshot, Summary, InitNodes, Metric, CreateOptimizer, GetFunctionConfig, getdirsize, remove_optimizer_params, remove_teacher_params

import config as configs
from sklearn.metrics import accuracy_score, matthews_corrcoef, precision_score, recall_score, f1_score

import argparse
import time
import json
import shutil


def str2bool(v):
    # argparse-friendly boolean parser: maps common yes/no spellings to bool
    # and raises so argparse reports invalid values instead of coercing them.
    if v.lower() in ('yes', 'true', 't', 'y', '1'):
        return True
    elif v.lower() in ('no', 'false', 'f', 'n', '0'):
        return False
    else:
        raise argparse.ArgumentTypeError('Unsupported value encountered.')


# Extend the shared config parser with task/data arguments for this script.
parser = configs.get_parser()
parser.add_argument("--task_name", type=str, default='CoLA')
parser.add_argument('--num_epochs', type=int, default=3, help='number of epochs')
parser.add_argument("--train_data_dir", type=str, default=None)
parser.add_argument("--train_data_prefix", type=str, default='train.of_record-')
parser.add_argument("--train_example_num", type=int, default=88614,
                    help="example number in dataset")
parser.add_argument("--batch_size_per_device", type=int, default=32)
parser.add_argument("--train_data_part_num", type=int, default=1,
                    help="data part number in dataset")
parser.add_argument("--eval_data_dir", type=str, default=None)
parser.add_argument("--eval_data_prefix", type=str, default='eval.of_record-')
parser.add_argument("--eval_example_num", type=int, default=10833,
                    help="example number in dataset")
parser.add_argument("--eval_batch_size_per_device", type=int, default=64)
parser.add_argument("--eval_data_part_num", type=int, default=1,
                    help="data part number in dataset")
parser.add_argument("--result_dir", type=str, default="", help="the save directory of results")
parser.add_argument('--serve_for_online', type=str2bool, nargs='?', const=False,
                    help='if True, then after training, will delete the teacher params and optimizer parmas from model_save_dir')

args = parser.parse_args()

# Global (all-device) batch sizes and derived step counts per epoch.
task_name = args.task_name.lower()
batch_size = args.num_nodes * args.gpu_num_per_node * args.batch_size_per_device
eval_batch_size = args.num_nodes * args.gpu_num_per_node * args.eval_batch_size_per_device

epoch_size = math.ceil(args.train_example_num / batch_size)
num_eval_steps = math.ceil(args.eval_example_num / eval_batch_size)
args.iter_num = epoch_size * args.num_epochs
configs.print_args(args)

# GLUE task -> output head type (only sts-b is a regression task).
glue_output_modes = {
    "cola": "classification",
    "mnli": "classification",
    "mnli-mm": "classification",
    "mrpc": "classification",
    "sst-2": "classification",
    "sts-b": "regression",
    "qqp": "classification",
    "qnli": "classification",
    "rte": "classification",
    "wnli": "classification",
}

# Which metric selects the best checkpoint per task family.
acc_tasks = ["mnli", "mrpc", "sst-2", "qqp", "qnli", "rte"]
corr_tasks = ["sts-b"]
mcc_tasks = ["cola"]


def BertDecoder(
    data_dir, batch_size, data_part_num, seq_length, part_name_prefix, shuffle=True
):
    """Build an OFRecord reader (on CPU) and decode the BERT input blobs.

    Returns a dict mapping blob name -> decoded blob for: input_ids,
    input_mask, segment_ids, label_ids, is_real_example.
    """
    with flow.scope.placement("cpu", "0:0"):
        ofrecord = flow.data.ofrecord_reader(data_dir,
                                             batch_size=batch_size,
                                             data_part_num=data_part_num,
                                             part_name_prefix=part_name_prefix,
                                             random_shuffle=shuffle,
                                             shuffle_after_epoch=shuffle)
        blob_confs = {}

        def _blob_conf(name, shape, dtype=flow.int32):
            # Register one raw-decoded field from the OFRecord.
            blob_confs[name] = flow.data.OFRecordRawDecoder(ofrecord, name, shape=shape, dtype=dtype)

        _blob_conf("input_ids", [seq_length])
        _blob_conf("input_mask", [seq_length])
        _blob_conf("segment_ids", [seq_length])
        _blob_conf("label_ids", [1])
        _blob_conf("is_real_example", [1])
        return blob_confs


def BuildBert(
    batch_size,
    data_part_num,
    data_dir,
    part_name_prefix,
    shuffle=True
):
    """Wire the data decoder into the GlueBERT classifier graph.

    Returns (loss, logits, label_ids) for the given data source.
    """
    hidden_size = 64 * args.num_attention_heads  # , H = 64, size per head
    intermediate_size = hidden_size * 4  # intermediate_size=1200

    decoders = BertDecoder(
        data_dir, batch_size, data_part_num, args.seq_length, part_name_prefix, shuffle=shuffle
    )
    #is_real_example = decoders['is_real_example']

    loss, logits = GlueBERT(
        decoders['input_ids'],
        decoders['input_mask'],
        decoders['segment_ids'],
        decoders['label_ids'],
        args.vocab_size,
        seq_length=args.seq_length,
        hidden_size=hidden_size,
        num_hidden_layers=args.num_hidden_layers,
        num_attention_heads=args.num_attention_heads,
        intermediate_size=intermediate_size,
        hidden_act="gelu",
        hidden_dropout_prob=args.hidden_dropout_prob,
        attention_probs_dropout_prob=args.attention_probs_dropout_prob,
        max_position_embeddings=args.max_position_embeddings,
        type_vocab_size=args.type_vocab_size,
        initializer_range=0.02,
    )
    return loss, logits, decoders['label_ids']


@flow.global_function(type='train', function_config=GetFunctionConfig(args))
def BertGlueFinetuneJob():
    # Training job: builds the graph on the (shuffled) training data and
    # attaches the optimizer.
    loss, logits, _ = BuildBert(
        batch_size,
        args.train_data_part_num,
        args.train_data_dir,
        args.train_data_prefix,
    )
    flow.losses.add_loss(loss)
    opt = CreateOptimizer(args)
    opt.minimize(loss)
    return {'loss': loss}


@flow.global_function(type='predict', function_config=GetFunctionConfig(args))
def BertGlueEvalTrainJob():
    # Prediction job over the training split (unshuffled), used to report
    # the train-set metric each epoch.
    _, logits, label_ids = BuildBert(
        batch_size,
        args.train_data_part_num,
        args.train_data_dir,
        args.train_data_prefix,
        shuffle=False
    )
    return logits, label_ids


@flow.global_function(type='predict', function_config=GetFunctionConfig(args))
def BertGlueEvalValJob():
    #8551 or 1042
    # Prediction job over the evaluation split (unshuffled).
    _, logits, label_ids = BuildBert(
        eval_batch_size,
        args.eval_data_part_num,
        args.eval_data_dir,
        args.eval_data_prefix,
        shuffle=False
    )
    return logits, label_ids


def run_eval_job(eval_job_func, num_steps, desc='train'):
    """Run a predict job for num_steps, compute sklearn metrics, and dump
    them (plus model size and throughput) to results_<desc>.json.

    Returns the metric dict. NOTE(review): precision/recall/f1 use sklearn
    defaults (binary averaging) — only meaningful for two-class tasks.
    """
    labels = []
    predictions = []

    start_time = time.time()
    for index in range(num_steps):
        logits, label = eval_job_func().get()
        predictions.extend(list(logits.numpy().argmax(axis=1)))
        labels.extend(list(label))
    end_time = time.time()
    cost_time = end_time - start_time
    print('cost time: {} s'.format(cost_time))

    model_size = getdirsize(args.model_save_dir)
    print('model_size: %d Mbytes' % (model_size / 1024 / 1024))  # Mbytes

    accuracy = accuracy_score(labels, predictions)
    mcc = matthews_corrcoef(labels, predictions)
    precision = precision_score(labels, predictions)
    recall = recall_score(labels, predictions)
    f_1 = f1_score(labels, predictions)
    save_dict = {"accuracy": "%.2f" % accuracy,
                 "MCC": "%.2f" % mcc,
                 "precision": "%.2f" % precision,
                 "recall": "%.2f" % recall,
                 "f_1": "%.2f" % f_1,
                 "modelSize": "%d" % (model_size / 1024 / 1024),
                 "reasoningTime": "%.2f" % (args.eval_example_num / cost_time)}  # sample/second

    if args.result_dir == "":
        args.result_dir = args.model_save_dir
    if not os.path.exists(args.result_dir):
        os.makedirs(args.result_dir)
    with open(os.path.join(args.result_dir, 'results_{}.json'.format(desc)), "w") as f:
        json.dump(save_dict, f)

    def metric_fn(predictions, labels):
        # NOTE(review): the parameters shadow the outer lists but the body
        # reads the metrics already computed above from the enclosing scope.
        return {
            "accuracy": accuracy,
            "matthews_corrcoef": mcc,
            "precision": precision,
            "recall": recall,
            "f1": f_1,
        }

    metric_dict = metric_fn(predictions, labels)
    print(desc, ', '.join('{}: {:.3f}'.format(k, v) for k, v in metric_dict.items()))
    #pd.DataFrame({'predictions': predictions, 'labels': labels}).to_csv('predictions_{0}.csv'.format(step), index=False)
    return metric_dict


def main():
    """Entry point: optional training loop with best-checkpoint selection,
    then optional standalone evaluation."""
    flow.config.gpu_device_num(args.gpu_num_per_node)
    flow.env.log_dir(args.log_dir)

    InitNodes(args)

    if args.do_train:
        snapshot = Snapshot(args.model_save_dir, args.model_load_dir)
        summary = Summary(args.log_dir, args)
        best_dev_acc = 0.0
        best_result = {}
        for epoch in range(args.num_epochs):
            metric = Metric(desc='finetune', print_steps=args.loss_print_every_n_iter, summary=summary, batch_size=batch_size, keys=['loss'])

            for step in range(epoch_size):
                BertGlueFinetuneJob().async_get(metric.metric_cb(step, epoch=epoch))
                #if 1: #step % args.loss_print_every_n_iter == 0:

            # Evaluate on both splits after every epoch.
            run_eval_job(BertGlueEvalTrainJob, epoch_size, desc='train')
            result = run_eval_job(BertGlueEvalValJob, num_eval_steps, desc='eval')

            # Select the best checkpoint by the task's canonical dev metric.
            save_model = False
            if task_name in acc_tasks and result['accuracy'] > best_dev_acc:
                best_dev_acc = result['accuracy']
                best_result = result
                save_model = True
                print('Best result:', result)

            # if task_name in corr_tasks and result['corr'] > best_dev_acc:
            #     best_dev_acc = result['corr']
            #     best_result = result
            #     save_model = True
                #print('Best result:', result)

            if task_name in mcc_tasks and result['matthews_corrcoef'] > best_dev_acc:
                best_dev_acc = result['matthews_corrcoef']
                best_result = result
                save_model = True
                print('Best result:', result)

            if save_model:
                if not os.path.exists(args.model_save_dir):
                    os.makedirs(args.model_save_dir)
                # snapshot_save_path = os.path.join(args.model_save_dir)
                # print("Saving best model to {}".format(snapshot_save_path))
                snapshot.save('best')
                flow.sync_default_session()

        print('Best result:', best_result)
        print("Saving best model to " + os.path.join(args.model_save_dir, 'snapshot_best'))

        if args.serve_for_online:
            # Strip optimizer state so the served checkpoint stays small.
            print('Deleting the optimizer parmas from model_save_dir...')
            remove_optimizer_params(os.path.join(args.model_save_dir, 'snapshot_best'))

        # if args.save_last_snapshot:
        #     snapshot.save("last_snapshot")

    if args.do_eval:
        print('Loading model...')
        print(args.model_save_dir)

        # Only load from disk when training did not just produce the weights.
        if not args.do_train:
            check_point = flow.train.CheckPoint()
            check_point.load(args.model_save_dir)
        print('Evaluation...')
        run_eval_job(BertGlueEvalValJob, num_eval_steps, desc='eval')


if __name__ == "__main__":
    main()
mtunganati/oneops
crawler/src/generated-sources/java/com/oneops/crawler/jooq/cms/routines/CmsAcquireLock.java
/*
 * This file is generated by jOOQ.
 */
package com.oneops.crawler.jooq.cms.routines;


import com.oneops.crawler.jooq.cms.Kloopzcm;

import javax.annotation.Generated;

import org.jooq.Field;
import org.jooq.Parameter;
import org.jooq.impl.AbstractRoutine;


/**
 * This class is generated by jOOQ.
 *
 * <p>NOTE(review): wrapper for the {@code kloopzcm.cms_acquire_lock} stored
 * routine (lock name, owner, stale timeout; returns a boolean — presumably
 * whether the lock was acquired; confirm against the DB function). Do not
 * hand-edit — regenerate with jOOQ instead.</p>
 */
@Generated(
    value = {
        "http://www.jooq.org",
        "jOOQ version:3.10.0"
    },
    comments = "This class is generated by jOOQ"
)
@SuppressWarnings({ "all", "unchecked", "rawtypes" })
public class CmsAcquireLock extends AbstractRoutine<Boolean> {

    private static final long serialVersionUID = -49988601;

    /**
     * The parameter <code>kloopzcm.cms_acquire_lock.RETURN_VALUE</code>.
     */
    public static final Parameter<Boolean> RETURN_VALUE = createParameter("RETURN_VALUE", org.jooq.impl.SQLDataType.BOOLEAN, false, false);

    /**
     * The parameter <code>kloopzcm.cms_acquire_lock.p_lock_name</code>.
     */
    public static final Parameter<String> P_LOCK_NAME = createParameter("p_lock_name", org.jooq.impl.SQLDataType.VARCHAR, false, false);

    /**
     * The parameter <code>kloopzcm.cms_acquire_lock.p_locked_by</code>.
     */
    public static final Parameter<String> P_LOCKED_BY = createParameter("p_locked_by", org.jooq.impl.SQLDataType.VARCHAR, false, false);

    /**
     * The parameter <code>kloopzcm.cms_acquire_lock.p_stale_timeout</code>.
     */
    public static final Parameter<Integer> P_STALE_TIMEOUT = createParameter("p_stale_timeout", org.jooq.impl.SQLDataType.INTEGER, false, false);

    /**
     * Create a new routine call instance
     */
    public CmsAcquireLock() {
        super("cms_acquire_lock", Kloopzcm.KLOOPZCM, org.jooq.impl.SQLDataType.BOOLEAN);

        setReturnParameter(RETURN_VALUE);
        addInParameter(P_LOCK_NAME);
        addInParameter(P_LOCKED_BY);
        addInParameter(P_STALE_TIMEOUT);
    }

    /**
     * Set the <code>p_lock_name</code> parameter IN value to the routine
     */
    public void setPLockName(String value) {
        setValue(P_LOCK_NAME, value);
    }

    /**
     * Set the <code>p_lock_name</code> parameter to the function to be used with a {@link org.jooq.Select} statement
     */
    public void setPLockName(Field<String> field) {
        setField(P_LOCK_NAME, field);
    }

    /**
     * Set the <code>p_locked_by</code> parameter IN value to the routine
     */
    public void setPLockedBy(String value) {
        setValue(P_LOCKED_BY, value);
    }

    /**
     * Set the <code>p_locked_by</code> parameter to the function to be used with a {@link org.jooq.Select} statement
     */
    public void setPLockedBy(Field<String> field) {
        setField(P_LOCKED_BY, field);
    }

    /**
     * Set the <code>p_stale_timeout</code> parameter IN value to the routine
     */
    public void setPStaleTimeout(Integer value) {
        setValue(P_STALE_TIMEOUT, value);
    }

    /**
     * Set the <code>p_stale_timeout</code> parameter to the function to be used with a {@link org.jooq.Select} statement
     */
    public void setPStaleTimeout(Field<Integer> field) {
        setField(P_STALE_TIMEOUT, field);
    }
}
ceekay1991/AliPayForDebug
AliPayForDebug/AliPayForDebug/AlipayWallet_Headers/TAJsApiHandler4SetBackgroundColor.h
//
//     Generated by class-dump 3.5 (64 bit) (Debug version compiled Sep 17 2017 16:24:48).
//
//     class-dump is Copyright (C) 1997-1998, 2000-2001, 2004-2015 by <NAME>.
//

#import "PSDJsApiHandler.h"

@class UIView;

// Reverse-engineered (class-dump) header for the Alipay JS-API handler that
// sets a background color. NOTE(review): semantics are inferred from the
// class/selector names only — this header carries no implementation.
@interface TAJsApiHandler4SetBackgroundColor : PSDJsApiHandler
{
    UIView *_topView;     // backing ivar for the topView property
    UIView *_bottomView;  // backing ivar for the bottomView property
}

@property(retain, nonatomic) UIView *bottomView; // @synthesize bottomView=_bottomView;
@property(retain, nonatomic) UIView *topView; // @synthesize topView=_topView;

- (void).cxx_destruct;
- (double)coverViewHeight;
- (double)coverViewWidth;
- (id)contentView;
// Entry point invoked by the JS bridge with the call payload, its context,
// and a completion block.
- (void)handler:(id)arg1 context:(id)arg2 callback:(CDUnknownBlockType)arg3;

@end
nekrut/anvil-portal
src/hooks/outline-query.js
import {useStaticQuery, graphql} from 'gatsby';

/**
 * Gatsby static-query hook for the site outline.
 *
 * Queries all files except the root directory and the excluded relative
 * directories, grouped by relative path. Each returned group carries its
 * path as `slug` plus the parsed markdown AST (`htmlAst`) of its nodes.
 *
 * NOTE(review): despite the non-`use` name, this calls useStaticQuery and
 * therefore must only be invoked from a React component/hook.
 */
export const OutlineStaticQuery = () => {
  const {allFile} = useStaticQuery(
    graphql`
      query OutlineStaticQuery {
        allFile(filter: {relativeDirectory: {nin: ["", "contact", "featured-workspaces", "help", "implementation", "implementation/_images"]}}) {
          group(field: relativePath) {
            slug: fieldValue
            nodes {
              childMarkdownRemark {
                htmlAst
              }
            }
          }
        }
      }
    `
  );
  return allFile.group;
};
FrantisekGazo/Knight
knight-compiler/src/main/java/eu/inloop/knight/builder/Injectable.java
package eu.inloop.knight.builder;

import com.squareup.javapoet.ClassName;

import java.util.HashSet;
import java.util.Set;

import javax.lang.model.element.TypeElement;

/**
 * Class {@link Injectable}
 *
 * <p>Describes an injectable class together with the scopes it is injected
 * from: the application scope and/or one or more activity scopes. An
 * instance is only {@link #isValid() valid} once at least one scope has
 * been registered.</p>
 *
 * @author FrantisekGazo
 * @version 2015-11-16
 */
public class Injectable {

    private final TypeElement element;
    private final ClassName className;
    private final Set<ClassName> activityScopes = new HashSet<>();
    private ClassName appScope;

    public Injectable(TypeElement e) {
        this.element = e;
        this.className = ClassName.get(e);
        this.appScope = null;
    }

    public TypeElement getElement() {
        return element;
    }

    /** Activity classes this type is injected from. */
    public Set<ClassName> getFromActivities() {
        return activityScopes;
    }

    /** Application class this type is injected from, or {@code null}. */
    public ClassName getFromApp() {
        return appScope;
    }

    public ClassName getClassName() {
        return className;
    }

    public void addFromActivity(ClassName activityClassName) {
        activityScopes.add(activityClassName);
    }

    public void setFromApp(ClassName appClassName) {
        appScope = appClassName;
    }

    /** True when at least one injection scope (app or activity) is set. */
    public boolean isValid() {
        return appScope != null || !activityScopes.isEmpty();
    }
}
moni-roy/COPC
Online Judges/CodeForces/141C/3693775_AC_92ms_5636kB.cpp
// Codeforces 141C — reconstruct heights from each person's reported count.
#include <bits/stdc++.h>
using namespace std;

// One queue member: name, the value read next to it (per the problem
// statement, presumably the number of taller people in front), and the
// height this program assigns.
struct Member {
    string name;
    int ahead;
    int height;
    bool operator<(const Member &rhs) const { return ahead < rhs.ahead; }
};

int main() {
    int n;
    cin >> n;
    vector<Member> queue_(n);
    for (auto &m : queue_) {
        cin >> m.name >> m.ahead;
    }

    // Handle members in non-decreasing order of their reported count.
    sort(queue_.begin(), queue_.end());

    bool feasible = true;
    for (int i = 0; i < n; ++i) {
        // Among the first i+1 members, at most i can stand ahead of the
        // i-th one, so a larger reported count is impossible.
        if (queue_[i].ahead > i) {
            feasible = false;
            break;
        }
        queue_[i].height = i - queue_[i].ahead + 1;
        // Bump every previously-assigned height that would collide with or
        // dominate the new one (same greedy adjustment as the reference).
        for (int j = i - 1; j >= 0; --j) {
            if (queue_[i].height <= queue_[j].height) {
                ++queue_[j].height;
            }
        }
    }

    if (!feasible) {
        cout << "-1" << endl;
    } else {
        for (const auto &m : queue_) {
            cout << m.name << " " << m.height << endl;
        }
    }
    return 0;
}
jvanelteren/advent_of_code
advent_of_code_2017/day 24/solution_refractored.py
# Advent of Code 2017, day 24 — build the strongest / longest bridge from
# port components.
#
# Author's lessons-learned notes (kept from the original):
# - read full assignment; think algo before implementing
# - with a difficult problem, use the test cases first, and use a
#   generator with a recursive function
#
# Bug fix vs. the original: findnext() previously shared ONE `left` list
# across all candidate branches and mutated it while iterating
# (`poss[-1]['left'].remove(i)`), so sibling branches saw components
# disappear. Each branch now gets its own copy of the remaining components.
import os
import re
import numpy as np  # kept from the original file


def findnext(left, path, search):
    """Recursively enumerate all maximal bridges.

    left   -- list of [a, b] components still available
    path   -- flat list of the port values used so far
    search -- port value the next component must expose

    Yields (strength, length) for every bridge that cannot be extended,
    where strength is the sum of all port values and length is
    len(path) (two entries per component).
    """
    poss = []
    for comp in left:
        # Component fits with its first port facing the bridge.
        if comp[0] == search:
            remaining = left.copy()
            remaining.remove(comp)  # removes one equal element; fine for duplicates
            poss.append({'p': path + comp, 'newsearch': comp[1], 'left': remaining})
        # Component fits flipped; symmetric components (e.g. 2/2) were
        # already handled by the first branch, so skip them here.
        if comp[1] == search and comp[0] != comp[1]:
            remaining = left.copy()
            remaining.remove(comp)
            poss.append({'p': path + comp, 'newsearch': comp[0], 'left': remaining})
    if not poss:
        # Dead end: this bridge is maximal — report it.
        yield sum(path), len(path)
    else:
        for p in poss:
            yield from findnext(p['left'], p['p'], p['newsearch'])


def solve(ports):
    """Return (strongest bridge strength, strength of the longest bridge).

    Ties on length are broken by strength, per the part-2 rules.
    """
    results = list(findnext([list(c) for c in ports], [], 0))
    strongest = max(s for s, _ in results)
    longest = max(l for _, l in results)
    return strongest, max(s for s, l in results if l == longest)


def main():
    # Original notebook-style chdir; ignore failure when already there.
    try:
        os.chdir(os.path.join(os.getcwd(), 'day 24'))
        print(os.getcwd())
    except OSError:
        pass
    with open('test0.txt', 'r') as f:
        ports = [list(map(int, line.split('/'))) for line in f]
    strongest, strongest_of_longest = solve(ports)
    print(strongest)
    print(strongest_of_longest)


if __name__ == '__main__':
    main()
benemon/hawtio
tooling/hawtio-junit/src/main/java/io/hawt/junit/InProgressRunListener.java
<filename>tooling/hawtio-junit/src/main/java/io/hawt/junit/InProgressRunListener.java<gh_stars>100-1000 package io.hawt.junit; import org.junit.runner.Description; import org.junit.runner.Result; import org.junit.runner.notification.Failure; import org.junit.runner.notification.RunListener; public class InProgressRunListener extends RunListener { private long startTime; private long runTime; private final InProgressDTO inProgressDTO; public InProgressRunListener(InProgressDTO inProgressDTO) { this.inProgressDTO = inProgressDTO; } @Override public void testRunStarted(Description description) throws Exception { startTime = System.currentTimeMillis(); inProgressDTO.setStartTime(startTime); } @Override public void testRunFinished(Result result) throws Exception { long endTime = System.currentTimeMillis(); inProgressDTO.setEndTime(endTime); runTime += endTime - startTime; inProgressDTO.updateRuntime(runTime); } @Override public void testStarted(Description description) throws Exception { inProgressDTO.setTestClass(description.getTestClass().getName()); inProgressDTO.setTestMethod(description.getMethodName()); } @Override public void testFinished(Description description) throws Exception { inProgressDTO.onTestFinished(); inProgressDTO.setTestClass(null); inProgressDTO.setTestMethod(null); } @Override public void testFailure(Failure failure) throws Exception { inProgressDTO.onTestFailed(); inProgressDTO.setTestClass(null); inProgressDTO.setTestMethod(null); } @Override public void testAssumptionFailure(Failure failure) { // noop } @Override public void testIgnored(Description description) throws Exception { inProgressDTO.onTestIgnored(); inProgressDTO.setTestClass(null); inProgressDTO.setTestMethod(null); } }
stbrody/mongo
src/mongo/db/index/expression_params.cpp
/** * Copyright (C) 2015 MongoDB Inc. * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License, version 3, * as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. * * As a special exception, the copyright holders give permission to link the * code of portions of this program with the OpenSSL library under certain * conditions as described in each individual source file and distribute * linked combinations including the program with the OpenSSL library. You * must comply with the GNU Affero General Public License in all respects for * all of the code used other than as permitted herein. If you modify file(s) * with this exception, you may extend this exception to your version of the * file(s), but you are not obligated to do so. If you do not wish to do so, * delete this exception statement from your version. If you delete this * exception statement from all source files in the program, then also delete * it in the license file. 
*/

#include "mongo/db/index/expression_params.h"

#include "mongo/bson/util/bson_extract.h"
#include "mongo/db/geo/geoconstants.h"
#include "mongo/db/hasher.h"
#include "mongo/db/index_names.h"
#include "mongo/db/index/2d_common.h"
#include "mongo/db/index/s2_indexing_params.h"
#include "mongo/util/mongoutils/str.h"

namespace mongo {

using mongoutils::str::stream;

// Parses a 2d index spec: the "key" object must contain exactly one "2d" geo
// field, which must come first; any remaining key fields are plain (non-geo)
// fields indexed with a numeric sort order (defaulting to ascending, 1).
// Also extracts the GeoHashConverter parameters (bits/min/max) from the spec.
void ExpressionParams::parseTwoDParams(const BSONObj& infoObj, TwoDIndexingParams* out) {
    BSONObjIterator i(infoObj.getObjectField("key"));

    while (i.more()) {
        BSONElement e = i.next();
        if (e.type() == String && IndexNames::GEO_2D == e.valuestr()) {
            uassert(16800, "can't have 2 geo fields", out->geo.size() == 0);
            uassert(16801, "2d has to be first in index", out->other.size() == 0);
            out->geo = e.fieldName();
        } else {
            // Non-geo key field: record its name and sort order.
            // Non-numeric order values silently fall back to ascending (1).
            int order = 1;
            if (e.isNumber()) {
                order = static_cast<int>(e.Number());
            }
            out->other.push_back(std::make_pair(e.fieldName(), order));
        }
    }

    uassert(16802, "no geo field specified", out->geo.size());

    // Hash-conversion parameters (bits, min, max) also live in the index spec;
    // parseParameters validates them and uassertStatusOK propagates any error.
    GeoHashConverter::Parameters hashParams;
    Status paramStatus = GeoHashConverter::parseParameters(infoObj, &hashParams);
    uassertStatusOK(paramStatus);

    out->geoHashConverter.reset(new GeoHashConverter(hashParams));
}

// Parses a hashed index spec, extracting the hash seed, the hash version and
// the name of the single hashed field out of the "key" object.
void ExpressionParams::parseHashParams(const BSONObj& infoObj,
                                       HashSeed* seedOut,
                                       int* versionOut,
                                       std::string* fieldOut) {
    // Default *seedOut to DEFAULT_HASH_SEED if "seed" is not included in the index spec
    // or if the value of "seed" is not a number.
    //
    // *** WARNING ***
    // Choosing non-default seeds will invalidate hashed sharding.
    // Changing the seed default will break existing indexes and sharded collections.
    if (infoObj["seed"].eoo()) {
        *seedOut = BSONElementHasher::DEFAULT_HASH_SEED;
    } else {
        *seedOut = infoObj["seed"].numberInt();
    }

    // In case we have hashed indexes based on other hash functions in the future, we store
    // a hashVersion number. If hashVersion changes, "makeSingleHashKey" will need to change
    // accordingly. Defaults to 0 if "hashVersion" is not included in the index spec or if
    // the value of "hashVersion" is not a number.
    *versionOut = infoObj["hashVersion"].numberInt();

    // Get the hashed field name: the first (and only supported) key field must
    // have the value "hashed".
    BSONElement firstElt = infoObj.getObjectField("key").firstElement();
    massert(16765, "error: no hashed index field", firstElt.str().compare(IndexNames::HASHED) == 0);
    *fieldOut = firstElt.fieldName();
}

// Parses a geoHaystack index spec: requires a non-zero numeric "bucketSize",
// exactly one "geoHaystack" field (first in the key), and at most one
// additional non-geo field.
void ExpressionParams::parseHaystackParams(const BSONObj& infoObj,
                                           std::string* geoFieldOut,
                                           std::vector<std::string>* otherFieldsOut,
                                           double* bucketSizeOut) {
    BSONElement e = infoObj["bucketSize"];
    uassert(16777, "need bucketSize", e.isNumber());
    *bucketSizeOut = e.numberDouble();
    uassert(16769, "bucketSize cannot be zero", *bucketSizeOut != 0.0);

    // Example:
    // db.foo.ensureIndex({ pos : "geoHaystack", type : 1 }, { bucketSize : 1 })
    BSONObjIterator i(infoObj.getObjectField("key"));
    while (i.more()) {
        BSONElement e = i.next();
        if (e.type() == String && IndexNames::GEO_HAYSTACK == e.valuestr()) {
            uassert(16770, "can't have more than one geo field", geoFieldOut->size() == 0);
            uassert(16771, "the geo field has to be first in index", otherFieldsOut->size() == 0);
            *geoFieldOut = e.fieldName();
        } else {
            // geoSearch currently supports at most one secondary (non-geo) field.
            uassert(16772,
                    "geoSearch can only have 1 non-geo field for now",
                    otherFieldsOut->size() == 0);
            otherFieldsOut->push_back(e.fieldName());
        }
    }
}

// Parses a 2dsphere index spec into S2 indexing parameters, filling in
// version-dependent defaults for S2 cell levels and covering size, then
// validating the resulting level range and index version.
void ExpressionParams::parse2dsphereParams(const BSONObj& infoObj, S2IndexingParams* out) {
    // Set up basic params.
    out->maxKeysPerInsert = 200;

    // Near distances are specified in meters...sometimes.
    out->radius = kRadiusOfEarthInMeters;

    static const std::string kIndexVersionFieldName("2dsphereIndexVersion");
    static const std::string kFinestIndexedLevel("finestIndexedLevel");
    static const std::string kCoarsestIndexedLevel("coarsestIndexedLevel");

    long long indexVersion;

    // Determine which version of this index we're using.  If none was set in the descriptor,
    // assume S2_INDEX_VERSION_1 (alas, the first version predates the existence of the version
    // field).
    bsonExtractIntegerFieldWithDefault(
        infoObj, kIndexVersionFieldName, S2_INDEX_VERSION_1, &indexVersion);

    out->indexVersion = static_cast<S2IndexVersion>(indexVersion);

    // Note: In version > 2, these levels are for non-points.
    // Points are always indexed to the finest level.
    // Default levels were optimized for buildings and state regions.
    long long defaultFinestIndexedLevel = S2::kAvgEdge.GetClosestLevel(110.0 / out->radius);
    long long defaultCoarsestIndexedLevel =
        S2::kAvgEdge.GetClosestLevel(2000 * 1000.0 / out->radius);
    long long defaultMaxCellsInCovering = 20;

    // Versions 1 and 2 used coarser defaults (500m finest / 100km coarsest).
    if (out->indexVersion <= S2_INDEX_VERSION_2) {
        defaultFinestIndexedLevel = S2::kAvgEdge.GetClosestLevel(500.0 / out->radius);
        defaultCoarsestIndexedLevel = S2::kAvgEdge.GetClosestLevel(100.0 * 1000 / out->radius);
        defaultMaxCellsInCovering = 50;
    }

    long long finestIndexedLevel, coarsestIndexedLevel, maxCellsInCovering;

    bsonExtractIntegerFieldWithDefault(
        infoObj, "finestIndexedLevel", defaultFinestIndexedLevel, &finestIndexedLevel);

    bsonExtractIntegerFieldWithDefault(
        infoObj, "coarsestIndexedLevel", defaultCoarsestIndexedLevel, &coarsestIndexedLevel);

    bsonExtractIntegerFieldWithDefault(
        infoObj, "maxCellsInCovering", defaultMaxCellsInCovering, &maxCellsInCovering);

    // This is advisory.
    out->maxCellsInCovering = maxCellsInCovering;

    // These are not advisory.
    out->finestIndexedLevel = finestIndexedLevel;
    out->coarsestIndexedLevel = coarsestIndexedLevel;

    uassert(16747, "coarsestIndexedLevel must be >= 0", out->coarsestIndexedLevel >= 0);
    uassert(16748, "finestIndexedLevel must be <= 30", out->finestIndexedLevel <= 30);
    uassert(16749,
            "finestIndexedLevel must be >= coarsestIndexedLevel",
            out->finestIndexedLevel >= out->coarsestIndexedLevel);

    massert(17395,
            stream() << "unsupported geo index version { " << kIndexVersionFieldName << " : "
                     << out->indexVersion << " }, only support versions: [" << S2_INDEX_VERSION_1
                     << "," << S2_INDEX_VERSION_2 << "," << S2_INDEX_VERSION_3 << "]",
            out->indexVersion == S2_INDEX_VERSION_3 || out->indexVersion == S2_INDEX_VERSION_2 ||
                out->indexVersion == S2_INDEX_VERSION_1);
}

}  // namespace mongo
synopsys-sig/coverity-common-api
src/main/java/com/synopsys/integration/coverity/api/ws/configuration/LdapConfigurationDataObj.java
<filename>src/main/java/com/synopsys/integration/coverity/api/ws/configuration/LdapConfigurationDataObj.java /** * coverity-common-api * * Copyright (c) 2020 Synopsys, Inc. * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package com.synopsys.integration.coverity.api.ws.configuration; import javax.xml.bind.annotation.XmlAccessType; import javax.xml.bind.annotation.XmlAccessorType; import javax.xml.bind.annotation.XmlType; /** * <p>Java class for ldapConfigurationDataObj complex type. * * <p>The following schema fragment specifies the expected content contained within this class. 
* * <pre> * &lt;complexType name="ldapConfigurationDataObj"&gt; * &lt;complexContent&gt; * &lt;restriction base="{http://www.w3.org/2001/XMLSchema}anyType"&gt; * &lt;sequence&gt; * &lt;element name="anonymousBind" type="{http://www.w3.org/2001/XMLSchema}boolean"/&gt; * &lt;element name="baseDN" type="{http://www.w3.org/2001/XMLSchema}string" minOccurs="0"/&gt; * &lt;element name="bindName" type="{http://www.w3.org/2001/XMLSchema}string" minOccurs="0"/&gt; * &lt;element name="bindPassword" type="{http://www.w3.org/2001/XMLSchema}string" minOccurs="0"/&gt; * &lt;element name="groupFilter" type="{http://www.w3.org/2001/XMLSchema}string" minOccurs="0"/&gt; * &lt;element name="groupFullName" type="{http://www.w3.org/2001/XMLSchema}boolean"/&gt; * &lt;element name="groupMember" type="{http://www.w3.org/2001/XMLSchema}string" minOccurs="0"/&gt; * &lt;element name="groupName" type="{http://www.w3.org/2001/XMLSchema}string" minOccurs="0"/&gt; * &lt;element name="groupObjectClass" type="{http://www.w3.org/2001/XMLSchema}string" minOccurs="0"/&gt; * &lt;element name="groupSearchBase" type="{http://www.w3.org/2001/XMLSchema}string" minOccurs="0"/&gt; * &lt;element name="primary" type="{http://www.w3.org/2001/XMLSchema}boolean"/&gt; * &lt;element name="secureConnection" type="{http://www.w3.org/2001/XMLSchema}boolean"/&gt; * &lt;element name="serverDomain" type="{http://www.w3.org/2001/XMLSchema}string" minOccurs="0"/&gt; * &lt;element name="serverDomainIdDataObj" type="{http://ws.coverity.com/v9}serverDomainIdDataObj" minOccurs="0"/&gt; * &lt;element name="serverPort" type="{http://www.w3.org/2001/XMLSchema}long" minOccurs="0"/&gt; * &lt;element name="tlsEnabled" type="{http://www.w3.org/2001/XMLSchema}boolean"/&gt; * &lt;element name="userEmail" type="{http://www.w3.org/2001/XMLSchema}string" minOccurs="0"/&gt; * &lt;element name="userFirstName" type="{http://www.w3.org/2001/XMLSchema}string" minOccurs="0"/&gt; * &lt;element name="userLastName" 
type="{http://www.w3.org/2001/XMLSchema}string" minOccurs="0"/&gt; * &lt;element name="userName" type="{http://www.w3.org/2001/XMLSchema}string" minOccurs="0"/&gt; * &lt;element name="userObjectClass" type="{http://www.w3.org/2001/XMLSchema}string" minOccurs="0"/&gt; * &lt;element name="userSearchBase" type="{http://www.w3.org/2001/XMLSchema}string" minOccurs="0"/&gt; * &lt;/sequence&gt; * &lt;/restriction&gt; * &lt;/complexContent&gt; * &lt;/complexType&gt; * </pre> * * */ @XmlAccessorType(XmlAccessType.FIELD) @XmlType(name = "ldapConfigurationDataObj", propOrder = { "anonymousBind", "baseDN", "bindName", "bindPassword", "groupFilter", "groupFullName", "groupMember", "groupName", "groupObjectClass", "groupSearchBase", "primary", "secureConnection", "serverDomain", "serverDomainIdDataObj", "serverPort", "tlsEnabled", "userEmail", "userFirstName", "userLastName", "userName", "userObjectClass", "userSearchBase" }) public class LdapConfigurationDataObj { protected boolean anonymousBind; protected String baseDN; protected String bindName; protected String bindPassword; protected String groupFilter; protected boolean groupFullName; protected String groupMember; protected String groupName; protected String groupObjectClass; protected String groupSearchBase; protected boolean primary; protected boolean secureConnection; protected String serverDomain; protected ServerDomainIdDataObj serverDomainIdDataObj; protected Long serverPort; protected boolean tlsEnabled; protected String userEmail; protected String userFirstName; protected String userLastName; protected String userName; protected String userObjectClass; protected String userSearchBase; /** * Gets the value of the anonymousBind property. * */ public boolean isAnonymousBind() { return anonymousBind; } /** * Sets the value of the anonymousBind property. * */ public void setAnonymousBind(boolean value) { this.anonymousBind = value; } /** * Gets the value of the baseDN property. 
* * @return * possible object is * {@link String } * */ public String getBaseDN() { return baseDN; } /** * Sets the value of the baseDN property. * * @param value * allowed object is * {@link String } * */ public void setBaseDN(String value) { this.baseDN = value; } /** * Gets the value of the bindName property. * * @return * possible object is * {@link String } * */ public String getBindName() { return bindName; } /** * Sets the value of the bindName property. * * @param value * allowed object is * {@link String } * */ public void setBindName(String value) { this.bindName = value; } /** * Gets the value of the bindPassword property. * * @return * possible object is * {@link String } * */ public String getBindPassword() { return bindPassword; } /** * Sets the value of the bindPassword property. * * @param value * allowed object is * {@link String } * */ public void setBindPassword(String value) { this.bindPassword = value; } /** * Gets the value of the groupFilter property. * * @return * possible object is * {@link String } * */ public String getGroupFilter() { return groupFilter; } /** * Sets the value of the groupFilter property. * * @param value * allowed object is * {@link String } * */ public void setGroupFilter(String value) { this.groupFilter = value; } /** * Gets the value of the groupFullName property. * */ public boolean isGroupFullName() { return groupFullName; } /** * Sets the value of the groupFullName property. * */ public void setGroupFullName(boolean value) { this.groupFullName = value; } /** * Gets the value of the groupMember property. * * @return * possible object is * {@link String } * */ public String getGroupMember() { return groupMember; } /** * Sets the value of the groupMember property. * * @param value * allowed object is * {@link String } * */ public void setGroupMember(String value) { this.groupMember = value; } /** * Gets the value of the groupName property. 
* * @return * possible object is * {@link String } * */ public String getGroupName() { return groupName; } /** * Sets the value of the groupName property. * * @param value * allowed object is * {@link String } * */ public void setGroupName(String value) { this.groupName = value; } /** * Gets the value of the groupObjectClass property. * * @return * possible object is * {@link String } * */ public String getGroupObjectClass() { return groupObjectClass; } /** * Sets the value of the groupObjectClass property. * * @param value * allowed object is * {@link String } * */ public void setGroupObjectClass(String value) { this.groupObjectClass = value; } /** * Gets the value of the groupSearchBase property. * * @return * possible object is * {@link String } * */ public String getGroupSearchBase() { return groupSearchBase; } /** * Sets the value of the groupSearchBase property. * * @param value * allowed object is * {@link String } * */ public void setGroupSearchBase(String value) { this.groupSearchBase = value; } /** * Gets the value of the primary property. * */ public boolean isPrimary() { return primary; } /** * Sets the value of the primary property. * */ public void setPrimary(boolean value) { this.primary = value; } /** * Gets the value of the secureConnection property. * */ public boolean isSecureConnection() { return secureConnection; } /** * Sets the value of the secureConnection property. * */ public void setSecureConnection(boolean value) { this.secureConnection = value; } /** * Gets the value of the serverDomain property. * * @return * possible object is * {@link String } * */ public String getServerDomain() { return serverDomain; } /** * Sets the value of the serverDomain property. * * @param value * allowed object is * {@link String } * */ public void setServerDomain(String value) { this.serverDomain = value; } /** * Gets the value of the serverDomainIdDataObj property. 
* * @return * possible object is * {@link ServerDomainIdDataObj } * */ public ServerDomainIdDataObj getServerDomainIdDataObj() { return serverDomainIdDataObj; } /** * Sets the value of the serverDomainIdDataObj property. * * @param value * allowed object is * {@link ServerDomainIdDataObj } * */ public void setServerDomainIdDataObj(ServerDomainIdDataObj value) { this.serverDomainIdDataObj = value; } /** * Gets the value of the serverPort property. * * @return * possible object is * {@link Long } * */ public Long getServerPort() { return serverPort; } /** * Sets the value of the serverPort property. * * @param value * allowed object is * {@link Long } * */ public void setServerPort(Long value) { this.serverPort = value; } /** * Gets the value of the tlsEnabled property. * */ public boolean isTlsEnabled() { return tlsEnabled; } /** * Sets the value of the tlsEnabled property. * */ public void setTlsEnabled(boolean value) { this.tlsEnabled = value; } /** * Gets the value of the userEmail property. * * @return * possible object is * {@link String } * */ public String getUserEmail() { return userEmail; } /** * Sets the value of the userEmail property. * * @param value * allowed object is * {@link String } * */ public void setUserEmail(String value) { this.userEmail = value; } /** * Gets the value of the userFirstName property. * * @return * possible object is * {@link String } * */ public String getUserFirstName() { return userFirstName; } /** * Sets the value of the userFirstName property. * * @param value * allowed object is * {@link String } * */ public void setUserFirstName(String value) { this.userFirstName = value; } /** * Gets the value of the userLastName property. * * @return * possible object is * {@link String } * */ public String getUserLastName() { return userLastName; } /** * Sets the value of the userLastName property. 
* * @param value * allowed object is * {@link String } * */ public void setUserLastName(String value) { this.userLastName = value; } /** * Gets the value of the userName property. * * @return * possible object is * {@link String } * */ public String getUserName() { return userName; } /** * Sets the value of the userName property. * * @param value * allowed object is * {@link String } * */ public void setUserName(String value) { this.userName = value; } /** * Gets the value of the userObjectClass property. * * @return * possible object is * {@link String } * */ public String getUserObjectClass() { return userObjectClass; } /** * Sets the value of the userObjectClass property. * * @param value * allowed object is * {@link String } * */ public void setUserObjectClass(String value) { this.userObjectClass = value; } /** * Gets the value of the userSearchBase property. * * @return * possible object is * {@link String } * */ public String getUserSearchBase() { return userSearchBase; } /** * Sets the value of the userSearchBase property. * * @param value * allowed object is * {@link String } * */ public void setUserSearchBase(String value) { this.userSearchBase = value; } }
txtbits/daw-java
Interfaces/src/PruebaMatriz.java
<reponame>txtbits/daw-java import maths.*; public class PruebaMatriz { public static void main (String [] args) { int filas = 3; int columnas = 4; double [][] a = new double[filas][columnas]; for (int i=0; i<a.length; i++) { for (int j=0; j<a[i].length; j++) { a[i][j] = 10*Math.random(); System.out.println("a["+i+"] ["+j+"] = "+a[i][j]); } } System.out.println("Minimo : " + Matriz.min(a)); System.out.println("Maximo : " + Matriz.max(a)); System.out.println("Sumatorio : " + Matriz.sum(a)); } }
Montimage/mmt-dpi
src/mmt_tcpip/lib/tcp_segment.h
/**
 * TCP segment
 *
 * Declarations for a doubly linked list of TCP segments, used to buffer
 * segments (e.g. out-of-order arrivals) until their payloads can be
 * reassembled in sequence order.
 */
#ifndef TCP_SEGMENT_H
#define TCP_SEGMENT_H

/* Includes live inside the guard so repeated inclusion is a strict no-op. */
#include <stdlib.h>
#include <stdint.h>

/**
 * Represents a single TCP segment.
 */
typedef struct tcp_seg_struct {
    uint64_t packet_id;          // id of the packet which contains the segment
    uint64_t seq;                // Sequence number
    uint64_t next_seq;           // Next segment sequence number (seq + len)
    uint64_t ack;                // Acknowledgement number
    uint16_t len;                // Length of the segment payload in bytes
    uint8_t *data;               // Segment payload
    struct tcp_seg_struct *next; // Next segment in linked list
    struct tcp_seg_struct *prev; // Previous segment in linked list
} tcp_seg_t;

/**
 * Creates a new TCP segment node.
 * @param packet_id id of the packet which contains the segment
 * @param seq       Sequence number
 * @param next_seq  Next segment sequence number
 * @param ack       Acknowledgement number
 * @param len       Length of the segment payload
 * @param data      Segment payload
 * @return NULL if memory for a new TCP segment cannot be allocated,
 *         otherwise a pointer to the new TCP segment (its list links are NULL)
 */
tcp_seg_t *tcp_seg_new(uint64_t packet_id, uint64_t seq, uint64_t next_seq, uint64_t ack, uint16_t len, uint8_t *data);

/**
 * Frees a single TCP segment.
 * @param seg TCP segment to be freed
 */
void tcp_seg_free(tcp_seg_t *seg);

/**
 * Frees an entire segment linked list.
 * @param head head of the linked list
 */
void tcp_seg_free_list(tcp_seg_t *head);

/**
 * Inserts a new node into a linked list of TCP segments.
 * @param root current root of the linked list
 * @param seg  new node to be inserted
 * @return new root of the linked list
 */
tcp_seg_t *tcp_seg_insert(tcp_seg_t *root, tcp_seg_t *seg);

/**
 * Searches the given linked list for a node with the given sequence number.
 * @param root root of the linked list
 * @param seq  sequence number to search for
 * @return NULL if no node in the list has the given sequence number,
 *         otherwise a pointer to the matching node
 */
tcp_seg_t *tcp_seg_find(tcp_seg_t *root, uint64_t seq);

/**
 * Prints the structure of the given linked list of TCP segments (debugging aid).
 * @param node root of the linked list
 */
void tcp_seg_show_list(tcp_seg_t *node);

/**
 * Prints a single TCP segment (debugging aid).
 * @param node segment to print
 */
void tcp_seg_show(tcp_seg_t *node);

/**
 * Counts the nodes in the linked list.
 * @param node root of the linked list
 * @return number of segments in the linked list
 */
int tcp_seg_size(tcp_seg_t *node);

/*
 * Reassembles segment payloads into the caller-supplied buffer.
 * NOTE(review): exact contract (ordering, return value, how `len` bounds the
 * copy) is defined in the implementation file — confirm there before relying
 * on it.
 */
int tcp_seg_reassembly(uint8_t *data, tcp_seg_t *root, uint32_t len);

#endif // End of TCP_SEGMENT_H
HRashidi/freeCodeCamp
Coding-Interview-Prep/Rosetta-Code/quibble.js
<gh_stars>0 'use strict' function quibble(words) { let line = "{"; for (let i = 0; i < words.length; i++) { const el = words[i]; if(i === 0) { line += `${el}` } else if(i === words.length -1) { line += ` and ${el}` } else { line += `, ${el}` } } line += "}" return line; } console.log(quibble([])); // "{}" console.log(quibble(["ABC", "DEF", "G", "H"])); // "{ABC,DEF,G and H}" console.log(quibble(["ABC", "DEF"])); // "{ABC and DEF}" console.log(quibble(["ABC", "DEF", "G", "H"])); // "{ABC,DEF,G and H}"
situx/kiwi-postgis
src/org/openrdf/query/algebra/evaluation/function/postgis/linestring/exporter/AsEncodedPolyline.java
<filename>src/org/openrdf/query/algebra/evaluation/function/postgis/linestring/exporter/AsEncodedPolyline.java package org.openrdf.query.algebra.evaluation.function.postgis.linestring.exporter; import org.openrdf.model.vocabulary.POSTGIS; import org.locationtech.jts.geom.Coordinate; import org.locationtech.jts.geom.Geometry; import org.locationtech.jts.geom.LineString; import org.openrdf.query.algebra.evaluation.function.postgis.geometry.base.GeometricStringExportFunction; public class AsEncodedPolyline extends GeometricStringExportFunction { @Override public String operation(Geometry geom) { if (geom instanceof LineString) { String result=encodePolyline((LineString)geom); return result; } return null; } public static String encodePolyline(final LineString linestring) { long lastLat = 0; long lastLng = 0; final StringBuffer result = new StringBuffer(); for (final Coordinate point : linestring.getCoordinates()) { long lat = Math.round(point.x * 1e5); long lng = Math.round(point.y * 1e5); long dLat = lat - lastLat; long dLng = lng - lastLng; encode(dLat, result); encode(dLng, result); lastLat = lat; lastLng = lng; } return result.toString(); } private static void encode(long v, StringBuffer result) { v = v < 0 ? ~(v << 1) : v << 1; while (v >= 0x20) { result.append(Character.toChars((int) ((0x20 | (v & 0x1f)) + 63))); v >>= 5; } result.append(Character.toChars((int) (v + 63))); } @Override public String getURI() { return POSTGIS.ST_ASENCODEDPOLYLINE.stringValue(); } }
keremkoseoglu/Kifu
web/static/openui5/sap/ui/core/util/ExportType-dbg.js
/*!
 * OpenUI5
 * (c) Copyright 2009-2020 SAP SE or an SAP affiliate company.
 * Licensed under the Apache License, Version 2.0 - see LICENSE.txt.
 */

// Provides class sap.ui.core.util.ExportType
sap.ui.define(['sap/ui/base/ManagedObject'],
	function(ManagedObject) {
	'use strict';

	/**
	 * Constructor for a new ExportType.
	 *
	 * @param {string} [sId] id for the new control, generated automatically if no id is given
	 * @param {object} [mSettings] initial settings for the new control
	 *
	 * @class
	 * Base export type. Subclasses can be used for {@link sap.ui.core.util.Export Export}.
	 * @extends sap.ui.base.ManagedObject
	 *
	 * @author SAP SE
	 * @version 1.84.9
	 * @since 1.22.0
	 *
	 * @public
	 * @deprecated Since version 1.73
	 * @alias sap.ui.core.util.ExportType
	 */
	var ExportType = ManagedObject.extend('sap.ui.core.util.ExportType', {
		metadata: {
			library: "sap.ui.core",
			properties: {
				/**
				 * File extension.
				 */
				fileExtension: 'string',
				/**
				 * MIME type.
				 */
				mimeType: 'string',
				/**
				 * Charset.
				 */
				charset: 'string',
				/**
				 * Whether to prepend an unicode byte order mark when saving as a file (only applies for utf-8 charset).
				 */
				byteOrderMark: {
					type: 'boolean',
					defaultValue: undefined
				}
			}
		}
	});

	/**
	 * @private
	 */
	ExportType.prototype.init = function() {
		// The Export instance is only pinned during a _generate() run.
		this._oExport = null;
	};

	/**
	 * Handles the generation process of the file.<br>
	 *
	 * Temporarily stores the Export instance so that generate() and the
	 * generator helpers can access it, then clears the reference again.
	 *
	 * @param {sap.ui.core.util.Export} oExport export instance
	 * @return {string} content
	 *
	 * @protected
	 */
	ExportType.prototype._generate = function(oExport) {
		this._oExport = oExport;
		var sContent = this.generate();
		this._oExport = null;
		return sContent;
	};

	/**
	 * Generates the file content.<br>
	 * Should be implemented by the individual types!
	 *
	 * @return {string} content
	 *
	 * @protected
	 */
	ExportType.prototype.generate = function() {
		return '';
	};

	/**
	 * Returns the number of columns.
	 *
	 * @return {int} count
	 *
	 * @protected
	 */
	ExportType.prototype.getColumnCount = function() {
		if (this._oExport) {
			return this._oExport.getColumns().length;
		}
		return 0;
	};

	/**
	 * Returns the number of rows.
	 *
	 * @return {int} count
	 *
	 * @protected
	 */
	ExportType.prototype.getRowCount = function() {
		// Row count is only available while a "rows" binding exists.
		if (this._oExport && this._oExport.getBinding("rows")) {
			return this._oExport.getBinding("rows").getLength();
		}
		return 0;
	};

	/**
	 * Creates a column "generator" (inspired by ES6 Generators).
	 *
	 * Hand-rolled iterator object mimicking an ES6 generator (kept
	 * ES5-compatible): each next() yields { index, name } per column.
	 *
	 * @return {Generator} generator
	 * @protected
	 */
	ExportType.prototype.columnGenerator = function() {
		var i = 0,
			aColumns = this._oExport.getColumns(),
			iColumns = aColumns.length;

		return {
			next: function() {
				if (i < iColumns) {
					var iIndex = i;
					i++;
					return {
						value: {
							index: iIndex,
							name: aColumns[iIndex].getName()
						},
						done: false
					};
				} else {
					return {
						value: undefined,
						done: true
					};
				}
			}
		};
	};

	/**
	 * Creates a cell "generator" (inspired by ES6 Generators).
	 *
	 * Iterates the cells of the row template; each next() yields
	 * { index, content, customData } where customData is the cell's
	 * CustomData entries flattened into a key-value map.
	 *
	 * @return {Generator} generator
	 * @protected
	 */
	ExportType.prototype.cellGenerator = function() {
		var i = 0,
			oRowTemplate = this._oExport.getAggregation('_template'),
			aCells = oRowTemplate.getCells(),
			iCells = aCells.length;

		return {
			next: function() {
				if (i < iCells) {
					var iIndex = i;
					i++;

					// convert customData object array to key-value map
					var mCustomData = {};
					aCells[iIndex].getCustomData().forEach(function(oCustomData) {
						mCustomData[oCustomData.getKey()] = oCustomData.getValue();
					});

					return {
						value: {
							index: iIndex,
							content: aCells[iIndex].getContent(),
							customData: mCustomData
						},
						done: false
					};
				} else {
					return {
						value: undefined,
						done: true
					};
				}
			}
		};
	};

	/**
	 * Creates a row "generator" (inspired by ES6 Generators).
	 *
	 * Iterates the binding contexts of the "rows" binding; each next()
	 * rebinds the shared row template to the current context (side effect)
	 * and yields { index, cells } with a fresh cell generator.
	 *
	 * @return {Generator} generator
	 * @protected
	 */
	ExportType.prototype.rowGenerator = function() {
		var that = this,
			i = 0,
			oExport = this._oExport,
			oBinding = oExport.getBinding("rows"),
			mBindingInfos = oExport.getBindingInfo("rows"),
			aContexts = oBinding.getContexts(0, oBinding.getLength()),
			iContexts = aContexts.length,
			oRowTemplate = oExport.getAggregation('_template');

		return {
			next: function() {
				if (i < iContexts) {
					var iIndex = i;
					i++;
					// Point the shared row template at this row's context so
					// the subsequent cellGenerator() reads this row's values.
					oRowTemplate.setBindingContext(aContexts[iIndex], mBindingInfos.model);
					return {
						value: {
							index: iIndex,
							cells: that.cellGenerator()
						},
						done: false
					};
				} else {
					return {
						value: undefined,
						done: true
					};
				}
			}
		};
	};

	return ExportType;

});
ShaharaSeiun/touhou3d
src/babylon-components/CustomCustomProceduralTexture.js
import { __extends } from 'tslib';
import { Vector3, Vector2 } from '@babylonjs/core/Maths/math.vector';
import { Color4, Color3 } from '@babylonjs/core/Maths/math.color';
import { Texture } from '@babylonjs/core/Materials/Textures/texture';
import { ProceduralTexture } from '@babylonjs/core/Materials/Textures/Procedurals/proceduralTexture';

/**
 * Procedural texturing is a way to programmatically create a texture. There are 2 types of procedural textures:
 * code-only, and code that references some classic 2D images, sometimes called 'refMaps' or 'sampler' images.
 * Custom Procedural textures are the easiest way to create your own procedural in your application.
 * @see https://doc.babylonjs.com/how_to/how_to_use_procedural_textures#creating-custom-procedural-textures
 */

// Number of PIXEL_PACK_BUFFERs cycled per texture; doubles as the readback
// latency (in calls) before a buffer slot is reused.
const readLatency = 16;

// Outstanding GPU fence syncs waiting to be resolved. NOTE(review): nothing in
// this file drains this list — an external loop must poll each entry's `sync`
// and call `promiseResolve`/`promiseReject` — confirm against the caller.
export const allSyncs = {
    syncs: [],
};

/**
 * Starts an asynchronous pixel readback of `texture` into a rotating pool of
 * WebGL2 PIXEL_PACK_BUFFERs and returns a promise that an external poller
 * resolves once the associated GPU fence signals.
 *
 * @param engine    Babylon engine (its raw `_gl` WebGL2 context is used).
 * @param texture   Internal texture to read; the PPB wheel is lazily attached to it.
 * @param width     Read width in pixels.
 * @param height    Read height in pixels.
 * @param faceIndex Cube face index, or -1 for a 2D texture (default -1).
 * @param level     Mip level to read (default 0).
 * @param buffer    Optional pre-allocated destination buffer; allocated if null.
 * @returns Promise resolving to the filled buffer, or null if a fence could not be created.
 */
const _readTexturePixels = function (engine, texture, width, height, faceIndex, level, buffer) {
    if (faceIndex === void 0) { faceIndex = -1; }
    if (level === void 0) { level = 0; }
    if (buffer === void 0) { buffer = null; }

    const numPPB = readLatency;
    var gl = engine._gl;
    if (!gl) {
        throw new Error('Engine does not have gl rendering context.');
    }

    if (!engine._dummyFramebuffer) {
        var dummy = gl.createFramebuffer();
        if (!dummy) {
            throw new Error('Unable to create dummy framebuffer');
        }
        engine._dummyFramebuffer = dummy;
    }

    // Lazily create the per-texture pixel-pack-buffer wheel.
    if (!texture._PPBWheel) {
        texture._PPBWheel = [];
        for (let i = 0; i < numPPB; i++) {
            const newPPB = gl.createBuffer();
            if (!newPPB) {
                throw new Error('Unable to create PPB');
            }
            texture._PPBWheel.push(newPPB);
        }
        texture._activePPB = texture._PPBWheel[0];
        texture._activePPBIndex = 0;
    }

    // Advance to the next PIXEL_PACK_BUFFER in the wheel.
    texture._activePPBIndex = (texture._activePPBIndex + 1) % texture._PPBWheel.length;
    texture._activePPB = texture._PPBWheel[texture._activePPBIndex];

    // Attach the requested face/level to the dummy framebuffer for reading.
    gl.bindFramebuffer(gl.FRAMEBUFFER, engine._dummyFramebuffer);
    if (faceIndex > -1) {
        gl.framebufferTexture2D(
            gl.FRAMEBUFFER,
            gl.COLOR_ATTACHMENT0,
            gl.TEXTURE_CUBE_MAP_POSITIVE_X + faceIndex,
            texture._webGLTexture,
            level
        );
    } else {
        gl.framebufferTexture2D(gl.FRAMEBUFFER, gl.COLOR_ATTACHMENT0, gl.TEXTURE_2D, texture._webGLTexture, level);
    }

    var readType = texture.type !== undefined ? engine._getWebGLTextureType(texture.type) : gl.UNSIGNED_BYTE;
    switch (readType) {
        case gl.UNSIGNED_BYTE:
            if (!buffer) {
                buffer = new Uint8Array(4 * width * height);
            }
            readType = gl.UNSIGNED_BYTE;
            break;
        default:
            if (!buffer) {
                buffer = new Float32Array(4 * width * height);
            }
            readType = gl.FLOAT;
            break;
    }

    // readPixels with a bound PIXEL_PACK_BUFFER is asynchronous: the data lands
    // in the PPB, not in client memory.
    gl.bindBuffer(gl.PIXEL_PACK_BUFFER, texture._activePPB);
    gl.bufferData(gl.PIXEL_PACK_BUFFER, buffer.byteLength, gl.STREAM_READ);
    gl.readPixels(0, 0, width, height, gl.RGBA, readType, 0);
    gl.bindFramebuffer(gl.FRAMEBUFFER, engine._currentFramebuffer);

    var sync = gl.fenceSync(gl.SYNC_GPU_COMMANDS_COMPLETE, 0);
    if (!sync) {
        return null;
    }
    gl.flush();

    let promiseResolve;
    let promiseReject;
    const returnPromise = new Promise(function (resolve, reject) {
        promiseResolve = resolve;
        promiseReject = reject;
    });
    allSyncs.syncs.push({
        sync,
        promiseResolve,
        promiseReject,
        buffer,
        PPB: texture._activePPB,
    });
    return returnPromise;
};

var CustomCustomProceduralTexture = /** @class */ (function (_super) {
    __extends(CustomCustomProceduralTexture, _super);
    /**
     * Instantiates a new Custom Procedural Texture.
     * @param name Define the name of the texture
     * @param texturePath Define the folder path containing all the custom texture related files (config, shaders...)
     * @param size Define the size of the texture to create
     * @param scene Define the scene the texture belongs to
     * @param fallbackTexture Define a fallback texture in case there were issues to create the custom texture
     * @param generateMipMaps Define if the texture should creates mip maps or not
     */
    function CustomCustomProceduralTexture(name, texturePath, size, scene, fallbackTexture, generateMipMaps, isCube, type) {
        var _this = _super.call(this, name, size, null, scene, fallbackTexture, generateMipMaps, isCube, type) || this;
        _this._animate = true;
        _this._time = 0;
        _this._texturePath = texturePath;
        // The path doubles as the fragment shader reference.
        _this.setFragment(_this._texturePath);
        _this.refreshRate = 1;
        return _this;
    }
    /**
     * Is the texture ready to be used ? (rendered at least once)
     * NOTE(review): `this.sleep` is set externally (never in this file) and
     * short-circuits readiness/rendering — confirm against callers.
     * @returns true if ready, otherwise, false.
     */
    CustomCustomProceduralTexture.prototype.isReady = function () {
        if (this.sleep) return false;
        if (!_super.prototype.isReady.call(this)) {
            return false;
        }
        // All sampler textures must be loaded too.
        for (var name in this._textures) {
            var texture = this._textures[name];
            if (!texture.isReady()) {
                return false;
            }
        }
        return true;
    };
    /**
     * Render the texture to its associated render target.
     * @param useCameraPostProcess Define if camera post process should be applied to the texture
     */
    CustomCustomProceduralTexture.prototype.render = function (useCameraPostProcess) {
        if (this.sleep) return;
        var scene = this.getScene();
        if (this._animate && scene) {
            this._time += scene.getAnimationRatio() * 0.03;
            this.updateShaderUniforms();
        }
        _super.prototype.render.call(this, useCameraPostProcess);
    };
    /**
     * Releases GL resources.
     * FIX: the PPB wheel is created on the *internal* texture object
     * (`this._texture`) by _readTexturePixels, not on this instance — the
     * original checked `this._PPBWheel`, which is always undefined, so the
     * pack buffers were never deleted (GPU memory leak).
     */
    CustomCustomProceduralTexture.prototype.dispose = function () {
        var internalTexture = this._texture;
        if (internalTexture && internalTexture._PPBWheel) {
            const engine = this._getEngine();
            const gl = engine && engine._gl;
            if (gl) {
                for (let buf of internalTexture._PPBWheel) {
                    gl.deleteBuffer(buf);
                }
            }
            internalTexture._PPBWheel = null;
            internalTexture._activePPB = null;
        }
        _super.prototype.dispose.call(this);
    };
    /**
     * Update the list of dependant textures samplers in the shader.
     */
    CustomCustomProceduralTexture.prototype.updateTextures = function () {
        for (var i = 0; i < this._config.sampler2Ds.length; i++) {
            this.setTexture(
                this._config.sampler2Ds[i].sample2Dname,
                new Texture(this._texturePath + '/' + this._config.sampler2Ds[i].textureRelativeUrl, this.getScene())
            );
        }
    };
    /**
     * Update the uniform values of the procedural texture in the shader.
     */
    CustomCustomProceduralTexture.prototype.updateShaderUniforms = function () {
        if (this._config) {
            for (var j = 0; j < this._config.uniforms.length; j++) {
                var uniform = this._config.uniforms[j];
                switch (uniform.type) {
                    case 'float':
                        this.setFloat(uniform.name, uniform.value);
                        break;
                    case 'color3':
                        this.setColor3(uniform.name, new Color3(uniform.r, uniform.g, uniform.b));
                        break;
                    case 'color4':
                        this.setColor4(uniform.name, new Color4(uniform.r, uniform.g, uniform.b, uniform.a));
                        break;
                    case 'vector2':
                        this.setVector2(uniform.name, new Vector2(uniform.x, uniform.y));
                        break;
                    case 'vector3':
                        this.setVector3(uniform.name, new Vector3(uniform.x, uniform.y, uniform.z));
                        break;
                    default:
                        throw new Error('Unsupported uniform type: ' + uniform.type);
                }
            }
        }
        // 'time' is always pushed, even without a config.
        this.setFloat('time', this._time);
    };
    /**
     * Asynchronously reads back this texture's pixels (see _readTexturePixels).
     * @returns Promise of the pixel buffer, or null when unavailable.
     */
    CustomCustomProceduralTexture.prototype.readPixels = function (faceIndex, level, buffer) {
        if (faceIndex === void 0) { faceIndex = 0; }
        if (level === void 0) { level = 0; }
        if (buffer === void 0) { buffer = null; }
        if (!this._texture) {
            return null;
        }
        var size = this.getSize();
        var width = size.width;
        var height = size.height;
        var engine = this._getEngine();
        if (!engine) {
            return null;
        }
        if (level !== 0) {
            width = width / Math.pow(2, level);
            height = height / Math.pow(2, level);
            width = Math.round(width);
            height = Math.round(height);
        }
        try {
            if (this._texture.isCube) {
                return _readTexturePixels(engine, this._texture, width, height, faceIndex, level, buffer);
            }
            return _readTexturePixels(engine, this._texture, width, height, -1, level, buffer);
        } catch (e) {
            console.warn(e);
            return null;
        }
    };
    Object.defineProperty(CustomCustomProceduralTexture.prototype, 'animate', {
        /**
         * Define if the texture animates or not.
         */
        get: function () {
            return this._animate;
        },
        set: function (value) {
            this._animate = value;
        },
        enumerable: false,
        configurable: true,
    });
    return CustomCustomProceduralTexture;
})(ProceduralTexture);
export { CustomCustomProceduralTexture };
//# sourceMappingURL=CustomcustomProceduralTexture.js.map
michaelrk02/aksen
frontend/aksen/rpc.js
<filename>frontend/aksen/rpc.js import Channel from './rpc/Channel'; export {Channel};
nghialt/ApprovalTests.Java
approvaltests/src/main/java/org/approvaltests/reporters/GenericDiffReporter.java
package org.approvaltests.reporters; import java.io.File; import java.text.MessageFormat; import java.util.ArrayList; import java.util.Arrays; import java.util.List; import java.util.stream.Collectors; import com.spun.util.ObjectUtils; import com.spun.util.SystemUtils; import com.spun.util.ThreadUtils; import com.spun.util.io.FileUtils; public class GenericDiffReporter implements EnvironmentAwareReporter { public static final String STANDARD_ARGUMENTS = "%s %s"; public static boolean REPORT_MISSING_FILES = false; protected String diffProgram; protected String arguments; protected String diffProgramNotFoundMessage; private List<String> validExtensions; public static List<String> TEXT_FILE_EXTENSIONS = Arrays.asList(".txt", ".csv", ".htm", ".html", ".xml", ".eml", ".java", ".css", ".js", ".json", ".md"); public static List<String> IMAGE_FILE_EXTENSIONS = Arrays.asList(".png", ".gif", ".jpg", ".jpeg", ".bmp", ".tif", ".tiff"); public GenericDiffReporter(String diffProgram) { this(diffProgram, STANDARD_ARGUMENTS, "Couldn't find: " + diffProgram); } public GenericDiffReporter(String diffProgram, String diffProgramNotFoundMessage) { this(diffProgram, STANDARD_ARGUMENTS, diffProgramNotFoundMessage); } private GenericDiffReporter(String diffProgram, String argumentsFormat, String diffProgramNotFoundMessage) { this(diffProgram, argumentsFormat, diffProgramNotFoundMessage, TEXT_FILE_EXTENSIONS); } public GenericDiffReporter(String diffProgram, String argumentsFormat, String diffProgramNotFoundMessage, List<String> validFileExtensions) { this.diffProgram = diffProgram; this.arguments = argumentsFormat; this.diffProgramNotFoundMessage = diffProgramNotFoundMessage; validExtensions = validFileExtensions; } public GenericDiffReporter(DiffInfo info) { this(info.diffProgram, info.parameters, MessageFormat.format("Unable to find program at {0}", info.diffProgram), info.fileExtensions); } @Override public void report(String received, String approved) { if 
(!isWorkingInThisEnvironment(received)) { throw new RuntimeException(diffProgramNotFoundMessage); } FileUtils.createIfNeeded(approved); launch(received, approved); } private void launch(String received, String approved) { try { ProcessBuilder builder = new ProcessBuilder(getCommandLine(received, approved)); preventProcessFromClosing(builder); builder.start(); ThreadUtils.sleep(800); //Give program time to start} } catch (Exception e) { throw ObjectUtils.throwAsError(e); } } private void preventProcessFromClosing(ProcessBuilder builder) { if (!SystemUtils.isWindowsEnviroment()) { File output = new File("/dev/null"); builder.redirectError(output).redirectOutput(output); } } public String[] getCommandLine(String received, String approved) { String full = String.format(arguments, "{received}", "{approved}"); List<String> argsSplitOnSpace = Arrays.stream(full.split(" ")) .map(t -> t.replace("{received}", received).replace("{approved}", approved)).collect(Collectors.toList()); ArrayList<String> commands = new ArrayList<String>(); commands.add(diffProgram); commands.addAll(argsSplitOnSpace); System.out.println(commands); return commands.toArray(new String[0]); } @Override public boolean isWorkingInThisEnvironment(String forFile) { return checkFileExists() && isFileExtensionHandled(forFile); } public boolean checkFileExists() { boolean exists = new File(diffProgram).exists(); if (REPORT_MISSING_FILES && !exists) { System.out.println(String.format("%s can't find '%s'", this.getClass().getSimpleName(), diffProgram)); } return exists; } public boolean isFileExtensionHandled(String forFile) { return isFileExtensionValid(forFile, validExtensions); } public static boolean isFileExtensionValid(String forFile, List<String> validExtensionsWithDot) { String extensionWithDot = FileUtils.getExtensionWithDot(forFile); return validExtensionsWithDot.contains(extensionWithDot); } @Override public String toString() { return getClass().getName(); } }
QuocAnh90/Uintah_Aalto
CCA/Components/MPM/Crack/CrackGeometryFactory.cc
<reponame>QuocAnh90/Uintah_Aalto /* * The MIT License * * Copyright (c) 1997-2019 The University of Utah * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #include <CCA/Components/MPM/Crack/CrackGeometryFactory.h> #include <CCA/Components/MPM/Crack/CrackGeometry.h> #include <CCA/Components/MPM/Crack/NullCrack.h> #include <CCA/Components/MPM/Crack/QuadCrack.h> #include <CCA/Components/MPM/Crack/CurvedQuadCrack.h> #include <CCA/Components/MPM/Crack/TriangularCrack.h> #include <CCA/Components/MPM/Crack/ArcCrack.h> #include <CCA/Components/MPM/Crack/EllipticCrack.h> #include <CCA/Components/MPM/Crack/PartialEllipticCrack.h> #include <Core/Exceptions/ProblemSetupException.h> #include <Core/Malloc/Allocator.h> #include <fstream> #include <iostream> #include <string> using std::cerr; using namespace Uintah; CrackGeometry* CrackGeometryFactory::create(ProblemSpecP& ps) { ProblemSpecP child = ps->findBlock("crack"); if(!child) return scinew NullCrack(ps); for (ProblemSpecP crack_segment_ps = child->findBlock(); crack_segment_ps != nullptr; crack_segment_ps = crack_segment_ps->findNextBlock()) { std::string crack_type = crack_segment_ps->getNodeName(); if (crack_type == "quad") return scinew QuadCrack(crack_segment_ps); else if (crack_type == "curved_quad") return scinew CurvedQuadCrack(crack_segment_ps); else if (crack_type == "triangle") return scinew TriangularCrack(crack_segment_ps); else if (crack_type == "arc") return scinew ArcCrack(crack_segment_ps); else if (crack_type == "ellipse") return scinew EllipticCrack(crack_segment_ps); else if (crack_type == "partial_ellipse") return scinew PartialEllipticCrack(crack_segment_ps); else throw ProblemSetupException("Unknown Crack Segment Type R ("+crack_type+")", __FILE__, __LINE__); } return 0; }
ceekay1991/CallTraceForWeChat
CallTraceForWeChat/CallTraceForWeChat/WeChat_Headers/WCNoteInfo.h
<gh_stars>10-100 // // Generated by class-dump 3.5 (64 bit) (Debug version compiled Sep 17 2017 16:24:48). // // class-dump is Copyright (C) 1997-1998, 2000-2001, 2004-2015 by <NAME>. // #import <objc/NSObject.h> #import "NSCoding-Protocol.h" @class NSArray, NSString; @interface WCNoteInfo : NSObject <NSCoding> { int favLocalId; int editTime; int _fromScene; NSString *editUsr; NSArray *dataList; } @property(nonatomic) int fromScene; // @synthesize fromScene=_fromScene; @property(retain, nonatomic) NSArray *dataList; // @synthesize dataList; @property(retain, nonatomic) NSString *editUsr; // @synthesize editUsr; @property(nonatomic) int editTime; // @synthesize editTime; @property(nonatomic) int favLocalId; // @synthesize favLocalId; - (void).cxx_destruct; - (id)copyWithZone:(struct _NSZone *)arg1; - (id)initWithCoder:(id)arg1; - (void)encodeWithCoder:(id)arg1; - (id)init; @end
tgstyle/MCT-Basemod
src/main/java/mctmods/basemod/blocks/base/BlockBaseOreCracked.java
package mctmods.basemod.blocks.base; import mctmods.basemod.Basemod; import mctmods.basemod.library.util.CreativeTabBM; import net.minecraft.block.BlockFalling; import net.minecraft.block.SoundType; import net.minecraft.block.material.Material; public class BlockBaseOreCracked extends BlockFalling { public BlockBaseOreCracked(Material material,String name) { super(material); setRegistryName(Basemod.MODID, name); setUnlocalizedName(getRegistryName().toString()); setCreativeTab(CreativeTabBM.MCTBASEMOD_TAB); setHarvestLevel("shovel", 0); setSoundType(SoundType.GROUND); } }
DABOZE/Queen-Alexa
node_modules/mathjs/lib/esm/entry/dependenciesNumber/dependenciesEqual.generated.js
<reponame>DABOZE/Queen-Alexa /** * THIS FILE IS AUTO-GENERATED * DON'T MAKE CHANGES HERE */ import { equalScalarDependencies } from './dependenciesEqualScalar.generated.js'; import { typedDependencies } from './dependenciesTyped.generated.js'; import { createEqual } from '../../factoriesNumber.js'; export var equalDependencies = { equalScalarDependencies, typedDependencies, createEqual };
gitter-badger/scalacheck-faker
src/main/scala/faker/time/FutureOffsetDateTime.scala
<reponame>gitter-badger/scalacheck-faker<filename>src/main/scala/faker/time/FutureOffsetDateTime.scala package faker.time import java.time.{OffsetDateTime, ZoneId} import org.scalacheck.Arbitrary final case class FutureOffsetDateTime private (value: OffsetDateTime) extends AnyVal object FutureOffsetDateTime { implicit val futureOffsetDateTimeArbitrary: Arbitrary[FutureOffsetDateTime] = Arbitrary( Arbitrary .arbitrary[FutureInstant] .map(x => FutureOffsetDateTime( OffsetDateTime.ofInstant(x.value, ZoneId.systemDefault()) ) ) ) }
alimate/spring-loaded
testdata/src/main/java/proxy/TestIntfaceA2.java
<filename>testdata/src/main/java/proxy/TestIntfaceA2.java package proxy; public interface TestIntfaceA2 { void m(); void n(); }
noushadali/cordyscoelib
src/java/com/cordys/coe/util/cgc/nom/CordysNomGatewayClient.java
package com.cordys.coe.util.cgc.nom; import java.io.File; import java.util.HashMap; import java.util.Map; import org.apache.log4j.Logger; import com.cordys.coe.util.StringUtils; import com.cordys.coe.util.cgc.CordysGatewayClientBase; import com.cordys.coe.util.cgc.CordysGatewayClientException; import com.cordys.coe.util.cgc.CordysSOAPException; import com.cordys.coe.util.cgc.config.IAuthenticationConfiguration; import com.cordys.coe.util.cgc.config.ICGCConfiguration; import com.cordys.coe.util.cgc.message.CGCMessages; import com.cordys.coe.util.cgc.userinfo.IUserInfo; import com.cordys.coe.util.cgc.userinfo.UserInfoFactory; import com.cordys.coe.util.exceptions.XMLWrapperException; import com.cordys.coe.util.xml.NamespaceDefinitions; import com.cordys.coe.util.xml.nom.XPathHelper; import com.eibus.xml.nom.Document; import com.eibus.xml.nom.Node; import com.eibus.xml.nom.XMLException; import com.eibus.xml.xpath.XPathMetaInfo; /** * This class can be used to communicate with the Cordys Web Gateway. It supports 3 types of authentication: - Basic - NTLM - * Certificates. This class is thread safe. This means that multiple threads can use the same instance of this object to call * methods on the Cordys server. If you need to connect under multiple users you need to make an instance per user.<br> * Example code for NTLM: <code>String sUser = "pgussow"; String sPassword = "password"; String sServer = * "srv-nl-ces20"; String sDomain = "NTDOM"; int iPort = 80; ICordysGatewayClient cgc = new CordysGatewayClient(sUser, * sPassword, sServer, iPort, sDomain); cgc.connect();</code> */ public class CordysNomGatewayClient extends CordysGatewayClientBase implements ICordysNomGatewayClient { /** Holds the logger to use for this class. */ private static final Logger LOG = Logger.getLogger(CordysNomGatewayClient.class); /** A shared NOM document for all instances. */ private static final Document dNomDoc = new Document(); /** Holds the namespace prefix for the SOAP namespace. 
*/ private static final String PRE_SOAP = NamespaceDefinitions.PREFIX_SOAP_1_1; /** Holds the user details. */ protected int m_xLogonInfo; /** Holds the namespace prefixes used in this class. */ private static XPathMetaInfo m_xmi = new XPathMetaInfo(); static { m_xmi.addNamespaceBinding("soap", NamespaceDefinitions.XMLNS_SOAP_1_1); m_xmi.addNamespaceBinding("ldap", "http://schemas.cordys.com/1.1/ldap"); } /** * Constructor. Creates the Cordys Gateway Client for a certificate. * * @param acAuthenticationDetails The authentication details. * @param ccConfiguration The configuration for the gateway. * @throws CordysGatewayClientException In case of any exceptions. */ public CordysNomGatewayClient(IAuthenticationConfiguration acAuthenticationDetails, ICGCConfiguration ccConfiguration) throws CordysGatewayClientException { super(acAuthenticationDetails, ccConfiguration); } /** * This method creates a SOAP message with the given name and namespace. * * @param xRequest The SOAP:Envelope to add it to. * @param sMethod The name of the method. * @param sNamespace The namespace of the method. * @return The NOM node of the method. To get the root element of the message call Node.getRoot() * @throws CordysGatewayClientException In case of any exceptions. */ public int addMethod(int xRequest, String sMethod, String sNamespace) throws CordysGatewayClientException { int xReturn = 0; // First find the SOAP:Body int xBody = XPathHelper.selectSingleNode(xRequest, "//soap:Body", m_xmi); if (xBody != 0) { xReturn = Node.createElement(sMethod, xBody); Node.setAttribute(xReturn, "xmlns", sNamespace); } return xReturn; } /** * This method creates a SOAP message with the given name and namespace. * * @param sMethodName The name of the method. * @param sNamespace The namespace of the method. * @return The NOM node of the method. 
To get the root element of the message call Node.getRoot() * @throws CordysGatewayClientException DOCUMENTME */ public int createMessage(String sMethodName, String sNamespace) throws CordysGatewayClientException { int xReturn = 0; int xEnvelope = 0; // Parse the base request. try { xEnvelope = parseXML(BASE_SOAP_REQUEST); // Create the method element. xReturn = dNomDoc.createElement(sMethodName); Node.setAttribute(xReturn, "xmlns", sNamespace); // Find the SOAP:Body and append the method node. int xNode = XPathHelper.selectSingleNode(xEnvelope, "//soap:Body", m_xmi); if (xNode == 0) { Node.delete(xReturn); Node.delete(xEnvelope); throw new CordysGatewayClientException(CGCMessages.CGC_ERROR_NO_BODY_FOUND); } Node.appendToChildren(xReturn, xNode); } catch (CordysGatewayClientException cgce) { throw cgce; } catch (Exception e) { throw new CordysGatewayClientException(e, CGCMessages.CGC_ERROR_CREATE_MESSAGE, sNamespace, sMethodName); } return xReturn; } /** * Disconnect from the cordys gateway. Free all cordys resources */ @Override public void disconnect() { super.disconnect(); if (m_xLogonInfo != 0) { Node.delete(m_xLogonInfo); m_xLogonInfo = 0; } } /** * This method returns the DN of the authenticated user. * * @return The DN of the authenticated user. */ public String getAuthUserDN() { int xNode = XPathHelper.selectSingleNode(m_xLogonInfo, "//ldap:tuple/ldap:old/ldap:user/ldap:authuserdn", m_xmi); if (xNode == 0) { return null; } return Node.getData(xNode); } /** * This method gets the Logger for this class. * * @return The Logger for this class. */ @Override public Logger getLogger() { return LOG; } /** * This method sends the request to Cordys. The response is put back into an XML structure. The tag returned is the NOM node * of the SOAP:Envelope. * * @param xRequest The request envelope NOM node. * @return The response NOM node. * @throws CordysGatewayClientException In case of any exception. * @throws CordysSOAPException In case of a SOAP fault. 
*/ public int requestFromCordys(int xRequest) throws CordysGatewayClientException, CordysSOAPException { return requestFromCordys(xRequest, getConfiguration().getTimeout()); } /** * This method sends the request to Cordys. The response is put back into an XML structure. The tag returned is the NOM node * of the SOAP:Envelope. * * @param xRequest The request envelope NOM node. * @param lTimeout The timeout to use. * @return The response NOM node. * @throws CordysGatewayClientException In case of any exception. * @throws CordysSOAPException In case of a SOAP fault. */ public int requestFromCordys(int xRequest, long lTimeout) throws CordysGatewayClientException, CordysSOAPException { return requestFromCordys(xRequest, lTimeout, true, null); } /** * This method sends the request to Cordys and returns the response of the method. * * @param aInputSoapRequest The input request. * @param lTimeout The timeout to use. * @return The response of the request. * @throws CordysGatewayClientException In case of any exception. */ public String requestFromCordys(String aInputSoapRequest, long lTimeout) throws CordysGatewayClientException { return requestFromCordys(aInputSoapRequest, lTimeout, true, null, getGatewayURL(), m_sOrganization, null); } /** * This method sends the request to Cordys. The response is put back into an XML structure. The tag returned is the NOM node * of the SOAP:Envelope. * * @param xRequest The request envelope NOM node. * @param lTimeout The timeout to use. * @param sSoapAction SOAP action to be set in the request. * @return The response NOM node. * @throws CordysGatewayClientException In case of any exception. * @throws CordysSOAPException In case of a SOAP fault. */ public int requestFromCordys(int xRequest, long lTimeout, String sSoapAction) throws CordysGatewayClientException, CordysSOAPException { return requestFromCordys(xRequest, lTimeout, true, sSoapAction); } /** * This method sends the request to Cordys. 
The response is put back into an XML structure. The tag returned is the pointer to * the SOAP:Envelope. The resulting methods will ge a prefix 'res'. So if you want to use an XPath on the result use: * '//res:tuple' to get all the tuples. This method will not wait if the serverwatcher indicates the server is down. * * @param xRequest The request envelope NOM node. * @return The response NOM node. * @throws CordysGatewayClientException In case of any exception. * @throws CordysSOAPException In case of a SOAP fault. */ public int requestFromCordysNoBlocking(int xRequest) throws CordysGatewayClientException, CordysSOAPException { return requestFromCordysNoBlocking(xRequest, getConfiguration().getTimeout()); } /** * This method sends the request to Cordys. The response is put back into an XML structure. The tag returned is the pointer to * the SOAP:Envelope. The resulting methods will ge a prefix 'res'. So if you want to use an XPath on the result use: * '//res:tuple' to get all the tuples. This method will not wait if the serverwatcher indicates the server is down. * * @param xRequest The request envelope NOM node. * @param lTimeout The timeout to use. * @return The response NOM node. * @throws CordysGatewayClientException In case of any exception. * @throws CordysSOAPException In case of a SOAP fault. */ public int requestFromCordysNoBlocking(int xRequest, long lTimeout) throws CordysGatewayClientException, CordysSOAPException { return requestFromCordys(xRequest, lTimeout, false, null); } /** * This method is called when the HTTP response code is set to 500. All servers that are basic-profile compliant will return * error code 500 in case of a SOAP fault. The base structure is:<br> * If the response was not valid XML this method will do nothing and expect the calling method to throw an HTTPException. * * @param sHTTPResponse The response from the web server. * @param sRequestXML The request XML (used for filling the exception object with enough information). 
* @throws CordysSOAPException In case of a SOAP fault. * @see com.cordys.coe.util.cgc.CordysGatewayClientBase#checkForAndThrowCordysSOAPException(java.lang.String, * java.lang.String) */ @Override protected void checkForAndThrowCordysSOAPException(String sHTTPResponse, String sRequestXML) throws CordysSOAPException { int iNode = 0; try { iNode = dNomDoc.parseString(sHTTPResponse); } catch (Exception e) { LOG.error("Error parsing the XML", e); } if (iNode != 0) { // Figure out if we can find the SOAP:Fault structure. // Note: This code is for backwards compatibility. Because C3 will return code 500 // in case of a SOAP fault. int iSoapFault = XPathHelper.selectSingleNode(iNode, "//" + PRE_SOAP + ":Fault"); if (iSoapFault != 0) { if (getLogger().isDebugEnabled()) { getLogger().debug("Found a SOAP fault:\n" + Node.writeToString(iSoapFault, false)); } // Create the SoapException object. CordysSOAPException cse = NOMCordysSOAPException.parseSOAPFault(iSoapFault, sRequestXML); throw cse; } } } /** * Sends a login message to the gateway. * * @throws CordysGatewayClientException In case of any exception. 
*/ @Override protected void sendLoginMessage() throws CordysGatewayClientException { // need to send a message to see if the configuration is OK if (getLogger().isDebugEnabled()) { getLogger().debug("Sending logon request to the Cordys server (" + getConfiguration().getHost() + ")"); } int xRequest = 0; int xResponse = 0; try { xRequest = parseXML(XML_GET_USER_DETAILS); xResponse = requestFromCordys(xRequest); int xLogonTmp; xLogonTmp = XPathHelper.selectSingleNode(m_xLogonInfo, "/soap:Envelope/soap:Body", m_xmi); if (xLogonTmp == 0) { throw new CordysGatewayClientException(CGCMessages.CGC_ERROR_NOM_XML, "<SOAP:Body>", Node.writeToString( xResponse, true), Node.writeToString(xRequest, true)); } xLogonTmp = Node.getFirstElement(xLogonTmp); if (m_xLogonInfo != 0) { Node.delete(m_xLogonInfo); } m_xLogonInfo = Node.unlink(xLogonTmp); int xTuple = XPathHelper.selectSingleNode(m_xLogonInfo, ".//ldap:tuple", m_xmi); if (xTuple == 0) { throw new CordysGatewayClientException(CGCMessages.CGC_ERROR_NOM_XML, "<tuple>", Node.writeToString(xResponse, true), Node.writeToString(xRequest, true)); } // If there is a server watcher make sure it know the cgc to use if (m_swWatcher != null) { // TODO: Implement this. 
// m_swWatcher.setCordysGatewayClient(this); } } catch (CordysGatewayClientException cgce) { throw cgce; } catch (Exception e) { throw new CordysGatewayClientException(CGCMessages.CGC_ERROR_LOGIN_FAILED); } finally { if (xRequest != 0) { Node.delete(xRequest); xRequest = 0; } if (xResponse != 0) { Node.delete(xResponse); xResponse = 0; } } } /** * @see com.cordys.coe.util.cgc.CordysGatewayClientBase#parseUserInfo() */ protected IUserInfo parseUserInfo() throws CordysGatewayClientException { if (m_xLogonInfo <= 0) { sendLoginMessage(); } int xTuple = XPathHelper.selectSingleNode(m_xLogonInfo, ".//ldap:tuple", m_xmi); if (xTuple == 0) { throw new CordysGatewayClientException(CGCMessages.CGC_ERROR_NOM_XML, "<tuple>", Node.writeToString(xTuple, true), Node.writeToString(m_xLogonInfo, true)); } return UserInfoFactory.createUserInfo(xTuple); } /** * Returns the SOAP namespace prefix from the SOAP:Envelope. The prefix is returned as "SOAP:" or if the prefix is empty, an * empty string is returned. * * @param iSoapEnvelopeNode SOAP envelope XML node. * @return The namespace prefix. */ private static String getSoapPrefix(int iSoapEnvelopeNode) { int iCount = Node.getNumAttributes(iSoapEnvelopeNode); for (int i = 0; i <= iCount; i++) { String sPrefix = Node.getAttributePrefix(iSoapEnvelopeNode, i); if ((sPrefix == null) || !sPrefix.equals("xmlns")) { continue; } String sName = Node.getAttributeName(iSoapEnvelopeNode, i); if (sName == null) { continue; } String sValue = Node.getAttribute(iSoapEnvelopeNode, sName); if (sValue.toLowerCase().startsWith("http://schemas.xmlsoap.org/")) { String sRes = Node.getAttributeLocalName(iSoapEnvelopeNode, i); return ((sRes != null) && (sRes.length() > 0)) ? (sRes + ":") : ""; } } // Return the default. return "SOAP:"; } /** * This method returns a new instance of the exception object. It parses the XML and initializes the object. * * @param xFault The actual soap fault NOM node. 
* @param xRequestEnvelope The root NOM node of the request that caused this fault. * @return A new exception object representing the exception. */ private static CordysSOAPException parseSOAPFault(int xFault, int xRequestEnvelope) { CordysSOAPException cseReturn = null; if (xFault == 0) { throw new IllegalArgumentException("The XML of the SOAP fault must be provided."); } String sSoapPrefix = getSoapPrefix(xFault); String sCode = null; String sString = null; String sDetailedMessage = null; // A SOAP fault has occurred, so we need to throw it. // Get the fault code. sCode = Node.getDataElement(xFault, sSoapPrefix + "faultcode", ""); sString = Node.getDataElement(xFault, sSoapPrefix + "faultstring", ""); sCode = Node.getDataElement(xFault, sSoapPrefix + "faultcode", ""); // Get the deailted message. int xDetail = 0; xDetail = XPathHelper.selectSingleNode(xFault, ".//soap:detail>", m_xmi); if (xDetail != 0) { StringBuffer sbDetail = new StringBuffer(""); // Write the details. int xExcChild = Node.getFirstChild(xDetail); while (xExcChild != 0) { sbDetail.append(Node.writeToString(xExcChild, true)); sbDetail.append("\n"); xExcChild = Node.getNextSibling(xExcChild); } sDetailedMessage = sbDetail.toString(); } // Write the original XML. String sExceptionXML = Node.writeToString(xFault, true); // Write the request XML to a string. String sRequestXML = null; if (xRequestEnvelope != 0) { sRequestXML = Node.writeToString(xRequestEnvelope, true); } // Create the exception object. cseReturn = new CordysSOAPException(sCode, sString, sDetailedMessage, sExceptionXML, sRequestXML); return cseReturn; } /** * This method returns a new document with the byte [] parsed. * * @param baXML the actual XML. * @return The document with the parsed XML. 
* @throws XMLWrapperException DOCUMENTME */ private int parseXML(byte[] baXML) throws XMLWrapperException { try { return dNomDoc.load(baXML); } catch (XMLException e) { throw new XMLWrapperException(e); } } /** * This method sends the request to Cordys. The response is put back into an XML structure. The tag returned is the NOM node * of the SOAP:Envelope. The resulting methods will ge a prefix 'res'. So if you want to use an XPath on the result use: * '//res:tuple' to get all the tuples. * * @param xRequest The request envelope NOM node. * @param lTimeout The timeout to use. * @param bBlockIfServerIsDown If this is true then the call will block indefinately untill the server is back online. * @param sSoapAction SOAP action to be set in the request. * @return The response NOM node. * @throws CordysGatewayClientException DOCUMENTME * @throws CordysSOAPException DOCUMENTME */ private int requestFromCordys(int xRequest, long lTimeout, boolean bBlockIfServerIsDown, String sSoapAction) throws CordysGatewayClientException, CordysSOAPException { int xReturn = 0; if (getLogger().isDebugEnabled()) { getLogger().debug("Request:\n" + Node.writeToString(xRequest, false)); } Map<String, String> mExtraHeaders = new HashMap<String, String>(); if (sSoapAction != null) { mExtraHeaders.put(SOAP_ACTION_HEADER, sSoapAction); } String responseContent = requestFromCordys(Node.writeToString(xRequest, false), lTimeout, bBlockIfServerIsDown, mExtraHeaders, getGatewayURL(), m_sOrganization, null); boolean bRequestOk = false; try { try { if (!StringUtils.isSet(responseContent)) { throw new CordysGatewayClientException(CGCMessages.CGC_ERROR_EMPTY_BODY); } xReturn = dNomDoc.parseString(responseContent); bRequestOk = true; } catch (Exception e) { throw new CordysGatewayClientException(e, CGCMessages.CGC_ERROR_PARSE_RESPONSE); } if (isCheckingForFaults()) { // Parse for SOAP faults. 
int xFault = XPathHelper.selectSingleNode(xReturn, "/soap:Envelope/soap:Body/soap:Fault"); if (xFault != 0) { throw parseSOAPFault(xFault, xRequest); } } } finally { if (!bRequestOk) { if (xReturn != 0) { Node.delete(xReturn); xReturn = 0; } } } return xReturn; } /** * @see com.cordys.coe.util.cgc.nom.ICordysNomGatewayClient#uploadFile(int, java.io.File) */ @Override public int uploadFile(int xRequest, File file) throws CordysGatewayClientException, CordysSOAPException { int xReturn = 0; if (getLogger().isDebugEnabled()) { getLogger().debug("Request:\n" + Node.writeToString(xRequest, false)); } String responseContent = uploadFile(Node.writeToString(xRequest, false), file, m_sOrganization, getConfiguration() .getTimeout(), null, true); boolean bRequestOk = false; try { try { if (!StringUtils.isSet(responseContent)) { throw new CordysGatewayClientException(CGCMessages.CGC_ERROR_EMPTY_BODY); } xReturn = dNomDoc.parseString(responseContent); bRequestOk = true; } catch (Exception e) { throw new CordysGatewayClientException(e, CGCMessages.CGC_ERROR_PARSE_RESPONSE); } if (isCheckingForFaults()) { // Parse for SOAP faults. int xFault = XPathHelper.selectSingleNode(xReturn, "/soap:Envelope/soap:Body/soap:Fault"); if (xFault != 0) { throw parseSOAPFault(xFault, xRequest); } } } finally { if (!bRequestOk) { if (xReturn != 0) { Node.delete(xReturn); xReturn = 0; } } } return xReturn; } }
ylil93/ydk-go
ydk/models/cisco_ios_xe/checkpoint_archive_oper/checkpoint_archive_oper.go
// This module contains a collection of YANG definitions for // monitoring the checkpoint archives in a Network Element. // Copyright (c) 2016-2017 by Cisco Systems, Inc. // All rights reserved. package checkpoint_archive_oper import ( "fmt" "github.com/CiscoDevNet/ydk-go/ydk" "github.com/CiscoDevNet/ydk-go/ydk/types" "github.com/CiscoDevNet/ydk-go/ydk/types/yfilter" "github.com/CiscoDevNet/ydk-go/ydk/models/cisco_ios_xe" "reflect" ) func init() { ydk.YLogDebug(fmt.Sprintf("Registering top level entities for package checkpoint_archive_oper")) ydk.RegisterEntity("{http://cisco.com/ns/yang/Cisco-IOS-XE-checkpoint-archive-oper checkpoint-archives}", reflect.TypeOf(CheckpointArchives{})) ydk.RegisterEntity("Cisco-IOS-XE-checkpoint-archive-oper:checkpoint-archives", reflect.TypeOf(CheckpointArchives{})) } // CheckpointArchives // Contents of the checkpoint archive information base type CheckpointArchives struct { parent types.Entity YFilter yfilter.YFilter // The maxium number of archives. The type is interface{} with range: 0..255. Max interface{} // The current number of archives. The type is interface{} with range: 0..255. Current interface{} // The most recent archive. The type is string. Recent interface{} // Archive information. 
Archives CheckpointArchives_Archives } func (checkpointArchives *CheckpointArchives) GetFilter() yfilter.YFilter { return checkpointArchives.YFilter } func (checkpointArchives *CheckpointArchives) SetFilter(yf yfilter.YFilter) { checkpointArchives.YFilter = yf } func (checkpointArchives *CheckpointArchives) GetGoName(yname string) string { if yname == "max" { return "Max" } if yname == "current" { return "Current" } if yname == "recent" { return "Recent" } if yname == "archives" { return "Archives" } return "" } func (checkpointArchives *CheckpointArchives) GetSegmentPath() string { return "Cisco-IOS-XE-checkpoint-archive-oper:checkpoint-archives" } func (checkpointArchives *CheckpointArchives) GetChildByName(childYangName string, segmentPath string) types.Entity { if childYangName == "archives" { return &checkpointArchives.Archives } return nil } func (checkpointArchives *CheckpointArchives) GetChildren() map[string]types.Entity { children := make(map[string]types.Entity) children["archives"] = &checkpointArchives.Archives return children } func (checkpointArchives *CheckpointArchives) GetLeafs() map[string]interface{} { leafs := make(map[string]interface{}) leafs["max"] = checkpointArchives.Max leafs["current"] = checkpointArchives.Current leafs["recent"] = checkpointArchives.Recent return leafs } func (checkpointArchives *CheckpointArchives) GetBundleName() string { return "cisco_ios_xe" } func (checkpointArchives *CheckpointArchives) GetYangName() string { return "checkpoint-archives" } func (checkpointArchives *CheckpointArchives) GetBundleYangModelsLocation() string { return cisco_ios_xe.GetModelsPath() } func (checkpointArchives *CheckpointArchives) GetCapabilitiesTable() map[string]string { return cisco_ios_xe.GetCapabilities() } func (checkpointArchives *CheckpointArchives) GetNamespaceTable() map[string]string { return cisco_ios_xe.GetNamespaces() } func (checkpointArchives *CheckpointArchives) SetParent(parent types.Entity) { checkpointArchives.parent = 
parent } func (checkpointArchives *CheckpointArchives) GetParent() types.Entity { return checkpointArchives.parent } func (checkpointArchives *CheckpointArchives) GetParentYangName() string { return "Cisco-IOS-XE-checkpoint-archive-oper" } // CheckpointArchives_Archives // Archive information type CheckpointArchives_Archives struct { parent types.Entity YFilter yfilter.YFilter // List of archives. The type is slice of CheckpointArchives_Archives_Archive. Archive []CheckpointArchives_Archives_Archive } func (archives *CheckpointArchives_Archives) GetFilter() yfilter.YFilter { return archives.YFilter } func (archives *CheckpointArchives_Archives) SetFilter(yf yfilter.YFilter) { archives.YFilter = yf } func (archives *CheckpointArchives_Archives) GetGoName(yname string) string { if yname == "archive" { return "Archive" } return "" } func (archives *CheckpointArchives_Archives) GetSegmentPath() string { return "archives" } func (archives *CheckpointArchives_Archives) GetChildByName(childYangName string, segmentPath string) types.Entity { if childYangName == "archive" { for _, c := range archives.Archive { if archives.GetSegmentPath() == segmentPath { return &c } } child := CheckpointArchives_Archives_Archive{} archives.Archive = append(archives.Archive, child) return &archives.Archive[len(archives.Archive)-1] } return nil } func (archives *CheckpointArchives_Archives) GetChildren() map[string]types.Entity { children := make(map[string]types.Entity) for i := range archives.Archive { children[archives.Archive[i].GetSegmentPath()] = &archives.Archive[i] } return children } func (archives *CheckpointArchives_Archives) GetLeafs() map[string]interface{} { leafs := make(map[string]interface{}) return leafs } func (archives *CheckpointArchives_Archives) GetBundleName() string { return "cisco_ios_xe" } func (archives *CheckpointArchives_Archives) GetYangName() string { return "archives" } func (archives *CheckpointArchives_Archives) GetBundleYangModelsLocation() string { return 
cisco_ios_xe.GetModelsPath() } func (archives *CheckpointArchives_Archives) GetCapabilitiesTable() map[string]string { return cisco_ios_xe.GetCapabilities() } func (archives *CheckpointArchives_Archives) GetNamespaceTable() map[string]string { return cisco_ios_xe.GetNamespaces() } func (archives *CheckpointArchives_Archives) SetParent(parent types.Entity) { archives.parent = parent } func (archives *CheckpointArchives_Archives) GetParent() types.Entity { return archives.parent } func (archives *CheckpointArchives_Archives) GetParentYangName() string { return "checkpoint-archives" } // CheckpointArchives_Archives_Archive // List of archives type CheckpointArchives_Archives_Archive struct { parent types.Entity YFilter yfilter.YFilter // This attribute is a key. The archive number. The type is interface{} with // range: 0..65535. Number interface{} // The name of the archive. The type is string. Name interface{} } func (archive *CheckpointArchives_Archives_Archive) GetFilter() yfilter.YFilter { return archive.YFilter } func (archive *CheckpointArchives_Archives_Archive) SetFilter(yf yfilter.YFilter) { archive.YFilter = yf } func (archive *CheckpointArchives_Archives_Archive) GetGoName(yname string) string { if yname == "number" { return "Number" } if yname == "name" { return "Name" } return "" } func (archive *CheckpointArchives_Archives_Archive) GetSegmentPath() string { return "archive" + "[number='" + fmt.Sprintf("%v", archive.Number) + "']" } func (archive *CheckpointArchives_Archives_Archive) GetChildByName(childYangName string, segmentPath string) types.Entity { return nil } func (archive *CheckpointArchives_Archives_Archive) GetChildren() map[string]types.Entity { children := make(map[string]types.Entity) return children } func (archive *CheckpointArchives_Archives_Archive) GetLeafs() map[string]interface{} { leafs := make(map[string]interface{}) leafs["number"] = archive.Number leafs["name"] = archive.Name return leafs } func (archive 
*CheckpointArchives_Archives_Archive) GetBundleName() string { return "cisco_ios_xe" } func (archive *CheckpointArchives_Archives_Archive) GetYangName() string { return "archive" } func (archive *CheckpointArchives_Archives_Archive) GetBundleYangModelsLocation() string { return cisco_ios_xe.GetModelsPath() } func (archive *CheckpointArchives_Archives_Archive) GetCapabilitiesTable() map[string]string { return cisco_ios_xe.GetCapabilities() } func (archive *CheckpointArchives_Archives_Archive) GetNamespaceTable() map[string]string { return cisco_ios_xe.GetNamespaces() } func (archive *CheckpointArchives_Archives_Archive) SetParent(parent types.Entity) { archive.parent = parent } func (archive *CheckpointArchives_Archives_Archive) GetParent() types.Entity { return archive.parent } func (archive *CheckpointArchives_Archives_Archive) GetParentYangName() string { return "archives" }
hemantbits/ComputeLibrary
documentation/search/files_16.js
var searchData= [ ['wallclocktimer_2ecpp',['WallClockTimer.cpp',['../_wall_clock_timer_8cpp.xhtml',1,'']]], ['wallclocktimer_2eh',['WallClockTimer.h',['../_wall_clock_timer_8h.xhtml',1,'']]], ['warp_5faffine_2ecl',['warp_affine.cl',['../warp__affine_8cl.xhtml',1,'']]], ['warp_5fhelpers_2eh',['warp_helpers.h',['../warp__helpers_8h.xhtml',1,'']]], ['warp_5fperspective_2ecl',['warp_perspective.cl',['../warp__perspective_8cl.xhtml',1,'']]], ['warpaffine_2ecpp',['WarpAffine.cpp',['../validation_2reference_2_warp_affine_8cpp.xhtml',1,'']]], ['warpaffine_2ecpp',['WarpAffine.cpp',['../benchmark_2_c_l_2_warp_affine_8cpp.xhtml',1,'']]], ['warpaffine_2ecpp',['WarpAffine.cpp',['../benchmark_2_n_e_o_n_2_warp_affine_8cpp.xhtml',1,'']]], ['warpaffine_2ecpp',['WarpAffine.cpp',['../validation_2_c_l_2_warp_affine_8cpp.xhtml',1,'']]], ['warpaffine_2ecpp',['WarpAffine.cpp',['../validation_2_n_e_o_n_2_warp_affine_8cpp.xhtml',1,'']]], ['warpaffine_2eh',['WarpAffine.h',['../_warp_affine_8h.xhtml',1,'']]], ['warpperspective_2ecpp',['WarpPerspective.cpp',['../_c_l_2_warp_perspective_8cpp.xhtml',1,'']]], ['warpperspective_2ecpp',['WarpPerspective.cpp',['../_n_e_o_n_2_warp_perspective_8cpp.xhtml',1,'']]], ['warpperspective_2ecpp',['WarpPerspective.cpp',['../reference_2_warp_perspective_8cpp.xhtml',1,'']]], ['warpperspective_2eh',['WarpPerspective.h',['../_warp_perspective_8h.xhtml',1,'']]], ['weightsreshape_2ecpp',['WeightsReshape.cpp',['../_c_l_2_weights_reshape_8cpp.xhtml',1,'']]], ['weightsreshape_2ecpp',['WeightsReshape.cpp',['../reference_2_weights_reshape_8cpp.xhtml',1,'']]], ['weightsreshape_2eh',['WeightsReshape.h',['../_weights_reshape_8h.xhtml',1,'']]], ['weightsretention_2ecpp',['WeightsRetention.cpp',['../_weights_retention_8cpp.xhtml',1,'']]], ['widthconcatenatelayer_2ecpp',['WidthConcatenateLayer.cpp',['../reference_2_width_concatenate_layer_8cpp.xhtml',1,'']]], ['widthconcatenatelayer_2ecpp',['WidthConcatenateLayer.cpp',['../_c_l_2_width_concatenate_layer_8cpp.xhtml',1,'']]], 
['widthconcatenatelayer_2ecpp',['WidthConcatenateLayer.cpp',['../_n_e_o_n_2_width_concatenate_layer_8cpp.xhtml',1,'']]], ['widthconcatenatelayer_2eh',['WidthConcatenateLayer.h',['../_width_concatenate_layer_8h.xhtml',1,'']]], ['window_2eh',['Window.h',['../_window_8h.xhtml',1,'']]], ['window_2einl',['Window.inl',['../_window_8inl.xhtml',1,'']]], ['windowiterator_2ecpp',['WindowIterator.cpp',['../_window_iterator_8cpp.xhtml',1,'']]], ['windowiterator_2eh',['WindowIterator.h',['../_window_iterator_8h.xhtml',1,'']]], ['winograd_2ecpp',['Winograd.cpp',['../_c_l_2_winograd_8cpp.xhtml',1,'']]], ['winograd_2ecpp',['Winograd.cpp',['../reference_2_winograd_8cpp.xhtml',1,'']]], ['winograd_2eh',['Winograd.h',['../_winograd_8h.xhtml',1,'']]], ['winograd_5ffilter_5ftransform_2ecl',['winograd_filter_transform.cl',['../winograd__filter__transform_8cl.xhtml',1,'']]], ['winograd_5finput_5ftransform_2ecl',['winograd_input_transform.cl',['../winograd__input__transform_8cl.xhtml',1,'']]], ['winograd_5foutput_5ftransform_2ecl',['winograd_output_transform.cl',['../winograd__output__transform_8cl.xhtml',1,'']]], ['workload_2eh',['Workload.h',['../_workload_8h.xhtml',1,'']]], ['wrapper_2eh',['wrapper.h',['../wrapper_8h.xhtml',1,'']]] ];
captain-pool/optuna
optuna/trial/_base.py
import abc import datetime from typing import Any from typing import Dict from typing import Optional from typing import Sequence from optuna._deprecated import deprecated from optuna.distributions import BaseDistribution from optuna.distributions import CategoricalChoiceType class BaseTrial(object, metaclass=abc.ABCMeta): """Base class for trials. Note that this class is not supposed to be directly accessed by library users. """ @abc.abstractmethod def suggest_float( self, name: str, low: float, high: float, *, step: Optional[float] = None, log: bool = False, ) -> float: raise NotImplementedError @deprecated("3.0.0", "6.0.0") @abc.abstractmethod def suggest_uniform(self, name: str, low: float, high: float) -> float: raise NotImplementedError @deprecated("3.0.0", "6.0.0") @abc.abstractmethod def suggest_loguniform(self, name: str, low: float, high: float) -> float: raise NotImplementedError @deprecated("3.0.0", "6.0.0") @abc.abstractmethod def suggest_discrete_uniform(self, name: str, low: float, high: float, q: float) -> float: raise NotImplementedError @abc.abstractmethod def suggest_int(self, name: str, low: int, high: int, step: int = 1, log: bool = False) -> int: raise NotImplementedError @abc.abstractmethod def suggest_categorical( self, name: str, choices: Sequence[CategoricalChoiceType] ) -> CategoricalChoiceType: raise NotImplementedError @abc.abstractmethod def report(self, value: float, step: int) -> None: raise NotImplementedError @abc.abstractmethod def should_prune(self) -> bool: raise NotImplementedError @abc.abstractmethod def set_user_attr(self, key: str, value: Any) -> None: raise NotImplementedError @abc.abstractmethod def set_system_attr(self, key: str, value: Any) -> None: raise NotImplementedError @property @abc.abstractmethod def params(self) -> Dict[str, Any]: raise NotImplementedError @property @abc.abstractmethod def distributions(self) -> Dict[str, BaseDistribution]: raise NotImplementedError @property @abc.abstractmethod def 
user_attrs(self) -> Dict[str, Any]: raise NotImplementedError @property @abc.abstractmethod def system_attrs(self) -> Dict[str, Any]: raise NotImplementedError @property @abc.abstractmethod def datetime_start(self) -> Optional[datetime.datetime]: raise NotImplementedError @property def number(self) -> int: raise NotImplementedError
filipecn/maldives
bin/wallet_sim.py
<reponame>filipecn/maldives<filename>bin/wallet_sim.py from datetime import datetime import sys import os import logging sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))) from maldives.bot.brokers.yq_broker import YQBroker from maldives.bot.models.wallet import Wallet logging.basicConfig(format="%(asctime)s: %(message)s", level=logging.INFO, datefmt="%H:%M:%S") dealer = YQBroker() wallet = Wallet() wallet.append('petr4', 50) wallet.append('vale3', 50) results = wallet.backtrace(dealer, datetime(year=2021, day=1, month=1)) print(results)
arie-neural-alpha/trase
frontend/scripts/react-components/legacy-explore/legacy-explore.register.js
import reducerRegistry from 'reducer-registry'; import reducer from './explore.reducer'; import * as legacyExploreActions from './explore.actions'; reducerRegistry.register('legacyExplore', reducer); export { legacyExploreActions };
huangxiaoyu/Elephant
app/src/main/java/com/jun/elephant/common/BaseFuncIml.java
<filename>app/src/main/java/com/jun/elephant/common/BaseFuncIml.java<gh_stars>1000+ /* * Copyright 2016 Freelander * <p> * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * <p> * http://www.apache.org/licenses/LICENSE-2.0 * <p> * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.jun.elephant.common; /** * Created by Jun on 2016/10/17. */ public interface BaseFuncIml { /** 初始化数据方法 */ void initData(); /** 初始化UI控件方法 */ void initView(); /** 初始化事件监听方法 */ void initListener(); /** 初始化界面加载 */ void initLoad(); }
cgarciae/catalyst
catalyst/contrib/models/cv/classification/__init__.py
# flake8: noqa from .mobilenetv2 import InvertedResidual, MobileNetV2 from .mobilenetv3 import MobileNetV3, MobileNetV3Large, MobileNetV3Small __all__ = [ "InvertedResidual", "MobileNetV2", "MobileNetV3", "MobileNetV3Small", "MobileNetV3Large", ]
bhrugesh96/woocommerce
plugins/woocommerce-admin/client/analytics/components/report-chart/index.js
/** * External dependencies */ import { __ } from '@wordpress/i18n'; import { Component } from '@wordpress/element'; import { compose } from '@wordpress/compose'; import { format as formatDate } from '@wordpress/date'; import { withSelect } from '@wordpress/data'; import { get, isEqual } from 'lodash'; import PropTypes from 'prop-types'; import { Chart } from '@woocommerce/components'; import { getReportChartData, getTooltipValueFormat, SETTINGS_STORE_NAME, REPORTS_STORE_NAME, } from '@woocommerce/data'; import { getAllowedIntervalsForQuery, getCurrentDates, getDateFormatsForInterval, getIntervalForQuery, getChartTypeForQuery, getPreviousDate, } from '@woocommerce/date'; /** * Internal dependencies */ import { CurrencyContext } from '../../../lib/currency-context'; import ReportError from '../report-error'; import { getChartMode, getSelectedFilter, createDateFormatter } from './utils'; /** * Component that renders the chart in reports. */ export class ReportChart extends Component { shouldComponentUpdate( nextProps ) { if ( nextProps.isRequesting !== this.props.isRequesting || nextProps.primaryData.isRequesting !== this.props.primaryData.isRequesting || nextProps.secondaryData.isRequesting !== this.props.secondaryData.isRequesting || ! isEqual( nextProps.query, this.props.query ) ) { return true; } return false; } getItemChartData() { const { primaryData, selectedChart } = this.props; const chartData = primaryData.data.intervals.map( function ( interval ) { const intervalData = {}; interval.subtotals.segments.forEach( function ( segment ) { if ( segment.segment_label ) { const label = intervalData[ segment.segment_label ] ? 
segment.segment_label + ' (#' + segment.segment_id + ')' : segment.segment_label; intervalData[ segment.segment_id ] = { label, value: segment.subtotals[ selectedChart.key ] || 0, }; } } ); return { date: formatDate( 'Y-m-d\\TH:i:s', interval.date_start ), ...intervalData, }; } ); return chartData; } getTimeChartData() { const { query, primaryData, secondaryData, selectedChart, defaultDateRange, } = this.props; const currentInterval = getIntervalForQuery( query, defaultDateRange ); const { primary, secondary } = getCurrentDates( query, defaultDateRange ); const chartData = primaryData.data.intervals.map( function ( interval, index ) { const secondaryDate = getPreviousDate( interval.date_start, primary.after, secondary.after, query.compare, currentInterval ); const secondaryInterval = secondaryData.data.intervals[ index ]; return { date: formatDate( 'Y-m-d\\TH:i:s', interval.date_start ), primary: { label: `${ primary.label } (${ primary.range })`, labelDate: interval.date_start, value: interval.subtotals[ selectedChart.key ] || 0, }, secondary: { label: `${ secondary.label } (${ secondary.range })`, labelDate: secondaryDate.format( 'YYYY-MM-DD HH:mm:ss' ), value: ( secondaryInterval && secondaryInterval.subtotals[ selectedChart.key ] ) || 0, }, }; } ); return chartData; } getTimeChartTotals() { const { primaryData, secondaryData, selectedChart } = this.props; return { primary: get( primaryData, [ 'data', 'totals', selectedChart.key ], null ), secondary: get( secondaryData, [ 'data', 'totals', selectedChart.key ], null ), }; } renderChart( mode, isRequesting, chartData, legendTotals ) { const { emptySearchResults, filterParam, interactiveLegend, itemsLabel, legendPosition, path, query, selectedChart, showHeaderControls, primaryData, defaultDateRange, } = this.props; const currentInterval = getIntervalForQuery( query, defaultDateRange ); const allowedIntervals = getAllowedIntervalsForQuery( query, defaultDateRange ); const formats = getDateFormatsForInterval( 
currentInterval, primaryData.data.intervals.length, { type: 'php' } ); const emptyMessage = emptySearchResults ? __( 'No data for the current search', 'woocommerce' ) : __( 'No data for the selected date range', 'woocommerce' ); const { formatAmount, getCurrencyConfig } = this.context; return ( <Chart allowedIntervals={ allowedIntervals } data={ chartData } dateParser={ '%Y-%m-%dT%H:%M:%S' } emptyMessage={ emptyMessage } filterParam={ filterParam } interactiveLegend={ interactiveLegend } interval={ currentInterval } isRequesting={ isRequesting } itemsLabel={ itemsLabel } legendPosition={ legendPosition } legendTotals={ legendTotals } mode={ mode } path={ path } query={ query } screenReaderFormat={ createDateFormatter( formats.screenReaderFormat ) } showHeaderControls={ showHeaderControls } title={ selectedChart.label } tooltipLabelFormat={ createDateFormatter( formats.tooltipLabelFormat ) } tooltipTitle={ ( mode === 'time-comparison' && selectedChart.label ) || null } tooltipValueFormat={ getTooltipValueFormat( selectedChart.type, formatAmount ) } chartType={ getChartTypeForQuery( query ) } valueType={ selectedChart.type } xFormat={ createDateFormatter( formats.xFormat ) } x2Format={ createDateFormatter( formats.x2Format ) } currency={ getCurrencyConfig() } /> ); } renderItemComparison() { const { isRequesting, primaryData } = this.props; if ( primaryData.isError ) { return <ReportError />; } const isChartRequesting = isRequesting || primaryData.isRequesting; const chartData = this.getItemChartData(); return this.renderChart( 'item-comparison', isChartRequesting, chartData ); } renderTimeComparison() { const { isRequesting, primaryData, secondaryData } = this.props; if ( ! 
primaryData || primaryData.isError || secondaryData.isError ) { return <ReportError />; } const isChartRequesting = isRequesting || primaryData.isRequesting || secondaryData.isRequesting; const chartData = this.getTimeChartData(); const legendTotals = this.getTimeChartTotals(); return this.renderChart( 'time-comparison', isChartRequesting, chartData, legendTotals ); } render() { const { mode } = this.props; if ( mode === 'item-comparison' ) { return this.renderItemComparison(); } return this.renderTimeComparison(); } } ReportChart.contextType = CurrencyContext; ReportChart.propTypes = { /** * Filters available for that report. */ filters: PropTypes.array, /** * Whether there is an API call running. */ isRequesting: PropTypes.bool, /** * Label describing the legend items. */ itemsLabel: PropTypes.string, /** * Allows specifying properties different from the `endpoint` that will be used * to limit the items when there is an active search. */ limitProperties: PropTypes.array, /** * `items-comparison` (default) or `time-comparison`, this is used to generate correct * ARIA properties. */ mode: PropTypes.string, /** * Current path */ path: PropTypes.string.isRequired, /** * Primary data to display in the chart. */ primaryData: PropTypes.object, /** * The query string represented in object form. */ query: PropTypes.object.isRequired, /** * Secondary data to display in the chart. */ secondaryData: PropTypes.object, /** * Properties of the selected chart. */ selectedChart: PropTypes.shape( { /** * Key of the selected chart. */ key: PropTypes.string.isRequired, /** * Chart label. */ label: PropTypes.string.isRequired, /** * Order query argument. */ order: PropTypes.oneOf( [ 'asc', 'desc' ] ), /** * Order by query argument. */ orderby: PropTypes.string, /** * Number type for formatting. 
*/ type: PropTypes.oneOf( [ 'average', 'number', 'currency' ] ).isRequired, } ).isRequired, }; ReportChart.defaultProps = { isRequesting: false, primaryData: { data: { intervals: [], }, isError: false, isRequesting: false, }, secondaryData: { data: { intervals: [], }, isError: false, isRequesting: false, }, }; export default compose( withSelect( ( select, props ) => { const { charts, endpoint, filters, isRequesting, limitProperties, query, advancedFilters, } = props; const limitBy = limitProperties || [ endpoint ]; const selectedFilter = getSelectedFilter( filters, query ); const filterParam = get( selectedFilter, [ 'settings', 'param' ] ); const chartMode = props.mode || getChartMode( selectedFilter, query ) || 'time-comparison'; const { woocommerce_default_date_range: defaultDateRange } = select( SETTINGS_STORE_NAME ).getSetting( 'wc_admin', 'wcAdminSettings' ); /* eslint @wordpress/no-unused-vars-before-return: "off" */ const reportStoreSelector = select( REPORTS_STORE_NAME ); const newProps = { mode: chartMode, filterParam, defaultDateRange, }; if ( isRequesting ) { return newProps; } const hasLimitByParam = limitBy.some( ( item ) => query[ item ] && query[ item ].length ); if ( query.search && ! hasLimitByParam ) { return { ...newProps, emptySearchResults: true, }; } const fields = charts && charts.map( ( chart ) => chart.key ); const primaryData = getReportChartData( { endpoint, dataType: 'primary', query, selector: reportStoreSelector, limitBy, filters, advancedFilters, defaultDateRange, fields, } ); if ( chartMode === 'item-comparison' ) { return { ...newProps, primaryData, }; } const secondaryData = getReportChartData( { endpoint, dataType: 'secondary', query, selector: reportStoreSelector, limitBy, filters, advancedFilters, defaultDateRange, fields, } ); return { ...newProps, primaryData, secondaryData, }; } ) )( ReportChart );
emanuellucas2/OVPsimProject
ImperasLib/source/nxp.ovpworld.org/peripheral/iMX6_WDOG/1.0/pse/imx6_wdog_user.c
<reponame>emanuellucas2/OVPsimProject<gh_stars>0 /* * Copyright (c) 2005-2021 Imperas Software Ltd., www.imperas.com * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, * either express or implied. * * See the License for the specific language governing permissions and * limitations under the License. * */ //////////////////////////////////////////////////////////////////////////////// // // W R I T T E N B Y I M P E R A S I G E N // // Imperas/OVP Generated // Fri Jan 6 11:38:59 2012 // //////////////////////////////////////////////////////////////////////////////// #define PREFIX "IMX6_WDOG" #include "pse.igen.h" #define BPORT1 0 #include "pse.macros.igen.h" static bhmEventHandle watchdogInterrupt, watchdogFailure, powerDownFailure; static bhmThreadHandle watchdogThread, powerDownThread; Uns32 lockSeq = 0; Uns32 locks[2] = {0x5555, 0xaaaa}; // The timout to perform double timeoutInterrupt = 0; double timeoutWatchdog = 0.5; #define TOUSEC 1000000 //////////////////////////////// //////////////////////////////// void updateInterrupt() { if(bport1_ab_data.WDOG_WICR.bits.WIE && bport1_ab_data.WDOG_WICR.bits.WTIS) { // assert WDOG ppmWriteNet(handles.WDOG, 1); } else { // clear ppmWriteNet(handles.WDOG, 0); } } void updateStatus(Bool fail) { // update status register and interrupt // fail = 1 when watchdog expires // fail = 0 when reached interupt if (fail && bport1_ab_data.WDOG_WCR.bits.WDT) { // clear other bits bport1_ab_data.WDOG_WRSR.value &= ~(BPORT1_AB_WDOG_WRSR_POR | BPORT1_AB_WDOG_WRSR_SFTW); // reset caused by Timeout bport1_ab_data.WDOG_WRSR.bits.TOUT = 1; bport1_ab_data.WDOG_WICR.bits.WTIS = 1; 
updateInterrupt(); } } // // WDOG Timer // void watchdogTimoutThread(void *user) { double toWatchdogTimeout = timeoutWatchdog; // If timeout interrupt setup if(bport1_ab_data.WDOG_WICR.bits.WIE && timeoutInterrupt){ if(PSE_DIAG_HIGH) { bhmMessage("I", PREFIX "_WT", "Start Watchdog Interrupt %3.1f Seconds", timeoutInterrupt); } bhmWaitDelay(timeoutInterrupt*TOUSEC); if(PSE_DIAG_HIGH) { bhmMessage("I", PREFIX "_WT", "Watchdog Interrupt at %.2f Seconds", bhmGetCurrentTime()/TOUSEC); } bhmTriggerEvent(watchdogInterrupt); updateStatus(0); // calculate remaining timeout to watchdog toWatchdogTimeout -= timeoutInterrupt; } if(PSE_DIAG_HIGH) { bhmMessage("I", PREFIX "_WT", "Start Watchdog %3.1f Seconds", toWatchdogTimeout); } bhmWaitDelay(toWatchdogTimeout*TOUSEC); if(PSE_DIAG_HIGH) { bhmMessage("I", PREFIX "_WT", "Watchdog at %.2f Seconds", bhmGetCurrentTime()/TOUSEC); } bhmTriggerEvent(watchdogFailure); updateStatus(1); watchdogThread = 0; } // // Power Down Timer // void powerDownTimeoutThread(void *user) { if(PSE_DIAG_HIGH) { bhmMessage("I", PREFIX "_PDT", "Start Power Down Timer %3.1f Seconds", (double)(16)); } // start a 16 second timeout bhmWaitDelay(16*TOUSEC); if(PSE_DIAG_MEDIUM) { bhmMessage("I", PREFIX "_PD", "Power Down Timer Failure at %.2f Seconds", bhmGetCurrentTime()/TOUSEC); } bhmTriggerEvent(powerDownFailure); updateStatus(1); powerDownThread = 0; } void updateTimeout() { // update timout value timeoutWatchdog = 0.5 * (bport1_ab_data.WDOG_WCR.bits.WT + 1 ); if (bport1_ab_data.WDOG_WICR.bits.WIE) { // interrupt generation enabled before timeout timeoutInterrupt = timeoutWatchdog - (0.5*bport1_ab_data.WDOG_WICR.bits.WICT); } else { timeoutInterrupt = 0; } if(PSE_DIAG_HIGH) { bhmMessage("I", PREFIX "_TU", "Timeout updated. " "To Interrupt %3.1f Seconds. 
" "To Watchdog %3.1f Seconds", timeoutInterrupt, timeoutWatchdog); } // has been enabled if(watchdogThread) { bhmDeleteThread(watchdogThread); } watchdogThread = bhmCreateThread(watchdogTimoutThread, NULL, "Watchdog Timeout", 0); } //////////////////////////////// Callback stubs //////////////////////////////// PPM_WRITE_CB(regNoDefinitionRead) { Uns32 offset = addr - handles.bport1; bhmMessage("W", PREFIX, "No Write Register Defined at offset 0x%04x", offset); } PPM_READ_CB(regNoDefinitionWrite) { Uns32 offset = addr - handles.bport1; bhmMessage("W", PREFIX, "No Read Register Defined at offset 0x%04x", offset); return 0; } PPM_REG_WRITE_CB(WriteWDOG_WCR) { *(Uns16*)user = data | 0x8f; // mask write once bits (cannot be cleared until reset) if(PSE_DIAG_HIGH) { bhmMessage("I", PREFIX "_WR", "%s: 0x%04x", __FUNCTION__, *(Uns16*)user); } // assert watchdog signal ppmWriteNet(handles.WDOG, bport1_ab_data.WDOG_WCR.bits.WDA ? 0 : 1); if(!bport1_ab_data.WDOG_WCR.bits.SRS) { if(PSE_DIAG_HIGH) { bhmMessage("I", PREFIX "_SR", "Software Reset"); } // clear other bits bport1_ab_data.WDOG_WRSR.value &= ~(BPORT1_AB_WDOG_WRSR_POR | BPORT1_AB_WDOG_WRSR_TOUT); // indicate software reset bport1_ab_data.WDOG_WRSR.bits.SFTW = 1; // generate reset ppmWriteNet(handles.wdog_rst, 1); ppmWriteNet(handles.wdog_rst, 0); } if(bport1_ab_data.WDOG_WCR.bits.WDE && !(data & BPORT1_AB_WDOG_WCR_WDE)){ updateTimeout(); } else if(!bport1_ab_data.WDOG_WCR.bits.WDE && (data & BPORT1_AB_WDOG_WCR_WDE)){ // has been disabled // TODO: When enabled start again from start or continue? 
if(watchdogThread) { bhmDeleteThread(watchdogThread); } } } PPM_REG_WRITE_CB(WriteWDOG_WICR) { *(Uns16*)user = (*(Uns16*)user & 0x7f00) | (data & 0x80ff); if(PSE_DIAG_HIGH) { bhmMessage("I", PREFIX "_WR", "%s: 0x%04x", __FUNCTION__, *(Uns16*)user); } if(bport1_ab_data.WDOG_WICR.bits.WIE && !(data & BPORT1_AB_WDOG_WICR_WIE)){ // interrupt enabled updateTimeout(); } if(data & BPORT1_AB_WDOG_WICR_WTIS) { // clear interrupt bport1_ab_data.WDOG_WICR.bits.WTIS = 0; updateInterrupt(); } } PPM_REG_WRITE_CB(WriteWDOG_WMCR) { // If PDE is set allow software to clear if(bport1_ab_data.WDOG_WMCR.bits.PDE) { *(Uns16*)user = (*(Uns16*)user & 0xfffe) | (data & 0x1); } if(PSE_DIAG_HIGH) { bhmMessage("I", PREFIX "_WR", "%s: 0x%04x", __FUNCTION__, *(Uns16*)user); } if(!bport1_ab_data.WDOG_WMCR.bits.PDE && powerDownThread) { // disable power down counter bhmDeleteThread(powerDownThread); powerDownThread=0; } } PPM_REG_WRITE_CB(WriteWDOG_WSR) { // YOUR CODE HERE (WriteWDOG_WSR) *(Uns16*)user = data; if(PSE_DIAG_HIGH) { bhmMessage("I", PREFIX "_WR", "%s: 0x%04x", __FUNCTION__, *(Uns16*)user); } if (bport1_ab_data.WDOG_WCR.bits.WDE) { // when enabled must go through sequence to update timer if (data == locks[lockSeq]) { lockSeq++; if(lockSeq >= 2) { // correct sequence complete updateTimeout(); lockSeq = 0; } } else { if(PSE_DIAG_LOW) { bhmMessage("W", PREFIX "_LS", "%s: lock 0x%04x expected 0x%04x", __FUNCTION__, *(Uns16*)user, locks[lockSeq]); } // check if it was the first in sequence if (data == locks[0]) { lockSeq = 1; } else { lockSeq = 0; } } } else { // when NOT enabled update timer updateTimeout(); } } //////////////////////////////// //////////////////////////////// PPM_CONSTRUCTOR_CB(constructor) { periphConstructor(); // indicate power on reset bport1_ab_data.WDOG_WRSR.bits.POR = 1; watchdogFailure = bhmCreateNamedEvent("watchdogFault", "Watchdog Timeout"); powerDownFailure = bhmCreateNamedEvent("powerDownFault", "Power Down Timeout"); powerDownThread = 
bhmCreateThread(powerDownTimeoutThread, NULL, "Power Down Timeout", 0); } PPM_DESTRUCTOR_CB(destructor) { } PPM_SAVE_STATE_FN(peripheralSaveState) { } PPM_RESTORE_STATE_FN(peripheralRestoreState) { }
emarc99/SLib
src/slib/crypto/block_cipher.cpp
/*
 * Copyright (c) 2008-2018 SLIBIO <https://github.com/SLIBIO>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "slib/crypto/block_cipher.h"

namespace slib
{

/*
	BlockCipherPadding_PKCS7

	Implements the padding method described in PKCS#5 / PKCS#7 (RFC 5652).
	Padding is appended to the end of the message as follows:
		01
		02 02
		03 03 03
		04 04 04 04
		05 05 05 05 05
	A valid padding value is between 1 and 255, so it fits in a single byte.
*/

	// Writes `padding` bytes, each holding the value `padding`, at `buf`.
	// `buf` must point to at least `padding` writable bytes; `padding`
	// is truncated to 8 bits, matching the PKCS#7 limit of 255.
	void BlockCipherPadding_PKCS7::addPadding(void* buf, sl_size padding)
	{
		sl_uint8* c = (sl_uint8*)buf;
		sl_uint8 n = (sl_uint8)padding;
		for (sl_uint8 i = 0; i < n; i++) {
			c[i] = n;
		}
	}

	// Validates and returns the padding length of the final `blockSize`-byte
	// block at `buf`. Returns 0 when the padding is malformed (length of 0,
	// length larger than the block, or inconsistent padding bytes).
	sl_uint32 BlockCipherPadding_PKCS7::removePadding(const void* buf, sl_uint32 blockSize)
	{
		const sl_uint8* c = (const sl_uint8*)buf;
		sl_uint8 n = c[blockSize - 1];
		// Reject out-of-range lengths up front: the original code ran the
		// verification loop first, silently relying on unsigned wraparound of
		// blockSize - n when n > blockSize, and accepted n == 0 only because
		// returning n happened to equal the error value 0.
		if (n == 0 || n > blockSize) {
			return 0;
		}
		// All padding bytes (positions blockSize-n .. blockSize-2) must equal n;
		// the final byte is n itself by construction.
		for (sl_uint32 i = blockSize - n; i < blockSize - 1; i++) {
			if (c[i] != n) {
				return 0;
			}
		}
		return n;
	}

}
PrachieNaik/DSA
Array/TwoPointers/TwoPointers.cpp
<reponame>PrachieNaik/DSA<filename>Array/TwoPointers/TwoPointers.cpp /* Given a sorted array A (sorted in ascending order), having N integers(can have negative numbers too), find if there exists any pair of elements (A[i], A[j]) such that their sum is equal to X. Method: Two pointers is really an easy and effective technique which is typically used for searching pairs in a sorted array. How does this work? The algorithm basically uses the fact that the input array is sorted. We start the sum of extreme values (smallest and largest) and conditionally move both pointers. We move left pointer i when the sum of A[i] and A[j] is less than X. We do not miss any pair because the sum is already smaller than X. Same logic applies for right pointer j. Time Complexity: O(n), Space Complexity: O(1) */ #include <iostream> using namespace std; // Two pointer technique based solution to find // if there is a pair in A[0..N-1] with a given sum. int isPairSum(int A[], int N, int X) { // represents first pointer int i = 0; // represents second pointer int j = N - 1; while (i < j) { // If we find a pair if (A[i] + A[j] == X) return 1; // If sum of elements at current // pointers is less, we move towards // higher values by doing i++ else if (A[i] + A[j] < X) i++; // If sum of elements at current // pointers is more, we move towards // lower values by doing j-- else j--; } return 0; } // Driver code int main() { // array declaration int arr[] = { 3, 5, 9, 2, 8, 10, 11 }; // value to search int val = 17; // size of the array int arrSize = *(&arr + 1) - arr; // Function call cout << (bool)isPairSum(arr, arrSize, val); return 0; } /* Some examples that can be solved by using two pointers technique: Find the closest pair from two sorted arrays Find the pair in array whose sum is closest to x Find all triplets with zero sum Find a triplet that sum to a given value Find a triplet such that sum of two equals to third element Find four elements that sum to a given value Pythagorean Triplet */