repo_name stringlengths 6 101 | path stringlengths 4 300 | text stringlengths 7 1.31M |
|---|---|---|
koiva/wellington | src/main/java/org/github/rwynn/wellington/transfer/UserDTO.java | package org.github.rwynn.wellington.transfer;
import com.fasterxml.jackson.annotation.JsonInclude;
import org.github.rwynn.wellington.validation.NewUser;
import org.github.rwynn.wellington.validation.StrongPassword;
import org.github.rwynn.wellington.validation.UpdateLock;
import org.github.rwynn.wellington.validation.UpdateRoles;
import org.hibernate.validator.constraints.NotEmpty;
import javax.validation.constraints.NotNull;
import java.io.Serializable;
import java.util.HashSet;
import java.util.Set;
/**
 * Data-transfer object describing a user account for the REST layer.
 * <p>
 * Serialized to JSON with empty/null values omitted. Bean-validation
 * constraints are activated per operation via the {@code NewUser},
 * {@code UpdateLock} and {@code UpdateRoles} groups.
 */
@JsonInclude(JsonInclude.Include.NON_EMPTY)
public class UserDTO implements Serializable {

    @NotEmpty(message = "{org.github.rwynn.wellington.transfer.UserDTO.username.NotEmpty.message}", groups = { NewUser.class, UpdateLock.class, UpdateRoles.class })
    private String username;

    @NotEmpty(message = "{org.github.rwynn.wellington.transfer.UserDTO.password.NotEmpty.message}", groups = NewUser.class)
    @StrongPassword(groups = NewUser.class)
    private String password;

    @NotNull(message = "{org.github.rwynn.wellington.transfer.UserDTO.locked.NotNull.message}", groups = UpdateLock.class)
    private Boolean locked;

    @NotEmpty(message = "{org.github.rwynn.wellington.transfer.UserDTO.authorities.NotEmpty.message}", groups = UpdateRoles.class)
    private Set<String> authorities = new HashSet<String>();

    public String getUsername() {
        return username;
    }

    public void setUsername(String username) {
        this.username = username;
    }

    public String getPassword() {
        return password;
    }

    public void setPassword(String password) {
        this.password = password;
    }

    /** May be {@code null} when the lock state is not part of the request. */
    public Boolean isLocked() {
        return locked;
    }

    public void setLocked(Boolean locked) {
        this.locked = locked;
    }

    public Set<String> getAuthorities() {
        return authorities;
    }

    public void setAuthorities(Set<String> authorities) {
        this.authorities = authorities;
    }

    @Override
    public String toString() {
        StringBuilder text = new StringBuilder("UserDTO{");
        text.append("username='").append(username).append('\'');
        text.append(", password='").append(password).append('\'');
        text.append(", locked=").append(locked);
        text.append(", authorities=").append(authorities);
        text.append('}');
        return text.toString();
    }
}
|
BillyOsvaldo/e-presensi_backend | src/hooks/permissions.js | const { authenticate } = require('@feathersjs/authentication').hooks
const permissions = {}
// Accept either a valid API key or a valid JWT: attempt the apiKey
// strategy first and fall back to jwt when it rejects.
permissions.apiOrJWT = async (context) => {
  try {
    await authenticate('apiKey')(context)
  } catch (apiKeyError) {
    // API-key auth failed; if jwt auth also fails, that error propagates.
    await authenticate('jwt')(context)
  }
}
module.exports = permissions
|
moiify/llofo | src/user/include/da14580.h | #ifndef DA14580_H_
#define DA14580_H_
#include "types.h"
#include "protocol.h"
#define TBEACON_INFO_LEN 22
#define TBEACON_MAC_LEN 6
#define HOST_MAC_LEN 6
#define TBEACON_ID_LEN 12
#define TBEACON_VER_LEN 2
typedef enum
{
NOTIFY_PKE,
NOTIFY_DEFEND_ON,
NOTIFY_DEFEND_OFF,
NOTIFY_FINDME,
NOTIFY_START,
NOTIFY_CONNECTED,
NOTIFY_DISCONNECTED,
NOTIFY_STACKRESTART = 10,
} BLE_EVENT_NOTIFY;
/* Addresses of the peripherals sharing the 485 communication bus. */
typedef enum
{
BLE_ADDRESS = 0x00,//Bluetooth module communication address
BMS_ADDRESS = 0x01,//BMS (battery management system) communication address
BIO_ADDRESS = 0x02,//BIO module communication address
CHECK_ADDRESS = 0x03,//self-test / diagnostics communication address
STC_ADDRESS = 0x05,//STC lock communication address
ETC_ADDRESS = 0X10,//Xiao'an ETC protocol communication address
XH_BMS_ADDRESS = 0x16,//XingHeng BMS communication address
RFID_ADDRESS = 0xFF,//RFID communication protocol address
} ADDRESS_485;
enum
{
BLE_PORT0 = 0X00,
BLE_PORT1 = 0X01,
BLE_PORT2 = 0X02,
};
enum
{
BLE_PIN0 = 0X00,
BLE_PIN1 = 0X01,
BLE_PIN2 = 0X02,
BLE_PIN3 = 0X03,
BLE_PIN4 = 0X04,
BLE_PIN5 = 0X05,
BLE_PIN6 = 0X06,
BLE_PIN7 = 0X07,
BLE_PIN8 = 0X08,
BLE_PIN9 = 0X09,
};
/* Self-test / diagnostic commands. */
enum
{
CMD_CHK_ALL = 0,
CMD_CHK_GSM = 1,
CMD_CHK_BATTERY = 2,
CMD_CHK_ACC = 3,
CMD_CHK_SIMCARD = 4,
CMD_CHK_BRAKE_ON = 5,
CMD_CHK_BRAKE_OFF = 6,
CMD_CHK_BACKSEAT_UNLOCK = 7,
CMD_CHK_SET_DEFEND = 8,
CMD_CHK_END = 9,
CMD_GET_IMEI = 10,
CMD_GET_VERSION_AND_IMEI = 11,
CMD_CHK_BACKSEAT_LOCK = 12,
CMD_CHK_BACKWHEEL_UNLOCK = 13,
CMD_CHK_BACKWHEEL_LOCK = 14,
CMD_CHK_GPS = 15,
CMD_CHK_IS_WHEELSPAN = 16,
CMD_CHK_SADDLE_INPLACE = 20,//K
CMD_CHK_SADDLE_OUTPLACE = 21,//L
CMD_CHK_BACKWHEEL_INPLACE = 22,//M
CMD_CHK_BACKWHEEL_OUTPLACE = 23,//N
CMD_CHK_BMS_1_COM = 24,//O
CMD_CHK_ELEC = 26,
CMD_CHK_AUDIO_CUSTROM_RESULT= 29,//voice-customization result
CMD_CHK_OVER_SPEED = 53,//over-speed alarm signal error
CMD_CHK_ALL_NEW = 66,
CMD_END
};
typedef enum
{
PIN_LEVEL_LOW = 0,
PIN_LEVEL_HIGH = 1,
} BLE_PIN_LEVEL;
//Road-stud (tBeacon) event state
typedef enum{
EVENT_NOTBEACON = 0X00,
EVENT_ACCONTBEACON = 0X01,
EVENT_DEFENDONTBEACON = 0X02,
EVENT_FINDTBEACON = 0X03,
}TBEACON_EVENT_CODE;
#pragma pack(push, 1)
typedef struct
{
u8 signature;
u8 address;
u8 cmd;
u8 length;
u8 data[];
} __attribute__((__packed__)) MSG_BMS_XINGHENG;
#define MSG_HEADER_XINGHENG_LEN sizeof(MSG_BMS_XINGHENG)
typedef struct
{
uint8_t sw;
uint8_t name[15];
uint8_t imei[15];
uint8_t token[4];
} __attribute__((__packed__)) ST_MSG_SETTING_PARAM;
typedef struct
{
u8 cmd;
u8 len;
u8 mode;
u8 gsm;
u8 sw;
u32 volatge_mV;
u8 checksum;
} __attribute__((__packed__))DA14580_STATUS_MSG;
typedef struct
{
u8 cmd;
u8 len;
u8 data;
u8 checksum;
} __attribute__((__packed__))DA14580_SHUTDOWN_MSG;
typedef struct
{
u8 cmd;
u8 length;
u8 data[];
} __attribute__((__packed__)) MSG_HEADER_DATA;
#define MSG_HEADER_485_LEN sizeof(MSG_HEADER_DATA)
typedef struct
{
short signature;
uint8_t address;
uint8_t cmd;
uint8_t length;
uint8_t data[];
} __attribute__((__packed__)) MSG_HEADER_XA;
#define MSG_HEADER_XA_LEN sizeof(MSG_HEADER_XA)
/* Result of a BLE scan for a road stud (tBeacon). */
typedef struct
{
u8 result; //0 = no road stud matching the criteria was found in the scan, 1 = one was found
u8 tBeaconRssi;
u8 thold;
u8 tBeaconMacAddr[TBEACON_MAC_LEN];
u8 tBeaconInfo[TBEACON_INFO_LEN];
}__attribute__((__packed__))BLE_SCAN_TBEACON_RESULT;
/* Decoded road-stud (tBeacon) advertisement payload. */
typedef struct
{
u8 tBeaconSignalStr[2];
u8 tBeaconIDStr[TBEACON_ID_LEN]; //station ID, TBEACON_ID_LEN = 12
u8 tBeaconNumStr; //number of stations
u8 tBeaconPileNumStr; //pile (slot) number
u8 tBeaconSignalThresholdStr[2]; //signal threshold
u8 tBeaconSOCStr[2]; //original comment duplicated "signal threshold"; field name suggests state of charge -- confirm
u8 tBeaconVersionStr[2]; //version, TBEACON_VER_LEN = 2
}__attribute__((__packed__))TBEACON_INFO_STR;
typedef struct
{
u8 event;
u8 tBeaconAddr[6];
u8 tBeaconId[12];
u8 tBeaconSOC;
u16 tBeaconVsn;
s32 lat;
s32 lon;
u32 timestamp;
}__attribute__((__packed__))DA14580_TBEACON_INFO_MSG;
typedef struct
{
uint16_t data;
}__attribute__((__packed__)) DATA_16_BITS;
typedef struct
{
int16_t data;
}__attribute__((__packed__)) DATA_S16_BITS;
typedef struct
{
uint32_t data;
}__attribute__((__packed__)) DATA_U32_BITS;
/* Frame exchanged with the RFID module over the 485 bus. */
typedef struct
{
u16 signature; //frame header, 0xFF02
u8 cmd; //command byte
u8 len; //payload length
u8 data[]; //N payload bytes followed by a two-byte additive checksum
} __attribute__((__packed__))DA14580_RFID485_MSG;
#define DA14580_RFID485_MSG_LEN sizeof(DA14580_RFID485_MSG)
#pragma pack(pop)
typedef int (*DA14580_PROC)(void *);
typedef struct
{
uint8_t cmd;
DA14580_PROC action;
} PROC_MAP;
int da14580_initial(void);
int da14580_sendUartData(uint8_t *msg, uint32_t dataLen);
int da14580_sendDATA(uint8_t address, uint8_t cmd, uint8_t *data, uint8_t dataLen);
void *da14580_allocMsg(uint8_t address, uint8_t cmd, uint8_t *data, uint8_t dataLen);
int da14580_setHeartbeat(u8 set_heartbeat);
int da14580_setBlePinLevel(u8 port, u8 pin, BLE_PIN_LEVEL level);
//int da14580_sendBMSDATA(uint8_t cmd, uint8_t *data, uint8_t dataLen);
//int da14580_setBlePinLevel(uint8_t port , uint8_t pin, BLE_PIN_LEVEL level);
#endif /* DA14580_H_ */
|
mtomono/TERSE3 | src/shapeCollection/CubeInt.java | <reponame>mtomono/TERSE3
/*
Copyright 2017, 2018 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and limitations under the License.
*/
package shapeCollection;
import collection.TList;
import math.C;
/**
*
* @author masao
*/
/**
 * A lattice of axis-aligned integer cells whose edge length along each
 * dimension is given by {@code factor}. Points are integer coordinate lists
 * with the same arity as {@code factor}.
 * NOTE(review): TList is a project collection type; the comments below assume
 * TList.pair applies the lambda element-wise -- confirm against collection.TList.
 */
public class CubeInt {
    /** Cell edge length along each dimension. */
    public TList<Integer> factor;
    public CubeInt(int... factor) {
        this(TList.sofi(factor));
    }
    public CubeInt(TList<Integer> factor) {
        this.factor=factor;
    }
    /**
     * Maps a point to the index of the cell containing it
     * (integer division per axis).
     * NOTE(review): this result is materialized via sfix() but center()'s is
     * not -- confirm whether that asymmetry is intentional.
     */
    public TList<Integer> mesh(TList<Integer> point) {
        return point.pair(factor,(a,b)->a/b).sfix();
    }
    /** Maps a cell index to the coordinates of that cell's center point. */
    public TList<Integer> center(TList<Integer> mesh) {
        return mesh.pair(factor,(a,b)->a*b+b/2);
    }
    /** Volume of a single cell: the product of all factors. */
    public Integer volume() {
        return factor.toC(i->i, C.i).pai().body();
    }
}
|
MomoPewpew/Quark | src/main/java/vazkii/quark/world/block/BlockBiotite.java | <reponame>MomoPewpew/Quark<gh_stars>0
/**
* This class was created by <Vazkii>. It's distributed as
* part of the Quark Mod. Get the Source Code in github:
* https://github.com/Vazkii/Quark
*
* Quark is Open Source and distributed under the
* CC-BY-NC-SA 3.0 License: https://creativecommons.org/licenses/by-nc-sa/3.0/deed.en_GB
*
* File Created @ [18/04/2016, 17:45:33 (GMT)]
*/
package vazkii.quark.world.block;
import net.minecraft.block.Block;
import net.minecraft.block.BlockQuartz;
import net.minecraft.block.SoundType;
import net.minecraft.block.properties.IProperty;
import net.minecraft.block.state.IBlockState;
import net.minecraft.client.renderer.ItemMeshDefinition;
import net.minecraft.client.renderer.block.model.ModelResourceLocation;
import net.minecraft.client.renderer.block.statemap.IStateMapper;
import net.minecraft.client.renderer.block.statemap.StateMapperBase;
import net.minecraft.item.EnumRarity;
import net.minecraft.item.ItemStack;
import net.minecraft.util.ResourceLocation;
import net.minecraftforge.fml.relauncher.Side;
import net.minecraftforge.fml.relauncher.SideOnly;
import vazkii.quark.base.block.IQuarkBlock;
import vazkii.quark.base.lib.LibMisc;
import javax.annotation.Nonnull;
/**
 * Decorative biotite block with three variants (plain, chiseled, pillar).
 * Reuses vanilla {@link BlockQuartz}'s VARIANT state handling and remaps
 * its model variants onto Quark's biotite models via a custom state mapper.
 */
public class BlockBiotite extends BlockQuartz implements IQuarkBlock {

    // Registry names of the three variants, indexed like the quartz variants.
    private final String[] variants;
    // Base registry name ("biotite_block").
    private final String bareName;

    public BlockBiotite() {
        setSoundType(SoundType.STONE);
        setHardness(0.8F);
        String name = "biotite_block";
        variants = new String[] { name, "chiseled_biotite_block", "pillar_biotite_block" };
        bareName = name;
        setTranslationKey(name);
    }

    @Nonnull
    @Override
    public Block setTranslationKey(@Nonnull String name) {
        // Also registers the block under this name.
        // NOTE(review): register(name) presumably comes from IQuarkBlock -- confirm.
        super.setTranslationKey(name);
        register(name);
        return this;
    }

    @Override
    public String getBareName() {
        return bareName;
    }

    @Override
    public String[] getVariants() {
        return variants;
    }

    // No custom item mesh; the default model lookup is used.
    @Override
    @SideOnly(Side.CLIENT)
    public ItemMeshDefinition getCustomMeshDefinition() {
        return null;
    }

    @Override
    public EnumRarity getBlockRarity(ItemStack stack) {
        return EnumRarity.COMMON;
    }

    @Override
    public IProperty[] getIgnoredProperties() {
        return new IProperty[0];
    }

    // Variant state is carried by BlockQuartz.VARIANT, not a Quark property.
    @Override
    public IProperty getVariantProp() {
        return null;
    }

    @Override
    public Class getVariantEnum() {
        return null;
    }

    // Maps each quartz VARIANT value onto the matching biotite model variant.
    @Override
    @SideOnly(Side.CLIENT)
    public IStateMapper getStateMapper() {
        return new StateMapperBase() {
            @Nonnull
            @Override
            public ModelResourceLocation getModelResourceLocation(@Nonnull IBlockState state) {
                BlockQuartz.EnumType type = state.getValue(BlockQuartz.VARIANT);
                ResourceLocation baseLocation = new ResourceLocation(LibMisc.MOD_ID.toLowerCase(), "biotite_block");
                switch (type) {
                case CHISELED: return new ModelResourceLocation(baseLocation, "chiseled");
                case LINES_Y: return new ModelResourceLocation(baseLocation, "axis=y");
                case LINES_X: return new ModelResourceLocation(baseLocation, "axis=x");
                case LINES_Z: return new ModelResourceLocation(baseLocation, "axis=z");
                default: return new ModelResourceLocation(baseLocation, "normal");
                }
            }
        };
    }
}
|
e10101/incubator-superset | superset/assets/src/dashboard/util/charts/getFormDataWithExtraFilters.js | import getEffectiveExtraFilters from './getEffectiveExtraFilters';
// We cache formData objects so that our connected container components don't always trigger
// render cascades. we cannot leverage the reselect library because our cache size is >1
const cachedDashboardMetadataByChart = {};
const cachedFiltersByChart = {};
const cachedFormdataByChart = {};
// Builds the chart's formData with the dashboard's effective extra filters
// merged in. Results are memoized per sliceId by reference equality on the
// dashboard metadata and filters, so connected containers receive a stable
// object and avoid render cascades.
export default function getFormDataWithExtraFilters({
  chart = {},
  dashboardMetadata,
  filters,
  sliceId,
}) {
  const metadataUnchanged =
    (cachedDashboardMetadataByChart[sliceId] || {}) === dashboardMetadata;
  const filtersUnchanged = (cachedFiltersByChart[sliceId] || {}) === filters;
  const hasCachedFormData = !!cachedFormdataByChart[sliceId];

  if (metadataUnchanged && filtersUnchanged && hasCachedFormData) {
    return cachedFormdataByChart[sliceId];
  }

  const formData = {
    ...chart.formData,
    extra_filters: getEffectiveExtraFilters({
      dashboardMetadata,
      filters,
      sliceId,
    }),
  };

  // Refresh the per-slice caches for the next call.
  cachedDashboardMetadataByChart[sliceId] = dashboardMetadata;
  cachedFiltersByChart[sliceId] = filters;
  cachedFormdataByChart[sliceId] = formData;

  return formData;
}
|
loganbyers/frictionless-py | setup.py | import os
import io
from setuptools import setup, find_packages
# Helpers
def read(*paths):
    """Read a UTF-8 text file located relative to this file's directory.

    Args:
        *paths: path components joined onto this file's directory.

    Returns:
        The file's contents with leading/trailing whitespace stripped.
    """
    basedir = os.path.dirname(__file__)
    fullpath = os.path.join(basedir, *paths)
    # Use a context manager so the handle is closed deterministically
    # (the original left the file object to the garbage collector).
    with io.open(fullpath, encoding="utf-8") as handle:
        return handle.read().strip()
# Prepare
PACKAGE = "frictionless"
NAME = PACKAGE.replace("_", "-")
TESTS_REQUIRE = [
"mypy",
"moto",
"black",
"jinja2",
"pylama",
"pytest",
"ipython",
"pymysql",
"psycopg2",
"notedown",
"ipykernel",
"nbconvert",
"pytest-cov",
"pytest-vcr",
"oauth2client",
"python-dotenv",
"docstring-parser",
]
EXTRAS_REQUIRE = {
"bigquery": ["google-api-python-client>=1.12.1"],
"ckan": ["ckanapi>=4.3"],
"dataflows": ["dataflows>=0.1"],
"excel": ["openpyxl>=3.0", "xlrd>=1.2", "xlwt>=1.2"],
"gsheets": ["pygsheets>=2.0"],
"html": ["pyquery>=1.4"],
"json": ["ijson>=3.0", "jsonlines>=1.2"],
"ods": ["ezodf>=0.3"],
"pandas": ["pandas>=1.0"],
"s3": ["boto3>=1.9"],
"server": ["gunicorn>=20.0", "flask>=1.1"],
"spss": ["savReaderWriter>=3.0"],
"sql": ["sqlalchemy>=1.3"],
"dev": TESTS_REQUIRE,
}
INSTALL_REQUIRES = [
"petl>=1.6",
"pyyaml>=5.3",
"isodate>=0.6",
"chardet>=3.0",
"requests>=2.10",
"jsonschema>=2.5",
"simpleeval>=0.9",
"stringcase>=1.2",
"typer[all]>=0.3",
"validators>=0.18",
"python-slugify>=1.2",
"python-dateutil>=2.8",
]
README = read("README.md")
VERSION = read(PACKAGE, "assets", "VERSION")
PACKAGES = find_packages(exclude=["tests"])
ENTRY_POINTS = {"console_scripts": ["frictionless = frictionless.__main__:program"]}
# Run
setup(
name=NAME,
version=VERSION,
packages=PACKAGES,
include_package_data=True,
install_requires=INSTALL_REQUIRES,
tests_require=TESTS_REQUIRE,
extras_require=EXTRAS_REQUIRE,
entry_points=ENTRY_POINTS,
zip_safe=False,
long_description=README,
long_description_content_type="text/markdown",
description="Frictionless is a framework to describe, extract, validate, and transform tabular data",
author="Open Knowledge Foundation",
author_email="<EMAIL>",
url="https://github.com/frictionlessdata/frictionless-py",
license="MIT",
keywords=[
"data validation",
"frictionless data",
"open data",
"json schema",
"json table schema",
"data package",
"tabular data package",
],
classifiers=[
"Development Status :: 4 - Beta",
"Environment :: Console",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Topic :: Software Development :: Libraries :: Python Modules",
],
)
|
liftchampion/nativejson-benchmark | thirdparty/ccan/ccan/asearch/test/run-strings.c | #include <ccan/asearch/asearch.h>
#include <ccan/array_size/array_size.h>
#include <ccan/tap/tap.h>
#include <stdlib.h>
#include <ccan/asearch/asearch.c>
/* asearch comparison callback: compares the integer key with the numeric
 * value of a string element, strcmp-style (<0 / 0 / >0). ctx is unused.
 * The plain subtraction could overflow for extreme magnitudes, but the
 * test inputs below are single digits. */
static int cmp(const int *key, const char *const *elem, void *ctx)
{
return *key - atoi(*elem);
}
/* Searches the sorted string array for the element equal to 7 and checks
 * that asearch returns a pointer to args[2]. */
int main(void)
{
const char *args[] = { "1", "4", "7", "9" };
int key = 7;
const char **p;
plan_tests(1);
p = asearch(&key, args, ARRAY_SIZE(args), cmp, NULL);
ok1(p == &args[2]);
return exit_status();
}
|
jacksonicson/paper.IS2015 | control/Control/src/balancer/placement_bestfit_demand.py | <reponame>jacksonicson/paper.IS2015
from logs import sonarlog
import conf_domainsize
import conf_nodes
import placement_bestfit
import numpy as np
# Setup Sonar logging
logger = sonarlog.getLogger('placement')
class BestFitDemand(placement_bestfit.BestFit):
    """Best-fit placement that ranks hosts by measured demand.

    CPU load is the 95th percentile of actual node readings, while memory
    load is the sum of the hosted domains' static reservations.
    """

    def sort(self, host_choice, _key):
        # Ascending sort: the smallest metric (tightest fit) comes first.
        return sorted(host_choice, key = _key)

    def test_nodes(self, new_domain, node_list):
        # Returns the name of the best-fitting node for new_domain, or None
        # when no node has enough CPU and memory headroom.
        host_choice = []
        for node in node_list:
            # Get actual CPU measurements (95th percentile of the readings)
            curr_cpu_demand = np.percentile(node.get_readings(), 95)
            # Memory demand is calculated by summing up all VM reservations
            mem_load = 0
            # Calculate the node utilization by accumulating all domain loads
            for dom in node.domains.values():
                spec = dom.domain_configuration.get_domain_spec()
                mem_load += spec.total_memory()
            # Remaining memory if the new domain were placed on this node
            spec = conf_domainsize.get_domain_spec(new_domain.size)
            mem_delta = conf_nodes.NODE_MEM - (mem_load + spec.total_memory())
            # Estimated CPU demand of the new domain (95th percentile load)
            vm_cpu_demand = conf_nodes.to_node_load(95, new_domain.size)
            cpu_delta = conf_nodes.UTIL - curr_cpu_demand - vm_cpu_demand
            # Fit metric: product of remaining CPU and memory headroom
            metric = cpu_delta * mem_delta
            # Server is not able to handle the domain
            if cpu_delta < 0 or mem_delta < 0:
                continue
            # Add metric to the choice list
            host_choice.append((node.name, metric))
        # Check if we found at least one host
        if not host_choice:
            return None
        # Sort host choice list (ascending by metric)
        host_choice = self.sort(host_choice, lambda x: x[1])
        # Pick the one with the lowest metric (best fit)
        return host_choice[0][0]
|
personal-social-media/rmega | lib/rmega/cli.rb | <gh_stars>100-1000
require 'optparse'
require 'io/console'
require 'yaml'
module Rmega
  module CLI
    # Shared helpers for the rmega command-line executables: option parsing,
    # configuration-file loading, and remote-storage path traversal.
    module Helpers
      # Memoized CLI options, seeded from the ~/.rmega configuration file.
      def cli_options
        $cli_options ||= read_configuration_file
      end

      # Prompts for the account password without echoing it to the terminal.
      def cli_prompt_password
        print("Enter password: ")
        password = STDIN.noecho(&:gets)
        password = password[0..-2] if password.end_with?("\n")
        puts
        return password
      end

      # True when the given string is a MEGA URL.
      def mega_url?(url)
        Nodes::Factory.url?(url)
      end

      def configuration_filepath
        File.expand_path('~/.rmega')
      end

      # Loads ~/.rmega (YAML) and returns its options with symbolized keys.
      # Returns an empty hash when the file does not exist; parse errors
      # propagate to the caller.
      def read_configuration_file
        # File.exist? replaces File.exists?, which was deprecated and
        # removed in Ruby 3.2.
        return {} unless File.exist?(configuration_filepath)
        opts = YAML.load_file(configuration_filepath)
        opts.keys.each { |k| opts[k.to_sym] = opts.delete(k) } # symbolize_keys!
        return opts
      end

      # Copies the CLI options onto Rmega.options and configures logging.
      def apply_cli_options
        cli_options.each do |key, value|
          Rmega.options.__send__("#{key}=", value)
        end
        Rmega.logger.level = ::Logger::DEBUG if cli_options[:debug]
        Rmega.options.show_progress = true
        if Thread.respond_to?(:report_on_exception) and !cli_options[:debug]
          Thread.report_on_exception = false
        end
      end

      # Registers the common OptionParser switches shared by all commands.
      def apply_opt_parser_options(opts)
        opts.on("-t NUM", "--thread_pool_size", "Number of threads to use [1-8], default and recommended is #{Rmega.options.thread_pool_size}") { |num|
          # Clamp the requested thread count into the supported [1, 8] range.
          num = num.to_i
          if num <= 0
            num = 1
          elsif num > 8
            num = 8
          end
          cli_options[:thread_pool_size] = num
        }
        opts.on("--proxy-addr ADDRESS", "Http proxy address") { |value|
          cli_options[:http_proxy_address] = value
        }
        opts.on("--proxy-port PORT", "Http proxy port") { |value|
          cli_options[:http_proxy_port] = value.to_i
        }
        opts.on("-u", "--user USER_EMAIL", "User email address") { |value|
          cli_options[:user] = value
        }
        opts.on("--pass [USER_PASSWORD]", "User password (if omitted will prompt for it)") { |value|
          cli_options[:pass] = value
        }
        opts.on("--debug", "Debug mode") {
          cli_options[:debug] = true
        }
        opts.on("-v", "--version", "Print the version number") {
          puts Rmega::VERSION
          puts Rmega::HOMEPAGE
          exit(0)
        }
      end

      # Walks the remote storage tree following a "/"-separated path and
      # returns the matching node, or nil when a component is not found.
      # Name comparison is case-insensitive. With opts[:only_folders] the
      # final component is only matched against folders.
      def traverse_storage(node, path, opts = {})
        path.gsub!(/^\/|\/$/, "")
        curr_part = path.split("/")[0] || ""
        last_part = (path.split("/")[1..-1] || []).join("/")
        if curr_part.empty?
          if node.type == :root or node.type == :folder
            return node
          else
            return nil
          end
        else
          # Block parameter renamed from "n" to avoid shadowing the local.
          n = node.folders.find { |child| child.name.casecmp(curr_part).zero? }
          n ||= node.files.find { |child| child.name.casecmp(curr_part).zero? } unless opts[:only_folders]
          if last_part.empty?
            return n
          else
            return traverse_storage(n, last_part)
          end
        end
      end

      # Wraps a CLI command body: applies options, reports interrupts
      # cleanly, and prints (or re-raises in debug mode) other errors.
      def cli_rescue
        apply_cli_options
        yield
      rescue Interrupt
        puts "\nInterrupted"
      rescue Exception => ex
        if cli_options[:debug]
          raise(ex)
        else
          $stderr.puts "\nERROR: #{ex.message}"
        end
      end
    end
  end
end
|
kkptm/CppLikeCSharp | System.Drawing/include/Switch/System/Drawing/Image.hpp | <reponame>kkptm/CppLikeCSharp
/// @file
/// @brief Contains Switch::System::Drawing::Image class.
#pragma once
#include <Switch/System/Object.hpp>
#include <Switch/System/String.hpp>
#include <Switch/RefPtr.hpp>
#include <Switch/System/Array.hpp>
#include <Switch/System/IComparable.hpp>
#include <Switch/System/IO/Stream.hpp>
#include "../../SystemDrawingExport.hpp"
#include "Imaging/ColorPalette.hpp"
#include "Imaging/ImageFlags.hpp"
#include "Imaging/ImageFormat.hpp"
#include "Imaging/PixelFormat.hpp"
#include "Size.hpp"
#include "SizeF.hpp"
/// @brief The Switch namespace contains all fundamental classes to access Hardware, Os, System, and more.
namespace Switch {
/// @cond
/// TODO : To remove as soon as possible...
namespace Resources {
class Image;
}
/// @endcond
/// @brief The System namespace contains fundamental classes and base classes that define commonly-used value and reference data types, events and event handlers, interfaces, attributes, and processing exceptions.
namespace System {
/// @brief The System::Drawing namespace provides access to GDI+ basic graphics functionality. More advanced functionality is provided in the System::Drawing::Drawing2D, System::Drawing::Imaging, and System::Drawing::Text namespaces.
namespace Drawing {
/// @cond
class Bmp;
class Jpg;
class Png;
class Gif;
/// @endcond
/// @brief An abstract base class that provides functionality for the System::Drawing::Bitmap and System::Drawing::Imaging::Metafile descended classes.
/// @remarks To draw an System::Drawing::Image on a Windows Form, you should use one of the Graphics::DrawImage(Image, Point) methods.
/// @par Library
/// Switch.System.Drawing
class system_drawing_export_ Image : public object, public IComparable {
public:
/// @cond
Image();
Image(const Image& image);
/// @endcond
property_<const byte*, readonly_> Data {
get_ {return this->rawData.Data();}
};
static property_<Image, readonly_> None;
/// @brief Gets attribute flags for the pixel data of this System::Drawing::Image.
/// @return a value of System::Drawing::Imaging::ImageFlags.
/// @remarks The integer value returned from this method will correspond to a sum of System::Drawing::Imaging::ImageFlags, as described in the following table.
/// | ImageFlag value | Integer representation |
/// |------------------------------|------------------------|
/// | ImageFlagsNone | 0 |
/// | IImageFlagsScalable | 1 |
/// | IImageFlagsHasAlpha | 2 |
/// | IImageFlagsHasTranslucent | 4 |
/// | IImageFlagsPartiallyScalable | 8 |
/// | IImageFlagsColorSpaceRGB | 16 |
/// | IImageFlagsColorSpaceCMYK | 32 |
/// | IImageFlagsColorSpaceGRAY | 64 |
/// | IImageFlagsColorSpaceYCBCR | 128 |
/// | IImageFlagsColorSpaceYCCK | 256 |
/// | IImageFlagsHasRealDPI | 4096 |
/// | IImageFlagsHasRealPixelSize | 8192 |
/// | IImageFlagsReadOnly | 65536 |
/// | IImageFlagsCaching | 131072 |
/// @remarks For example, if the Image::Flags property for an image returned 77960, the System::Drawing::Imaging::ImageFlags for the image would be System::Drawing::Imaging::ImageFlags::ReadOnly, System::Drawing::Imaging::ImageFlags::HasRealDpi, System::Drawing::Imaging::ImageFlags::HasRealPixelSize, System::Drawing::Imaging::ImageFlags::ColorSpaceYcbcr, and System::Drawing::Imaging::ImageFlags::PartiallyScalable.
property_<int32, readonly_> Flags {
get_ {return (int32)this->flags;}
};
property_<Array<System::Guid>, readonly_> FrameDimensionsList {
get_ {return frameDimensionList;}
};
property_<int32, readonly_> Height {
get_ {return this->size.Height();}
};
property_<float, readonly_> HorizontalResolution {
get_ {return this->horizontalResolution;}
};
property_<Imaging::ColorPalette> Palette {
get_ {return this->palette;},
set_ {this->palette = value;}
};
property_<SizeF, readonly_> PhysicalDimension {
get_ {return SizeF(Convert::ToSingle(this->size.Width()) * this->horizontalResolution, Convert::ToSingle(this->size.Height()) * this->verticalResolution);}
};
property_<Imaging::PixelFormat, readonly_> PixelFormat {
get_ {return this->pixelFormat;}
};
property_<Imaging::ImageFormat, readonly_> RawFormat {
get_ {return this->rawFormat;}
};
property_<System::Drawing::Size, readonly_> Size {
get_ {return this->size;}
};
property_<const object&> Tag {
get_->const object& {return *this->tag;},
set_ {this->tag = &value;}
};
property_<float, readonly_> VerticalResolution {
get_ {return this->verticalResolution;}
};
property_<int32, readonly_> Width {
get_ {return this->size.Width();}
};
static refptr<Image> FromFile(const string& fileName);
template<typename TStream>
static refptr<Image> FromStream(const TStream& stream) {return new Image(stream.template MemberwiseClone<TStream>().template As<TStream>());}
static refptr<Image> FromStream(refptr<System::IO::Stream> stream) {return new Image(stream);}
static refptr<Image> FromData(const char* data[]);
/// @brief Compares this instance with another IComparable; non-Image objects sort after this one.
virtual int32 CompareTo(const IComparable& obj) const {
  if (!is<Image>(obj))
    return 1;
  return CompareTo(as<Image>(obj));
}
/// @brief Compares the identities (rawData addresses) of the two images.
/// Bug fix: the original compared this->rawData's address with itself, so it always returned 0.
virtual int32 CompareTo(const Image& value) const {return IntPtr((intptr)&rawData).CompareTo(IntPtr((intptr)&value.rawData));}
/// @brief Identity hash based on the rawData address, consistent with CompareTo.
virtual int32 GetHashCode() const {return IntPtr((intptr)&rawData).GetHashCode();}
protected:
/// @cond
friend Bmp;
friend Jpg;
friend Png;
friend Gif;
friend Resources::Image;
Image(const string& fileName);
Image(refptr<System::IO::Stream> stream);
void ReadStream(refptr<System::IO::Stream> stream);
void ReadWindowsBmp(refptr<System::IO::Stream> stream);
Imaging::ImageFlags flags;
Array<Guid> frameDimensionList;
float horizontalResolution = 1.0f;
Imaging::PixelFormat pixelFormat;
Imaging::ColorPalette palette;
Array<byte> rawData;
Imaging::ImageFormat rawFormat;
System::Drawing::Size size;
const object* tag = null;
float verticalResolution = 1.0f;
/// @endcond
};
}
}
}
|
amichard/tfrs | backend/api/urls.py | """
REST API Documentation for the NRS TFRS Credit Trading Application
The Transportation Fuels Reporting System is being designed to streamline
compliance reporting for transportation fuel suppliers in accordance with
the Renewable & Low Carbon Fuel Requirements Regulation.
OpenAPI spec version: v1
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from django.conf.urls import url, include
from rest_framework.documentation import include_docs_urls
from rest_framework.routers import DefaultRouter
from api.viewsets.Autocomplete import AutocompleteViewSet
from api.viewsets.Autosave import AutosaveViewSet
from api.viewsets.ComplianceReport import ComplianceReportViewSet
from api.viewsets.Document import DocumentViewSet
from api.viewsets.DocumentComments import DocumentCommentsViewSet
from api.viewsets.FuelCode import FuelCodeViewSet
from api.viewsets.Notification import NotificationViewSet
from tfrs.settings import DOCUMENTS_API, FUEL_CODES_API, \
CREDIT_CALCULATION_API, TESTING, COMPLIANCE_REPORTING_API, \
EXCLUSION_REPORTS_API
from .viewsets.CompliancePeriod import CompliancePeriodViewSet
from .viewsets.CreditTrade import CreditTradeViewSet
from .viewsets.CreditTradeHistory import CreditTradeHistoryViewSet
from .viewsets.Organization import OrganizationViewSet
from .viewsets.Role import RoleViewSet
from .viewsets.SigningAuthorityAssertion \
import SigningAuthorityAssertionViewSet
from .viewsets.SigningAuthorityConfirmation \
import SigningAuthorityConfirmationViewSet
from .viewsets.User import UserViewSet
from .viewsets.CreditTradeComments import CreditTradeCommentsViewSet
from .viewsets.CarbonIntensityLimit import CarbonIntensityLimitViewSet
from .viewsets.CreditCalculation import CreditCalculationViewSet
from .viewsets.DefaultCarbonIntensity import DefaultCarbonIntensityViewSet
from .viewsets.EnergyDensity import EnergyDensityViewSet
from .viewsets.EnergyEffectivenessRatio import EnergyEffectivenessRatioViewSet
from .viewsets.PertroleumCarbonIntensity import PetroleumCarbonIntensityViewSet
from .viewsets.ExpectedUse import ExpectedUseViewSet
from .viewsets.FuelClass import FuelClassViewSet
from .viewsets.NotionalTransferType import NotionalTransferTypeViewSet
from .viewsets.TransactionType import TransactionTypeViewSet
# Create a router and register our views with it.
ROUTER = DefaultRouter(trailing_slash=False)
ROUTER.register(r'compliance_periods', CompliancePeriodViewSet)
ROUTER.register(r'credit_trades', CreditTradeViewSet)
ROUTER.register(r'credit_trades_history', CreditTradeHistoryViewSet)
ROUTER.register(r'comments', CreditTradeCommentsViewSet)
ROUTER.register(r'organizations', OrganizationViewSet)
ROUTER.register(r'roles', RoleViewSet)
ROUTER.register(r'signing_authority_assertions',
SigningAuthorityAssertionViewSet)
ROUTER.register(r'signing_authority_confirmations',
SigningAuthorityConfirmationViewSet)
ROUTER.register(r'users', UserViewSet)
ROUTER.register(r'notifications',
NotificationViewSet,
base_name='notification')
ROUTER.register(r'autocomplete',
AutocompleteViewSet,
base_name='autocomplete')
ROUTER.register(r'autosave',
AutosaveViewSet,
base_name='autosave')
if DOCUMENTS_API['ENABLED'] or TESTING:
ROUTER.register(r'documents', DocumentViewSet)
ROUTER.register(r'documents_comments', DocumentCommentsViewSet)
if FUEL_CODES_API['ENABLED'] or TESTING:
ROUTER.register(r'fuel_codes', FuelCodeViewSet)
if CREDIT_CALCULATION_API['ENABLED'] or TESTING:
ROUTER.register(
r'credit_calculation/carbon_intensity_limits',
CarbonIntensityLimitViewSet
)
ROUTER.register(
r'credit_calculation/default_carbon_intensities',
DefaultCarbonIntensityViewSet
)
ROUTER.register(
r'credit_calculation/energy_densities',
EnergyDensityViewSet
)
ROUTER.register(
r'credit_calculation/energy_effectiveness_ratios',
EnergyEffectivenessRatioViewSet
)
ROUTER.register(
r'credit_calculation/expected_uses',
ExpectedUseViewSet
)
ROUTER.register(
r'credit_calculation/fuel_classes',
FuelClassViewSet
)
ROUTER.register(
r'credit_calculation/notional_transfer_types',
NotionalTransferTypeViewSet
)
ROUTER.register(
r'credit_calculation/petroleum_carbon_intensities',
PetroleumCarbonIntensityViewSet
)
ROUTER.register(
r'credit_calculation/fuel_types',
CreditCalculationViewSet
)
if COMPLIANCE_REPORTING_API['ENABLED'] or TESTING:
ROUTER.register(r'compliance_reports', ComplianceReportViewSet)
if EXCLUSION_REPORTS_API['ENABLED'] or TESTING:
ROUTER.register(
r'exclusion_reports/transaction_types',
TransactionTypeViewSet
)
urlpatterns = [
    # Swagger documentation
    url(r'^doc/', include_docs_urls(title='TFRS API Documentation')),
    url(r'^', include(ROUTER.urls))
]
# NOTE(review): the router URLs are registered twice -- once via include()
# above and again by this concatenation. This looks redundant; confirm no
# resolution/namespace behavior depends on it before removing either one.
urlpatterns += ROUTER.urls
|
CorundumGames/AshPlusPlus | src/ash/fsm/EngineStateMachine.cpp | #include "ash/fsm/EngineStateMachine.hpp"
// Constructs the state machine for the given engine.
// NOTE(review): the engine argument is currently ignored -- presumably it
// should be stored for later use; confirm against the class header.
ash::fsm::EngineStateMachine::EngineStateMachine(const Engine& engine)
{
    //ctor
}
// Destructor; no resources are released here.
ash::fsm::EngineStateMachine::~EngineStateMachine()
{
    //dtor
}
|
benoitongit/android-open-street-map | OpenStreetMapLibrary/src/com/android/lib/map/osm/GeoPoint.java | package com.android.lib.map.osm;
import java.io.Serializable;
/**
 * A geographic point stored in microdegrees (degrees * 1E6), as commonly
 * used by map APIs. On assignment, latitude is clamped to [-90, 90] degrees
 * and longitude is wrapped into [-180, 180] degrees.
 */
public class GeoPoint implements Serializable {

    private static final long serialVersionUID = -6241356443051839339L;

    /** Latitude in microdegrees (degrees * 1E6). */
    private int latitudeE6 = 0;
    /** Longitude in microdegrees (degrees * 1E6). */
    private int longitudeE6 = 0;

    /** Creates a point at (0, 0). */
    public GeoPoint() {
    }

    /**
     * Creates a point from microdegree coordinates.
     *
     * @param latitudeE6  latitude in microdegrees; clamped to +/-90 degrees
     * @param longitudeE6 longitude in microdegrees; wrapped into +/-180 degrees
     */
    public GeoPoint(int latitudeE6, int longitudeE6) {
        setLatitudeE6(latitudeE6);
        setLongitudeE6(longitudeE6);
    }

    public int getLatitudeE6() {
        return latitudeE6;
    }

    /**
     * Sets the latitude, clamping it to the [-90, 90] degree range.
     * Bug fix: Math.round replaces the former plain (int) cast. The
     * microdegrees -> degrees -> microdegrees round trip goes through
     * doubles, and truncation could lose one microdegree (x.999... -> x).
     */
    public void setLatitudeE6(int latitudeE6) {
        this.latitudeE6 = (int) Math.round(normalizeLatitude(latitudeE6 / 1E6));
    }

    public int getLongitudeE6() {
        return longitudeE6;
    }

    /** Sets the longitude, wrapping it into the [-180, 180] degree range (rounded, see setLatitudeE6). */
    public void setLongitudeE6(int longitudeE6) {
        this.longitudeE6 = (int) Math.round(normalizeLongitude(longitudeE6 / 1E6));
    }

    /**
     * Clamps a latitude given in degrees to [-90, 90] and scales it to
     * microdegrees. NaN input is passed through (scales to NaN).
     *
     * @param latitude latitude in degrees
     * @return latitude in microdegrees
     */
    public double normalizeLatitude(double latitude) {
        if (!Double.isNaN(latitude)) {
            if (latitude < -90)
                return (double) (-90 * 1E6);
            else if (latitude > 90)
                return (double) (90 * 1E6);
        }
        return latitude * 1E6;
    }

    /**
     * Wraps a longitude given in degrees into [-180, 180] and scales it to
     * microdegrees. NaN input is passed through (scales to NaN).
     *
     * @param longitude longitude in degrees
     * @return longitude in microdegrees
     */
    public double normalizeLongitude(double longitude) {
        if (!Double.isNaN(longitude)) {
            if (longitude < -180)
                return ((double) ((longitude - 180) % 360) + 180) * 1E6;
            else if (longitude > 180)
                return ((double) ((longitude + 180) % 360) - 180) * 1E6;
        }
        return longitude * 1E6;
    }
}
oxanamenushina/job4j | chapter_011/src/main/java/ru/job4j/crudservlet/UserUpdateServlet.java | package ru.job4j.crudservlet;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import java.io.IOException;
import java.io.PrintWriter;
/**
* UserUpdateServlet.
*
* @author <NAME> (<EMAIL>).
* @version $Id$
* @since 0.1
*/
public class UserUpdateServlet extends HttpServlet {
/**
* The instance of ValidateService class.
*/
private final Validate logic = ValidateService.getInstance();
@Override
protected void doGet(HttpServletRequest req, HttpServletResponse resp) throws ServletException, IOException {
resp.setContentType("text/html");
User user = logic.findById(Integer.parseInt(req.getParameter("id")));
PrintWriter pw = resp.getWriter();
pw.println(this.getText(req.getContextPath(), user));
}
@Override
protected void doPost(HttpServletRequest req, HttpServletResponse resp) throws ServletException, IOException {
resp.setContentType("text/html");
String id = req.getParameter("id");
logic.update(new User(
id == null ? -1 : Integer.parseInt(id),
req.getParameter("name"),
req.getParameter("login"),
req.getParameter("email")
));
resp.sendRedirect(String.format("%s/users/userlist/UserList.jsp", req.getContextPath()));
}
private String getText(String path, User user) {
StringBuilder sb = new StringBuilder();
sb.append("<!DOCTYPE html>"
+ "<html lang=\"en\">"
+ "<head>"
+ " <meta charset=\"UTF-8\">"
+ " <title>Create user</title>"
+ "</head>"
+ "<body>"
+ "id = " + user.getId() + "<br><br>"
+ "<form action='" + path + "/edit' method='post'>"
+ " name: <input type='text' value='" + user.getName() + "' name='name'/><br><br>"
+ " login: <input type='text' value='" + user.getLogin() + "' name='login'/><br><br>"
+ " email: <input type='text' value='" + user.getEmail() + "' name='email'/><br><br>"
+ "<input type='hidden' name='id' value='" + user.getId() + "'/>"
+ "<input type='submit' value='update'/>"
+ "</form>"
+ "</body>"
+ "</html>");
return sb.toString();
}
} |
p9c/matrjoska | pkg/blockchain/example_test.go | package blockchain_test
import (
"fmt"
bits2 "github.com/p9c/matrjoska/pkg/bits"
"github.com/p9c/matrjoska/pkg/block"
"log"
"math/big"
"os"
"path/filepath"
"github.com/p9c/matrjoska/pkg/blockchain"
"github.com/p9c/matrjoska/pkg/chaincfg"
"github.com/p9c/matrjoska/pkg/database"
_ "github.com/p9c/matrjoska/pkg/database/ffldb"
)
// This example demonstrates how to create a new chain instance and use ProcessBlock to attempt to add a block to the
// chain. As the package overview documentation describes, this includes all of the Bitcoin consensus rules. This
// example intentionally attempts to insert a duplicate genesis block to illustrate how an invalid block is handled.
func ExampleBlockChain_ProcessBlock() {
	// Create a new database to store the accepted blocks into. Typically this would be opening an existing database and
	// would not be deleting and creating a new database like this, but it is done here so this is a complete working
	// example and does not leave temporary files laying around.
	dbPath := filepath.Join(os.TempDir(), "exampleprocessblock")
	_ = os.RemoveAll(dbPath)
	db, e := database.Create("ffldb", dbPath, chaincfg.MainNetParams.Net)
	if e != nil {
		log.Printf("Failed to create database: %v\n", e)
		return
	}
	// Clean up: remove the temporary database directory and close the handle
	// when the example exits.
	// NOTE(review): E.Chk is not defined in this file — presumably a
	// package-level error-check/log helper; confirm it is in scope.
	defer func() {
		if e = os.RemoveAll(dbPath); E.Chk(e) {
		}
	}()
	defer func() {
		if e = db.Close(); E.Chk(e) {
		}
	}()
	// Create a new BlockChain instance using the underlying database for the main bitcoin network. This example does
	// not demonstrate some of the other available configuration options such as specifying a notification callback and
	// signature cache. Also, the caller would ordinarily keep a reference to the median time source and add time values
	// obtained from other peers on the network so the local time is adjusted to be in agreement with other peers.
	chain, e := blockchain.New(
		&blockchain.Config{
			DB:          db,
			ChainParams: &chaincfg.MainNetParams,
			TimeSource:  blockchain.NewMedianTime(),
		},
	)
	if e != nil {
		log.Printf("Failed to create chain instance: %v\n", e)
		return
	}
	// Process a block. For this example, we are going to intentionally cause an error by trying to process the genesis
	// block which already exists, so the expected output is the "already have block" failure below.
	genesisBlock := block.NewBlock(chaincfg.MainNetParams.GenesisBlock)
	var isMainChain bool
	var isOrphan bool
	isMainChain, isOrphan, e = chain.ProcessBlock(
		0, genesisBlock,
		blockchain.BFNone, 0,
	)
	if e != nil {
		log.Printf("Failed to process block: %v\n", e)
		return
	}
	log.Printf("Block accepted. Is it on the main chain?: %v", isMainChain)
	log.Printf("Block accepted. Is it an orphan?: %v", isOrphan)
	// Output:
	// Failed to process block: already have block 000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f
}
// This example demonstrates how to convert the compact "bits" in a block header which represent the target difficulty
// to a big integer and display it using the typical hex notation.
func ExampleCompactToBig() {
	// Convert the bits from block 300000 in the main block chain.
	bits := uint32(419465580)
	targetDifficulty := bits2.CompactToBig(bits)
	// Display it in hex, zero-padded to the full 256-bit width.
	fmt.Printf("%064x\n", targetDifficulty.Bytes())
	// Output:
	// 0000000000000000896c00000000000000000000000000000000000000000000
}
// This example demonstrates how to convert a target difficulty into the compact "bits" in a block header which
// represent that target difficulty .
func ExampleBigToCompact() {
	// Convert the target difficulty from block 300000 in the main block
	// chain to compact form.
	t := "0000000000000000896c00000000000000000000000000000000000000000000"
	targetDifficulty, success := new(big.Int).SetString(t, 16)
	if !success {
		fmt.Println("invalid target difficulty")
		return
	}
	// The compact form is the 32-bit "bits" encoding used in block headers.
	bits := bits2.BigToCompact(targetDifficulty)
	fmt.Println(bits)
	// Output:
	// 419465580
}
|
ankitabantey/covidbubble | node_modules/@chakra-ui/styled-system/dist/esm/config/flexbox.js | import { createParser, system } from "@styled-system/core";
// Styled-system config for flexbox props: `true` passes the prop straight
// through to the identically named CSS property; object entries customize the
// target CSS property and/or the theme scale used to resolve values.
var config = {
  alignItems: true,
  alignContent: true,
  justifyItems: true,
  justifyContent: true,
  flexWrap: true,
  flexDirection: true,
  flex: true,
  flexGrow: true,
  flexShrink: true,
  // flexBasis values are resolved against the theme's "sizes" scale.
  flexBasis: {
    property: "flexBasis",
    scale: "sizes"
  },
  justifySelf: true,
  alignSelf: true,
  order: true,
  // Shorthand alias: flexDir -> flexDirection.
  flexDir: {
    property: "flexDirection"
  }
};
/**
 * Types for flexbox related CSS properties
 */
export var flexbox = system(config);
export var flexboxParser = createParser(config);
//# sourceMappingURL=flexbox.js.map
asugbdp9w7gf-p29q3823e872t3o29387fg298d/rip-atlas | src/main/java/best/reich/ingros/mixin/impl/MixinAbstractClientPlayer.java | package best.reich.ingros.mixin.impl;
import best.reich.ingros.IngrosWare;
import best.reich.ingros.events.render.CapeLocationEvent;
import com.mojang.authlib.GameProfile;
import net.minecraft.client.entity.AbstractClientPlayer;
import net.minecraft.util.ResourceLocation;
import net.minecraft.world.World;
import org.spongepowered.asm.mixin.Mixin;
import org.spongepowered.asm.mixin.injection.At;
import org.spongepowered.asm.mixin.injection.Inject;
import org.spongepowered.asm.mixin.injection.callback.CallbackInfoReturnable;
@Mixin(AbstractClientPlayer.class)
public class MixinAbstractClientPlayer extends AbstractClientPlayer {
    public MixinAbstractClientPlayer(World worldIn, GameProfile playerProfile) {
        super(worldIn, playerProfile);
    }

    /**
     * Fires a {@link CapeLocationEvent} whenever the cape texture location is
     * queried.
     *
     * NOTE(review): the injection is declared cancellable and the fired event
     * is captured, but {@code cir} is never used and {@code event} is
     * discarded — it looks like a call such as cir.setReturnValue(...) was
     * intended here. TODO confirm against the CapeLocationEvent API.
     */
    @Inject(method = "getLocationCape",at = @At("HEAD"),cancellable = true)
    public void getLocationCape(CallbackInfoReturnable<ResourceLocation> cir) {
        CapeLocationEvent event = IngrosWare.INSTANCE.bus.fireEvent(new CapeLocationEvent(this));
    }
}
|
ResonanceGroup/FEMM | femm/GeneralPrefs.h | #if !defined(AFX_GENERALPREFS_H__9C2D61CC_B458_496C_9A23_BD2CD5D22AC8__INCLUDED_)
#define AFX_GENERALPREFS_H__9C2D61CC_B458_496C_9A23_BD2CD5D22AC8__INCLUDED_
#if _MSC_VER > 1000
#pragma once
#endif // _MSC_VER > 1000
// GeneralPrefs.h : header file
//
/////////////////////////////////////////////////////////////////////////////
// CGeneralPrefs dialog
// Preferences dialog backed by IDD_GENPREFS. Settings are read with
// ScanPrefs() and persisted with WritePrefs(). The //{{...}} blocks are
// ClassWizard-generated and their markers must be kept intact.
class CGeneralPrefs : public CDialog
{
// Construction
public:
	CGeneralPrefs(CWnd* pParent = NULL);   // standard constructor
	int s_defdoc;       // default-document selection (presumably the m_defdoc combo index) — TODO confirm
	void WritePrefs();  // persist the current dialog settings
	void ScanPrefs();   // load stored settings into the dialog members
// Dialog Data
	//{{AFX_DATA(CGeneralPrefs)
	enum { IDD = IDD_GENPREFS };
	CComboBox	m_defdoc;
	BOOL	m_def_lua_console;
	BOOL	m_def_xyplot;
	BOOL	m_def_show_output_window;
	BOOL	m_def_smartmesh;
	//}}AFX_DATA
// Overrides
	// ClassWizard generated virtual function overrides
	//{{AFX_VIRTUAL(CGeneralPrefs)
	public:
	virtual BOOL PreTranslateMessage(MSG* pMsg);
	protected:
	virtual void DoDataExchange(CDataExchange* pDX);    // DDX/DDV support
	//}}AFX_VIRTUAL
// Implementation
protected:
	// Generated message map functions
	//{{AFX_MSG(CGeneralPrefs)
	virtual BOOL OnInitDialog();
	//}}AFX_MSG
	DECLARE_MESSAGE_MAP()
public:
};
//{{AFX_INSERT_LOCATION}}
// Microsoft Visual C++ will insert additional declarations immediately before the previous line.
#endif // !defined(AFX_GENERALPREFS_H__9C2D61CC_B458_496C_9A23_BD2CD5D22AC8__INCLUDED_)
|
CBorum/go-saxo | trading/getallocationkeys_response.go | package trading
// GetAllocationKeysResponse models the JSON body returned by Saxo's
// "get allocation keys" endpoint; Data holds one entry per allocation key.
type GetAllocationKeysResponse struct {
	Data []struct {
		AllocationKeyID   string  `json:"AllocationKeyId"`
		AllocationKeyName string  `json:"AllocationKeyName"`
		AssetType         string  `json:"AssetType"`
		BuySell           string  `json:"BuySell"`
		ExternalReference string  `json:"ExternalReference"`
		OwnerAccountKey   string  `json:"OwnerAccountKey"`
		Participants      float64 `json:"Participants"`
		Status            string  `json:"Status"`
		Uic               float64 `json:"Uic"`
	} `json:"Data"`
}
|
bowlofstew/orb | samples/async/src/main/java/com/ea/orbit/samples/async/Main.java | /*
Copyright (C) 2015 Electronic Arts Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of Electronic Arts, Inc. ("EA") nor the names of
its contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY ELECTRONIC ARTS AND ITS CONTRIBUTORS "AS IS" AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL ELECTRONIC ARTS OR ITS CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package com.ea.orbit.samples.async;
import com.ea.orbit.actors.Actor;
import com.ea.orbit.actors.Stage;
import com.ea.orbit.async.Async;
import com.ea.orbit.async.Await;
import com.ea.orbit.concurrent.Task;
import java.io.IOException;
import static com.ea.orbit.async.Await.await;
public class Main
{
    // Runs before any @Async method is first used; presumably installs the
    // orbit-async instrumentation — see com.ea.orbit.async.Await. TODO confirm.
    static { Await.init(); }

    /**
     * Sends three sequential sayHello messages to the same actor, awaiting
     * each result, and returns the concatenated replies as a Task.
     */
    @Async
    public static Task<String> asyncMethod()
    {
        Hello helloActor = Actor.getReference(Hello.class, "0");
        String h1 = await(helloActor.sayHello("hello"));
        String h2 = await(helloActor.sayHello("hi"));
        String h3 = await(helloActor.sayHello("hey"));
        return Task.fromValue(h1 + " " + h2 + " " + h3);
    }

    /**
     * Starts a local stage, invokes the async method, prints whether it has
     * already completed, blocks for the final result and shuts down.
     */
    public static void main(String[] args) throws IOException
    {
        Stage stage = new Stage();
        stage.setClusterName("helloWorldCluster");
        stage.start().join();
        Task<String> response = asyncMethod();
        System.out.println("IsDone: " + response.isDone());
        System.out.println(response.join());
        stage.stop().join();
        System.exit(0);
    }
}
|
BarryGr/smockin | src/main/java/com/smockin/admin/dto/response/FtpMockResponseDTO.java | package com.smockin.admin.dto.response;
import com.smockin.admin.dto.FtpMockDTO;
import com.smockin.admin.persistence.enums.RecordStatusEnum;
import java.util.Date;
/**
* Created by mgallina.
*/
public class FtpMockResponseDTO extends FtpMockDTO {

    // External identifier of the mock record.
    private String extId;
    // Timestamp of when the mock record was created.
    private Date dateCreated;

    /** No-arg constructor. */
    public FtpMockResponseDTO() {
    }

    /** Full constructor: base mock fields plus the response-only fields. */
    public FtpMockResponseDTO(final String extId, final String name, final RecordStatusEnum status, final Date dateCreated) {
        super(name, status);
        this.extId = extId;
        this.dateCreated = dateCreated;
    }

    public String getExtId() {
        return extId;
    }

    public void setExtId(String extId) {
        this.extId = extId;
    }

    public Date getDateCreated() {
        return dateCreated;
    }

    public void setDateCreated(Date dateCreated) {
        this.dateCreated = dateCreated;
    }
}
|
christopher-burke/warmups | python/codingbat/src/sleepin.py | #!/usr/bin/env python3
"""sleep_in
The parameter weekday is True if it is a weekday, and
the parameter vacation is True if we are on vacation.
We sleep in if it is not a weekday or we're on vacation.
Return True if we sleep in.
source: https://codingbat.com/prob/p173401
"""
def sleep_in(weekday: bool, vacation: bool) -> bool:
    """Return True if we can sleep in.

    We sleep in when it is not a weekday or when we are on vacation,
    expressed directly as a single boolean expression.
    """
    return not weekday or vacation
if __name__ == "__main__":
print(sleep_in(False, False))
print(sleep_in(True, False))
print(sleep_in(False, True))
|
tenmark86/alx-low_level_programming | 0x04-more_functions_nested_loops/7-print_diagonal.c | <filename>0x04-more_functions_nested_loops/7-print_diagonal.c
#include "main.h"
/**
* print_diagonal - a function that draws a diagonal line on the terminal
* @n: input number of times '\' should be printed
* Return: a diagonal
*/
/**
 * print_diagonal - draws a diagonal line of backslashes on the terminal
 * @n: number of '\' characters (rows) to print
 *
 * Return: nothing; prints only a newline when @n is zero or negative
 */
void print_diagonal(int n)
{
	int row, col;

	if (n <= 0)
	{
		_putchar('\n');
		return;
	}
	for (row = 0; row < n; row++)
	{
		/* row leading spaces shift each backslash one column right */
		col = 0;
		while (col < row)
		{
			_putchar(' ');
			col++;
		}
		_putchar('\\');
		_putchar('\n');
	}
}
|
ResearchHub/ResearchHub-Backend-Open | src/researchhub_case/models.py | <filename>src/researchhub_case/models.py
# flake8: noqa
from .related_models.researchhub_case_abstract_model import (
AbstractResearchhubCase
)
from .related_models.author_claim_case_model import AuthorClaimCase
|
starburst997/nme | project/include/xcompile/netdnet/dnetdb.h | /* DNLIB FUNCTIONS PROTOTYPING */
#ifndef NETDNET_DNLIB_H
#define NETDNET_DNLIB_H
#ifdef __cplusplus
extern "C"
{
#endif
// forward declaration. This is in <netdnet/dn.h>.
struct sockaddr_dn;
/* Node-database entry returned by getnodebyname()/getnodebyaddr(). */
struct nodeent {
	char *n_name;                 /* name of node */
	unsigned short n_addrtype;    /* node address type */
	unsigned short n_length;      /* length of address */
	unsigned char *n_addr;        /* address */
	unsigned char *n_params;      /* node parameters */
	unsigned char n_reserved[16]; /* reserved */
};
/* DECnet database & utility functions on libdnet */
extern struct dn_naddr *dnet_addr(char *cp);
extern int dnet_conn(char *node, char *object, int type,
unsigned char *opt_out, int opt_outl,
unsigned char *opt_in, int *opt_inl);
extern char *dnet_htoa(struct dn_naddr *add);
extern char *dnet_ntoa(struct dn_naddr *add);
extern struct dn_naddr *getnodeadd(void);
extern struct nodeent *getnodebyaddr(const char *addr, int len, int type);
extern struct nodeent *getnodebyname(const char *name);
extern int dnet_setobjhinum_handling(int handling, int min);
extern int getobjectbyname(const char * name);
extern int getobjectbynumber(int number, char * name, size_t name_len);
extern int dnet_checkobjectnumber(int num);
extern char *getexecdev(void);
extern void setnodeent(int);
extern void *dnet_getnode(void);
extern char *dnet_nextnode(void *);
extern void dnet_endnode(void *);
extern int dnet_recv(int s, void *buf, int len, unsigned int flags);
extern int dnet_pton(int af, const char *src, void *addr);
extern const char *dnet_ntop(int af, const void *addr, char *str, size_t len);
/* DECnet daemon functions in libdnet_daemon */
extern int dnet_daemon(int object, char *named_object,
int verbosity, int do_fork);
extern void dnet_accept(int sockfd, short status, char *data, int len);
extern void dnet_reject(int sockfd, short status, char *data, int len);
extern void dnet_set_optdata(char *data, int len);
extern char *dnet_daemon_name(void);
extern int getnodename(char *, size_t);
extern int setnodename(char *, size_t);
extern void init_daemon_logging(char *, char);
extern void dnetlog(int level, char *fmt, ...);
#define DNETLOG(x) dnetlog x
int dnet_priv_check(const char * file, const char * proc,
const struct sockaddr_dn * local, const struct sockaddr_dn * remote);
/* Used by dnet_ntop/dnet_pton */
#define DNET_ADDRSTRLEN 8
/*
* Define DECnet object numerically.
*/
#define DNOBJECT_FAL (getobjectbyname("FAL")) /* file access listener */
#define DNOBJECT_NICE (getobjectbyname("NICE")) /* NICE */
#define DNOBJECT_DTERM (getobjectbyname("DTERM")) /* DECnet remote terminals */
#define DNOBJECT_MIRROR (getobjectbyname("MIRROR")) /* DECnet mirror */
#define DNOBJECT_EVR (getobjectbyname("EVR")) /* DECnet event receiver */
#define DNOBJECT_MAIL11 (getobjectbyname("MAIL11")) /* mail service */
#define DNOBJECT_PHONE (getobjectbyname("PHONE")) /* DECnet phone utility */
#define DNOBJECT_CTERM (getobjectbyname("CTERM")) /* DECnet command terminals */
#define DNOBJECT_DTR (getobjectbyname("DTR")) /* DECnet test receiver */
/* Config for dnet_checkobjectnumber()/getobjectbyname()/getobjectbynumber() */
#define DNOBJ_SEARCH_ENV "DECNET_OBJPROTO"
#define DNOBJ_SEARCH_DEF "decnet"
#define DNOBJ_HINUM_ENV "DECNET_OBJHINUM"
#define DNOBJ_HINUM_DEF "error"
#define DNOBJHINUM_RESET -128
#define DNOBJHINUM_ERROR -1
#define DNOBJHINUM_RETURN 0
#define DNOBJHINUM_ZERO 1
#define DNOBJHINUM_ALWAYSZERO 2
/* Connect/Reject codes. These are my symbolic names, not DEC's */
#define DNSTAT_REJECTED 0 /* Rejected by object */
#define DNSTAT_RESOURCES 1 /* No resources available */
#define DNSTAT_NODENAME 2 /* Unrecognised node name */
#define DNSTAT_LOCNODESHUT 3 /* Local Node is shut down */
#define DNSTAT_OBJECT 4 /* Unrecognised object */
#define DNSTAT_OBJNAMEFORMAT 5 /* Invalid object name format */
#define DNSTAT_TOOBUSY 6 /* Object too busy */
#define DNSTAT_NODENAMEFORMAT 10 /* Invalid node name format */
#define DNSTAT_REMNODESHUT 11 /* Remote Node is shut down */
#define DNSTAT_ACCCONTROL 34 /* Access control rejection */
#define DNSTAT_NORESPONSE 38 /* No response from object */
#define DNSTAT_NODEUNREACH 39 /* Node Unreachable */
/* Disconnect notification errors */
#define DNSTAT_MANAGEMENT 8 /* Abort by management/third party */
#define DNSTAT_ABORTOBJECT 9 /* Remote object aborted the link */
#define DNSTAT_FAILED 38 /* Node or object failed */
#define DNSTAT_NODERESOURCES 32 /* Node does not have sufficient resources for a new link */
#define DNSTAT_OBJRESOURCES 33 /* Object does not have sufficient resources for a new link */
#define DNSTAT_BADACCOUNT 36 /* The Account field in unacceptable */
#define DNSTAT_TOOLONG 43 /* A field in the access control message was too long */
/* We need this for 'Eduardo' kernels */
#ifndef MSG_EOR
#define MSG_EOR 0x80
#endif
#ifdef __cplusplus
}
#endif
#endif
|
jcoderltd/odin | odin-core/src/main/java/io/jcoder/odin/graph/DependencyProvider.java | /*
* Copyright 2019 JCoder Ltd.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.jcoder.odin.graph;
import java.util.Collection;
import io.jcoder.odin.InjectionContext;
import io.jcoder.odin.registration.InjectionRegistration;
/**
 * Strategy for computing the dependencies of an {@link InjectionRegistration}
 * within a given {@link InjectionContext}.
 *
 * @author <NAME>
 */
@FunctionalInterface
public interface DependencyProvider {

    /**
     * Provides a collection of dependencies for the given registration.
     *
     * @param context
     *            the {@link InjectionContext} linked to the dependencies
     * @param registration
     *            the registration to analyze.
     * @return a collection of dependencies for the given registration. If no dependencies exists for this registration,
     *         then an empty collection must be returned.
     */
    public Collection<InjectionRegistration<?>> dependencies(InjectionContext context, InjectionRegistration<?> registration);
}
|
k000kc/java-a-to-z | Chapter_4/Lesson-4.3.1/src/test/java/ru.apetrov/UpdateFoodStorageTest/ControllQualityTest.java | <gh_stars>1-10
package ru.apetrov.UpdateFoodStorageTest;
import org.junit.Before;
import org.junit.Test;
import ru.apetrov.UpdateFoodStorage.ControllQualityExpansion;
import ru.apetrov.UpdateFoodStorage.ControllQuality;
import ru.apetrov.UpdateFoodStorage.Products.Food;
import ru.apetrov.UpdateFoodStorage.Products.ReproductFood;
import ru.apetrov.UpdateFoodStorage.Storages.BaseStorage;
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.Date;
import static org.hamcrest.core.Is.is;
import static org.junit.Assert.assertThat;
/**
* Created by Andrey on 17.01.2017.
*/
public class ControllQualityTest {
/**
* Класс контроля качества.
*/
private ControllQuality controllQuality;
/**
* Срок годности.
*/
private Date expaireDate;
/**
* Дата изготовления.
*/
private Date createDate;
/**
* Текущая дата.
*/
private Date currentDate;
/**
* Продукт.
*/
private Food food;
/**
* Инициализация.
* @throws ParseException exeption.
*/
@Before
public void init() throws ParseException {
controllQuality = new ControllQuality(3);
expaireDate = new SimpleDateFormat("yyyy-MM-dd").parse("2017-01-30");
createDate = new SimpleDateFormat("yyyy-MM-dd").parse("2017-01-01");
food = new Food("Apple", expaireDate, createDate, 65.5, 10.0);
}
/**
* Ситуация когда продукты помещаются на склад.
*/
@Test
public void whenFoodsPlaceWarehouse() {
try {
currentDate = new SimpleDateFormat("yyyy-MM-dd").parse("2017-01-02");
} catch (ParseException e) {
e.printStackTrace();
}
BaseStorage storage = controllQuality.rellocateFoods(food, currentDate);
assertThat(storage, is(controllQuality.getStorages()[0]));
}
/**
* Ситуация когда продукты помещаются в магазин.
*/
@Test
public void whenFoodsPlaceShop() {
try {
currentDate = new SimpleDateFormat("yyyy-MM-dd").parse("2017-01-15");
} catch (ParseException e) {
e.printStackTrace();
}
BaseStorage storage = controllQuality.rellocateFoods(food, currentDate);
assertThat(storage, is(controllQuality.getStorages()[1]));
}
/**
* Ситуация когда продукты помещаются в мусорку.
*/
@Test
public void whenFoodsPlaceTrash() {
try {
currentDate = new SimpleDateFormat("yyyy-MM-dd").parse("2017-01-31");
} catch (ParseException e) {
e.printStackTrace();
}
BaseStorage storage = controllQuality.rellocateFoods(food, currentDate);
assertThat(storage, is(controllQuality.getStorages()[2]));
}
/**
* Ситуация когда у продукта срок годности подходит к концу.
*/
@Test
public void whenExpirationDateEndsThenGiveDiscount() {
try {
currentDate = new SimpleDateFormat("yyyy-MM-dd").parse("2017-01-28");
} catch (ParseException e) {
e.printStackTrace();
}
controllQuality.rellocateFoods(food, currentDate);
assertThat(food.getPrice(), is(55.5));
}
} |
IBMStreams/OSStreams | src/java/platform/com.ibm.streams.application.models/src/main/java/com/ibm/streams/application/models/spl/logical/PrimitiveOperator.java | <filename>src/java/platform/com.ibm.streams.application.models/src/main/java/com/ibm/streams/application/models/spl/logical/PrimitiveOperator.java<gh_stars>1-10
/*
* Copyright 2021 IBM Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.ibm.streams.application.models.spl.logical;
import java.math.BigInteger;
import java.util.List;
/**
 * Logical-model representation of an SPL primitive operator: its identifying
 * indexes, physical and logical names, ports, parameters and annotations.
 * Instances are immutable; all state is supplied at construction time.
 */
public class PrimitiveOperator extends Operator {
  private final BigInteger definitionIndex;
  private final BigInteger sourceIndex;
  private final BigInteger index;
  private final BigInteger logicalIndex;
  private final String name;
  private final String logicalName;
  private final List<PrimitiveInputPort> inputPorts;
  private final List<PrimitiveOutputPort> outputPorts;
  private final List<Parameter> params;
  private final List<Annotation> annotations;

  public List<Annotation> getAnnotations() {
    return annotations;
  }

  /**
   * Creates a fully populated primitive operator description.
   *
   * @param definitionIndex index of the operator's definition
   * @param sourceIndex index into the source model
   * @param index physical operator index
   * @param logicalIndex logical operator index
   * @param name physical operator name
   * @param logicalName logical operator name
   * @param inputPorts input-port descriptions
   * @param outputPorts output-port descriptions
   * @param params operator parameters
   * @param annotations operator annotations
   */
  public PrimitiveOperator(
      BigInteger definitionIndex,
      BigInteger sourceIndex,
      BigInteger index,
      BigInteger logicalIndex,
      String name,
      String logicalName,
      List<PrimitiveInputPort> inputPorts,
      List<PrimitiveOutputPort> outputPorts,
      List<Parameter> params,
      List<Annotation> annotations) {
    this.definitionIndex = definitionIndex;
    this.sourceIndex = sourceIndex;
    this.index = index;
    this.logicalIndex = logicalIndex;
    this.name = name;
    this.logicalName = logicalName;
    this.inputPorts = inputPorts;
    this.outputPorts = outputPorts;
    this.params = params;
    this.annotations = annotations;
  }

  public BigInteger getDefinitionIndex() {
    return definitionIndex;
  }

  public BigInteger getIndex() {
    return index;
  }

  public String getName() {
    return name;
  }

  public List<PrimitiveInputPort> getInputPorts() {
    return inputPorts;
  }

  public List<PrimitiveOutputPort> getOutputPorts() {
    return outputPorts;
  }

  public List<Parameter> getParams() {
    return params;
  }

  public BigInteger getLogicalIndex() {
    return logicalIndex;
  }

  public String getLogicalName() {
    return logicalName;
  }

  public BigInteger getSourceIndex() {
    return sourceIndex;
  }
}
|
93million/certcache | src/cli/commands/create-keys.js | const childProcess = require('child_process')
const path = require('path')
const util = require('util')
const { catkeys } = require('./args')
const getConfig = require('../../lib/getConfig')
const execFile = util.promisify(childProcess.execFile)
module.exports = {
cmd: 'create-keys',
desc:
'Create access keys to allow certcache clients to access certcache server',
builder: { catkeys },
handler: async (argv) => {
const execScript = path.resolve(
__dirname,
'..',
'..',
'..',
'node_modules',
'.bin',
'catkeys'
)
const { catKeysDir } = (await getConfig())
execFile(
execScript,
['create-key', '--server', '--keydir', catKeysDir]
)
.then(() => {
execFile(
execScript,
['create-key', '--keydir', catKeysDir, '--name', 'client']
)
})
.catch((err) => {
console.error(err)
})
}
}
|
wu0916/design-pattern | src/main/java/com/snailwu/designpattern/d22_memento_pattern/Client.java | <gh_stars>0
package com.snailwu.designpattern.d22_memento_pattern;
/**
* 备忘录模式
*
* @author 吴庆龙
* @date 2020/4/13 4:46 下午
*/
public class Client {
    public static void main(String[] args) {
        Originator or = new Originator();
        Caretaker cr = new Caretaker();
        or.setState("S0");
        System.out.println("初始状态: " + or.getState());
        // Save the current state as a memento held by the caretaker.
        cr.setMemento(or.createMemento());
        or.setState("S1");
        System.out.println("新的状态: " + or.getState());
        // Restore the previously saved state from the caretaker's memento.
        or.restoreMemento(cr.getMemento());
        System.out.println("恢复状态: " + or.getState());
    }
}
|
Essane/PartimeJob | src/main/java/com/essane/partimejob/mapper/TaskMapper.java | package com.essane.partimejob.mapper;
import com.baomidou.mybatisplus.core.mapper.BaseMapper;
import com.essane.partimejob.domain.Task;
/**
 * MyBatis-Plus mapper for {@link Task} entities; the standard CRUD
 * operations are inherited from {@link BaseMapper}.
 */
public interface TaskMapper extends BaseMapper<Task> {
}
|
nhoughto/camel | core/camel-util/src/main/java/org/apache/camel/util/concurrent/CamelThreadFactory.java | <filename>core/camel-util/src/main/java/org/apache/camel/util/concurrent/CamelThreadFactory.java
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.util.concurrent;
import java.util.concurrent.ThreadFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Thread factory which creates threads supporting a naming pattern.
*/
public final class CamelThreadFactory implements ThreadFactory {
private static final Logger LOG = LoggerFactory.getLogger(CamelThreadFactory.class);
private final String pattern;
private final String name;
private final boolean daemon;
public CamelThreadFactory(String pattern, String name, boolean daemon) {
this.pattern = pattern;
this.name = name;
this.daemon = daemon;
}
public Thread newThread(Runnable runnable) {
String threadName = ThreadHelper.resolveThreadName(pattern, name);
Thread answer = new Thread(runnable, threadName);
answer.setDaemon(daemon);
LOG.trace("Created thread[{}] -> {}", threadName, answer);
return answer;
}
public String getName() {
return name;
}
public String toString() {
return "CamelThreadFactory[" + name + "]";
}
} |
Hendrikto/jena | jena-examples/src/main/java/tdb1/examples/ExTDB_Txn2.java | <filename>jena-examples/src/main/java/tdb1/examples/ExTDB_Txn2.java
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package tdb1.examples;
import org.apache.jena.atlas.lib.StrUtils;
import org.apache.jena.query.Dataset;
import org.apache.jena.system.Txn;
import org.apache.jena.tdb.TDBFactory;
import org.apache.jena.update.UpdateExecutionFactory;
import org.apache.jena.update.UpdateFactory;
import org.apache.jena.update.UpdateExecution;
import org.apache.jena.update.UpdateRequest;
/**
 * Example of a WRITE transaction.
 * See {@link Txn#executeWrite}.
 */
public class ExTDB_Txn2 {
    public static void main(String...argv) {
        String directory = "MyDatabases/DB1";
        Dataset dataset = TDBFactory.createDataset(directory);

        // Start WRITE transaction.
        // It's possible to read from the dataset inside the write transaction.

        // An application can have other Datasets, in the same JVM,
        // tied to the same TDB database performing read
        // transactions concurrently. If another write transaction
        // starts, the call of dataset.begin(WRITE) blocks until
        // existing writer finishes.

        // A WRITE transaction is
        //    dataset.begin(ReadWrite.WRITE);
        //    try {
        //       ...
        //       ... dataset.abort() or dataset.commit()
        //    } finally { dataset.end();}
        //
        Txn.executeWrite(dataset, ()->{
            // Do a SPARQL Update. "PREFIX :" declares the default prefix
            // used by ":s :p" in the INSERT template.
            String sparqlUpdateString = StrUtils.strjoinNL
                ("PREFIX : <http://example/>"
                ,"INSERT { :s :p ?now } WHERE { BIND(now() AS ?now) }"
                );
            execUpdate(sparqlUpdateString, dataset);
        });
    }

    /** Parses and executes a SPARQL Update string against the dataset. */
    public static void execUpdate(String sparqlUpdateString, Dataset dataset) {
        UpdateRequest request = UpdateFactory.create(sparqlUpdateString);
        UpdateExecution proc = UpdateExecutionFactory.create(request, dataset);
        proc.execute();
    }
}
|
cristivlas/zerobugs | third_party_libs/dwarf-20111214/dwarfgen/irepmacro.h | /*
Copyright (C) 2010-2011 <NAME>.
This program is free software; you can redistribute it and/or modify it
under the terms of version 2 of the GNU General Public License as
published by the Free Software Foundation.
This program is distributed in the hope that it would be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
You should have received a copy of the GNU General Public License along
with this program; if not, write the Free Software Foundation, Inc., 51
Franklin Street - Fifth Floor, Boston MA 02110-1301, USA.
*/
//
// irepmacro.h
//
// One DWARF macro-information entry (define/undef/...) attached to a CU.
class IRMacroRecord {
public:
    IRMacroRecord() {}
    ~IRMacroRecord() {}
    // cuDieOffset: offset of the owning CU DIE; offset: this entry's offset;
    // type: the DW_MACINFO_* code; lineno/lineindex: source position data;
    // macro: the macro text itself.
    // BUGFIX: the macro parameter is taken as "const std::string &macro";
    // the previous text had been corrupted by an HTML entity
    // ("&macro" -> "¯o") and did not compile.
    IRMacroRecord(Dwarf_Off cuDieOffset, Dwarf_Off offset, Dwarf_Small type,
        Dwarf_Signed lineno, Dwarf_Signed lineindex,
        const std::string &macro):cuDieOffset_(cuDieOffset),
        offset_(offset),
        type_(type),lineno_(lineno),lineindex_(lineindex),
        macro_(macro) {}
private:
    Dwarf_Off cuDieOffset_;
    Dwarf_Off offset_;
    Dwarf_Small type_;
    Dwarf_Signed lineno_;
    Dwarf_Signed lineindex_;
    std::string macro_;
};
// Container for all macro records collected while reading an object file.
class IRMacro {
public:
    IRMacro() {};
    ~IRMacro() {};
    // Mutable reference so readers can append records in place.
    std::vector<IRMacroRecord> &getMacroVec() { return macrorec_; };
private:
    std::vector<IRMacroRecord> macrorec_;
};
|
Dong-gi/Dong-gi.github.io | Repositories/Eclipse/spring-aop/src/main/java/io/github/donggi/advice/AfterAdvice3.java | package io.github.donggi.advice;
import org.aspectj.lang.JoinPoint;
/**
 * Spring AOP after-advice examples demonstrating the two supported
 * advice-method signatures: no arguments, and a single {@link JoinPoint}.
 */
public class AfterAdvice3 {
    /** Advice method taking no arguments. */
    public void after1() {
        System.out.println("after with no args");
    }

    /** Advice method receiving the intercepted join point. */
    public void after2(JoinPoint joinPoint) {
        System.out.println("after with JoinPoint : " + joinPoint.toShortString());
        System.out.println("this : " + joinPoint.getThis());
    }
}
|
hostgov/qmall | qmall-member/src/main/java/com/qjx/qmall/member/vo/MemberRegistVo.java | <gh_stars>0
package com.qjx.qmall.member.vo;
import lombok.Data;
/**
* Ryan
* 2021-11-14-16:09
*/
/**
 * Request payload for member registration.
 * Getters/setters/equals/hashCode/toString are generated by Lombok's
 * {@code @Data}.
 */
@Data
public class MemberRegistVo {
    // Desired login name.
    private String username;
    // Password as submitted by the client.
    private String password;
    // Phone number supplied at registration.
    private String phone;
}
|
peramic/App.ProcessViewer | src/main/java/havis/custom/harting/processviewer/ReportListener.java | package havis.custom.harting.processviewer;
import havis.middleware.ale.service.ec.ECReport;
/**
 * Callback invoked whenever a new EC report becomes available.
 */
public interface ReportListener {
    /**
     * Delivers one report to the listener.
     *
     * @param ecReport the report that was produced
     * @throws Exception if the listener fails to process the report
     */
    public void fire(ECReport ecReport) throws Exception;
}
|
microsoft/Network-Adapter-Class-Extension | netcx/ec/lib/executioncontexttask.hpp | // Copyright (C) Microsoft Corporation. All rights reserved.
#pragma once
#include <kwaitevent.h>
#include <ExecutionContextTask.h>
// One queued unit of work for an ExecutionContext: a callback plus its
// opaque context, optionally paired with an event that is signaled once the
// task has executed so callers may block until completion.
class ExecutionContextTask
{
    // The ExecutionContext links tasks into its queue via the private
    // FromLink/AddToList helpers below.
    friend class
        ExecutionContext;

public:

    // TaskContext/TaskFn: the callback and the context handed back to it.
    // SignalCompletion: when true, the completion event is set after the
    // task runs, unblocking WaitForCompletion.
    _IRQL_requires_max_(PASSIVE_LEVEL)
    ExecutionContextTask(
        _In_ void * TaskContext,
        _In_ PFN_EXECUTION_CONTEXT_TASK TaskFn,
        _In_ bool SignalCompletion = true
    );

    _IRQL_requires_max_(PASSIVE_LEVEL)
    virtual
    ~ExecutionContextTask(
        void
    ) = default;

    // Blocks until the task has completed.
    // NOTE(review): presumably only meaningful when SignalCompletion was
    // true at construction — confirm in the ExecutionContext implementation.
    _IRQL_requires_max_(PASSIVE_LEVEL)
    virtual
    void
    WaitForCompletion(
        void
    );

private:

    // Recovers the owning task object from its embedded LIST_ENTRY link.
    _IRQL_requires_max_(DISPATCH_LEVEL)
    static
    ExecutionContextTask *
    FromLink(
        _In_ LIST_ENTRY * Link
    );

    _IRQL_requires_max_(DISPATCH_LEVEL)
    bool
    AddToList(
        _In_ LIST_ENTRY * ListHead
    );

private:

    // Opaque caller context passed back to m_taskFn.
    void * const
        m_context;

    // The callback to execute.
    PFN_EXECUTION_CONTEXT_TASK const
        m_taskFn;

    // Queue linkage used by the ExecutionContext.
    LIST_ENTRY
        m_linkage = {};

    // Whether m_completed is signaled after the task runs.
    bool const
        m_signalCompletion;

    KAutoEvent
        m_completed;
};
|
Jeebiz/jeebiz-admin | jeebiz-admin-extras/jeebiz-admin-authz-rbac0/src/main/java/net/jeebiz/admin/authz/rbac0/dao/IAuthorizedFeatureDao.java | /**
* Copyright (C) 2018 Jeebiz (http://jeebiz.net).
* All Rights Reserved.
*/
package net.jeebiz.admin.authz.rbac0.dao;
import java.util.List;
import org.apache.ibatis.annotations.Mapper;
import org.apache.ibatis.annotations.Param;
import net.jeebiz.admin.authz.feature.dao.entities.AuthzFeatureModel;
import net.jeebiz.admin.authz.feature.dao.entities.AuthzFeatureOptModel;
import net.jeebiz.boot.api.dao.BaseDao;
/**
 * Service feature-menu DAO: looks up feature menus and feature operations
 * as granted to a given role.
 */
@Mapper
public interface IAuthorizedFeatureDao extends BaseDao<AuthzFeatureModel>{

    /**
     * Queries the feature menus owned by the role with the given id.
     *
     * @param roleId the role id
     * @return the feature menus granted to that role
     */
    public List<AuthzFeatureModel> getFeatures(@Param(value = "roleId") String roleId);

    /**
     * Looks up feature operations, marking the ones the given role is
     * authorized for as selected.
     *
     * @param roleId the role id
     * @return feature operations carrying the role's selection state
     */
    public List<AuthzFeatureOptModel> getFeatureOpts(@Param(value = "roleId") String roleId);

    /**
     * Queries the authorized child feature menus under the given service
     * for the role.
     *
     * @param roleId the role id
     * @param servId the service id
     * @return the authorized child feature menus
     */
    public List<AuthzFeatureModel> getChildFeatures(@Param(value = "roleId") String roleId, @Param("servId") String servId);

    /**
     * Looks up child feature operations, marking the ones the given role is
     * authorized for as selected.
     *
     * @param roleId the role id
     * @return feature operations carrying the role's selection state
     */
    public List<AuthzFeatureOptModel> getChildFeatureOpts(@Param(value = "roleId") String roleId);
} |
ahnitz/pegasus | test/core/043-integrity-condorio-5.0API/blackdiamond.py | import hashlib
import logging
import sys
from pathlib import Path
from datetime import datetime
from Pegasus.api import *
logging.basicConfig(level=logging.DEBUG)

# Location of pegasus-keg on the submit host; staged to the condor pool below.
PEGASUS_LOCATION = "/usr/bin/pegasus-keg"

# --- Work Dir Setup -----------------------------------------------------------
RUN_ID = "black-diamond-integrity-checking-condorio-5.0api-" + datetime.now().strftime(
    "%s"
)
TOP_DIR = Path(Path.cwd())
WORK_DIR = TOP_DIR / "work"

try:
    Path.mkdir(WORK_DIR)
except FileExistsError:
    pass

# --- Configuration ------------------------------------------------------------
print("Generating pegasus.conf at: {}".format(TOP_DIR / "pegasus.properties"))

conf = Properties()
conf["pegasus.data.configuration"] = "condorio"
# Full integrity checking: staged files are checksummed end to end.
conf["pegasus.integrity.checking"] = "full"
conf.write()

# --- Sites --------------------------------------------------------------------
LOCAL = "local"
CONDOR_POOL = "condorpool"

shared_scratch_dir = str(WORK_DIR / "LOCAL/shared-scratch")
shared_storage_dir = str(WORK_DIR / "LOCAL/shared-storage")

print("Generating site catalog")
sc = SiteCatalog().add_sites(
    Site(LOCAL, arch=Arch.X86_64, os_type=OS.LINUX)
    .add_directories(
        Directory(Directory.SHARED_SCRATCH, shared_scratch_dir).add_file_servers(
            FileServer("file://" + shared_scratch_dir, Operation.ALL)
        ),
        Directory(Directory.SHARED_STORAGE, shared_storage_dir).add_file_servers(
            FileServer("file://" + shared_storage_dir, Operation.ALL)
        ),
    )
    .add_pegasus_profile(clusters_num=1),
    Site(CONDOR_POOL, arch=Arch.X86_64, os_type=OS.LINUX)
    .add_pegasus_profile(style="condor")
    .add_condor_profile(universe="vanilla"),
)

# --- Replicas -----------------------------------------------------------------
print("Generating replica catalog")

# create initial input file and compute its hash for integrity checking
with open("f.a", "wb+") as f:
    f.write(b"This is sample input to KEG\n")
    f.seek(0)
    readable_hash = hashlib.sha256(f.read()).hexdigest()

fa = File("f.a")
rc = ReplicaCatalog().add_replica(
    LOCAL,
    fa,
    "file://" + str(TOP_DIR / fa.lfn),
    checksum={"sha256": readable_hash}
)

# --- Transformations ----------------------------------------------------------
print("Generating transformation catalog")
preprocess = Transformation("preprocess", namespace="pegasus", version="4.0").add_sites(
    TransformationSite(
        CONDOR_POOL,
        PEGASUS_LOCATION,
        is_stageable=True,
        arch=Arch.X86_64,
        os_type=OS.LINUX,
    )
)
# NOTE(review): "findrage" is a typo for "findrange"; it is only a local
# variable name (the transformation itself is registered as "findrange"),
# so the planned workflow is unaffected.
findrage = Transformation("findrange", namespace="pegasus", version="4.0").add_sites(
    TransformationSite(
        CONDOR_POOL,
        PEGASUS_LOCATION,
        is_stageable=True,
        arch=Arch.X86_64,
        os_type=OS.LINUX,
    )
)
analyze = Transformation("analyze", namespace="pegasus", version="4.0").add_sites(
    TransformationSite(
        CONDOR_POOL,
        PEGASUS_LOCATION,
        is_stageable=True,
        arch=Arch.X86_64,
        os_type=OS.LINUX,
    )
)
tc = TransformationCatalog().add_transformations(preprocess, findrage, analyze)

# --- Workflow -----------------------------------------------------------------
print("Generating workflow")

fb1 = File("f.b1")
fb2 = File("f.b2")
fc1 = File("f.c1")
fc2 = File("f.c2")
fd = File("f.d")

try:
    # plan(..., submit=True) both plans and submits the workflow.
    Workflow("blackdiamond").add_jobs(
        Job(preprocess)
        .add_args("-a", "preprocess", "-T", "60", "-i", fa, "-o", fb1, fb2)
        .add_inputs(fa)
        .add_outputs(fb1, fb2, register_replica=True),
        Job(findrage)
        .add_args("-a", "findrange", "-T", "60", "-i", fb1, "-o", fc1)
        .add_inputs(fb1)
        .add_outputs(fc1, register_replica=True),
        Job(findrage)
        .add_args("-a", "findrange", "-T", "60", "-i", fb2, "-o", fc2)
        .add_inputs(fb2)
        .add_outputs(fc2, register_replica=True),
        Job(analyze)
        .add_args("-a", "analyze", "-T", "60", "-i", fc1, fc2, "-o", fd)
        .add_inputs(fc1, fc2)
        .add_outputs(fd, register_replica=True),
    ).add_site_catalog(sc).add_replica_catalog(rc).add_transformation_catalog(tc).plan(
        dir=str(WORK_DIR),
        verbose=3,
        relative_dir=RUN_ID,
        sites=[CONDOR_POOL],
        output_site=LOCAL,
        force=True,
        submit=True,
    )
except PegasusClientError as e:
    print(e.output)
|
rostykerei/cci | src/main/java/nl/rostykerei/cci/ch03/q02/StackMin.java | package nl.rostykerei.cci.ch03.q02;
import nl.rostykerei.cci.datastructure.Stack;
/**
 * Question 3.2 - Stack Min.
 *
 * <p>A stack of integers that can additionally report its current minimum
 * element.
 *
 * @author <NAME>
 */
public interface StackMin extends Stack<Integer> {

    /**
     * Returns the minimum value within the stack.
     *
     * @return minimum value within the stack
     * @throws java.util.EmptyStackException if the stack is empty
     */
    int min();
}
|
nodejs/node.js-convergence | test/parallel/test-fs-truncate-fd.js | 'use strict';
// Regression test: fs.truncate() accepts a numeric file descriptor in
// addition to a path. Truncates an 11-byte file down to 5 bytes via its fd
// and verifies the remaining contents.
var common = require('../common');
var assert = require('assert');
var path = require('path');
var fs = require('fs');

var tmp = common.tmpDir;

// Make sure the temp directory exists before writing into it.
if (!fs.existsSync(tmp))
    fs.mkdirSync(tmp);

var filename = path.resolve(tmp, 'truncate-file.txt');
// Counts completed async assertions; checked on exit so a dropped callback
// fails the test.
var success = 0;

fs.writeFileSync(filename, 'hello world', 'utf8');
var fd = fs.openSync(filename, 'r+');

fs.truncate(fd, 5, function(err) {
    assert.ok(!err);
    assert.equal(fs.readFileSync(filename, 'utf8'), 'hello');
    success++;
});

process.on('exit', function() {
    // Clean up the fd/file and verify the truncate callback actually ran.
    fs.closeSync(fd);
    fs.unlinkSync(filename);
    assert.equal(success, 1);
    console.log('ok');
});
|
coronalabs/com.coronalabs-plugin.kochava | src/tvos/Libs/KochavaCoreTVOS/KVALog.h | <reponame>coronalabs/com.coronalabs-plugin.kochava<gh_stars>0
//
// KVALog.h
// KochavaCore
//
// Created by <NAME> on 8/1/16.
// Copyright © 2017 - 2021 Kochava, Inc. All rights reserved.
//
#ifndef KVALog_h
#define KVALog_h
#pragma mark - CLASS
@class KVALogLevel;
@class KVAProduct;
#pragma mark - INTERFACE
/*!
@class KVALog
@brief A controller for working with the log.
@discussion This class provides controlling mechanisms for the log. It maintains a log enabled boolean and a log level, providing methods to support logging.
Inherits from: NSObject
@classdesign Singleton
@author <NAME>
@copyright 2017 - 2021 Kochava, Inc.
*/
@interface KVALog : NSObject

#pragma mark - SHARED INSTANCE (SINGLETON)

/*!
 @property shared
 @brief The singleton shared instance.
 @discussion All logging configuration is performed through this instance.
 */
@property (class, readonly, strong, nonnull) KVALog *shared;

#pragma mark - PROPERTIES

/*!
 @property level
 @brief The visible maximum log level for log messages.
 */
@property (strong, atomic, nonnull, readwrite) KVALogLevel *level;

/*!
 @property osLogEnabledBool
 @brief A boolean indicating if log messages may be printed using os_log.
 @discussion Default true.  When disabled, log messages will fall back to NSLog or Swift's print.  NSLog and Swift's print lack certain features which os_log has, but they may print in environments where os_log is not supported.
 */
@property (atomic, readwrite) BOOL osLogEnabledBool;

/*!
 @property prettyPrintBool
 @brief A boolean indicating if log messages should be pretty printed.
 @discussion Default true.
 */
@property (atomic, readwrite) BOOL prettyPrintBool;

@end
#endif
|
allanim/vaadin | uitest/src/com/vaadin/tests/containers/sqlcontainer/ComboBoxUpdateProblem.java | package com.vaadin.tests.containers.sqlcontainer;
import com.vaadin.server.LegacyApplication;
import com.vaadin.shared.ui.combobox.FilteringMode;
import com.vaadin.ui.ComboBox;
import com.vaadin.ui.LegacyWindow;
/**
* See http://dev.vaadin.com/ticket/9155 .
*/
public class ComboBoxUpdateProblem extends LegacyApplication {
private final DatabaseHelper databaseHelper = new DatabaseHelper();
@Override
public void init() {
setMainWindow(new LegacyWindow("Test window"));
ComboBox combo = new ComboBox("Names",
databaseHelper.getTestContainer());
combo.setItemCaptionPropertyId("FIELD1");
combo.setFilteringMode(FilteringMode.CONTAINS);
combo.setImmediate(true);
getMainWindow().addComponent(combo);
}
}
|
svenfuchs/adva_cms | plugins/adva_cells/vendor/plugins/cells/generators/cell/cell_generator.rb | require 'rails_generator/generators/components/controller/controller_generator'
class CellGenerator < ControllerGenerator
def manifest
record do |m|
# Check for class naming collisions.
m.class_collisions class_path, "#{class_name}Cell"
# Directories
m.directory File.join('app/cells', class_path)
m.directory File.join('app/cells', class_path, file_name)
# Cell
m.template 'cell.rb', File.join('app/cells', class_path, "#{file_name}_cell.rb")
# View template for each action.
actions.each do |action|
path = File.join('app/cells', class_path, file_name, "#{action}.html.erb")
m.template 'view.html.erb', path,
:assigns => { :action => action, :path => path }
end
end
end
end
|
anubhavparas/ARIAC_Group_1 | rwa5_group_1/docs/html/search/variables_7.js | <reponame>anubhavparas/ARIAC_Group_1<filename>rwa5_group_1/docs/html/search/variables_7.js
// Auto-generated Doxygen search-index fragment (member variables, letter
// "i").  Do not edit by hand — regenerate the documentation instead.
var searchData=
[
  ['id',['id',['../structPart.html#ac78dd973677ae60144acd1dd72a77e70',1,'Part']]],
  ['incorrect_5fpart_5fagv1',['incorrect_part_agv1',['../classSensorControl.html#a200559ad6fdcca0cbb054348c12e3b42',1,'SensorControl']]],
  ['incorrect_5fpart_5fagv2',['incorrect_part_agv2',['../classSensorControl.html#a9beba2715ea4818fbf96f003090caec1',1,'SensorControl']]],
  ['isplacedonagv',['isPlacedOnAGV',['../structProduct.html#afebd3514e550c1d27d9f6b5d0d9fdc8a',1,'Product']]]
];
|
ctfrancia/escacsBCN | barna-ajedrez/src/reducers/tournamentsReducer.js | <gh_stars>0
// Initial shape of the tournaments slice.
const defaultState = {
    tournaments: []
};

/**
 * Reducer for the tournament list.
 *
 * FETCH spreads its payload (an array) into the state, while ADD/UPDATE
 * append the payload as a single element — callers rely on that distinction.
 */
const tournamentsReducer = (state = defaultState.tournaments, action) => {
    switch (action.type) {
        case 'ADD_NEW_TOURNAMENT':
            return [...state, action.tournaments];
        case 'FETCH_TOURNAMENT_LIST':
            return [...state, ...action.tournaments];
        case 'UPDATE_TOURNAMENT_LIST':
            return [...state, action.tournaments];
        case 'REMOVE_TOURNAMENT': {
            // NOTE(review): this branch reads the list from state[0].data,
            // a shape inconsistent with the other branches — confirm against
            // the fetch action's payload shape.
            const arr = state[0].data;
            const remaining = arr.filter(tournament => tournament.id !== action.tournament.id);
            // BUGFIX: the original appended `action.tournaments` (undefined
            // on remove actions) to the result and left debug console.log
            // calls behind; return only the filtered list.
            return remaining;
        }
        default:
            return state;
    }
};

export default tournamentsReducer;
|
dzinoviev/clown | include/readln.h | #ifndef READLN_H
#define READLN_H

/* Read a string, and return a pointer to it.
   Returns NULL on EOF.
   NOTE(review): ownership/lifetime of the returned buffer is not specified
   here — confirm in the implementation before freeing or caching it. */
char *rl_gets ();

#endif /* READLN_H */
|
seckcoder/lang-learn | python/sklearn/examples/cluster/plot_lena_segmentation.py | """
=========================================
Segmenting the picture of Lena in regions
=========================================
This example uses :ref:`spectral_clustering` on a graph created from
voxel-to-voxel difference on an image to break this image into multiple
partly-homogenous regions.
This procedure (spectral clustering on an image) is an efficient
approximate solution for finding normalized graph cuts.
There are two options to assign labels:
* with 'kmeans' spectral clustering will cluster samples in the embedding space
using a kmeans algorithm
* whereas 'discrete' will iteratively search for the closest partition
space to the embedding space.
"""
# NOTE(review): legacy example — Python 2 syntax ("print" statement) and
# sp.misc.lena, which was removed from modern SciPy; run with the matching
# historical environment.
print __doc__

# Author: <NAME> <<EMAIL>>, <NAME>
# License: BSD

import time

import numpy as np
import scipy as sp
import pylab as pl

from sklearn.feature_extraction import image
from sklearn.cluster import spectral_clustering

lena = sp.misc.lena()
# Downsample the image by a factor of 4
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]

# Convert the image into a graph with the value of the gradient on the
# edges.
graph = image.img_to_graph(lena)

# Take a decreasing function of the gradient: an exponential
# The smaller beta is, the more independent the segmentation is of the
# actual image. For beta=1, the segmentation is close to a voronoi
beta = 5
eps = 1e-6
graph.data = np.exp(-beta * graph.data / lena.std()) + eps

# Apply spectral clustering (this step goes much faster if you have pyamg
# installed)
N_REGIONS = 11

###############################################################################
# Visualize the resulting regions
for assign_labels in ('kmeans', 'discretize'):
    t0 = time.time()
    labels = spectral_clustering(graph, n_clusters=N_REGIONS,
                                 assign_labels=assign_labels,
                                 random_state=1)
    t1 = time.time()
    labels = labels.reshape(lena.shape)
    pl.figure(figsize=(5, 5))
    pl.imshow(lena, cmap=pl.cm.gray)
    for l in range(N_REGIONS):
        # NOTE(review): "contours=1" is not a keyword accepted by current
        # matplotlib — confirm against the matplotlib version this example
        # was written for.
        pl.contour(labels == l, contours=1,
                   colors=[pl.cm.spectral(l / float(N_REGIONS)), ])
    pl.xticks(())
    pl.yticks(())
    pl.title('Spectral clustering: %s, %.2fs' % (assign_labels, (t1 - t0)))
pl.show()
|
albertico/homebrew-cask | Casks/tuneup.rb | cask 'tuneup' do
  version '2.5.3.0'
  sha256 '8a2722d8719323d692c4a14935d945a3e42946aa9abd8a772fbd3737e4698b5d'

  # cloudfront.net is the official download host per the vendor homepage
  url "https://dvk2ozaytrec6.cloudfront.net/mac4/Sparkle/TuneUp-Installer-#{version.major_minor}.0.zip"
  appcast 'https://dvk2ozaytrec6.cloudfront.net/mac4/appcast.xml',
          :checkpoint => 'fa140f16451aa2604c86fe4f1eed48c80dc183618152552c633dbe3c864ec4f2'
  name 'TuneUp'
  homepage 'https://www.tuneupmedia.com/'
  license :commercial

  # No .app bundle to move into /Applications; the vendor's installer app
  # must be run manually.
  installer :manual => 'TuneUp-Installer.app'
end
|
hkirsman/hhvm_centos7_builds | third-party/mcrouter/src/mcrouter/ProxyConfigBuilder.h | /*
* Copyright (c) 2015, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree. An additional grant
* of patent rights can be found in the PATENTS file in the same directory.
*
*/
#pragma once
#include <memory>
#include <string>
#include <folly/dynamic.h>
#include <folly/Range.h>
#include "mcrouter/options.h"
#include "mcrouter/PoolFactory.h"
namespace facebook { namespace memcache { namespace mcrouter {
class ConfigApi;
class ProxyConfig;
class proxy_t;
// Parses a JSONC router configuration (after preprocessing) and builds
// per-proxy ProxyConfig objects from it.
class ProxyConfigBuilder {
 public:
  ProxyConfigBuilder(const McrouterOptions& opts,
                     ConfigApi& configApi,
                     folly::StringPiece jsonC);

  // Builds the runtime configuration for one proxy thread.
  std::shared_ptr<ProxyConfig> buildConfig(proxy_t& proxy) const;

  // The preprocessed JSON this builder was constructed from.
  const folly::dynamic& preprocessedConfig() const {
    return json_;
  }

 private:
  folly::dynamic json_;
  std::unique_ptr<PoolFactory> poolFactory_;
  // md5 digest of the source configuration.
  std::string configMd5Digest_;
};
}}} // facebook::memcache::mcrouter
|
JeromeDuboisPro/framework | arcane/src/arcane/ActionWrapperService.h | // -*- tab-width: 2; indent-tabs-mode: nil; coding: utf-8-with-signature -*-
//-----------------------------------------------------------------------------
// Copyright 2000-2021 CEA (www.cea.fr) IFPEN (www.ifpenergiesnouvelles.com)
// See the top-level COPYRIGHT file for details.
// SPDX-License-Identifier: Apache-2.0
//-----------------------------------------------------------------------------
/*---------------------------------------------------------------------------*/
/* ActionWrapperService.h (C) 2000-2006 */
/* */
/* Service faisant un wrapper autour d'une action. */
/*---------------------------------------------------------------------------*/
#ifndef ARCANE_ACTIONWRAPPER_H
#define ARCANE_ACTIONWRAPPER_H
/*---------------------------------------------------------------------------*/
/*---------------------------------------------------------------------------*/
#include "arcane/IActionWrapperService.h"
/*---------------------------------------------------------------------------*/
/*---------------------------------------------------------------------------*/
ARCANE_BEGIN_NAMESPACE
/*---------------------------------------------------------------------------*/
/*---------------------------------------------------------------------------*/
class ServiceBuildInfo;
class IApplication;
class IServiceInfo;
/*---------------------------------------------------------------------------*/
/*---------------------------------------------------------------------------*/
/*!
 * \internal
 *
 * \brief Wrapper around an action (service decorating an action).
 *
 */
class ActionWrapperService
: public IActionWrapperService
{
 public:

  ActionWrapperService(const ServiceBuildInfo& sbi);
  virtual ~ActionWrapperService();

 public:

  //! Parent of this service.
  virtual IBase* serviceParent() const;

  //! Service information.
  virtual IServiceInfo* serviceInfo() const
    { return m_service_info; }

 private:

  IApplication* m_application;
  IServiceInfo* m_service_info;
};
/*---------------------------------------------------------------------------*/
/*---------------------------------------------------------------------------*/
ARCANE_END_NAMESPACE
/*---------------------------------------------------------------------------*/
/*---------------------------------------------------------------------------*/
#endif
|
jdflorencio/simpleERP_clinteSide | public/app.routes.js | var router = angular.module('materialApp.routes', ['ui.router']);
/**
 * Application routing and HTTP auth plumbing.
 *
 * Registers a bearer-token request interceptor, a response-error handler
 * (401 -> clear token and go to login, 403 -> log) and all ui-router states.
 * html5Mode is enabled, so the server must rewrite app URLs to index.html.
 */
router.config(function ($stateProvider, $urlRouterProvider, $locationProvider, $httpProvider, $injector) {
    $urlRouterProvider.otherwise('/login');

    // $q is injected so HTTP failures can be re-thrown as rejected promises.
    $httpProvider.interceptors.push(($injector, $q) => {
        return {
            // Attach the stored bearer token to every outgoing request.
            request: function (req) {
                req.headers.Authorization = 'Bearer ' + localStorage.getItem("Authorization")
                return req
            },
            responseError: function (error) {
                const { status } = error
                switch (status) {
                    case 401:
                        // Token invalid/expired: drop it and force re-login.
                        localStorage.removeItem('Authorization')
                        var state = $injector.get('$state')
                        state.go('login')
                        break
                    case 403:
                        console.info('atual URL:', window.location)
                        break
                }
                // BUGFIX: the original returned undefined here, which turned
                // every HTTP failure into a *successful* response with an
                // undefined payload.  Keep the rejection flowing to callers.
                return $q.reject(error)
            },
            requestError: function (err) {
                console.warn(" ||| aqui >>>", err)
                // BUGFIX: propagate the failure instead of swallowing it.
                return $q.reject(err)
            }
        }
    })

    $stateProvider
        .state('login', {
            url: '/login',
            templateUrl: '/modules/login/views/login.html',
            controller: 'loginCtrl',
            controllerAs: 'ctrl',
            params: {
                title: "SimpleERP"
            },
            resolve: {
                skipIfAuthenticated: function (AppService) {
                    // BUGFIX: the original assigned the result to an
                    // undeclared global ("teste = ..."), leaking a global
                    // and throwing in strict mode.
                    AppService.notAuthenticated()
                }
            }
        })
        .state('home', {
            url: '/',
            templateUrl: 'home.html',
            params: {
                title: "SimpleERP"
            },
            resolve: {
                redirectIfNotAuthenticated: _redirectIfNotAuthenticated
            }
        })
        .state('clientes', {
            url: '/clientes',
            templateUrl: '/modules/clientes/views/clientes.html',
            controller: 'clientesCtrl',
            controllerAs: 'ctrl',
            params: {
                title: "Clientes"
            }
        })
        .state('adicionar_cliente', {
            url: '/cliente/add',
            templateUrl: '/modules/subModules/cliente/views/cliente.html',
            controller: 'clienteCtrl',
            controllerAs: 'ctrl',
            params: {
                title: "Adicionar novo clientes"
            }
        })
        .state('editar_cliente', {
            url: '/cliente/:id',
            templateUrl: '/modules/subModules/cliente/views/cliente.html',
            controller: 'clienteCtrl',
            controllerAs: 'ctrl',
            params: {
                title: "Editando cliente"
            }
        })
        .state('produtos', {
            url: '/produtos',
            templateUrl: '/modules/produtos/views/produtos.html',
            controller: 'produtosCtrl',
            controllerAs: 'ctrl',
            params: {
                title: "Produtos"
            }
        })
        .state('adicionar_produto', {
            url: '/produto/add',
            templateUrl: '/modules/subModules/produto/views/produto.html',
            controller: 'produtoCtrl',
            controllerAs: 'ctrl',
            params: {
                title: "Adicionar novo produtos"
            }
        })
        .state('editar_produto', {
            url: '/produto/:id',
            templateUrl: '/modules/subModules/produto/views/produto.html',
            controller: 'produtoCtrl',
            controllerAs: 'ctrl',
            params: {
                title: "Editando produto"
            }
        })
        .state('grupos', {
            url: '/grupos',
            templateUrl: '/modules/grupos/views/grupos.html',
            controller: 'gruposCtrl',
            controllerAs: 'ctrl',
            params: {
                title: "grupos"
            }
        })
        .state('adicionar_grupo', {
            url: '/grupo/add',
            templateUrl: '/modules/subModules/grupo/views/grupo.html',
            controller: 'grupoCtrl',
            controllerAs: 'ctrl',
            params: {
                title: "Adicionar novo grupos"
            }
        })
        .state('editar_grupo', {
            url: '/grupo/:id',
            templateUrl: '/modules/subModules/grupo/views/grupo.html',
            controller: 'grupoCtrl',
            controllerAs: 'ctrl',
            params: {
                title: "Editando grupo"
            }
        })
        .state('subgrupos', {
            url: '/subgrupos',
            templateUrl: '/modules/subgrupos/views/subgrupos.html',
            controller: 'subgruposCtrl',
            controllerAs: 'ctrl',
            params: {
                title: "subgrupos"
            }
        })
        .state('adicionar_subgrupo', {
            url: '/subgrupo/add',
            templateUrl: '/modules/subModules/subgrupo/views/subgrupo.html',
            controller: 'subgrupoCtrl',
            controllerAs: 'ctrl',
            params: {
                title: "Adicionar novo subgrupos"
            }
        })
        .state('editar_subgrupo', {
            url: '/subgrupo/:id',
            templateUrl: '/modules/subModules/subgrupo/views/subgrupo.html',
            controller: 'subgrupoCtrl',
            controllerAs: 'ctrl',
            params: {
                title: "Editando subgrupo"
            }
        })
        .state('tributacao', {
            url: '/tributacao',
            templateUrl: '/modules/tributacao/views/tributacao.html',
            controller: 'tributacaoCtrl',
            controllerAs: 'ctrl',
            params: {
                title: "Tributacão"
            }
        })
        .state('adicionar_tributo', {
            url: '/tributo/add',
            templateUrl: '/modules/subModules/tributo/views/tributo.html',
            controller: 'tributoCtrl',
            controllerAs: 'ctrl',
            params: {
                title: "Adicionar novo tributos"
            }
        })
        .state('editar_tributo', {
            url: '/tributo/:id',
            templateUrl: '/modules/subModules/tributo/views/tributo.html',
            controller: 'tributoCtrl',
            controllerAs: 'ctrl',
            params: {
                title: "Editando tributo"
            }
        })
        .state('notasfiscais', {
            url: '/notasfiscais',
            templateUrl: '/modules/notasFiscais/views/notasFiscais.html',
            controller: 'notasFiscaisCtrl',
            controllerAs: 'ctrl',
            params: {
                title: "Notas Fiscais"
            }
        })
        .state('adicionar_nota_fiscal', {
            url: '/notafiscal/add',
            templateUrl: '/modules/subModules/notaFiscal/views/notaFiscal.html',
            controller: 'notaFiscalCtrl',
            controllerAs: 'ctrl',
            params: {
                title: "Adicionando uma nova nota Fiscal"
            }
        })
        .state('editar_nota_fiscal', {
            url: '/notafiscal/:id',
            templateUrl: '/modules/subModules/notaFiscal/views/notaFiscal.html',
            controller: 'notaFiscalCtrl',
            controllerAs: 'ctrl',
            params: {
                title: "Editando a nota Fiscal"
            }
        })

    $locationProvider.html5Mode(true);
});
// Placeholder: intended to skip a route when the user is already
// authenticated.  NOTE(review): currently empty and unused — wire it into
// the login state's resolve block or remove it.
function _skipIfAuthenticated() {
}
// Resolve hook for protected states.  NOTE(review): it only logs and does
// not actually redirect — implement the redirect or rename the function.
function _redirectIfNotAuthenticated() {
    console.log('Não Autenticado')
}
|
c-sp/AGE | src/age_emulator_gb/sound/age_gb_sound_volume.hpp | <filename>src/age_emulator_gb/sound/age_gb_sound_volume.hpp<gh_stars>1-10
//
// Copyright 2020 <NAME>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef AGE_GB_SOUND_VOLUME_HPP
#define AGE_GB_SOUND_VOLUME_HPP
//!
//! \file
//!
#include <age_debug.hpp>
#include <age_types.hpp>
namespace age
{
//! Mixin implementing the Game Boy volume envelope (register NRx2) on top
//! of a channel base class that provides log(), set_volume() and
//! deactivate().  Tick ordering matters for emulation accuracy — keep the
//! statement order as is.
template<typename BaseClass>
class gb_volume_envelope : public BaseClass
{
public:
    using BaseClass::BaseClass;

    //! \return the current NRx2 register value.
    [[nodiscard]] uint8_t read_nrX2() const
    {
        return m_nrX2;
    }

    //! Handles a CPU write to NRx2: applies the "zombie" volume tweak,
    //! stores the new period/direction, and deactivates the channel if
    //! the new value silences it.
    void write_nrX2(uint8_t nrX2)
    {
        auto msg = BaseClass::log();

        // "zombie" update
        // (hardware quirk: writing NRx2 nudges the current volume even
        // while the envelope is not ticking)
        int volume = static_cast<uint8_t>(m_volume);
        if ((m_period == 0) && (m_period_counter > 0))
        {
            ++volume;
        }
        else if (!m_increase_volume)
        {
            volume += 2;
        }
        if (m_increase_volume != ((nrX2 & 0x08) > 0))
        {
            volume = 16 - volume;
        }
        volume &= 0x0F;
        update_volume(volume);

        // store new value
        m_nrX2 = nrX2;
        m_period = m_nrX2 & 0x07;
        m_increase_volume = (m_nrX2 & 0x08) > 0;

        msg << "configure volume envelope:"
            << "\n * period = " << log_hex8(m_period)
            << "\n * volume = " << log_hex8(m_volume)
            << "\n * volume " << (m_increase_volume ? "up" : "down");

        deactivate_if_silent();
    }

    //! (Re)initializes the envelope on channel trigger.
    //! \param inc_period adds one extra tick to the first period.
    //! \return true if the channel was silent and has been deactivated.
    bool init_volume_envelope(bool inc_period)
    {
        m_period_counter = (m_period == 0) ? 8 : m_period;
        m_period_counter += inc_period ? 1 : 0;
        update_volume(m_nrX2 >> 4);
        return deactivate_if_silent();
    }

    //! Advances the envelope by one frame-sequencer step, adjusting the
    //! volume when the period counter expires.
    void volume_envelope()
    {
        if (m_period_counter > 0)
        {
            --m_period_counter;
            if (m_period_counter == 0)
            {
                if (m_period > 0)
                {
                    if (adjust_volume())
                    {
                        m_period_counter = m_period;
                    }
                    // else m_period_counter unchanged (= 0) -> stop
                }
                // cannot adjust volume at the moment -> retry later
                else
                {
                    m_period_counter = 8;
                }
            }
        }
    }

private:
    // Deactivates the channel if NRx2 indicates silence (volume 0, envelope
    // decreasing); returns whether the channel is silent.
    bool deactivate_if_silent()
    {
        bool is_silent = (m_nrX2 & 0xF8U) == 0;
        if (is_silent)
        {
            BaseClass::deactivate();
        }
        return is_silent;
    }

    void update_volume(int new_volume)
    {
        AGE_ASSERT((new_volume >= 0) && (new_volume < 0x10))
        m_volume = static_cast<int8_t>(new_volume);
        BaseClass::set_volume(m_volume);
    }

    // Steps the volume one unit in the envelope direction; returns false
    // if it would leave the valid 0..15 range (envelope then stops).
    bool adjust_volume()
    {
        int volume = m_increase_volume ? m_volume + 1 : m_volume - 1;

        bool adjust_volume = (volume >= 0) && (volume < 0x10);
        if (adjust_volume)
        {
            update_volume(volume);
        }
        return adjust_volume;
    }

    uint8_t m_nrX2 = 0;
    bool m_increase_volume = false;
    int8_t m_period = 0;
    int8_t m_period_counter = 0;
    int8_t m_volume = 0;
};
} // namespace age
#endif // AGE_GB_SOUND_VOLUME_HPP
|
raghav-deepsource/FreeBuilder | src/main/java/org/inferred/freebuilder/processor/source/Variable.java | /*
* Copyright 2018 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.inferred.freebuilder.processor.source;
import org.inferred.freebuilder.processor.source.Scope.Level;
/**
 * An excerpt that prints a method-scoped variable, lazily claiming a unique
 * identifier in the surrounding scope the first time it is emitted.
 *
 * <p>Equality is identity-based: two {@code Variable} instances always
 * denote distinct variables, even with the same preferred name.
 */
public class Variable extends ValueType implements Excerpt, Scope.Key<IdKey> {

  private final String preferredName;

  /** Creates a variable that will try to use {@code preferredName}. */
  public Variable(String preferredName) {
    this.preferredName = preferredName;
  }

  @Override
  public Level level() {
    return Level.METHOD;
  }

  @Override
  public boolean equals(Object obj) {
    // Identity semantics: each instance is its own variable.
    return this == obj;
  }

  @Override
  public int hashCode() {
    return super.hashCode();
  }

  @Override
  public void addTo(SourceBuilder code) {
    // The first emission picks (and registers) a unique name; subsequent
    // emissions reuse the IdKey cached in the scope.
    IdKey assigned = code.scope().computeIfAbsent(this, () -> new IdKey(chooseName(code)));
    code.add("%s", assigned.name());
  }

  @Override
  protected void addFields(FieldReceiver fields) {
    fields.add("preferredName", preferredName);
  }

  /**
   * Tries {@code preferredName}, then {@code _preferredName}, then
   * {@code _preferredName2}, {@code _preferredName3}, … until one is free.
   */
  private String chooseName(SourceBuilder code) {
    String candidate = preferredName;
    int attempt = 1;
    while (!claimName(code, candidate)) {
      attempt++;
      candidate = (attempt == 2)
          ? "_" + preferredName
          : "_" + preferredName + (attempt - 1);
    }
    return candidate;
  }

  /** Registers {@code name} for this variable; false if already taken. */
  private boolean claimName(SourceBuilder code, String name) {
    return code.scope().putIfAbsent(new IdKey(name), this) == null;
  }
}
|
gaoyuan117/RenRenRoom | app/src/main/java/com/justwayward/renren/ui/adapter/TopDetailAdapter.java | package com.justwayward.renren.ui.adapter;
import android.support.annotation.LayoutRes;
import android.support.annotation.Nullable;
import android.text.Html;
import android.text.TextUtils;
import android.widget.ImageView;
import com.bumptech.glide.Glide;
import com.chad.library.adapter.base.BaseQuickAdapter;
import com.chad.library.adapter.base.BaseViewHolder;
import com.justwayward.renren.R;
import com.justwayward.renren.bean.TopDetailBean;
import java.util.List;
/**
* Created by gaoyuan on 2017/11/30.
*/
/**
 * RecyclerView adapter that renders one "top" list entry: cover image, title,
 * author/category line, HTML description and the collect/view statistics.
 *
 * Created by gaoyuan on 2017/11/30.
 */
public class TopDetailAdapter extends BaseQuickAdapter<TopDetailBean, BaseViewHolder> {

    /**
     * @param layoutResId item row layout resource
     * @param data        backing list (may be null; BaseQuickAdapter treats null as empty)
     */
    public TopDetailAdapter(@LayoutRes int layoutResId, @Nullable List<TopDetailBean> data) {
        super(layoutResId, data);
    }

    @Override
    protected void convert(BaseViewHolder helper, TopDetailBean item) {
        // Cover image, falling back to the default drawable on load failure.
        ImageView imgCover = helper.getView(R.id.ivSubCateCover);
        Glide.with(mContext).load(item.getPic()).error(R.drawable.cover_default).into(imgCover);

        // BUG FIX: the original null-checked getLabels() but displayed
        // getCategory_name(), so a null category rendered as the string "null".
        // Null-check the value that is actually shown.
        String author = item.getAuthor() == null ? "未知" : item.getAuthor();
        String category = item.getCategory_name() == null ? "未知" : item.getCategory_name();

        helper.setText(R.id.tvSubCateTitle, item.getTitle())
                .setText(R.id.tvSubCateAuthor, author + " | " + category)
                .setText(R.id.tvSubCateShort, Html.fromHtml(item.getDesc()))
                .setText(R.id.tvSubCateMsg, String.format(mContext.getResources().getString(R.string.category_book_msg),
                        item.getCollect_num(),
                        // NOTE(review): getView_num() + "" is only empty/null-ish if
                        // view_num itself is a nullable String — confirm the bean type.
                        TextUtils.isEmpty(item.getView_num() + "") ? "0" : item.getView_num() + ""));
    }
}
|
DwArFeng/rabc-weapon-rack | rabc-weapon-rack-stack/src/main/java/com/dwarfeng/rabcwr/stack/bean/entity/Pexp.java | <reponame>DwArFeng/rabc-weapon-rack
package com.dwarfeng.rabcwr.stack.bean.entity;
import com.dwarfeng.subgrade.stack.bean.entity.Entity;
import com.dwarfeng.subgrade.stack.bean.key.LongIdKey;
import com.dwarfeng.subgrade.stack.bean.key.StringIdKey;
/**
* @author DwArFeng
* @since 0.0.1-alpha
*/
/**
 * Pexp entity: a permission expression attached to a role.
 *
 * <p>Plain mutable bean carrying a long primary key, the owning role's string
 * key, the expression text and a free-form remark.
 *
 * @author DwArFeng
 * @since 0.0.1-alpha
 */
public class Pexp implements Entity<LongIdKey> {

    private static final long serialVersionUID = -7246102861329325491L;

    /** Primary key of this expression. */
    private LongIdKey key;
    /** Key of the role this expression belongs to. */
    private StringIdKey roleKey;
    /** The permission expression text itself. */
    private String content;
    /** Free-form remark. */
    private String remark;

    /** Default constructor, required by bean mappers. */
    public Pexp() {
    }

    /** All-args constructor. */
    public Pexp(LongIdKey key, StringIdKey roleKey, String content, String remark) {
        this.key = key;
        this.roleKey = roleKey;
        this.content = content;
        this.remark = remark;
    }

    @Override
    public LongIdKey getKey() {
        return key;
    }

    @Override
    public void setKey(LongIdKey key) {
        this.key = key;
    }

    public StringIdKey getRoleKey() {
        return roleKey;
    }

    public void setRoleKey(StringIdKey roleKey) {
        this.roleKey = roleKey;
    }

    public String getContent() {
        return content;
    }

    public void setContent(String content) {
        this.content = content;
    }

    public String getRemark() {
        return remark;
    }

    public void setRemark(String remark) {
        this.remark = remark;
    }

    @Override
    public String toString() {
        // Same rendering as manual concatenation, including "null" for null fields.
        return String.format("Pexp{key=%s, roleKey=%s, content='%s', remark='%s'}",
                key, roleKey, content, remark);
    }
}
|
ppartarr/azure-sdk-for-java | sdk/containerregistry/mgmt-v2016_06_27_preview/src/main/java/com/microsoft/azure/management/containerregistry/v2016_06_27_preview/implementation/RegistryInner.java | /**
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See License.txt in the project root for
* license information.
*
* Code generated by Microsoft (R) AutoRest Code Generator.
*/
package com.microsoft.azure.management.containerregistry.v2016_06_27_preview.implementation;
import org.joda.time.DateTime;
import com.microsoft.azure.management.containerregistry.v2016_06_27_preview.StorageAccountProperties;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.microsoft.rest.serializer.JsonFlatten;
import com.microsoft.azure.Resource;
/**
* An object that represents a container registry.
*/
@JsonFlatten
public class RegistryInner extends Resource {
    /**
     * URL used to log into this container registry (server-populated, read-only).
     */
    @JsonProperty(value = "properties.loginServer", access = JsonProperty.Access.WRITE_ONLY)
    private String loginServer;

    /**
     * Creation timestamp of the registry, ISO8601 (server-populated, read-only).
     */
    @JsonProperty(value = "properties.creationDate", access = JsonProperty.Access.WRITE_ONLY)
    private DateTime creationDate;

    /**
     * Whether the admin user is enabled; the service defaults this to false.
     */
    @JsonProperty(value = "properties.adminUserEnabled")
    private Boolean adminUserEnabled;

    /**
     * Storage account backing the registry. When specified it must be in the
     * same physical location as the registry.
     */
    @JsonProperty(value = "properties.storageAccount", required = true)
    private StorageAccountProperties storageAccount;

    /**
     * Returns the login-server URL of this registry.
     *
     * @return the loginServer value
     */
    public String loginServer() {
        return loginServer;
    }

    /**
     * Returns the registry's creation date (ISO8601).
     *
     * @return the creationDate value
     */
    public DateTime creationDate() {
        return creationDate;
    }

    /**
     * Returns whether the admin user is enabled (service default: false).
     *
     * @return the adminUserEnabled value
     */
    public Boolean adminUserEnabled() {
        return adminUserEnabled;
    }

    /**
     * Sets whether the admin user is enabled.
     *
     * @param adminUserEnabled the adminUserEnabled value to set
     * @return this {@link RegistryInner}, for chaining
     */
    public RegistryInner withAdminUserEnabled(Boolean adminUserEnabled) {
        this.adminUserEnabled = adminUserEnabled;
        return this;
    }

    /**
     * Returns the storage account backing the registry.
     *
     * @return the storageAccount value
     */
    public StorageAccountProperties storageAccount() {
        return storageAccount;
    }

    /**
     * Sets the storage account backing the registry; it must be in the same
     * physical location as the registry.
     *
     * @param storageAccount the storageAccount value to set
     * @return this {@link RegistryInner}, for chaining
     */
    public RegistryInner withStorageAccount(StorageAccountProperties storageAccount) {
        this.storageAccount = storageAccount;
        return this;
    }
}
|
beforeuwait/myLintCode | BackTracking/towerOfHanoi.py | # coding=utf-8
"""
汉诺塔
描述:汉诺塔问题(又称为河内塔问题),是一个大家熟知的问题。在A,B,C三根柱子上,有n个不同大小的圆盘(假设半径分别为1-n吧),一开始他们都叠在我A上(如图所示),你的目标是在最少的合法移动步数内将所有盘子从A塔移动到C塔。
游戏中的每一步规则如下:
每一步只允许移动一个盘子(从一根柱子最上方到另一个柱子的最上方)
移动的过程中,你必须保证大的盘子不能在小的盘子上方(小的可以放在大的上面,最大盘子下面不能有任何其他大小的盘子)
思路:
移动 n-1次
把最大换到最边上
再移动 n-1次
这题挺有意思的
"""
class Solution:
    """
    @param n: the number of disks
    @return: the order of moves
    """

    def __init__(self):
        # step[i] is the human-readable description of each legal move;
        # move() looks indices up from the (source, target) peg pair.
        self.step = ["from A to B", "from A to C", "from B to A", "from B to C", "from C to A", "from C to B"]
        # Moves of the most recent towerOfHanoi() call.
        self.result = []

    def towerOfHanoi(self, n):
        """Return the optimal move sequence for n disks from peg A to peg C.

        Fixes over the original:
        - self.result is reset first, so repeated calls on one instance no
          longer accumulate moves from earlier runs;
        - n <= 0 returns [] instead of recursing forever.
        """
        self.result = []
        if n >= 1:
            self.haoni(n, 'A', 'B', 'C')
        return self.result

    def haoni(self, n, a, b, c):
        # Move n disks from peg a to peg c, using b as the spare peg:
        # shift n-1 aside, move the largest, shift n-1 back on top of it.
        if n == 1:
            self.move(a, c)
        else:
            self.haoni(n - 1, a, c, b)
            self.move(a, c)
            self.haoni(n - 1, b, a, c)

    def move(self, m, n):
        # Record one disk move from peg m to peg n.
        if m == 'A':
            s = 1 if n == 'C' else 0
        elif m == 'B':
            s = 2 if n == 'A' else 3
        else:
            s = 4 if n == 'A' else 5
        self.result.append(self.step[s])
# Demo: print the optimal 5-disk move sequence.
moves = Solution().towerOfHanoi(5)
print(moves)
youngdaLee/Baekjoon | src/5/5355.js | <gh_stars>10-100
/**
* 5355. 화성 수학
*
* 작성자: xCrypt0r
* 언어: node.js
* 사용 메모리: 7,700 KB
* 소요 시간: 132 ms
* 해결 날짜: 2020년 11월 16일
*/
const fs = require('fs');
/**
 * 5355. Mars Math — reads T, then T lines of "n op op ...". Each operator is
 * applied left to right: '@' multiplies by 3, '%' adds 5, '#' subtracts 7.
 * Prints each result with exactly two decimal places.
 */
function main() {
    const lines = fs.readFileSync('/dev/stdin').toString().trim().split('\n');
    const T = +lines.shift();

    for (let i = 0; i < T; i++) {
        const tokens = lines[i].split(' ');
        let n = +tokens.shift();

        for (const op of tokens) {
            if (op === '@') {
                n *= 3;
            } else if (op === '%') {
                n += 5;
            } else if (op === '#') {
                n -= 7;
            }
        }

        console.log(n.toFixed(2));
    }
}

main();
|
Henning-Schulz/LPE-Common | org.lpe.common.config/src/org/lpe/common/config/experiment/KeyRegistry.java | package org.lpe.common.config.experiment;
import java.io.FileNotFoundException;
import java.io.PrintWriter;
import java.io.UnsupportedEncodingException;
import java.util.HashMap;
import java.util.Map;
import java.util.Map.Entry;
/**
 * Process-wide registry mapping configuration key names to their {@link Key}
 * definitions, and able to emit an empty configuration-file template listing
 * every registered key.
 *
 * NOTE(review): {@link #getInstance()} is not synchronized, so two threads
 * racing on first use could each create an instance — confirm callers only
 * touch this from a single thread before adding locking.
 */
public class KeyRegistry {

    /** Registered keys, indexed by their configuration name. */
    private final Map<String, Key<?>> string2keys = new HashMap<>();

    private static KeyRegistry instance;

    /** Lazily created singleton accessor (not thread-safe; see class note). */
    public static KeyRegistry getInstance() {
        if (instance == null) {
            instance = new KeyRegistry();
        }
        return instance;
    }

    private KeyRegistry() {
    }

    /** Registers {@code key} under {@code name}, replacing any previous mapping. */
    public void register(String name, Key<?> key) {
        string2keys.put(name, key);
    }

    /** @return the key registered under {@code keyString}, or {@code null} if unknown */
    public Key<?> get(String keyString) {
        return string2keys.get(keyString);
    }

    /**
     * Writes a UTF-8 template configuration file containing one
     * {@code key = <type>} line per registered key.
     *
     * BUG FIX: the writer is now closed via try-with-resources; previously an
     * exception while writing leaked the underlying file handle because
     * {@code writer.close()} was only reached on the success path.
     *
     * @param fileName path of the file to create; must not be null
     * @throws IllegalArgumentException     if {@code fileName} is null
     * @throws FileNotFoundException        if the file cannot be created
     * @throws UnsupportedEncodingException never in practice (UTF-8 is mandatory)
     */
    public void generateEmptyConfigFile(String fileName) throws FileNotFoundException, UnsupportedEncodingException {
        if (fileName == null) {
            throw new IllegalArgumentException("File name must not be null!");
        }
        try (PrintWriter writer = new PrintWriter(fileName, "UTF-8")) {
            for (Entry<String, Key<?>> entry : string2keys.entrySet()) {
                Key<?> key = entry.getValue();
                writer.println(key.toString() + " = <" + key.getType().getName() + ">");
            }
        }
    }
}
|
shaderecker/openstack-org | summit/javascript/summitapp-editevent.js | <filename>summit/javascript/summitapp-editevent.js
/**
* Copyright 2014 Openstack Foundation
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
**/
// Shared jQuery-validate instance; assigned inside $(document).ready below.
// Kept as top-level `var` so it is reachable as a window global.
var form_validator = null;
// Event-type taxonomy ids; each summit event type declares (via data
// attributes, see the #event_type change handler) which family it belongs to.
var TaxonomyEvent = 1;
var TaxonomyPresentation = 2;
var TaxonomyGroupEvent = 3;
var TaxonomyTeamEvent = 4;
var TaxonomyEventWithFile = 5;
// Page bootstrap for the summit event edit form: wires up date pickers,
// typeahead tag inputs, TinyMCE, validation, and the save/publish/unpublish
// buttons. Relies on globals injected by the server-rendered page:
// summit_begin_date, summit_end_date, summit_start_time, speakers, tags,
// sponsors, moderator, groups.
$(document).ready(function(){
    // Mirror the chosen attachment's bare file name into the visible text input.
    $(document).on('change', '.btn-file :file', function() {
        var input = $(this),
            label = input.val().replace(/\\/g, '/').replace(/.*\//, '');
        $('#attachment-filename').val(label);
    });
    $("#location").chosen();
    // Start/end pickers are both constrained to the summit's date window.
    $("#start_date").datetimepicker({
        format:'Y-m-d H:i:00',
        step:5,
        defaultDate:summit_begin_date,
        minDate:summit_begin_date,
        maxDate:summit_end_date,
        formatDate:'Y-m-d',
        defaultTime: summit_start_time,
        formatTime:'H:i:00',
    });
    $("#end_date").datetimepicker({
        format:'Y-m-d H:i:00',
        step:5,
        defaultDate:summit_begin_date,
        minDate:summit_begin_date,
        maxDate:summit_end_date,
        formatDate:'Y-m-d',
        defaultTime: summit_start_time,
        formatTime:'H:i:00',
    });
    // Toggled by the save/publish buttons so the validator knows whether
    // location/start/end become required (they are mandatory when publishing).
    var is_publishing = false;
    var summit_id = $('#summit_id').val();
    // Show/hide form sections and add/remove validation rules according to
    // the selected event type's taxonomy and data-* flags.
    $('#event_type').change(function(){
        var item = $(this).find(':selected');
        if(item.length == 0) return;
        var useSponsors = item.data('use-sponsors');
        var sponsorsMandatory = item.data('sponsors-mandatory');
        var type = item.data('type-taxonomy');
        if(useSponsors) $('.sponsors-container').show();
        else $('.sponsors-container').hide();
        if(sponsorsMandatory){
            $('#sponsors').rules('add',{ required : true});
        }
        else{
            $('#sponsors').rules("remove");
        }
        if(type == TaxonomyPresentation){
            var useSpeakers = item.data('use-speakers');
            var speakersMandatory = item.data('speakers-mandatory');
            if(useSpeakers) $('.speakers-container').show();
            else $('.speakers-container').hide();
            if(speakersMandatory){
                $('#speakers').rules('add',{ required : true});
            }
            else{
                $('#speakers').rules("remove");
            }
            var useModerator = item.data('use-moderator');
            var moderatorMandatory = item.data('moderator-mandatory');
            var moderatorLabel = item.data('moderator-label');
            if(useModerator) $('.moderator-container').show();
            else $('.moderator-container').hide();
            if(moderatorMandatory){
                $('#moderator').rules('add',{ required : true});
            }
            else{
                $('#moderator').rules("remove");
            }
            $('.moderator-label').text(moderatorLabel);
            $('.level_container').show();
            $('#expect_learn_container').show();
            $('.to_record_container').show();
            // only prepopulate on new
            if (!$('#event_id').val()) {
                $('#allow_feedback').attr("checked","checked");
            }
        }
        else{
            $('#expect_learn_container').hide();
            $('.level_container').hide();
            $('.moderator-container').hide();
            $('.speakers-container').hide();
            $('.to_record_container').hide();
            $('#moderator').rules("remove");
            $('#speakers').rules("remove");
            // only prepopulate on new
            if (!$('#event_id').val()) {
                $('#allow_feedback').removeAttr("checked");
            }
        }
        // NOTE(review): loose == here vs strict === for the same comparison in
        // the validator's groups rule below — confirm which is intended.
        if(type == TaxonomyGroupEvent ){
            $('.groups_container').show();
        }
        else{
            $('.groups_container').hide();
        }
        if(type == TaxonomyEventWithFile ){
            $('.attachment_container').show();
        }
        else{
            $('.attachment_container').hide();
            $('#attachment-filename').val('');
        }
    });
    // speakers autocomplete
    var speakers_source = new Bloodhound({
        datumTokenizer: Bloodhound.tokenizers.obj.whitespace('value'),
        queryTokenizer: Bloodhound.tokenizers.whitespace,
        remote: {
            url: 'api/v1/summits/'+summit_id+'/speakers/search?term=%QUERY',
            wildcard: '%QUERY'
        }
    });
    $('#speakers').tagsinput({
        itemValue: 'unique_id',
        itemText: 'name',
        freeInput: false,
        allowDuplicates: false,
        trimValue: true,
        // Tag carries a speaker_<id> class so the click handlers at the bottom
        // of this file can open the speaker's admin page.
        tagClass: function(item) {
            return 'label label-info speaker_' + item.speaker_id ;
        },
        typeaheadjs: [
            {
                hint: true,
                highlight: true,
                minLength: 1
            },
            {
                name: 'speakers_source',
                displayKey: 'name',
                source: speakers_source,
                limit: 20
            }
        ]
    });
    // Pre-populate from server-provided speakers and build a mailto: link
    // addressed to all of them.
    var speakers_emails = [];
    $.each(speakers, function(index, value) {
        $('#speakers').tagsinput('add', value);
        speakers_emails.push(value.email);
    });
    var email_href = $('#email-speakers').attr('href')+speakers_emails.join();
    // NOTE(review): '<EMAIL>' looks like a redacted placeholder for the real
    // cc address — restore it before shipping.
    email_href += '?cc=<EMAIL>';
    $('#email-speakers').attr('href',email_href);
    // NOTE(review): alert() on paste looks like leftover debug code — confirm.
    $("#speakers").bind("paste", function(e){
        // access the clipboard using the api
        var pastedData = e.originalEvent.clipboardData.getData('text');
        alert(pastedData);
    } );
    // tags autocomplete
    var tags_source = new Bloodhound({
        datumTokenizer: Bloodhound.tokenizers.obj.whitespace('value'),
        queryTokenizer: Bloodhound.tokenizers.whitespace,
        remote: {
            url: 'api/v1/summits/'+summit_id+'/tags?query=%QUERY',
            wildcard: '%QUERY'
        }
    });
    $('#tags').tagsinput({
        itemValue: 'id',
        itemText: 'name',
        freeInput: false,
        allowDuplicates: false,
        trimValue: true,
        typeaheadjs: [
            {
                hint: true,
                highlight: true,
                minLength: 3
            },
            {
                name: 'tags_source',
                displayKey: 'name',
                source: tags_source,
                limit: 20
            }
        ]
    });
    $.each(tags, function(index, value) {
        $('#tags').tagsinput('add', value);
    });
    // sponsors autocomplete
    var sponsors_source = new Bloodhound({
        datumTokenizer: Bloodhound.tokenizers.obj.whitespace('value'),
        queryTokenizer: Bloodhound.tokenizers.whitespace,
        remote: {
            url: 'api/v1/summits/'+summit_id+'/sponsors?query=%QUERY',
            wildcard: '%QUERY'
        }
    });
    $('#sponsors').tagsinput({
        itemValue: 'id',
        itemText: 'name',
        freeInput: false,
        allowDuplicates: false,
        trimValue: true,
        tagClass: 'label label-success',
        typeaheadjs: [
            {
                hint: true,
                highlight: true,
                minLength: 2
            },
            {
                name: 'sponsors_source',
                displayKey: 'name',
                source: sponsors_source,
                limit: 20
            }
        ]
    });
    $.each(sponsors, function(index, value) {
        $('#sponsors').tagsinput('add', value);
    });
    // moderator autocomplete (same speaker search endpoint; limited to 1 tag)
    var moderators_source = new Bloodhound({
        datumTokenizer: Bloodhound.tokenizers.obj.whitespace('value'),
        queryTokenizer: Bloodhound.tokenizers.whitespace,
        remote: {
            url: 'api/v1/summits/'+summit_id+'/speakers/search?term=%QUERY',
            wildcard: '%QUERY'
        }
    });
    $('#moderator').tagsinput({
        itemValue: 'unique_id',
        itemText: 'name',
        freeInput: false,
        maxTags: 1,
        trimValue: true,
        tagClass: function(item) {
            return 'label label-info speaker_' + item.speaker_id ;
        },
        typeaheadjs: [
            {
                hint: true,
                highlight: true,
                minLength: 3
            },
            {
                name: 'moderators_source',
                displayKey: 'name',
                source: moderators_source,
                limit: 20
            }
        ]
    });
    if (!$.isEmptyObject(moderator)) {
        $('#moderator').tagsinput('add', moderator);
    }
    // groups autocomplete
    var groups_source = new Bloodhound({
        datumTokenizer: Bloodhound.tokenizers.obj.whitespace('value'),
        queryTokenizer: Bloodhound.tokenizers.whitespace,
        remote: {
            url: 'api/v1/groups?query=%QUERY',
            wildcard: '%QUERY'
        }
    });
    $('#groups').tagsinput({
        itemValue: 'id',
        itemText: 'name',
        freeInput: false,
        allowDuplicates: false,
        trimValue: true,
        tagClass: 'label label-success',
        typeaheadjs: [
            {
                hint: true,
                highlight: true,
                minLength: 2
            },
            {
                name: 'groups_source',
                displayKey: 'name',
                source: groups_source,
                limit: 20
            }
        ]
    });
    $.each(groups, function(index, value) {
        $('#groups').tagsinput('add', value);
    });
    // Rich-text editors for abstract / expected-learnings textareas.
    tinymce.init({
        selector: "textarea.html_text",
        width: '99%',
        height: 150,
        plugins: [ "anchor link spellchecker" ],
        toolbar: "formatselect, fontselect, fontsizeselect, bold, italic, underline, alignleft, aligncenter, alignright, alignjustify, bullist, numlist, outdent, indent, blockquote, undo, redo, removeformat, link, spellchecker",
        statusbar: false,
        menubar: false,
    });
    var form = $('#edit-event-form');
    // Reject any href that is not absolute (must start with "http").
    jQuery.validator.addMethod("no_rel_urls", function(value, element) {
        return this.optional(element) || !(/(?=.*href\s*=\s*"(?!http))/.test(value));
    }, "We don't allow relative urls in the text.");
    //validation
    // Location and dates are only required when publishing (or when the event
    // is already published); other rules depend on the event-type taxonomy.
    form_validator = form.validate({
        onfocusout: false,
        focusCleanup: true,
        ignore: [],
        rules: {
            title: {required: true},
            abstract: {required: true, no_rel_urls: true},
            social_summary: {required: true , maxlength: 100},
            expect_learn: {no_rel_urls: true},
            rsvp_link: { url : true },
            headcount: { number: true },
            event_type: { required: true },
            level: { required: function(){
                var type = $('#event_type').find("option:selected").data('type-taxonomy');
                return type == TaxonomyPresentation;
            }},
            track: { required: function(){
                return true;
            }},
            location: { required: function(){
                var published = $('#published').val();
                return is_publishing || published;
            }},
            start_date: { required: function(){
                var published = $('#published').val();
                var end_date = $('#end_date').val();
                return is_publishing || published || end_date != '';
            }},
            end_date: { required: function(){
                var published = $('#published').val();
                var start_date = $('#start_date').val();
                return is_publishing || published || start_date != '';
            }},
            groups: { required: function(){
                var type = $('#event_type').find("option:selected").data('type-taxonomy');
                return type === TaxonomyGroupEvent;
            }},
        },
    });
    // Save without publishing: flush TinyMCE buffers into the textareas,
    // validate, then POST/PUT.
    $('#btn_save').click(function(evt){
        evt.preventDefault();
        form.find('textarea').each(function() {
            var text_area = $(this);
            var text_editor = tinyMCE.get(text_area.attr('id'));
            if (text_editor)
                text_area.val(text_editor.getContent());
        });
        is_publishing = false;
        if (!form.valid()) return false;
        form.find(':submit').attr('disabled','disabled');
        saveOrUpdate(is_publishing);
        return false;
    });
    // Save and publish: identical flow but with the stricter publish rules.
    $('#btn_publish').click(function(evt){
        evt.preventDefault();
        form.find('textarea').each(function() {
            var text_area = $(this);
            var text_editor = tinyMCE.get(text_area.attr('id'));
            if (text_editor)
                text_area.val(text_editor.getContent());
        });
        is_publishing = true;
        if (!form.valid()) return false;
        form.find(':submit').attr('disabled','disabled');
        saveOrUpdate(is_publishing);
        return false;
    });
    // Remove an already-published event from the schedule after confirmation.
    $('#btn_unpublish').click(function(evt)
    {
        evt.preventDefault();
        form.find(':submit').attr('disabled','disabled');
        var summit_id = $('#summit_id').val();
        var event_id = $('#event_id').val();
        var url = 'api/v1/summits/'+summit_id+'/events/'+event_id+'/unpublish';
        swal({
            title: "Are you sure?",
            text: "You will be unpublishing this event from current schedule!",
            type: "warning",
            showCancelButton: true,
            confirmButtonColor: "#DD6B55",
            confirmButtonText: "Yes, UnPublish it!",
            closeOnConfirm: true,
            allowEscapeKey: false
        }).then(function(isConfirm){
            if (isConfirm) {
                $.ajax({
                    type: 'DELETE',
                    url: url,
                    contentType: "application/json; charset=utf-8",
                    dataType: "json"
                })
                .done(function () {
                    swal("Unpublished!", "Your event was unpublished successfully.", "success");
                    location.reload();
                    form.find(':submit').removeAttr('disabled');
                })
                .fail(function (jqXHR) {
                    // 412 carries structured validation messages from the API.
                    var responseCode = jqXHR.status;
                    if (responseCode == 412) {
                        var response = $.parseJSON(jqXHR.responseText);
                        swal('Validation error', response.messages[0].message, 'warning');
                    }
                    else {
                        swal('Error', 'There was a problem saving the event, please contact admin.', 'warning');
                    }
                    form.find(':submit').removeAttr('disabled');
                });
                return;
            }
            swal("Cancelled", "", "error");
            form.find(':submit').removeAttr('disabled');
        }).catch(swal.noop);
        return false;
    });
    // Collects the form into a JSON payload and POSTs (new) or PUTs (existing)
    // it; on success, uploads the attachment if one was selected.
    function saveOrUpdate(publish)
    {
        var summit_id = $('#summit_id').val();
        var event_id = ($('#event_id').val()) ? $('#event_id').val() : 0;
        var url = 'api/v1/summits/'+summit_id+'/events';
        if(event_id) url += '/'+event_id
        var request = {
            title: $('#title').val(),
            rsvp_link: $('#rsvp_link').val(),
            headcount: $('#headcount').val(),
            abstract: tinyMCE.get('abstract').getContent(),
            social_summary:$('#social_summary').val(),
            expect_learn: tinyMCE.get('expect_learn').getContent(),
            location_id: $('#location').val(),
            start_date: $('#start_date').val(),
            end_date: $('#end_date').val(),
            event_type: $('#event_type').val(),
            level: $('#level').val(),
            track: $('#track').val(),
            allow_feedback: ($('#allow_feedback').prop('checked')) ? 1 : 0,
            tags: $('#tags').val(),
            sponsors: $('#sponsors').val(),
            speakers: $('#speakers').tagsinput('items'),
            moderator: $('#moderator').tagsinput('items')[0],
            groups: $('#groups').tagsinput('items'),
            publish: publish,
            to_record: ($('#to_record').prop('checked')) ? 1 : 0,
            attending_media: ($('#attending_media').prop('checked')) ? 1 : 0,
        };
        $.ajax({
            type: event_id ? 'PUT' : 'POST',
            url: url,
            data: JSON.stringify(request),
            contentType: "application/json; charset=utf-8",
            dataType: "json"
        }).done(function(saved_event) {
            if($('#attachment-filename').val()){
                // upload file
                uploadAttachment(event_id == 0, saved_event);
                return;
            }
            finishEventSaveOrUpdate(event_id == 0, saved_event);
        }).fail(function(jqXHR) {
            var responseCode = jqXHR.status;
            if(responseCode == 412) {
                var response = $.parseJSON(jqXHR.responseText);
                swal('Validation error', response.messages[0].message, 'warning');
            } else {
                swal('Error', 'There was a problem saving the event, please contact admin.', 'warning');
            }
            form.find(':submit').removeAttr('disabled');
        });
    }
    // Post-save UI: redirect into the new event's page, or reload on update.
    function finishEventSaveOrUpdate(newEvent, event){
        if (newEvent) {
            swal("Saved!", "Your event was created successfully.", "success");
            window.location = window.location+'/'+event.ID;
            $('#event_id').val(event.ID);
            $('.active','.breadcrumb').html(event.Title);
        } else {
            swal("Updated!", "Your event was updated successfully.", "success");
            location.reload();
        }
        form.find(':submit').removeAttr('disabled');
    }
    // Multipart upload of the selected attachment for an already-saved event,
    // then the normal post-save flow.
    function uploadAttachment(newEvent, event)
    {
        var summit_id = $('#summit_id').val();
        var url = 'api/v1/summits/'+summit_id+'/events/'+event.ID+'/attach';
        var file_data = $("#event-attachment").prop("files")[0];
        var form_data = new FormData();
        form_data.append("file", file_data);
        if ($('#attachment-filename').val()) {
            $.ajax({
                url: url,
                dataType: 'JSON',
                cache: false,
                contentType: false,
                processData: false,
                data: form_data,
                type: 'POST',
                success: function(attachment_id){
                    finishEventSaveOrUpdate(newEvent, event);
                },
                error: function(response,status,error) {
                    swal('Validation error', response.responseJSON.messages[0].message, 'warning');
                }
            });
        }
    }
    // Clicking a speaker tag opens that speaker's admin page in a new tab;
    // the speaker id is recovered from the tag's speaker_<id> class.
    $('.speakers-container').on('click', '.tag', function(){
        var speaker_class = $.grep(this.className.split(" "), function(v, i){
            return v.indexOf('speaker_') === 0;
        }).join();
        var speaker_id = speaker_class.split('speaker_')[1];
        var url = 'summit-admin/' + summit_id + '/speakers/' + speaker_id;
        console.log(url);
        window.open(url, '_blank');
    });
    // Same behaviour for the (single) moderator tag.
    $('.moderator-container').on('click', '.tag', function(){
        var speaker_class = $.grep(this.className.split(" "), function(v, i){
            return v.indexOf('speaker_') === 0;
        }).join();
        var speaker_id = speaker_class.split('speaker_')[1];
        var url = 'summit-admin/' + summit_id + '/speakers/' + speaker_id;
        console.log(url);
        window.open(url, '_blank');
    });
});
|
bedhubgrid/whitehall | config/initializers/friendly_id.rb | <gh_stars>100-1000
# Global FriendlyId defaults applied to every slugged model in the app.
FriendlyId.defaults do |config|
  # Build slugs from each model's +name+ attribute.
  config.base = :name
  # :slugged stores the slug, :finders allows lookup by slug, and
  # :sequentially_slugged de-duplicates by appending a sequence number.
  # CustomNormalise is this app's own normalisation module — presumably it
  # overrides how the slug text is cleaned; see its definition for specifics.
  config.use :slugged, :finders, :sequentially_slugged, FriendlyId::CustomNormalise
  # Separator placed between a slug and its de-duplicating sequence number.
  config.sequence_separator = "--"
end
|
marcial-lopezferrada-hs/vault-ctrl-tool | e2e/e2e_test.go | <reponame>marcial-lopezferrada-hs/vault-ctrl-tool
package e2e
import (
"context"
"encoding/json"
"io/ioutil"
"os"
"path"
"testing"
"time"
"github.com/golang/mock/gomock"
"github.com/hashicorp/vault/api"
mtrics "github.com/hootsuite/vault-ctrl-tool/v2/metrics"
"github.com/hootsuite/vault-ctrl-tool/v2/util/clock"
"github.com/stretchr/testify/assert"
testing2 "k8s.io/utils/clock/testing"
)
// TestSyncWithPinnedVersion ensures that when requesting a specific version of a secret in a config file cascades
// that request to Vault.
func TestSyncWithPinnedVersion(t *testing.T) {
	// Config pins version 3 of the secret; the mock below asserts Vault is
	// asked for exactly that version.
	fixture := setupSync(t, `
---
version: 3
secrets:
  - key: example
    path: path/in/vault
    missingOk: false
    mode: 0700
    pinnedVersion: 3
    output: example-output
    lifetime: static
`, []string{
		"--init",
		"--vault-token", "unit-test-token"})

	vaultToken := Secret(vaultTokenJSON)
	fixture.vaultClient.EXPECT().VerifyVaultToken(gomock.Any()).Return(vaultToken, nil).AnyTimes()
	fixture.vaultClient.EXPECT().ServiceSecretPrefix(gomock.Any()).Return("/prefix/")
	fixture.vaultClient.EXPECT().SetToken(gomock.Any()).AnyTimes()
	// ReadWithData is the versioned-read entry point; exactly one call expected.
	fixture.vaultClient.EXPECT().ReadWithData(gomock.Any(), gomock.Any()).DoAndReturn(
		func(path string, data map[string][]string) (*api.Secret, error) {
			// Expect a request for the absolute secret path of version 3.
			assert.Equal(t, "/prefix/path/in/vault", path)
			assert.Len(t, data, 1)
			assert.Equal(t, []string{"3"}, data["version"])
			response := Secret(exampleSecretJSON)
			return response, nil
		}).Times(1)

	// Frozen clock injected via context so expiry math is deterministic.
	fakeClock := testing2.NewFakeClock(time.Now())
	ctx := clock.Set(context.Background(), fakeClock)

	vtoken, err := fixture.syncer.GetVaultToken(ctx, *fixture.cliFlags)
	assert.NoError(t, err)

	err = fixture.syncer.PerformSync(ctx, vtoken, fakeClock.Now().AddDate(1, 0, 0), *fixture.cliFlags)
	assert.NoError(t, err)

	// The secret must be written to the configured output, counted once, and
	// no vault token file should have been produced.
	assert.FileExists(t, path.Join(fixture.workDir, "example-output"))
	assert.Equal(t, 1, fixture.metrics.Counter(mtrics.SecretUpdates))
	assert.Equal(t, 0, fixture.metrics.Counter(mtrics.VaultTokenWritten))
}
// TestSyncVersionScope - when a KVv2 secret gets a new version and it is at least 30 seconds old, the
// field associated with the secret must be updated.
func TestSyncVersionScope(t *testing.T) {
	const configBody = `---
version: 3
secrets:
  - key: example
    path: path/in/vault
    missingOk: false
    mode: 0700
    lifetime: version
    fields:
      - name: foo
        output: foo
`
	// Step 1: initial --init sync against v3 of the secret writes the "foo" field.
	sharedDir := t.TempDir()
	fixture1 := setupSyncWithDir(t, configBody, []string{"--init",
		"--vault-token", "unit-test-token"}, sharedDir)

	vaultToken := Secret(vaultTokenJSON)
	fixture1.vaultClient.EXPECT().VerifyVaultToken(gomock.Any()).Return(vaultToken, nil).AnyTimes()
	fixture1.vaultClient.EXPECT().ServiceSecretPrefix(gomock.Any()).Return("/prefix/")
	fixture1.vaultClient.EXPECT().SetToken(gomock.Any()).AnyTimes()
	fixture1.vaultClient.EXPECT().Read(gomock.Any()).DoAndReturn(
		func(path string) (*api.Secret, error) {
			assert.Equal(t, "/prefix/path/in/vault", path)
			response := Secret(exampleSecretJSON)
			return response, nil
		}).Times(1)

	fakeClock := testing2.NewFakeClock(time.Now())
	ctx := clock.Set(context.Background(), fakeClock)

	vtoken, err := fixture1.syncer.GetVaultToken(ctx, *fixture1.cliFlags)
	assert.NoError(t, err)

	err = fixture1.syncer.PerformSync(ctx, vtoken, fakeClock.Now().AddDate(1, 0, 0), *fixture1.cliFlags)
	assert.NoError(t, err)

	assert.FileExists(t, path.Join(fixture1.workDir, "foo"))
	foobytes, _ := ioutil.ReadFile(path.Join(fixture1.workDir, "foo"))
	assert.Equal(t, "aaaa", string(foobytes))
	assert.Equal(t, 1, fixture1.metrics.Counter(mtrics.SecretUpdates))
	assert.Equal(t, 0, fixture1.metrics.Counter(mtrics.VaultTokenWritten))

	// Step 2: sidecar one-shot sync over the same shared dir, with Vault now
	// serving a sufficiently old v4 of the secret.
	fixture2 := setupSyncWithDir(t, configBody, []string{"--sidecar", "--one-shot", "--vault-token", "unit-test-token"}, sharedDir)
	fixture2.vaultClient.EXPECT().VerifyVaultToken(gomock.Any()).Return(vaultToken, nil).AnyTimes()
	fixture2.vaultClient.EXPECT().ServiceSecretPrefix(gomock.Any()).Return("/prefix/")
	fixture2.vaultClient.EXPECT().SetToken(gomock.Any()).AnyTimes()
	fixture2.vaultClient.EXPECT().Read(gomock.Any()).DoAndReturn(
		func(path string) (*api.Secret, error) {
			assert.Equal(t, "/prefix/path/in/vault", path)
			// return "v4" of the secret
			response := Secret(exampleSecretV4JSON)
			return response, nil
		}).Times(1)

	vtoken, err = fixture2.syncer.GetVaultToken(ctx, *fixture2.cliFlags)
	assert.NoError(t, err)

	// BUG FIX: this sync previously ran with fixture1's CLI flags (--init);
	// the second invocation must use fixture2's own sidecar flags.
	err = fixture2.syncer.PerformSync(ctx, vtoken, fakeClock.Now().AddDate(1, 0, 0), *fixture2.cliFlags)
	assert.NoError(t, err)

	assert.FileExists(t, path.Join(fixture2.workDir, "foo"))
	foobytes, _ = ioutil.ReadFile(path.Join(fixture2.workDir, "foo"))

	// Since the secret is quite old, expect the field to be updated.
	assert.Equal(t, "aaaa2", string(foobytes))
	// BUG FIX: these assertions previously inspected fixture1's metrics (left
	// over from step 1); the second sync's counters live on fixture2.
	assert.Equal(t, 1, fixture2.metrics.Counter(mtrics.SecretUpdates))
	assert.Equal(t, 0, fixture2.metrics.Counter(mtrics.VaultTokenWritten))
}
// TestSyncVersionScopeWithFreshSecret - when a KVv2 secret gets a new version
// but it is not yet 30 seconds old, nothing should be updated.
// (Fixed: the comment previously named the wrong test.)
func TestSyncVersionScopeWithFreshSecret(t *testing.T) {
	const configBody = `---
version: 3
secrets:
  - key: example
    path: path/in/vault
    missingOk: false
    mode: 0700
    lifetime: version
    touchfile: test-touchfile
    fields:
      - name: foo
        output: foo
`
	// Step 1: There is nothing in the briefcase, so the field will be written.
	sharedDir := t.TempDir()
	fixture1 := setupSyncWithDir(t, configBody, []string{"--init",
		"--vault-token", "unit-test-token"}, sharedDir)

	vaultToken := Secret(vaultTokenJSON)
	fixture1.vaultClient.EXPECT().VerifyVaultToken(gomock.Any()).Return(vaultToken, nil).AnyTimes()
	fixture1.vaultClient.EXPECT().ServiceSecretPrefix(gomock.Any()).Return("/prefix/")
	fixture1.vaultClient.EXPECT().SetToken(gomock.Any()).AnyTimes()
	fixture1.vaultClient.EXPECT().Read(gomock.Any()).DoAndReturn(
		func(path string) (*api.Secret, error) {
			assert.Equal(t, "/prefix/path/in/vault", path)
			response := Secret(exampleSecretJSON)
			return response, nil
		}).Times(1)

	// This is 10 seconds after the time in exampleSecretFreshV4JSON
	fakeClock := testing2.NewFakeClock(time.Date(2019, 10, 2, 22, 52, 20, 0, time.UTC))
	ctx := clock.Set(context.Background(), fakeClock)

	vtoken, err := fixture1.syncer.GetVaultToken(ctx, *fixture1.cliFlags)
	assert.NoError(t, err)

	err = fixture1.syncer.PerformSync(ctx, vtoken, fakeClock.Now().AddDate(1, 0, 0), *fixture1.cliFlags)
	assert.NoError(t, err)

	assert.FileExists(t, path.Join(fixture1.workDir, "foo"))
	foobytes, _ := ioutil.ReadFile(path.Join(fixture1.workDir, "foo"))
	assert.Equal(t, "aaaa", string(foobytes))
	assert.Equal(t, 1, fixture1.metrics.Counter(mtrics.SecretUpdates))
	assert.Equal(t, 0, fixture1.metrics.Counter(mtrics.VaultTokenWritten))

	// Expect the "touchfile" to exist since the fields were written.
	assert.FileExists(t, path.Join(fixture1.workDir, "test-touchfile"))
	assert.NoError(t, os.Remove(path.Join(fixture1.workDir, "test-touchfile")))

	// Now, do this again, except with a new version of the secret in Vault
	fixture2 := setupSyncWithDir(t, configBody, []string{"--sidecar", "--one-shot", "--vault-token", "unit-test-token"}, sharedDir)
	fixture2.vaultClient.EXPECT().VerifyVaultToken(gomock.Any()).Return(vaultToken, nil).AnyTimes()
	fixture2.vaultClient.EXPECT().ServiceSecretPrefix(gomock.Any()).Return("/prefix/")
	fixture2.vaultClient.EXPECT().SetToken(gomock.Any()).AnyTimes()
	fixture2.vaultClient.EXPECT().Read(gomock.Any()).DoAndReturn(
		func(path string) (*api.Secret, error) {
			assert.Equal(t, "/prefix/path/in/vault", path)
			// return "v4" of the secret, but with a created_timestamp that isn't old enough.
			response := Secret(exampleSecretFreshV4JSON)
			return response, nil
		}).Times(1)

	vtoken, err = fixture2.syncer.GetVaultToken(ctx, *fixture2.cliFlags)
	assert.NoError(t, err)

	// BUG FIX: this sync previously ran with fixture1's CLI flags; use the
	// flags belonging to fixture2's sidecar invocation.
	err = fixture2.syncer.PerformSync(ctx, vtoken, fakeClock.Now().AddDate(1, 0, 0), *fixture2.cliFlags)
	assert.NoError(t, err)

	// Expect the touchfile to _not_ exist, since the fields were not updated.
	assert.NoFileExists(t, path.Join(fixture2.workDir, "test-touchfile"))
	assert.FileExists(t, path.Join(fixture2.workDir, "foo"))
	foobytes, _ = ioutil.ReadFile(path.Join(fixture2.workDir, "foo"))
	assert.Equal(t, "aaaa", string(foobytes))
	assert.Equal(t, 0, fixture2.metrics.Counter(mtrics.SecretUpdates))
	assert.Equal(t, 0, fixture2.metrics.Counter(mtrics.VaultTokenWritten))
}
// TestSyncWithEmptyConfig ensures that when a configuration file is empty, the service still runs, but doesn't
// actually do anything.
func TestSyncWithEmptyConfig(t *testing.T) {
	// Config declares only a version — no secrets or outputs.
	fixture := setupSync(t, `
---
version: 3
`, []string{"--vault-token", "unit-test-token",
		"--init"})
	fixture.vaultClient.EXPECT().Address().Return("unit-tests").AnyTimes()
	var secret api.Secret
	if err := json.Unmarshal([]byte(vaultTokenJSON), &secret); err != nil {
		t.Fatal(err)
	}
	fixture.vaultClient.EXPECT().VerifyVaultToken(gomock.Any()).Return(&secret, nil).AnyTimes()
	fixture.vaultClient.EXPECT().SetToken(gomock.Any()).AnyTimes()
	// Deliberately no Read() expectation is registered: with an empty
	// config, a sync must not fetch anything from Vault.
	fakeClock := testing2.NewFakeClock(time.Now())
	ctx := clock.Set(context.Background(), fakeClock)
	vtoken, err := fixture.syncer.GetVaultToken(ctx, *fixture.cliFlags)
	assert.NoError(t, err)
	err = fixture.syncer.PerformSync(ctx, vtoken, fakeClock.Now().AddDate(1, 0, 0), *fixture.cliFlags)
	assert.NoError(t, err)
	// Only the briefcase reset counter may tick; all other metrics stay 0.
	assert.Equal(t, 1, fixture.metrics.Counter(mtrics.BriefcaseReset))
	assert.Equal(t, 0, fixture.metrics.Counter(mtrics.VaultTokenWritten))
	assert.Equal(t, 0, fixture.metrics.Counter(mtrics.VaultTokenRefreshed))
	assert.Equal(t, 0, fixture.metrics.Counter(mtrics.SecretUpdates))
}
// TestBase64Field verifies that a secret field declared with
// `encoding: base64` has its value decoded before being written to the
// configured output file.
func TestBase64Field(t *testing.T) {
	fixture := setupSync(t, `
---
version: 3
secrets:
- key: example
  path: path/in/vault
  missingOk: false
  mode: 0700
  lifetime: static
  fields:
  - name: foo64
    output: foo-output.txt
    encoding: base64
`, []string{"--vault-token", "unit-test-token",
		"--init"})
	fixture.vaultClient.EXPECT().Address().Return("unit-tests").AnyTimes()
	var secret api.Secret
	if err := json.Unmarshal([]byte(vaultTokenJSON), &secret); err != nil {
		t.Fatal(err)
	}
	fixture.vaultClient.EXPECT().VerifyVaultToken(gomock.Any()).Return(&secret, nil).AnyTimes()
	fixture.vaultClient.EXPECT().ServiceSecretPrefix(gomock.Any()).Return("/prefix/")
	fixture.vaultClient.EXPECT().SetToken(gomock.Any()).AnyTimes()
	fixture.vaultClient.EXPECT().Read(gomock.Any()).DoAndReturn(
		func(path string) (*api.Secret, error) {
			assert.Equal(t, "/prefix/path/in/vault", path)
			response := Secret(exampleBase64SecretJSON)
			return response, nil
		}).Times(1)
	fakeClock := testing2.NewFakeClock(time.Now())
	ctx := clock.Set(context.Background(), fakeClock)
	vtoken, err := fixture.syncer.GetVaultToken(ctx, *fixture.cliFlags)
	assert.NoError(t, err)
	err = fixture.syncer.PerformSync(ctx, vtoken, fakeClock.Now().AddDate(1, 0, 0), *fixture.cliFlags)
	assert.NoError(t, err)
	outputFile := path.Join(fixture.workDir, "foo-output.txt")
	assert.FileExists(t, outputFile)
	foo64Bytes, err := ioutil.ReadFile(outputFile)
	// Fix: the read error was previously ignored; a failed read would have
	// surfaced as a confusing string-comparison failure instead.
	assert.NoError(t, err)
	assert.Equal(t, "Hello Hootsuite", string(foo64Bytes))
	assert.Equal(t, 1, fixture.metrics.Counter(mtrics.BriefcaseReset))
	assert.Equal(t, 1, fixture.metrics.Counter(mtrics.SecretUpdates))
	assert.Equal(t, 0, fixture.metrics.Counter(mtrics.VaultTokenWritten))
	assert.Equal(t, 0, fixture.metrics.Counter(mtrics.VaultTokenRefreshed))
}
|
andreasdr/tdme | src/net/drewke/tdme/engine/model/RotationOrder.java | <filename>src/net/drewke/tdme/engine/model/RotationOrder.java
package net.drewke.tdme.engine.model;
import net.drewke.tdme.math.Vector3;
/**
 * Rotation order of an Euler rotation.
 *
 * Each constant supplies the three base axes in application order, the
 * index of each axis within the rotation data, and an index mapping
 * for the X/Y/Z axes.
 * @author <NAME>
 * @version $Id$
 */
public enum RotationOrder {

	XYZ(new Vector3(1f,0f,0f), new Vector3(0f,1f,0f), new Vector3(0f,0f,1f), 0,1,2, 0,1,2),
	YZX(new Vector3(0f,1f,0f), new Vector3(0f,0f,1f), new Vector3(1f,0f,0f), 1,2,0, 2,0,1),
	ZYX(new Vector3(0f,0f,1f), new Vector3(0f,1f,0f), new Vector3(1f,0f,0f), 2,1,0, 2,1,0);

	// Fix: all fields made final — enum state must be immutable.
	private final Vector3 axis0;
	private final Vector3 axis1;
	private final Vector3 axis2;
	private final int axis0VectorIndex;
	private final int axis1VectorIndex;
	private final int axis2VectorIndex;
	private final int axisXIndex;
	private final int axisYIndex;
	private final int axisZIndex;

	/**
	 * Constructor
	 * @param axis0 first axis
	 * @param axis1 second axis
	 * @param axis2 third axis
	 * @param axis0VectorIndex axis 0 vector (data) index
	 * @param axis1VectorIndex axis 1 vector (data) index
	 * @param axis2VectorIndex axis 2 vector (data) index
	 * @param axisXIndex axis X index
	 * @param axisYIndex axis Y index
	 * @param axisZIndex axis Z index
	 */
	private RotationOrder(
		Vector3 axis0,
		Vector3 axis1,
		Vector3 axis2,
		int axis0VectorIndex,
		int axis1VectorIndex,
		int axis2VectorIndex,
		int axisXIndex,
		int axisYIndex,
		int axisZIndex) {
		this.axis0 = axis0;
		this.axis1 = axis1;
		this.axis2 = axis2;
		this.axis0VectorIndex = axis0VectorIndex;
		this.axis1VectorIndex = axis1VectorIndex;
		this.axis2VectorIndex = axis2VectorIndex;
		this.axisXIndex = axisXIndex;
		this.axisYIndex = axisYIndex;
		this.axisZIndex = axisZIndex;
	}

	/**
	 * @return axis 0
	 */
	public Vector3 getAxis0() {
		return axis0;
	}

	/**
	 * @return axis 1
	 */
	public Vector3 getAxis1() {
		return axis1;
	}

	/**
	 * @return axis 2
	 */
	public Vector3 getAxis2() {
		return axis2;
	}

	/**
	 * @return axis 0 vector index
	 */
	public int getAxis0VectorIndex() {
		return axis0VectorIndex;
	}

	/**
	 * @return axis 1 vector index
	 */
	public int getAxis1VectorIndex() {
		return axis1VectorIndex;
	}

	/**
	 * @return axis 2 vector index
	 */
	public int getAxis2VectorIndex() {
		return axis2VectorIndex;
	}

	/**
	 * @return axis x index
	 */
	public int getAxisXIndex() {
		return axisXIndex;
	}

	/**
	 * @return axis y index
	 */
	public int getAxisYIndex() {
		return axisYIndex;
	}

	/**
	 * @return axis z index
	 */
	public int getAxisZIndex() {
		return axisZIndex;
	}
}
|
chuckyQ/briefcase | src/briefcase/commands/__init__.py | <filename>src/briefcase/commands/__init__.py
from .build import BuildCommand # noqa
from .create import CreateCommand # noqa
from .dev import DevCommand # noqa
from .new import NewCommand # noqa
from .package import PackageCommand # noqa
from .publish import PublishCommand # noqa
from .run import RunCommand # noqa
from .update import UpdateCommand # noqa
from .upgrade import UpgradeCommand # noqa
|
latcoin/rippled | src/ripple/validators/impl/SourceURL.cpp | //------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2012, 2013 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
namespace ripple {
namespace Validators {
// Validators source that fetches its list from a fixed HTTP URL.
// Owns one HTTP client; instances are leak-checked.
class SourceURLImp
    : public SourceURL
    , public LeakChecked <SourceURLImp>
{
public:
    // Copies the URL and allocates a fresh HTTP client.
    explicit SourceURLImp (URL const& url)
        : m_url (url)
        , m_client (HTTPClientBase::New ())
    {
    }

    ~SourceURLImp ()
    {
    }

    // Human-readable name, suitable for logs.
    String name ()
    {
        return "URL: '" + m_url.toString() + "'";
    }

    // Stable identifier derived from the URL.
    String uniqueID ()
    {
        return "URL," + m_url.toString();
    }

    // Parameter string from which this source can be re-created.
    String createParam ()
    {
        return m_url.toString();
    }

    // Abort any HTTP request in flight.
    void cancel ()
    {
        m_client->cancel ();
    }

    // Perform an HTTP GET and parse each line of the response body into
    // `results`. On failure only logs; `results` is left untouched.
    void fetch (Results& results, Journal journal)
    {
        HTTPClientBase::result_type httpResult (m_client->get (m_url));

        // NOTE(review): `first == 0` is taken to mean success — confirm
        // against HTTPClientBase's error-code convention.
        if (httpResult.first == 0)
        {
            Utilities::ParseResultLine lineFunction (results, journal);
            std::string const s (httpResult.second->body().to_string());
            Utilities::processLines (s.begin(), s.end(), lineFunction);
        }
        else
        {
            journal.error <<
                "HTTP GET to " << m_url <<
                " failed: '" << httpResult.first.message () << "'";
        }
    }

private:
    URL m_url;
    ScopedPointer <HTTPClientBase> m_client;
};

//------------------------------------------------------------------------------

// Factory: returns a new URL-backed source; caller takes ownership.
SourceURL* SourceURL::New (
    URL const& url)
{
    return new SourceURLImp (url);
}
}
}
|
mikimaus78/ml_monorepo | trading-with-python/notebooks/trendy.py | import numpy as np
def movingaverage(interval, window_size=14, pad=False):
    """Simple moving average of ``interval`` via np.convolve.

    :param interval: one-dimensional sequence of values
    :param window_size: number of samples per averaging window
    :param pad: if True, overwrite the trailing ``window_size`` samples
        (where 'same'-mode convolution mixes in zero padding) with plain
        trailing means of the input
    :return: numpy array of averaged values
    """
    window = np.ones(int(window_size)) / float(window_size)
    ma = np.convolve(interval, window, 'same')
    # pad the end properly
    if pad:
        w = int(window_size)
        x = np.asarray(interval, dtype=float)
        n = len(ma)
        # Fix: the original sliced x[i - w:i], which wraps around via
        # negative indices (or yields an empty slice and a ZeroDivisionError)
        # when the input is shorter than ~2x the window. Clamp both the loop
        # start and the slice's lower bound at a safe minimum instead.
        for i in range(max(n - w, 1), n):
            seq = x[max(i - w, 0):i]
            ma[i] = seq.sum() / len(seq)
    return ma
def gentrends(x, window=1/3.0, charts=True):
    """
    Returns a Pandas dataframe with support and resistance lines.

    :param x: One-dimensional data set
    :param window: How long the trendlines should be. If window < 1, then it
                   will be taken as a percentage of the size of the data
    :param charts: Boolean value saying whether to print chart to screen
    :return: tuple of (trends DataFrame, max-line slope, min-line slope)
    """
    import numpy as np
    # Fix: `pandas.io.data` was removed from pandas long ago; DataFrame has
    # always lived in the top-level namespace.
    import pandas as pd

    x = np.array(x)

    if window < 1:
        window = int(window * len(x))

    max1 = np.where(x == max(x))[0][0]  # find the index of the abs max
    min1 = np.where(x == min(x))[0][0]  # find the index of the abs min

    # First the max
    if max1 + window > len(x):
        max2 = max(x[0:(max1 - window)])
    else:
        max2 = max(x[(max1 + window):])

    # Now the min
    if min1 - window < 0:
        min2 = min(x[(min1 + window):])
    else:
        min2 = min(x[0:(min1 - window)])

    # Now find the indices of the secondary extrema
    max2 = np.where(x == max2)[0][0]  # find the index of the 2nd max
    min2 = np.where(x == min2)[0][0]  # find the index of the 2nd min

    # Create & extend the lines
    maxslope = (x[max1] - x[max2]) / (max1 - max2)  # slope between max points
    minslope = (x[min1] - x[min2]) / (min1 - min2)  # slope between min points
    a_max = x[max1] - (maxslope * max1)  # y-intercept for max trendline
    a_min = x[min1] - (minslope * min1)  # y-intercept for min trendline
    b_max = x[max1] + (maxslope * (len(x) - max1))  # extend to last data pt
    b_min = x[min1] + (minslope * (len(x) - min1))  # extend to last data point
    maxline = np.linspace(a_max, b_max, len(x))  # Y values between max's
    minline = np.linspace(a_min, b_min, len(x))  # Y values between min's

    # OUTPUT
    trends = np.transpose(np.array((x, maxline, minline)))
    trends = pd.DataFrame(trends, index=np.arange(0, len(x)),
                          columns=['Data', 'Max Line', 'Min Line'])

    if charts is True:
        from matplotlib.pyplot import plot, grid, show
        plot(trends)
        grid()
        show()

    return trends, maxslope, minslope
def segtrends(x, segments=2, charts=True, momentum=False):
    """
    Turn minitrends to iterative process more easily adaptable to
    implementation in simple trading systems; allows backtesting functionality.

    :param x: One-dimensional data set
    :param segments: Number of equal-size segments to split the data into;
                     one support/resistance point is taken per segment
    :param charts: Boolean value saying whether to print chart to screen
    :param momentum: If True, scale consecutive same-direction orders up
                     logarithmically
    :return: tuple (x_maxima, maxima, x_minima, minima, order) where order
             holds a per-sample trade signal (positive = buy, negative = sell)
    """
    import numpy as np
    n = len(x)
    y = np.array(x)
    movy = movingaverage(y, 7)
    # Implement trendlines
    # Find the indexes of these maxima in the data
    segments = int(segments)
    maxima = np.ones(segments)
    minima = np.ones(segments)
    x_maxima = np.ones(segments)
    x_minima = np.ones(segments)
    segsize = int(len(y)/segments)
    # Per segment: record the extreme values and the indices (within the
    # full series) where they first occur.
    for i in range(1, segments+1):
        ind2 = i*segsize
        ind1 = ind2 - segsize
        seg = y[ind1:ind2]
        maxima[i-1] = max(seg)
        minima[i-1] = min(seg)
        x_maxima[i-1] = ind1 + (np.where(seg == maxima[i-1])[0][0])
        x_minima[i-1] = ind1 + (np.where(seg == minima[i-1])[0][0])
    if charts:
        import matplotlib.pyplot as plt
        plt.plot(y)
        plt.grid(True)
    # Trendlines between consecutive segment extrema. NOTE(review): the
    # computed maxline/minline are currently unused (plotting is commented
    # out) — the loop only has effect through its disabled chart code.
    for i in range(0, segments-1):
        maxslope = (maxima[i+1] - maxima[i]) / (x_maxima[i+1] - x_maxima[i])
        a_max = maxima[i] - (maxslope * x_maxima[i])
        b_max = maxima[i] + (maxslope * (len(y) - x_maxima[i]))
        maxline = np.linspace(a_max, b_max, len(y))
        minslope = (minima[i+1] - minima[i]) / (x_minima[i+1] - x_minima[i])
        a_min = minima[i] - (minslope * x_minima[i])
        b_min = minima[i] + (minslope * (len(y) - x_minima[i]))
        minline = np.linspace(a_min, b_min, len(y))
        if charts:
            #plt.plot(maxline, 'g')
            #plt.plot(minline, 'r')
            pass
    if charts:
        plt.plot(range(n), movy, 'b')
        plt.plot(x_maxima, maxima, 'g')
        plt.plot(x_minima, minima, 'r')
        plt.show()
    # generate order strategy
    order = np.zeros(n)
    last_buy = y[0]
    last_sale = y[0]
    for i in range(1,n):
        # get 2 latest support point y values prior to x
        pmin = list(minima[np.where(x_minima<=i)][-2:])
        pmax = list(maxima[np.where(x_maxima<=i)][-2:])
        # sell if support slop is negative
        min_sell = True if ((len(pmin)==2) and (pmin[1]-pmin[0])<0) else False
        max_sell = True if ((len(pmax)==2) and (pmax[1]-pmax[0])<0) else False
        # if support down, sell
        buy = -1 if (min_sell and max_sell) else 0
        # buy only if lower the moving average else sale
        # NOTE(review): each of the next two assignments overwrites `buy`
        # unconditionally, so the support-slope signal above never survives
        # to `order[i]` — this looks like a logic bug but the intended
        # precedence is unclear; preserved as-is.
        buy = 1 if ((buy == 0) and (y[i]<movy[i])) else -1
        # sell only if ...
        buy= -1 if ((buy == -1) and y[i]>last_buy) else 1
        buy_price_dec = y[i]<last_buy
        sale_price_dec = y[i]<last_sale
        order[i] = buy
        last_buy = y[i] if (buy==1) else last_buy
        last_sale = y[i] if (buy==-1) else last_sale
        import math
        if momentum:
            # add momentum for buy
            if (buy==1) and (order[i-1]>=1):
                #if buy_price_dec:
                order[i]=round(math.log(2*order[i-1])+1)
                #else:
                # order[i]=max(1, round(order[i-1]/2))
            # add momentum for sale
            elif (buy==-1) and (order[i-1]<=-1):
                #if sale_price_dec:
                order[i]*=round(math.log(abs(order[i-1]*2))+1)
                #else:
                # order[i]=max(1, round(order[i-1]/2))
    # OUTPUT
    return x_maxima, maxima, x_minima, minima, order
def minitrends(x, window=20, charts=True):
    """
    Turn minitrends to iterative process more easily adaptable to
    implementation in simple trading systems; allows backtesting functionality.

    :param x: One-dimensional data set
    :param window: How long the trendlines should be. If window < 1, then it
                   will be taken as a percentage of the size of the data
    :param charts: Boolean value saying whether to print chart to screen
    :return: tuple (xMax, yMax, xMin, yMin) — indices and values of the
             detected local maxima and minima
    """
    import numpy as np

    y = np.array(x)
    if window < 1:  # if window is given as fraction of data length
        window = float(window)
        window = int(window * len(y))
    x = np.arange(0, len(y))
    # Sign changes of the window-lagged difference mark candidate turning
    # points. NOTE(review): `crit` is shorter than `x` by window+1 samples,
    # yet is used to index `x` directly below — numpy only compares the
    # leading elements, so late turning points are silently dropped; verify
    # this is intended.
    dy = y[window:] - y[:-window]
    crit = dy[:-1] * dy[1:] < 0

    # Find whether max's or min's
    maxi = (y[x[crit]] - y[x[crit] + window] > 0) & \
           (y[x[crit]] - y[x[crit] - window] > 0) * 1
    mini = (y[x[crit]] - y[x[crit] + window] < 0) & \
           (y[x[crit]] - y[x[crit] - window] < 0) * 1
    maxi = maxi.astype(float)
    mini = mini.astype(float)
    # Mark non-extrema as NaN so they can be filtered out of the index sets.
    maxi[maxi == 0] = np.nan
    mini[mini == 0] = np.nan
    xmax = x[crit] * maxi
    xmax = xmax[~np.isnan(xmax)]
    xmax = xmax.astype(int)
    xmin = x[crit] * mini
    xmin = xmin[~np.isnan(xmin)]
    xmin = xmin.astype(int)

    # See if better max or min in region
    yMax = np.array([])
    xMax = np.array([])
    for i in xmax:
        indx = np.where(xmax == i)[0][0] + 1
        try:
            Y = y[i:xmax[indx]]
            yMax = np.append(yMax, Y.max())
            xMax = np.append(xMax, np.where(y == yMax[-1])[0][0])
        # NOTE(review): bare except is a deliberate best-effort — the last
        # region raises IndexError when `indx` runs past the end. Narrowing
        # to `except IndexError` would be safer; preserved as-is.
        except:
            pass
    yMin = np.array([])
    xMin = np.array([])
    for i in xmin:
        indx = np.where(xmin == i)[0][0] + 1
        try:
            Y = y[i:xmin[indx]]
            yMin = np.append(yMin, Y.min())
            xMin = np.append(xMin, np.where(y == yMin[-1])[0][0])
        except:
            pass
    # Ensure the series endpoints participate in the extrema sets.
    # NOTE(review): yMax[-1]/yMin[-1] raise IndexError when no extrema were
    # found (e.g. monotone input) — TODO confirm callers guarantee otherwise.
    if y[-1] > yMax[-1]:
        yMax = np.append(yMax, y[-1])
        xMax = np.append(xMax, x[-1])
    if y[0] not in yMax:
        yMax = np.insert(yMax, 0, y[0])
        xMax = np.insert(xMax, 0, x[0])
    if y[-1] < yMin[-1]:
        yMin = np.append(yMin, y[-1])
        xMin = np.append(xMin, x[-1])
    if y[0] not in yMin:
        yMin = np.insert(yMin, 0, y[0])
        xMin = np.insert(xMin, 0, x[0])

    # Plot results if desired
    if charts is True:
        from matplotlib.pyplot import plot, show, grid
        plot(x, y)
        plot(xMax, yMax, '-o')
        plot(xMin, yMin, '-o')
        grid(True)
        show()

    # Return arrays of critical points
    return xMax, yMax, xMin, yMin
def iterlines(x, window=30, charts=True):
    """
    Mark breakout points in a one-dimensional series.

    A sample receives signal +1 when it exceeds the maximum of the
    preceding ``window`` samples, -1 when it drops below their minimum,
    and 0 otherwise.

    :param x: One-dimensional data set
    :param window: Lookback length. If window < 1, it is interpreted as a
                   fraction of the data length
    :param charts: Boolean value saying whether to print chart to screen
    :return: numpy array of signals (+1 / -1 / 0) with the same length as x
    """
    import numpy as np

    data = np.array(x)
    size = len(data)
    if window < 1:
        window = int(window * size)

    signals = np.zeros(size, dtype=float)
    for idx in range(window, size):
        lookback = data[idx - window:idx]
        if data[idx] > max(lookback):
            signals[idx] = 1
        elif data[idx] < min(lookback):
            signals[idx] = -1

    sell_idx = np.where(signals == -1.0)[0]
    buy_idx = np.where(signals == 1.0)[0]

    if charts is True:
        from matplotlib.pyplot import plot, grid, show
        plot(data)
        plot(sell_idx, data[sell_idx], 'ro')
        plot(buy_idx, data[buy_idx], 'go')
        grid(True)
        show()

    return signals
|
Michal-Gagala/sympy | sympy/codegen/cutils.py | <reponame>Michal-Gagala/sympy<gh_stars>0
from sympy.printing.c import C99CodePrinter
def render_as_source_file(content, Printer=C99CodePrinter, settings=None):
    """Render ``content`` as a C source file, prepending the ``#include``
    directives required by the printer.

    :param content: expression/AST node to print
    :param Printer: code-printer class to instantiate (C99 by default)
    :param settings: optional dict of printer settings
    :return: the include block, a blank line, then the printed code
    """
    printer = Printer(settings or {})
    body = printer.doprint(content)
    include_lines = []
    for header in printer.headers:
        include_lines.append('#include <%s>' % header)
    return '\n'.join(include_lines) + '\n\n' + body
|
roadnarrows-robotics/rnr-sdk | Eudoxus/sw/openni/Modules/nimMockNodes/nimMockNodes.cpp | /****************************************************************************
* *
* OpenNI 1.x Alpha *
* Copyright (C) 2011 PrimeSense Ltd. *
* *
* This file is part of OpenNI. *
* *
* OpenNI is free software: you can redistribute it and/or modify *
* it under the terms of the GNU Lesser General Public License as published *
* by the Free Software Foundation, either version 3 of the License, or *
* (at your option) any later version. *
* *
* OpenNI is distributed in the hope that it will be useful, *
* but WITHOUT ANY WARRANTY; without even the implied warranty of *
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
* GNU Lesser General Public License for more details. *
* *
* You should have received a copy of the GNU Lesser General Public License *
* along with OpenNI. If not, see <http://www.gnu.org/licenses/>. *
* *
****************************************************************************/
//---------------------------------------------------------------------------
// Includes
//---------------------------------------------------------------------------
#include <XnModuleCppRegistratration.h>
#include "ExportedMockNodes.h"
//---------------------------------------------------------------------------
// Exporting
//---------------------------------------------------------------------------
XN_EXPORT_MODULE(Module)
XN_EXPORT_NODE(ExportedMockProductionNode, XN_NODE_TYPE_PRODUCTION_NODE)
XN_EXPORT_NODE(ExportedMockGenerator, XN_NODE_TYPE_GENERATOR)
XN_EXPORT_NODE(ExportedMockMapGenerator, XN_NODE_TYPE_MAP_GENERATOR)
XN_EXPORT_NODE(ExportedMockDevice, XN_NODE_TYPE_DEVICE)
XN_EXPORT_NODE(ExportedMockDepth, XN_NODE_TYPE_DEPTH)
XN_EXPORT_NODE(ExportedMockIR, XN_NODE_TYPE_IR)
XN_EXPORT_NODE(ExportedMockImage, XN_NODE_TYPE_IMAGE)
XN_EXPORT_NODE(ExportedMockAudio, XN_NODE_TYPE_AUDIO)
|
meshy/django-conman | tests/redirects/factories.py | <reponame>meshy/django-conman
import factory
from conman.redirects import models
from tests.routes.factories import ChildRouteFactory, RouteFactory
class ChildRouteRedirectFactory(ChildRouteFactory):
    """Create a RouteRedirect with a target to a Child Route."""

    # The redirect's destination is itself a freshly created child route.
    target = factory.SubFactory(ChildRouteFactory)

    class Meta:
        model = models.RouteRedirect
class URLRedirectFactory(RouteFactory):
    """Create a URLRedirect with a target url."""

    # Sequence yields unique URLs: https://example.com/0, /1, /2, ...
    target = factory.Sequence('https://example.com/{}'.format)

    class Meta:
        model = models.URLRedirect
|
yanqirenshi/SOGH | src/components/common/ProductBacklogHeader.js | import React from 'react';
import { Link } from "react-router-dom";
import { FontAwesomeIcon } from "@fortawesome/react-fontawesome";
import {
faWindowMinimize, faWindowMaximize,
} from "@fortawesome/free-solid-svg-icons";
import ANewTab from './ANewTab.js';
// Header bar for a product-backlog project panel: shows the project name
// (linked to the backlog page when an id exists), the project number as an
// external link opening in a new tab, and a minimize/maximize toggle.
export default function ProductBacklogHeader (props) {
    const project = props.project;
    const project_id = project.id();
    const project_name = project.name();
    const project_number = project.number();

    // Open/close callbacks toggle this project's panel by id.
    const clickClose = () => props.callbacks.projects.close(project_id);
    const clickOpen = () => props.callbacks.projects.open(project_id);

    // Header colour is driven by the project's priority.
    const style_header = {
        ...project.colorByPriority(),
        ...{fontSize:14, display: 'flex'}
    };

    const pb_to = props.productbacklog_url_prefix + project_id;

    return (
        <div className="panel-heading" style={style_header}>
          <div style={{flexGrow:1, display:'flex'}}>
            {project_id
             && <Link to={pb_to} style={{color: 'inherit'}}>
                  <p>{project_name || '@Project 未割り当て'}</p>
                </Link>}
            {!project_id
             && <p>{project_name || '@Project 未割り当て'}</p>}
            {project_number
             && (<p style={{marginLeft:11}}>
                   <ANewTab to={project.url()}>
                     ({project_number})
                   </ANewTab>
                 </p>)}
          </div>
          <div>
            {/* props.close truthy -> show maximize (panel can be reopened);
                otherwise show the minimize icon. */}
            {!props.close &&
             <FontAwesomeIcon icon={faWindowMinimize} onClick={clickClose} />}
            {props.close &&
             <FontAwesomeIcon icon={faWindowMaximize} onClick={clickOpen} />}
          </div>
        </div>
    );
}
|
tfisher1226/ARIES | aries/tx-manager/tx-manager-service/src/main/java/common/tx/service/participant/DurableTwoPhaseCommitParticipant.java | <reponame>tfisher1226/ARIES
package common.tx.service.participant;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.aries.tx.Durable2PCParticipant;
import org.aries.tx.ParticipantInternal;
import common.tx.exception.HeuristicCancelException;
import common.tx.exception.HeuristicHazardException;
import common.tx.exception.HeuristicMixedException;
import common.tx.exception.InvalidParticipantException;
import common.tx.exception.SystemCommunicationException;
import common.tx.exception.SystemException;
import common.tx.exception.WrongStateException;
import common.tx.vote.Prepared;
import common.tx.vote.ReadOnly;
import common.tx.vote.Vote;
import common.tx.vote.VoteCancel;
import common.tx.vote.VoteConfirm;
import common.tx.vote.VoteReadOnly;
/**
 * Adapts a {@link Durable2PCParticipant} to the internal participant
 * protocol, translating its votes into internal Vote types and mapping
 * communication failures onto {@link SystemCommunicationException}.
 *
 * State flags: {@code readonly} is set when the participant votes
 * read-only (commit is then skipped); {@code rolledback} is set when it
 * already rolled back during prepare (rollback is then skipped).
 */
public class DurableTwoPhaseCommitParticipant implements ParticipantInternal {

	private static Log log = LogFactory.getLog(DurableTwoPhaseCommitParticipant.class);

	private Durable2PCParticipant participant;
	private String coordinatorId;
	private boolean readonly;
	private boolean rolledback;

	// default ctor for crash recovery
	public DurableTwoPhaseCommitParticipant() {
		//nothing for now
	}

	public DurableTwoPhaseCommitParticipant(Durable2PCParticipant participant, String coordinatorId) {
		this.participant = participant;
		this.coordinatorId = coordinatorId;
	}

	/** @return the coordinator id this participant is registered under */
	public String id() throws SystemException {
		return coordinatorId;
	}

	/**
	 * Phase one: ask the wrapped participant to prepare.
	 * ReadOnly -> VoteReadOnly (and commit will be skipped);
	 * Prepared -> VoteConfirm; anything else -> VoteCancel (the
	 * participant is treated as already rolled back). A null participant
	 * also yields VoteCancel.
	 */
	public Vote prepare() throws InvalidParticipantException, WrongStateException, HeuristicHazardException, HeuristicMixedException, SystemException {
		try {
			if (participant != null) {
				Vote vt = participant.prepare();
				if (vt instanceof ReadOnly) {
					readonly = true;
					return new VoteReadOnly();
				} else {
					if (vt instanceof Prepared) {
						return new VoteConfirm();
					} else {
						// Treat any other vote as a rollback decision.
						rolledback = true;
						return new VoteCancel();
					}
				}
			} else
				return new VoteCancel();
		} catch (WrongStateException e) {
			throw new SystemException(e);
		}
		/*
		 * catch (com.arjuna.mw.wst.exceptions.HeuristicHazardException ex {
		 * throw new HeuristicHazardException(ex.toString()); } catch
		 * (com.arjuna.mw.wst.exceptions.HeuristicMixedException ex) { throw new
		 * HeuristicMixedException(ex.toString()); }
		 */
		catch (SystemException e) {
			if (e instanceof SystemCommunicationException) {
				// log an error here or else the participant may be left hanging
				// waiting for a prepare
				String message = "Timeout attempting to prepare transaction participant: "+coordinatorId;
				log.error(message);
				throw new SystemCommunicationException(message);
			} else {
				throw new SystemException(e);
			}
		}
	}

	/**
	 * attempt to commit the participant
	 *
	 * Skipped entirely if the participant voted read-only during prepare.
	 */
	public void confirm() throws SystemException {
		if (participant != null) {
			try {
				if (!readonly) {
					participant.commit();
				}
			} catch (WrongStateException e) {
				throw new SystemException(e);
			}
			/*
			 * catch (com.arjuna.mw.wst.exceptions.HeuristicHazardException ex) {
			 * throw new HeuristicHazardException(ex.toString()); } catch
			 * (com.arjuna.mw.wst.exceptions.HeuristicMixedException ex) { throw
			 * new HeuristicMixedException(ex.toString()); } catch
			 * (com.arjuna.mw.wst.exceptions.HeuristicRollbackException ex) {
			 * throw new HeuristicCancelException(ex.toString()); }
			 */
			catch (SystemException e) {
				if (e instanceof SystemCommunicationException) {
					// log an error here -- we will end up writing a heuristic transaction record too
					log.error("Timeout attempting to commit transaction participant: "+coordinatorId);
					throw new SystemCommunicationException(e.toString());
				}
				throw new SystemException(e);
			}
		} else
			throw new SystemException("Invalid participant");
	}

	/**
	 * Roll the participant back, unless it already rolled back during
	 * prepare ({@code rolledback} set).
	 */
	public void cancel() throws SystemException {
		if (participant != null) {
			try {
				if (!rolledback)
					participant.rollback();
			} catch (WrongStateException e) {
				throw new SystemException(e);
			}
			/*
			 * catch (com.arjuna.mw.wst.exceptions.HeuristicHazardException ex) {
			 * throw new HeuristicHazardException(ex.toString()); } catch
			 * (com.arjuna.mw.wst.exceptions.HeuristicMixedException ex) { throw
			 * new HeuristicMixedException(ex.toString()); } catch
			 * (com.arjuna.mw.wst.exceptions.HeuristicCommitException ex) {
			 * throw new HeuristicConfirmException(ex.toString()); }
			 */
			catch (SystemException e) {
				if (e instanceof SystemCommunicationException) {
					// log an error here -- if the participant is dead it will retry anyway
					log.error("Timeout attempting to cancel transaction participant: "+coordinatorId);
					throw new SystemCommunicationException(e.toString());
				} else {
					throw new SystemException(e);
				}
			}
		}
		else
			throw new SystemException("Invalid participant");
	}

	/**
	 * One-phase commit optimization: run prepare and then commit (or
	 * cancel) locally without a separate coordinator round-trip.
	 */
	public void confirmOnePhase() throws SystemException {
		if (participant != null) {
			Vote v = null;
			try {
				v = prepare();
			} catch (Exception e) {
				// either the prepare timed out or the participant was invalid or in an
				// invalid state
				log.error("Error", e);
				v = new VoteCancel();
			}
			if (v instanceof VoteReadOnly) {
				readonly = true;
			} else if (v instanceof VoteCancel) {
				// NOTE(review): clearing `rolledback` here forces cancel() to
				// call rollback() even when prepare() already rolled back —
				// the TODO below suggests this is known to be questionable.
				rolledback = false;
				// TODO only do this if we didn't return VoteCancel
				try {
					cancel();
				} catch (SystemCommunicationException e) {
					// if the rollback times out as well as the prepare we
					// return an exception which indicates a failed transaction
				}
				throw new SystemException("Participant cancelled");
			} else {
				if (v instanceof VoteConfirm) {
					try {
						confirm();
					} catch (HeuristicHazardException e) {
						throw e;
					} catch (HeuristicMixedException e) {
						throw e;
					} catch (HeuristicCancelException e) {
						throw e;
					} catch (Exception e) {
						throw new HeuristicHazardException();
					}
				} else {
					// Unknown vote type: best effort rollback, then report a
					// heuristic hazard.
					cancel(); // TODO error
					throw new HeuristicHazardException();
				}
			}
		} else
			throw new SystemException("Invalid participant");
	}

	/** Nothing to forget for this participant type. */
	public void forget() throws SystemException {
	}

	public void unknown() throws SystemException {
		/*
		 * If the transaction is unknown, then we assume it rolled back.
		 */
		try {
			cancel();
		} catch (Exception e) {
			// TODO
		}
	}

//	public boolean save_state (OutputObjectState os) {
//		return PersistableParticipantHelper.save_state(os, _resource) ;
//	}
//
//	public boolean restore_state (InputObjectState os) {
//		_resource = (Durable2PCParticipant) PersistableParticipantHelper.restore_state(os) ;
//		return true ;
//	}

}
|
vbohinc/qpid-java | broker-plugins/management-http/src/main/java/resources/js/qpid/management/accesscontrolprovider/aclfile/add.js | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
define(["dojo/dom","dojo/query","dijit/registry","qpid/common/util"],
    function (dom, query, registry, util)
    {
        // Add-form controller for an ACL-file access control provider.
        // The ACL can be given as a server-side path or, on browsers with
        // FileReader support, uploaded from the local machine (stored as a
        // data URL in the single submitted "path" field).
        var addACLFileAccessControlProvider =
        {
            init: function()
            {
                // Readers are HTML5
                this.reader = window.FileReader ? new FileReader() : undefined;
            },
            show: function(data)
            {
                var that=this;
                util.parseHtmlIntoDiv(data.containerNode, "accesscontrolprovider/aclfile/add.html", function(){that._postParse(data);});
            },
            // Wire up widgets after the form HTML fragment has been parsed.
            _postParse: function(data)
            {
                var that=this;
                this.aclServerPath = registry.byId("addAccessControlProvider.serverPath");
                this.aclUploadFields = dom.byId("addAccessControlProvider.uploadFields");
                this.aclSelectedFileContainer = dom.byId("addAccessControlProvider.selectedFile");
                this.aclSelectedFileStatusContainer = dom.byId("addAccessControlProvider.selectedFileStatus");
                this.aclFile = registry.byId("addAccessControlProvider.file");
                this.aclFileClearButton = registry.byId("addAccessControlProvider.fileClearButton");
                this.aclFileOldBrowserWarning = dom.byId("addAccessControlProvider.oldBrowserWarning");

                //Only submitted field
                this.aclPath = registry.byId("addAccessControlProvider.path");
                this.addButton = data.parent.addButton;

                if (this.reader)
                {
                    this.reader.onload = function(evt) {that._aclUploadFileComplete(evt);};
                    this.reader.onerror = function(ex) {console.error("Failed to load ACL file", ex);};
                    this.aclFile.on("change", function(selected){that._aclFileChanged(selected)});
                    this.aclFileClearButton.on("click", function(event){that._aclFileClearButtonClicked(event)});
                }
                else
                {
                    // Fall back for IE8/9 which do not support FileReader
                    this.aclUploadFields.style.display = "none";
                    this.aclFileOldBrowserWarning.innerHTML = "File upload requires a more recent browser with HTML5 support";
                    this.aclFileOldBrowserWarning.className = this.aclFileOldBrowserWarning.className.replace("hidden", "");
                }
                this.aclServerPath.on("blur", function(){that._aclServerPathChanged()});
            },
            // A local file was picked: disable Add until the read completes.
            _aclFileChanged: function (evt)
            {
                // We only ever expect a single file
                var file = this.aclFile.domNode.children[0].files[0];
                this.addButton.setDisabled(true);
                this.aclSelectedFileContainer.innerHTML = file.name;
                this.aclSelectedFileStatusContainer.className = "loadingIcon";
                console.log("Beginning to read ACL file " + file.name);
                this.reader.readAsDataURL(file);
            },
            // File read finished: store the data URL in the submitted field
            // and lock out the server-path alternative.
            _aclUploadFileComplete: function(evt)
            {
                var reader = evt.target;
                var result = reader.result;
                console.log("ACL file read complete, contents " + result);
                this.addButton.setDisabled(false);
                this.aclSelectedFileStatusContainer.className = "loadedIcon";

                this.aclServerPath.set("value", "");
                this.aclServerPath.setDisabled(true);
                this.aclServerPath.set("required", false);
                this.aclFileClearButton.setDisabled(false);
                this.aclPath.set("value", result);
            },
            // Undo a file selection and re-enable the server-path input.
            _aclFileClearButtonClicked: function(event)
            {
                this.aclFile.reset();
                this.aclSelectedFileStatusContainer.className = "";
                this.aclSelectedFileContainer.innerHTML = "";

                this.aclServerPath.set("required", true);
                this.aclServerPath.setDisabled(false);
                this.aclFileClearButton.setDisabled(true);
                this.aclPath.set("value", "");
            },
            // Mirror the server path into the single submitted field.
            _aclServerPathChanged: function()
            {
                var serverPathValue = this.aclServerPath.get("value");
                this.aclPath.set("value", serverPathValue);
            }
        };

        try
        {
            addACLFileAccessControlProvider.init();
        }
        catch(e)
        {
            console.warn(e);
        }
        return addACLFileAccessControlProvider;
    }
);
|
HappyTramp/libft | src/str/ft_strjoin3.c | /* ************************************************************************** */
/* */
/* ::: :::::::: */
/* ft_strjoin3.c :+: :+: :+: */
/* +:+ +:+ +:+ */
/* By: charles <<EMAIL>> +#+ +:+ +#+ */
/* +#+#+#+#+#+ +#+ */
/* Created: 2020/04/01 18:00:49 by charles #+# #+# */
/* Updated: 2020/04/01 18:01:43 by charles ### ########.fr */
/* */
/* ************************************************************************** */
#include "libft.h"
/*
** \brief Join 3 strings in a new malloc'd one
** \param s1 String 1
** \param s2 String 2
** \param s3 String 3
** \return The joined string
*/
char	*ft_strjoin3(char const *s1, char const *s2, char const *s3)
{
	size_t	total_len;
	char	*joined;

	if (s1 == NULL || s2 == NULL || s3 == NULL)
		return (NULL);
	total_len = ft_strlen(s1) + ft_strlen(s2) + ft_strlen(s3);
	joined = (char*)malloc(sizeof(char) * (total_len + 1));
	if (joined == NULL)
		return (NULL);
	ft_strcpy(joined, s1);
	ft_strcat(joined, s2);
	ft_strcat(joined, s3);
	return (joined);
}
|
SAFE-anwang/SafeWallet-android | telegram/TMessagesProj/jni/voip/tgcalls/v2_4_0_0/Signaling_4_0_0.h | #ifndef TGCALLS_SIGNALING_4_0_0_H
#define TGCALLS_SIGNALING_4_0_0_H
#include <string>
#include <vector>
#include "absl/types/variant.h"
#include "absl/types/optional.h"
#include "api/rtp_parameters.h"
namespace tgcalls {
namespace signaling_4_0_0 {

// DTLS fingerprint exchanged while negotiating the call transport.
struct DtlsFingerprint {
    std::string hash;
    std::string setup;
    std::string fingerprint;
};

// A network endpoint: IP address and port.
struct ConnectionAddress {
    std::string ip;
    int port = 0;
};

// One ICE candidate, carried as its raw SDP string.
struct IceCandidate {
    std::string sdpString;
};

// A group of SSRCs plus the semantics string binding them together.
// NOTE(review): values presumably follow SDP ssrc-group usage (e.g. "FID") —
// confirm against the serializer in the matching .cpp.
struct SsrcGroup {
    std::vector<uint32_t> ssrcs;
    std::string semantics;
};

// An RTCP feedback mechanism advertised for a payload type.
struct FeedbackType {
    std::string type;
    std::string subtype;
};

// Description of one RTP payload type (codec) offered for the call.
struct PayloadType {
    uint32_t id = 0;
    std::string name;
    uint32_t clockrate = 0;
    uint32_t channels = 0;
    std::vector<FeedbackType> feedbackTypes;
    std::vector<std::pair<std::string, std::string>> parameters;
};

// Media description for one stream: SSRC(s), codecs, and RTP header extensions.
struct MediaContent {
    uint32_t ssrc = 0;
    std::vector<SsrcGroup> ssrcGroups;
    std::vector<PayloadType> payloadTypes;
    std::vector<webrtc::RtpExtension> rtpExtensions;
};

// First signaling message of a call: ICE credentials, DTLS fingerprints,
// and the media sections the sender offers (absent sections are not offered).
struct InitialSetupMessage {
    std::string ufrag;
    std::string pwd;
    std::vector<DtlsFingerprint> fingerprints;
    absl::optional<MediaContent> audio;
    absl::optional<MediaContent> video;
    absl::optional<MediaContent> screencast;
};

// A batch of ICE candidates discovered after the initial setup.
struct CandidatesMessage {
    std::vector<IceCandidate> iceCandidates;
};

// The remote peer's current media state (mute, video/screencast activity,
// rotation, battery hint).
struct MediaStateMessage {
    enum class VideoState {
        Inactive,
        Suspended,
        Active
    };

    enum class VideoRotation {
        Rotation0,
        Rotation90,
        Rotation180,
        Rotation270
    };

    bool isMuted = false;
    VideoState videoState = VideoState::Inactive;
    VideoRotation videoRotation = VideoRotation::Rotation0;
    VideoState screencastState = VideoState::Inactive;
    bool isBatteryLow = false;
};

// Envelope for any signaling payload travelling over the signaling channel.
struct Message {
    absl::variant<
        InitialSetupMessage,
        CandidatesMessage,
        MediaStateMessage> data;

    // Encodes this message to bytes (defined in the corresponding .cpp).
    std::vector<uint8_t> serialize() const;
    // Decodes bytes back into a Message; the optional signature implies
    // parse failure is representable (returns absl::nullopt).
    static absl::optional<Message> parse(const std::vector<uint8_t> &data);
};

} // namespace signaling_4_0_0
} // namespace tgcalls
#endif |
maginbit/jslearn219 | variable_009/app.js | // hay 3 formas de ceclarar variables var - let -const
// var = the old (pre-ES6) declaration keyword
// let / const = the modern keywords
var nombre = 'jose'; // strings may use double or single quotes: "" or ''
nombre = 'maria'; // overwrites the first value
var nombre = "Juan"; // re-declaring with var simply overwrites again
console.log(nombre);
/*
undefined example:
var carrito;
console.log(carrito);
*/
// declaring a variable without a value leaves it undefined
var carrito;
// declare and initialize several variables in one statement
var carrito = 'libro',
    carrito1= 'libro2',
    carrito2= 'libro3';
console.log(carrito2);
// variable naming rules
var producto;
//var 2019producto; AN IDENTIFIER CANNOT START WITH A DIGIT OR SPECIAL CHARACTER
var x=230,
    z=40;
var sum=x+350+z*3;
console.log(sum);
console.log('......................');
var x = 0;
var j = '';
var h = '';
var k=1;
// Build the multiplication tables 0..12 and render them into the #app element.
// NOTE(review): the concatenated string below ends in a bare '</' — the
// closing tag looks truncated (probably meant '</p>'), so the generated HTML
// is malformed and ids repeat per row. Confirm intent before changing.
while(x<13){
    for (var i = 1; i < 11; i++) {
        console.log(x+'*'+i+'='+x*i);
        //document.getElementById('app').innerHTML = '<li id="a'+x+'">'+x+'</li>'
        j=j+'<td><p id="a'+x+'">'+x+'*'+i+'='+x*i+' | </p></';
    }
    h=h+j+' </td>';
    j='';
    x++;
}
console.log(j);
document.getElementById('app').innerHTML = h;
console.log('...............');
// this is a Template String (backticks)
var saludo = `¡Hola Mundo!`;
// this is a normal string with single quotes
var saludo = '¡Hola Mundo!';
// this is a normal string with double quotes
var saludo = "¡Hola Mundo!";
// simple string interpolation
var nombre = "Juan";
console.log(`¡Hola ${nombre}!`);
// result => "¡Hola Juan!"
var a = 10;
var b = 10;
//console.log(`¡JavaScript se publicó hace ${a+b} años!`);
// result => ¡JavaScript se publicó hace 20 años!
//console.log(`Existen ${2 * (a + b)} frameworks JavaScript y no ${10 * (a + b)}.`);
// result => Existen 40 frameworks JavaScript y no 2000.
var usuario = {
    nombre: '<NAME>',
    edad:25,
    correo:'<EMAIL>',
    fono:'555555',
    direccion:'Las Plameras # 4232'
};
//console.log(`Estás conectado como ${usuario.nombre.toUpperCase()}.`);
// result => "Estás conectado como JUAN PEREZ.";
var divisa = 'Euro';
//console.log(`Los precios se indican en ${divisa}. Utiliza nuestro conversor para convertir ${divisa} en tu moneda local.`);
// result => Los precios se indican en Euro. Utiliza nuestro conversor para convertir Euro en tu moneda local.
var largo = Object.keys(usuario).length;
//var size = Object.size(usuario);
//console.log(largo);
console.log(usuario.fono);
// iterate over the object's keys, printing each property value
for(key in usuario){
    console.log(usuario[key]);
}
console.log('.................');
const fruits = {
    apple: 28,
    orange: 17,
    pear: 54,
}
// Object.keys returns the property names as an array
const keys = Object.keys(fruits)
console.log(keys) // [apple, orange, pear]
console.log('..........................');
const fruits2 = {
    apple: 28,
    orange: 17,
    pear: 54,
}
// Object.values returns the property values as an array
const values2 = Object.values(fruits2)
console.log(values2) // [28, 17, 54]
console.log('..........................');
var autos = [
    'nissan',
    'ford',
    'toyota',
    'reno',
    'kia',
    'hyndai'
];
console.log(autos.length);
// forEach passes (element, index) to the callback for every entry
autos.forEach( function(element, index) {
    // statements
    console.log(index,element);
});
// classic index-based iteration over the same array
for (var i = 0; i < autos.length; i++) {
    console.log(autos[i]);
}
OpenWebGlobe/Application-SDK | source/og-core/io/FileSystemWriterDisk.h |
#ifndef _FILESYSTEMWRITERDISK_H_
#define _FILESYSTEMWRITERDISK_H_
#include "og.h"
#include <string>
#include <exception>
#include <boost/shared_ptr.hpp>
#include "IFileSystemWriter.h"
//! \brief File System for writing from hard disk-like file hierarchy.
//! Abstraction for writing files to a hard disk-like file hierarchy.
//! \author <NAME>, <EMAIL>
//! \ingroup filesystem
class OPENGLOBE_API FileSystemWriterDisk : public IFileSystemWriter
{
public:
   //! \brief Create a writer rooted at the given directory.
   //! \param sRoot Base path that relative paths are resolved against.
   FileSystemWriterDisk(const std::string& sRoot);
   virtual ~FileSystemWriterDisk();
   //! \brief Write a data buffer to a file below the root directory.
   //! \param sRelativePath Target path relative to the root given at construction.
   //! \param data Buffer holding the bytes to write.
   //! \param data_size Number of bytes from the buffer to write.
   //! \return Success flag — presumably true on success; confirm in the implementation.
   virtual bool WriteData(const std::string& sRelativePath, const boost::shared_array<unsigned char>& data, const size_t& data_size);
   /*!
    * Closes the output stream after writing.
    * \throw Exception if an error occurs.
    */
   virtual void CloseOutputStream();
protected:
   std::string _sRoot;   //!< Root directory all relative paths are resolved against.
};
#endif |
fabiojna02/OpenCellular | firmware/coreboot/src/include/cpu/amd/amdfam10_sysconf.h | /*
* This file is part of the coreboot project.
*
* Copyright (C) 2007 Advanced Micro Devices, Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef AMDFAM10_SYSCONF_H
#define AMDFAM10_SYSCONF_H
#include "northbridge/amd/amdfam10/nums.h"
#include <cpu/x86/msr.h>
/* Descriptor for one CPU performance state (P-state).
 * NOTE(review): the field set mirrors an ACPI _PSS package entry
 * (frequency, power, latencies, control/status values) -- confirm
 * against the code that fills and consumes these entries. */
struct p_state_t {
	unsigned int corefreq;       /* core frequency */
	unsigned int power;          /* power consumption */
	unsigned int transition_lat; /* transition latency */
	unsigned int busmaster_lat;  /* bus-master latency */
	unsigned int control;        /* control value */
	unsigned int status;         /* status value */
};
/* Aggregated system configuration discovered during AMD Family 10h
 * initialization; a single global instance (sysconf, declared below)
 * is shared between northbridge and mainboard code. */
struct amdfam10_sysconf_t {
	//ht
	unsigned int hc_possible_num;
	unsigned int pci1234[HC_POSSIBLE_NUM];
	unsigned int hcdn[HC_POSSIBLE_NUM];
	unsigned int hcid[HC_POSSIBLE_NUM]; //record ht chain type
	/* NOTE(review): sbdn/sblk presumably the southbridge device number
	 * and link -- confirm against get_sblk_pci1234(). */
	unsigned int sbdn;
	unsigned int sblk;
	unsigned int nodes; /* number of CPU nodes */
	unsigned int ht_c_num; // we only can have 32 ht chain at most
	// 4-->32: 4:segn, 8:bus_max, 8:bus_min, 4:linkn, 6: nodeid, 2: enable
	unsigned int ht_c_conf_bus[HC_NUMS];
	unsigned int io_addr_num;
	unsigned int conf_io_addr[HC_NUMS];
	unsigned int conf_io_addrx[HC_NUMS];
	unsigned int mmio_addr_num;
	unsigned int conf_mmio_addr[HC_NUMS*2]; // mem and pref mem
	unsigned int conf_mmio_addrx[HC_NUMS*2];
	unsigned int segbit;
	unsigned int hcdn_reg[HC_NUMS]; // it will be used by get_pci1234
	// quad cores all cores in one node should be the same, and p0,..p5
	msr_t msr_pstate[NODE_NUMS * 5];
	unsigned int needs_update_pstate_msrs; /* nonzero if P-state MSRs still need programming */
	unsigned int bsp_apicid; /* APIC ID of the boot-strap processor */
	int enabled_apic_ext_id;
	unsigned int lift_bsp_apicid;
	int apicid_offset;
	void *mb; // pointer for mb related struct
};
extern struct amdfam10_sysconf_t sysconf;
void get_sblk_pci1234(void);
void get_bus_conf(void);
#endif
|
JPMoresmau/camel | components/camel-dynamic-router/src/main/java/org/apache/camel/component/dynamicrouter/DynamicRouterConstants.java | <reponame>JPMoresmau/camel<filename>components/camel-dynamic-router/src/main/java/org/apache/camel/component/dynamicrouter/DynamicRouterConstants.java
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.component.dynamicrouter;
import java.util.regex.Pattern;
/**
* Contains constants that are used within this component.
*/
public abstract class DynamicRouterConstants {

    // Holds constants only; declared abstract so it cannot be instantiated directly.

    /**
     * The camel version where this router became dynamic.
     */
    public static final String FIRST_VERSION = "3.15.0";

    /**
     * The component name/scheme for the {@link DynamicRouterEndpoint}.
     */
    public static final String COMPONENT_SCHEME = "dynamic-router";

    /**
     * The control channel, where routing participants subscribe and provide their routing rules and endpoint URIs.
     */
    public static final String CONTROL_CHANNEL_NAME = "control";

    /**
     * Convenient constant for the control channel URI
     * ({@code dynamic-router:control}).
     */
    public static final String CONTROL_CHANNEL_URI = COMPONENT_SCHEME + ":" + CONTROL_CHANNEL_NAME;

    /**
     * The title, for the auto-generated documentation.
     */
    public static final String TITLE = "Dynamic Router";

    /**
     * The mode for sending an exchange to recipients: send only to the first match.
     */
    public static final String MODE_FIRST_MATCH = "firstMatch";

    /**
     * The mode for sending an exchange to recipients: send to all matching.
     */
    public static final String MODE_ALL_MATCH = "allMatch";

    /**
     * The syntax, for the auto-generated documentation
     * ({@code dynamic-router:channel}).
     */
    public static final String SYNTAX = COMPONENT_SCHEME + ":channel";

    /**
     * Name of the control action parameter.
     */
    public static final String CONTROL_ACTION_PARAM = "controlAction";

    /**
     * Name of the channel parameter.
     */
    public static final String SUBSCRIPTION_CHANNEL_PARAM = "subscribeChannel";

    /**
     * The alternate control-channel syntax
     * ({@code dynamic-router:channel/controlAction/subscribeChannel}).
     */
    public static final String CONTROL_SYNTAX
            = SYNTAX + "/" + CONTROL_ACTION_PARAM + "/" + SUBSCRIPTION_CHANNEL_PARAM;

    /**
     * Subscribe control channel action.
     */
    public static final String CONTROL_ACTION_SUBSCRIBE = "subscribe";

    /**
     * Unsubscribe control channel action.
     */
    public static final String CONTROL_ACTION_UNSUBSCRIBE = "unsubscribe";

    /**
     * The name for the regex capture group that captures the channel name.
     */
    public static final String CHANNEL_GROUP = "channel";

    /**
     * The name for the regex capture group that captures the control channel action.
     */
    public static final String ACTION_GROUP = "action";

    /**
     * The name for the regex capture group that captures the channel name for the subscription.
     */
    public static final String SUBSCRIBE_GROUP = "subscribe";

    /**
     * Regular expression to parse URI path parameters. Matches a channel
     * segment optionally followed by {@code /action/subscribeChannel},
     * exposing the named capture groups {@link #CHANNEL_GROUP},
     * {@link #ACTION_GROUP} and {@link #SUBSCRIBE_GROUP}.
     */
    public static final Pattern PATH_PARAMS_PATTERN = Pattern.compile(
            String.format("(?<%s>[^/]+)(/(?<%s>[^/]+)/(?<%s>[^/]+))?", CHANNEL_GROUP, ACTION_GROUP, SUBSCRIBE_GROUP));
}
|
sho25/hive | ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezUtils.java | begin_unit|revision:0.9.5;language:Java;cregit-version:0.0.1
begin_comment
comment|/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */
end_comment
begin_package
package|package
name|org
operator|.
name|apache
operator|.
name|hadoop
operator|.
name|hive
operator|.
name|ql
operator|.
name|parse
package|;
end_package
begin_import
import|import static
name|org
operator|.
name|apache
operator|.
name|hadoop
operator|.
name|hive
operator|.
name|ql
operator|.
name|plan
operator|.
name|ReduceSinkDesc
operator|.
name|ReducerTraits
operator|.
name|AUTOPARALLEL
import|;
end_import
begin_import
import|import static
name|org
operator|.
name|apache
operator|.
name|hadoop
operator|.
name|hive
operator|.
name|ql
operator|.
name|plan
operator|.
name|ReduceSinkDesc
operator|.
name|ReducerTraits
operator|.
name|UNIFORM
import|;
end_import
begin_import
import|import
name|java
operator|.
name|util
operator|.
name|*
import|;
end_import
begin_import
import|import
name|org
operator|.
name|apache
operator|.
name|hadoop
operator|.
name|fs
operator|.
name|FileStatus
import|;
end_import
begin_import
import|import
name|org
operator|.
name|apache
operator|.
name|hadoop
operator|.
name|fs
operator|.
name|Path
import|;
end_import
begin_import
import|import
name|org
operator|.
name|apache
operator|.
name|hadoop
operator|.
name|hive
operator|.
name|conf
operator|.
name|HiveConf
import|;
end_import
begin_import
import|import
name|org
operator|.
name|apache
operator|.
name|hadoop
operator|.
name|hive
operator|.
name|ql
operator|.
name|exec
operator|.
name|AbstractFileMergeOperator
import|;
end_import
begin_import
import|import
name|org
operator|.
name|apache
operator|.
name|hadoop
operator|.
name|hive
operator|.
name|ql
operator|.
name|exec
operator|.
name|AppMasterEventOperator
import|;
end_import
begin_import
import|import
name|org
operator|.
name|apache
operator|.
name|hadoop
operator|.
name|hive
operator|.
name|ql
operator|.
name|exec
operator|.
name|FetchTask
import|;
end_import
begin_import
import|import
name|org
operator|.
name|apache
operator|.
name|hadoop
operator|.
name|hive
operator|.
name|ql
operator|.
name|exec
operator|.
name|FileSinkOperator
import|;
end_import
begin_import
import|import
name|org
operator|.
name|apache
operator|.
name|hadoop
operator|.
name|hive
operator|.
name|ql
operator|.
name|exec
operator|.
name|FilterOperator
import|;
end_import
begin_import
import|import
name|org
operator|.
name|apache
operator|.
name|hadoop
operator|.
name|hive
operator|.
name|ql
operator|.
name|exec
operator|.
name|GroupByOperator
import|;
end_import
begin_import
import|import
name|org
operator|.
name|apache
operator|.
name|hadoop
operator|.
name|hive
operator|.
name|ql
operator|.
name|exec
operator|.
name|HashTableDummyOperator
import|;
end_import
begin_import
import|import
name|org
operator|.
name|apache
operator|.
name|hadoop
operator|.
name|hive
operator|.
name|ql
operator|.
name|exec
operator|.
name|MapJoinOperator
import|;
end_import
begin_import
import|import
name|org
operator|.
name|apache
operator|.
name|hadoop
operator|.
name|hive
operator|.
name|ql
operator|.
name|exec
operator|.
name|Operator
import|;
end_import
begin_import
import|import
name|org
operator|.
name|apache
operator|.
name|hadoop
operator|.
name|hive
operator|.
name|ql
operator|.
name|exec
operator|.
name|OperatorUtils
import|;
end_import
begin_import
import|import
name|org
operator|.
name|apache
operator|.
name|hadoop
operator|.
name|hive
operator|.
name|ql
operator|.
name|exec
operator|.
name|ReduceSinkOperator
import|;
end_import
begin_import
import|import
name|org
operator|.
name|apache
operator|.
name|hadoop
operator|.
name|hive
operator|.
name|ql
operator|.
name|exec
operator|.
name|SerializationUtilities
import|;
end_import
begin_import
import|import
name|org
operator|.
name|apache
operator|.
name|hadoop
operator|.
name|hive
operator|.
name|ql
operator|.
name|exec
operator|.
name|TableScanOperator
import|;
end_import
begin_import
import|import
name|org
operator|.
name|apache
operator|.
name|hadoop
operator|.
name|hive
operator|.
name|ql
operator|.
name|exec
operator|.
name|UnionOperator
import|;
end_import
begin_import
import|import
name|org
operator|.
name|apache
operator|.
name|hadoop
operator|.
name|hive
operator|.
name|ql
operator|.
name|exec
operator|.
name|Utilities
import|;
end_import
begin_import
import|import
name|org
operator|.
name|apache
operator|.
name|hadoop
operator|.
name|hive
operator|.
name|ql
operator|.
name|lib
operator|.
name|*
import|;
end_import
begin_import
import|import
name|org
operator|.
name|apache
operator|.
name|hadoop
operator|.
name|hive
operator|.
name|ql
operator|.
name|optimizer
operator|.
name|GenMapRedUtils
import|;
end_import
begin_import
import|import
name|org
operator|.
name|apache
operator|.
name|hadoop
operator|.
name|hive
operator|.
name|ql
operator|.
name|plan
operator|.
name|*
import|;
end_import
begin_import
import|import
name|org
operator|.
name|apache
operator|.
name|hadoop
operator|.
name|hive
operator|.
name|ql
operator|.
name|plan
operator|.
name|TezEdgeProperty
operator|.
name|EdgeType
import|;
end_import
begin_import
import|import
name|org
operator|.
name|apache
operator|.
name|hadoop
operator|.
name|hive
operator|.
name|ql
operator|.
name|udf
operator|.
name|generic
operator|.
name|GenericUDFBetween
import|;
end_import
begin_import
import|import
name|org
operator|.
name|apache
operator|.
name|hadoop
operator|.
name|hive
operator|.
name|ql
operator|.
name|udf
operator|.
name|generic
operator|.
name|GenericUDFInBloomFilter
import|;
end_import
begin_import
import|import
name|org
operator|.
name|apache
operator|.
name|hadoop
operator|.
name|hive
operator|.
name|serde2
operator|.
name|typeinfo
operator|.
name|TypeInfoFactory
import|;
end_import
begin_import
import|import
name|org
operator|.
name|slf4j
operator|.
name|Logger
import|;
end_import
begin_import
import|import
name|org
operator|.
name|slf4j
operator|.
name|LoggerFactory
import|;
end_import
begin_import
import|import
name|com
operator|.
name|google
operator|.
name|common
operator|.
name|collect
operator|.
name|BiMap
import|;
end_import
begin_import
import|import
name|com
operator|.
name|google
operator|.
name|common
operator|.
name|collect
operator|.
name|HashBiMap
import|;
end_import
begin_comment
comment|/** * GenTezUtils is a collection of shared helper methods to produce TezWork. * All the methods in this class should be static, but some aren't; this is to facilitate testing. * Methods are made non-static on as needed basis. */
end_comment
begin_class
specifier|public
class|class
name|GenTezUtils
block|{
specifier|static
specifier|final
specifier|private
name|Logger
name|LOG
init|=
name|LoggerFactory
operator|.
name|getLogger
argument_list|(
name|GenTezUtils
operator|.
name|class
argument_list|)
decl_stmt|;
specifier|public
name|GenTezUtils
parameter_list|()
block|{ }
specifier|public
specifier|static
name|UnionWork
name|createUnionWork
parameter_list|(
name|GenTezProcContext
name|context
parameter_list|,
name|Operator
argument_list|<
name|?
argument_list|>
name|root
parameter_list|,
name|Operator
argument_list|<
name|?
argument_list|>
name|leaf
parameter_list|,
name|TezWork
name|tezWork
parameter_list|)
block|{
name|UnionWork
name|unionWork
init|=
operator|new
name|UnionWork
argument_list|(
literal|"Union "
operator|+
name|context
operator|.
name|nextSequenceNumber
argument_list|()
argument_list|)
decl_stmt|;
name|context
operator|.
name|rootUnionWorkMap
operator|.
name|put
argument_list|(
name|root
argument_list|,
name|unionWork
argument_list|)
expr_stmt|;
name|context
operator|.
name|unionWorkMap
operator|.
name|put
argument_list|(
name|leaf
argument_list|,
name|unionWork
argument_list|)
expr_stmt|;
name|tezWork
operator|.
name|add
argument_list|(
name|unionWork
argument_list|)
expr_stmt|;
return|return
name|unionWork
return|;
block|}
specifier|public
specifier|static
name|ReduceWork
name|createReduceWork
parameter_list|(
name|GenTezProcContext
name|context
parameter_list|,
name|Operator
argument_list|<
name|?
argument_list|>
name|root
parameter_list|,
name|TezWork
name|tezWork
parameter_list|)
block|{
assert|assert
operator|!
name|root
operator|.
name|getParentOperators
argument_list|()
operator|.
name|isEmpty
argument_list|()
assert|;
name|boolean
name|isAutoReduceParallelism
init|=
name|context
operator|.
name|conf
operator|.
name|getBoolVar
argument_list|(
name|HiveConf
operator|.
name|ConfVars
operator|.
name|TEZ_AUTO_REDUCER_PARALLELISM
argument_list|)
decl_stmt|;
name|float
name|maxPartitionFactor
init|=
name|context
operator|.
name|conf
operator|.
name|getFloatVar
argument_list|(
name|HiveConf
operator|.
name|ConfVars
operator|.
name|TEZ_MAX_PARTITION_FACTOR
argument_list|)
decl_stmt|;
name|float
name|minPartitionFactor
init|=
name|context
operator|.
name|conf
operator|.
name|getFloatVar
argument_list|(
name|HiveConf
operator|.
name|ConfVars
operator|.
name|TEZ_MIN_PARTITION_FACTOR
argument_list|)
decl_stmt|;
name|long
name|bytesPerReducer
init|=
name|context
operator|.
name|conf
operator|.
name|getLongVar
argument_list|(
name|HiveConf
operator|.
name|ConfVars
operator|.
name|BYTESPERREDUCER
argument_list|)
decl_stmt|;
name|int
name|defaultTinyBufferSize
init|=
name|context
operator|.
name|conf
operator|.
name|getIntVar
argument_list|(
name|HiveConf
operator|.
name|ConfVars
operator|.
name|TEZ_SIMPLE_CUSTOM_EDGE_TINY_BUFFER_SIZE_MB
argument_list|)
decl_stmt|;
name|ReduceWork
name|reduceWork
init|=
operator|new
name|ReduceWork
argument_list|(
name|Utilities
operator|.
name|REDUCENAME
operator|+
name|context
operator|.
name|nextSequenceNumber
argument_list|()
argument_list|)
decl_stmt|;
name|LOG
operator|.
name|debug
argument_list|(
literal|"Adding reduce work ("
operator|+
name|reduceWork
operator|.
name|getName
argument_list|()
operator|+
literal|") for "
operator|+
name|root
argument_list|)
expr_stmt|;
name|reduceWork
operator|.
name|setReducer
argument_list|(
name|root
argument_list|)
expr_stmt|;
name|reduceWork
operator|.
name|setNeedsTagging
argument_list|(
name|GenMapRedUtils
operator|.
name|needsTagging
argument_list|(
name|reduceWork
argument_list|)
argument_list|)
expr_stmt|;
comment|// All parents should be reduce sinks. We pick the one we just walked
comment|// to choose the number of reducers. In the join/union case they will
comment|// all be -1. In sort/order case where it matters there will be only
comment|// one parent.
assert|assert
name|context
operator|.
name|parentOfRoot
operator|instanceof
name|ReduceSinkOperator
assert|;
name|ReduceSinkOperator
name|reduceSink
init|=
operator|(
name|ReduceSinkOperator
operator|)
name|context
operator|.
name|parentOfRoot
decl_stmt|;
name|reduceWork
operator|.
name|setNumReduceTasks
argument_list|(
name|reduceSink
operator|.
name|getConf
argument_list|()
operator|.
name|getNumReducers
argument_list|()
argument_list|)
expr_stmt|;
name|reduceWork
operator|.
name|setSlowStart
argument_list|(
name|reduceSink
operator|.
name|getConf
argument_list|()
operator|.
name|isSlowStart
argument_list|()
argument_list|)
expr_stmt|;
name|reduceWork
operator|.
name|setUniformDistribution
argument_list|(
name|reduceSink
operator|.
name|getConf
argument_list|()
operator|.
name|getReducerTraits
argument_list|()
operator|.
name|contains
argument_list|(
name|UNIFORM
argument_list|)
argument_list|)
expr_stmt|;
if|if
condition|(
name|isAutoReduceParallelism
operator|&&
name|reduceSink
operator|.
name|getConf
argument_list|()
operator|.
name|getReducerTraits
argument_list|()
operator|.
name|contains
argument_list|(
name|AUTOPARALLEL
argument_list|)
condition|)
block|{
comment|// configured limit for reducers
specifier|final
name|int
name|maxReducers
init|=
name|context
operator|.
name|conf
operator|.
name|getIntVar
argument_list|(
name|HiveConf
operator|.
name|ConfVars
operator|.
name|MAXREDUCERS
argument_list|)
decl_stmt|;
comment|// estimated number of reducers
specifier|final
name|int
name|nReducers
init|=
name|reduceSink
operator|.
name|getConf
argument_list|()
operator|.
name|getNumReducers
argument_list|()
decl_stmt|;
comment|// min we allow tez to pick
name|int
name|minPartition
init|=
name|Math
operator|.
name|max
argument_list|(
literal|1
argument_list|,
call|(
name|int
call|)
argument_list|(
name|nReducers
operator|*
name|minPartitionFactor
argument_list|)
argument_list|)
decl_stmt|;
name|minPartition
operator|=
operator|(
name|minPartition
operator|>
name|maxReducers
operator|)
condition|?
name|maxReducers
else|:
name|minPartition
expr_stmt|;
comment|// max we allow tez to pick
name|int
name|maxPartition
init|=
name|Math
operator|.
name|max
argument_list|(
literal|1
argument_list|,
call|(
name|int
call|)
argument_list|(
name|nReducers
operator|*
name|maxPartitionFactor
argument_list|)
argument_list|)
decl_stmt|;
name|maxPartition
operator|=
operator|(
name|maxPartition
operator|>
name|maxReducers
operator|)
condition|?
name|maxReducers
else|:
name|maxPartition
expr_stmt|;
comment|// reduce only if the parameters are significant
if|if
condition|(
name|minPartition
operator|<
name|maxPartition
operator|&&
name|nReducers
operator|*
name|minPartitionFactor
operator|>=
literal|1.0
condition|)
block|{
name|reduceWork
operator|.
name|setAutoReduceParallelism
argument_list|(
literal|true
argument_list|)
expr_stmt|;
name|reduceWork
operator|.
name|setMinReduceTasks
argument_list|(
name|minPartition
argument_list|)
expr_stmt|;
name|reduceWork
operator|.
name|setMaxReduceTasks
argument_list|(
name|maxPartition
argument_list|)
expr_stmt|;
block|}
elseif|else
if|if
condition|(
name|nReducers
operator|<
name|maxPartition
condition|)
block|{
comment|// the max is good, the min is too low
name|reduceWork
operator|.
name|setNumReduceTasks
argument_list|(
name|maxPartition
argument_list|)
expr_stmt|;
block|}
block|}
name|setupReduceSink
argument_list|(
name|context
argument_list|,
name|reduceWork
argument_list|,
name|reduceSink
argument_list|)
expr_stmt|;
name|tezWork
operator|.
name|add
argument_list|(
name|reduceWork
argument_list|)
expr_stmt|;
name|TezEdgeProperty
name|edgeProp
decl_stmt|;
name|EdgeType
name|edgeType
init|=
name|determineEdgeType
argument_list|(
name|context
operator|.
name|preceedingWork
argument_list|,
name|reduceWork
argument_list|,
name|reduceSink
argument_list|)
decl_stmt|;
if|if
condition|(
name|reduceWork
operator|.
name|isAutoReduceParallelism
argument_list|()
condition|)
block|{
name|edgeProp
operator|=
operator|new
name|TezEdgeProperty
argument_list|(
name|context
operator|.
name|conf
argument_list|,
name|edgeType
argument_list|,
literal|true
argument_list|,
name|reduceWork
operator|.
name|isSlowStart
argument_list|()
argument_list|,
name|reduceWork
operator|.
name|getMinReduceTasks
argument_list|()
argument_list|,
name|reduceWork
operator|.
name|getMaxReduceTasks
argument_list|()
argument_list|,
name|bytesPerReducer
argument_list|)
expr_stmt|;
block|}
else|else
block|{
name|edgeProp
operator|=
operator|new
name|TezEdgeProperty
argument_list|(
name|edgeType
argument_list|)
expr_stmt|;
name|edgeProp
operator|.
name|setSlowStart
argument_list|(
name|reduceWork
operator|.
name|isSlowStart
argument_list|()
argument_list|)
expr_stmt|;
block|}
name|edgeProp
operator|.
name|setBufferSize
argument_list|(
name|obtainBufferSize
argument_list|(
name|root
argument_list|,
name|reduceSink
argument_list|,
name|defaultTinyBufferSize
argument_list|)
argument_list|)
expr_stmt|;
name|reduceWork
operator|.
name|setEdgePropRef
argument_list|(
name|edgeProp
argument_list|)
expr_stmt|;
name|tezWork
operator|.
name|connect
argument_list|(
name|context
operator|.
name|preceedingWork
argument_list|,
name|reduceWork
argument_list|,
name|edgeProp
argument_list|)
expr_stmt|;
name|context
operator|.
name|connectedReduceSinks
operator|.
name|add
argument_list|(
name|reduceSink
argument_list|)
expr_stmt|;
return|return
name|reduceWork
return|;
block|}
specifier|private
specifier|static
name|void
name|setupReduceSink
parameter_list|(
name|GenTezProcContext
name|context
parameter_list|,
name|ReduceWork
name|reduceWork
parameter_list|,
name|ReduceSinkOperator
name|reduceSink
parameter_list|)
block|{
name|LOG
operator|.
name|debug
argument_list|(
literal|"Setting up reduce sink: "
operator|+
name|reduceSink
operator|+
literal|" with following reduce work: "
operator|+
name|reduceWork
operator|.
name|getName
argument_list|()
argument_list|)
expr_stmt|;
comment|// need to fill in information about the key and value in the reducer
name|GenMapRedUtils
operator|.
name|setKeyAndValueDesc
argument_list|(
name|reduceWork
argument_list|,
name|reduceSink
argument_list|)
expr_stmt|;
comment|// remember which parent belongs to which tag
name|int
name|tag
init|=
name|reduceSink
operator|.
name|getConf
argument_list|()
operator|.
name|getTag
argument_list|()
decl_stmt|;
name|reduceWork
operator|.
name|getTagToInput
argument_list|()
operator|.
name|put
argument_list|(
name|tag
operator|==
operator|-
literal|1
condition|?
literal|0
else|:
name|tag
argument_list|,
name|context
operator|.
name|preceedingWork
operator|.
name|getName
argument_list|()
argument_list|)
expr_stmt|;
comment|// remember the output name of the reduce sink
name|reduceSink
operator|.
name|getConf
argument_list|()
operator|.
name|setOutputName
argument_list|(
name|reduceWork
operator|.
name|getName
argument_list|()
argument_list|)
expr_stmt|;
block|}
/**
 * Creates a new MapWork for the given table-scan root and registers it in the TezWork.
 *
 * The work is named {@code Utilities.MAPNAME} plus the context's next sequence number,
 * populated via {@link #setupMapWork}, flagged when the scan targets a dummy table, and
 * given the scan's included-bucket set when the table is bucketed.
 *
 * @param context    shared traversal state (supplies the sequence number)
 * @param root       must be a parentless TableScanOperator (both asserted below)
 * @param tezWork    the DAG the new MapWork is added to
 * @param partitions pruned partition list forwarded to setupMapWork
 * @return the newly created and registered MapWork
 * @throws SemanticException propagated from setupMapWork
 */
public MapWork createMapWork(GenTezProcContext context, Operator<?> root,
    TezWork tezWork, PrunedPartitionList partitions) throws SemanticException {
  assert root.getParentOperators().isEmpty();
  MapWork mapWork = new MapWork(Utilities.MAPNAME + context.nextSequenceNumber());
  LOG.debug("Adding map work (" + mapWork.getName() + ") for " + root);

  // map work starts with table scan operators
  assert root instanceof TableScanOperator;
  TableScanOperator ts = (TableScanOperator) root;

  String alias = ts.getConf().getAlias();

  setupMapWork(mapWork, context, partitions, ts, alias);

  // dummy tables (e.g. no real input) get special handling downstream
  if (ts.getConf().getTableMetadata() != null
      && ts.getConf().getTableMetadata().isDummyTable()) {
    mapWork.setDummyTableScan(true);
  }

  // propagate bucket pruning info only when the table is actually bucketed
  if (ts.getConf().getNumBuckets() > 0) {
    mapWork.setIncludedBuckets(ts.getConf().getIncludedBuckets());
  }

  // add new item to the tez work
  tezWork.add(mapWork);

  return mapWork;
}
// this method's main use is to help unit testing this class
/**
 * Populates the given MapWork for a table scan by delegating to
 * {@link GenMapRedUtils#setMapWork}. Kept as a separate overridable hook so
 * tests can intercept the setup.
 *
 * @param mapWork    the work object to populate
 * @param context    supplies parseContext, inputs and conf for the delegate call
 * @param partitions pruned partitions for the scan
 * @param root       the table scan operator the work starts from
 * @param alias      the scan's table alias
 * @throws SemanticException propagated from GenMapRedUtils.setMapWork
 */
protected void setupMapWork(MapWork mapWork, GenTezProcContext context,
    PrunedPartitionList partitions, TableScanOperator root,
    String alias) throws SemanticException {
  // All the setup is done in GenMapRedUtils
  GenMapRedUtils.setMapWork(mapWork, context.parseContext,
      context.inputs, partitions, root, alias, context.conf, false);
}
// removes any union operator and clones the plan
/**
 * Clones the operator plan rooted at the given work's roots (plus dummy and
 * event operators), removes all UnionOperators from the clone, and rewires the
 * work and the context maps to the cloned operators.
 *
 * NOTE(review): the {@code indexForTezUnion} parameter is not referenced
 * anywhere in this method — confirm whether callers still need it.
 *
 * @param context shared traversal state; several of its maps/sets are updated in place
 * @param work    the BaseWork whose plan is cloned and de-unioned
 * @param indexForTezUnion unused here (see note above)
 * @throws SemanticException declared for API compatibility with callers
 */
public static void removeUnionOperators(GenTezProcContext context, BaseWork work,
    int indexForTezUnion) throws SemanticException {

  // collect everything that must be cloned: work roots, dummy ops, event ops
  List<Operator<?>> roots = new ArrayList<Operator<?>>();
  roots.addAll(work.getAllRootOperators());
  if (work.getDummyOps() != null) {
    roots.addAll(work.getDummyOps());
  }
  roots.addAll(context.eventOperatorSet);

  // need to clone the plan.
  List<Operator<?>> newRoots = SerializationUtilities.cloneOperatorTree(roots);

  // we're cloning the operator plan but we're retaining the original work. That means
  // that root operators have to be replaced with the cloned ops. The replacement map
  // tells you what that mapping is.
  BiMap<Operator<?>, Operator<?>> replacementMap = HashBiMap.create();

  // there's some special handling for dummyOps required. Mapjoins won't be properly
  // initialized if their dummy parents aren't initialized. Since we cloned the plan
  // we need to replace the dummy operators in the work with the cloned ones.
  List<HashTableDummyOperator> dummyOps = new LinkedList<HashTableDummyOperator>();

  // iterate roots and newRoots in lockstep: cloneOperatorTree preserves order
  Iterator<Operator<?>> it = newRoots.iterator();
  for (Operator<?> orig : roots) {
    // file sinks reachable from the original root are superseded by their clones
    Set<FileSinkOperator> fsOpSet = OperatorUtils.findOperators(orig, FileSinkOperator.class);
    for (FileSinkOperator fsOp : fsOpSet) {
      context.fileSinkSet.remove(fsOp);
    }

    Operator<?> newRoot = it.next();

    replacementMap.put(orig, newRoot);

    if (newRoot instanceof HashTableDummyOperator) {
      // dummy ops need to be updated to the cloned ones.
      dummyOps.add((HashTableDummyOperator) newRoot);
      it.remove();
    } else if (newRoot instanceof AppMasterEventOperator) {
      // event operators point to table scan operators. When cloning these we
      // need to restore the original scan.
      if (newRoot.getConf() instanceof DynamicPruningEventDesc) {
        TableScanOperator ts = ((DynamicPruningEventDesc) orig.getConf()).getTableScan();
        if (ts == null) {
          throw new AssertionError("No table scan associated with dynamic event pruning. " + orig);
        }
        ((DynamicPruningEventDesc) newRoot.getConf()).setTableScan(ts);
      }
      it.remove();
    } else {
      if (newRoot instanceof TableScanOperator) {
        if (context.tsToEventMap.containsKey(orig)) {
          // we need to update event operators with the cloned table scan
          for (AppMasterEventOperator event : context.tsToEventMap.get(orig)) {
            ((DynamicPruningEventDesc) event.getConf()).setTableScan((TableScanOperator) newRoot);
          }
        }
        // This TableScanOperator could be part of semijoin optimization.
        Map<ReduceSinkOperator, SemiJoinBranchInfo> rsToSemiJoinBranchInfo =
            context.parseContext.getRsToSemiJoinBranchInfo();
        for (ReduceSinkOperator rs : rsToSemiJoinBranchInfo.keySet()) {
          SemiJoinBranchInfo sjInfo = rsToSemiJoinBranchInfo.get(rs);
          if (sjInfo.getTsOp() == orig) {
            // re-point the semijoin branch at the cloned scan, keeping the hint flag
            SemiJoinBranchInfo newSJInfo =
                new SemiJoinBranchInfo((TableScanOperator) newRoot, sjInfo.getIsHint());
            rsToSemiJoinBranchInfo.put(rs, newSJInfo);
          }
        }
        // This TableScanOperator could also be part of other events in eventOperatorSet.
        for (AppMasterEventOperator event : context.eventOperatorSet) {
          if (event.getConf() instanceof DynamicPruningEventDesc) {
            TableScanOperator ts = ((DynamicPruningEventDesc) event.getConf()).getTableScan();
            if (ts.equals(orig)) {
              ((DynamicPruningEventDesc) event.getConf()).setTableScan((TableScanOperator) newRoot);
            }
          }
        }
      }
      // the cloned root now owns this work
      context.rootToWorkMap.remove(orig);
      context.rootToWorkMap.put(newRoot, work);
    }
  }

  // now we remove all the unions. we throw away any branch that's not reachable from
  // the current set of roots. The reason is that those branches will be handled in
  // different tasks.
  Deque<Operator<?>> operators = new LinkedList<Operator<?>>();
  operators.addAll(newRoots);

  Set<Operator<?>> seen = new HashSet<Operator<?>>();

  Set<FileStatus> fileStatusesToFetch = null;
  if (context.parseContext.getFetchTask() != null) {
    // File sink operator keeps a reference to a list of files. This reference needs to be passed on
    // to other file sink operators which could have been added by removal of Union Operator
    fileStatusesToFetch = context.parseContext.getFetchTask().getWork().getFilesToFetch();
  }

  // depth-first walk over the cloned plan
  while (!operators.isEmpty()) {
    Operator<?> current = operators.pop();

    if (seen.add(current) && current instanceof FileSinkOperator) {
      FileSinkOperator fileSink = (FileSinkOperator) current;

      // remember it for additional processing later
      if (context.fileSinkSet.contains(fileSink)) {
        continue;
      } else {
        context.fileSinkSet.add(fileSink);
      }

      FileSinkDesc desc = fileSink.getConf();
      Path path = desc.getDirName();
      List<FileSinkDesc> linked;

      if (!context.linkedFileSinks.containsKey(path)) {
        linked = new ArrayList<FileSinkDesc>();
        context.linkedFileSinks.put(path, linked);
      }
      linked = context.linkedFileSinks.get(path);
      linked.add(desc);

      // each linked sink writes to its own union subdirectory under the shared parent
      desc.setDirName(new Path(path,
          AbstractFileMergeOperator.UNION_SUDBIR_PREFIX + linked.size()));
      Utilities.FILE_OP_LOGGER.debug("removing union - new desc with "
          + desc.getDirName() + "; parent " + path);
      desc.setLinkedFileSink(true);
      desc.setLinkedFileSinkDesc(linked);
      desc.setFilesToFetch(fileStatusesToFetch);
    }

    if (current instanceof AppMasterEventOperator) {
      // remember for additional processing later
      context.eventOperatorSet.add((AppMasterEventOperator) current);

      // mark the original as abandoned. Don't need it anymore.
      context.abandonedEventOperatorSet.add(
          (AppMasterEventOperator) replacementMap.inverse().get(current));
    }

    if (current instanceof UnionOperator) {
      Operator<?> parent = null;
      int count = 0;

      // find which already-visited parent reached this union
      for (Operator<?> op : current.getParentOperators()) {
        if (seen.contains(op)) {
          ++count;
          parent = op;
        }
      }

      // we should have been able to reach the union from only one side.
      assert count <= 1;

      if (parent == null) {
        // root operator is union (can happen in reducers)
        replacementMap.put(current, current.getChildOperators().get(0));
      } else {
        parent.removeChildAndAdoptItsChildren(current);
      }
    }

    if (current instanceof FileSinkOperator || current instanceof ReduceSinkOperator) {
      // sinks terminate this work; drop anything hanging below them
      current.setChildOperators(null);
    } else {
      operators.addAll(current.getChildOperators());
    }
  }

  LOG.debug("Setting dummy ops for work " + work.getName() + ": " + dummyOps);
  work.setDummyOps(dummyOps);
  work.replaceRoots(replacementMap);
}
/**
 * Finalizes a FileSinkOperator: creates the move task for its output, adds a
 * map-only merge job when small-file merging is required, and marks the current
 * task as a fetch source when the query's FetchTask reads from this sink.
 *
 * @param context  shared traversal state (moveTask, currentTask, dependencyTask)
 * @param fileSink the sink being processed
 * @throws SemanticException propagated from GenMapRedUtils helpers
 */
public static void processFileSink(GenTezProcContext context, FileSinkOperator fileSink)
    throws SemanticException {

  ParseContext parseContext = context.parseContext;

  boolean isInsertTable = // is INSERT OVERWRITE TABLE
      GenMapRedUtils.isInsertInto(parseContext, fileSink);
  HiveConf hconf = parseContext.getConf();

  // chDir == true means the sink writes to a temp dir that a merge job consolidates
  boolean chDir = GenMapRedUtils.isMergeRequired(context.moveTask,
      hconf, fileSink, context.currentTask, isInsertTable);

  Path finalName = GenMapRedUtils.createMoveTask(context.currentTask,
      chDir, fileSink, parseContext, context.moveTask, hconf, context.dependencyTask);

  if (chDir) {
    // Merge the files in the destination table/partitions by creating Map-only merge job
    // If underlying data is RCFile or OrcFile, RCFileBlockMerge task or
    // OrcFileStripeMerge task would be created.
    LOG.info("using CombineHiveInputformat for the merge job");
    Utilities.FILE_OP_LOGGER.debug("will generate MR work for merging files from "
        + fileSink.getConf().getDirName() + " to " + finalName);
    GenMapRedUtils.createMRWorkForMergingFiles(fileSink, finalName,
        context.dependencyTask, context.moveTask, hconf, context.currentTask,
        parseContext.getQueryState().getLineageState());
  }

  // only a leaf task (no children) can feed the FetchTask directly
  FetchTask fetchTask = parseContext.getFetchTask();
  if (fetchTask != null && context.currentTask.getNumChild() == 0) {
    if (fetchTask.isFetchFrom(fileSink.getConf())) {
      context.currentTask.setFetchSource(true);
    }
  }
}
comment|/** * processAppMasterEvent sets up the event descriptor and the MapWork. * * @param procCtx * @param event */
specifier|public
specifier|static
name|void
name|processAppMasterEvent
parameter_list|(
name|GenTezProcContext
name|procCtx
parameter_list|,
name|AppMasterEventOperator
name|event
parameter_list|)
block|{
if|if
condition|(
name|procCtx
operator|.
name|abandonedEventOperatorSet
operator|.
name|contains
argument_list|(
name|event
argument_list|)
condition|)
block|{
comment|// don't need this anymore
return|return;
block|}
name|DynamicPruningEventDesc
name|eventDesc
init|=
operator|(
name|DynamicPruningEventDesc
operator|)
name|event
operator|.
name|getConf
argument_list|()
decl_stmt|;
name|TableScanOperator
name|ts
init|=
name|eventDesc
operator|.
name|getTableScan
argument_list|()
decl_stmt|;
name|MapWork
name|work
init|=
operator|(
name|MapWork
operator|)
name|procCtx
operator|.
name|rootToWorkMap
operator|.
name|get
argument_list|(
name|ts
argument_list|)
decl_stmt|;
if|if
condition|(
name|work
operator|==
literal|null
condition|)
block|{
throw|throw
operator|new
name|AssertionError
argument_list|(
literal|"No work found for tablescan "
operator|+
name|ts
argument_list|)
throw|;
block|}
name|BaseWork
name|enclosingWork
init|=
name|getEnclosingWork
argument_list|(
name|event
argument_list|,
name|procCtx
argument_list|)
decl_stmt|;
if|if
condition|(
name|enclosingWork
operator|==
literal|null
condition|)
block|{
throw|throw
operator|new
name|AssertionError
argument_list|(
literal|"Cannot find work for operator"
operator|+
name|event
argument_list|)
throw|;
block|}
name|String
name|sourceName
init|=
name|enclosingWork
operator|.
name|getName
argument_list|()
decl_stmt|;
comment|// store the vertex name in the operator pipeline
name|eventDesc
operator|.
name|setVertexName
argument_list|(
name|work
operator|.
name|getName
argument_list|()
argument_list|)
expr_stmt|;
name|eventDesc
operator|.
name|setInputName
argument_list|(
name|work
operator|.
name|getAliases
argument_list|()
operator|.
name|get
argument_list|(
literal|0
argument_list|)
argument_list|)
expr_stmt|;
comment|// store table descriptor in map-work
if|if
condition|(
operator|!
name|work
operator|.
name|getEventSourceTableDescMap
argument_list|()
operator|.
name|containsKey
argument_list|(
name|sourceName
argument_list|)
condition|)
block|{
name|work
operator|.
name|getEventSourceTableDescMap
argument_list|()
operator|.
name|put
argument_list|(
name|sourceName
argument_list|,
operator|new
name|LinkedList
argument_list|<
name|TableDesc
argument_list|>
argument_list|()
argument_list|)
expr_stmt|;
block|}
name|List
argument_list|<
name|TableDesc
argument_list|>
name|tables
init|=
name|work
operator|.
name|getEventSourceTableDescMap
argument_list|()
operator|.
name|get
argument_list|(
name|sourceName
argument_list|)
decl_stmt|;
name|tables
operator|.
name|add
argument_list|(
name|event
operator|.
name|getConf
argument_list|()
operator|.
name|getTable
argument_list|()
argument_list|)
expr_stmt|;
comment|// store column name in map-work
if|if
condition|(
operator|!
name|work
operator|.
name|getEventSourceColumnNameMap
argument_list|()
operator|.
name|containsKey
argument_list|(
name|sourceName
argument_list|)
condition|)
block|{
name|work
operator|.
name|getEventSourceColumnNameMap
argument_list|()
operator|.
name|put
argument_list|(
name|sourceName
argument_list|,
operator|new
name|LinkedList
argument_list|<
name|String
argument_list|>
argument_list|()
argument_list|)
expr_stmt|;
block|}
name|List
argument_list|<
name|String
argument_list|>
name|columns
init|=
name|work
operator|.
name|getEventSourceColumnNameMap
argument_list|()
operator|.
name|get
argument_list|(
name|sourceName
argument_list|)
decl_stmt|;
name|columns
operator|.
name|add
argument_list|(
name|eventDesc
operator|.
name|getTargetColumnName
argument_list|()
argument_list|)
expr_stmt|;
if|if
condition|(
operator|!
name|work
operator|.
name|getEventSourceColumnTypeMap
argument_list|()
operator|.
name|containsKey
argument_list|(
name|sourceName
argument_list|)
condition|)
block|{
name|work
operator|.
name|getEventSourceColumnTypeMap
argument_list|()
operator|.
name|put
argument_list|(
name|sourceName
argument_list|,
operator|new
name|LinkedList
argument_list|<
name|String
argument_list|>
argument_list|()
argument_list|)
expr_stmt|;
block|}
name|List
argument_list|<
name|String
argument_list|>
name|columnTypes
init|=
name|work
operator|.
name|getEventSourceColumnTypeMap
argument_list|()
operator|.
name|get
argument_list|(
name|sourceName
argument_list|)
decl_stmt|;
name|columnTypes
operator|.
name|add
argument_list|(
name|eventDesc
operator|.
name|getTargetColumnType
argument_list|()
argument_list|)
expr_stmt|;
comment|// store partition key expr in map-work
if|if
condition|(
operator|!
name|work
operator|.
name|getEventSourcePartKeyExprMap
argument_list|()
operator|.
name|containsKey
argument_list|(
name|sourceName
argument_list|)
condition|)
block|{
name|work
operator|.
name|getEventSourcePartKeyExprMap
argument_list|()
operator|.
name|put
argument_list|(
name|sourceName
argument_list|,
operator|new
name|LinkedList
argument_list|<
name|ExprNodeDesc
argument_list|>
argument_list|()
argument_list|)
expr_stmt|;
block|}
name|List
argument_list|<
name|ExprNodeDesc
argument_list|>
name|keys
init|=
name|work
operator|.
name|getEventSourcePartKeyExprMap
argument_list|()
operator|.
name|get
argument_list|(
name|sourceName
argument_list|)
decl_stmt|;
name|keys
operator|.
name|add
argument_list|(
name|eventDesc
operator|.
name|getPartKey
argument_list|()
argument_list|)
expr_stmt|;
block|}
comment|/** * getEncosingWork finds the BaseWork any given operator belongs to. */
specifier|public
specifier|static
name|BaseWork
name|getEnclosingWork
parameter_list|(
name|Operator
argument_list|<
name|?
argument_list|>
name|op
parameter_list|,
name|GenTezProcContext
name|procCtx
parameter_list|)
block|{
name|List
argument_list|<
name|Operator
argument_list|<
name|?
argument_list|>
argument_list|>
name|ops
init|=
operator|new
name|ArrayList
argument_list|<
name|Operator
argument_list|<
name|?
argument_list|>
argument_list|>
argument_list|()
decl_stmt|;
name|findRoots
argument_list|(
name|op
argument_list|,
name|ops
argument_list|)
expr_stmt|;
for|for
control|(
name|Operator
argument_list|<
name|?
argument_list|>
name|r
range|:
name|ops
control|)
block|{
name|BaseWork
name|work
init|=
name|procCtx
operator|.
name|rootToWorkMap
operator|.
name|get
argument_list|(
name|r
argument_list|)
decl_stmt|;
if|if
condition|(
name|work
operator|!=
literal|null
condition|)
block|{
return|return
name|work
return|;
block|}
block|}
return|return
literal|null
return|;
block|}
comment|/* * findRoots returns all root operators (in ops) that result in operator op */
specifier|private
specifier|static
name|void
name|findRoots
parameter_list|(
name|Operator
argument_list|<
name|?
argument_list|>
name|op
parameter_list|,
name|List
argument_list|<
name|Operator
argument_list|<
name|?
argument_list|>
argument_list|>
name|ops
parameter_list|)
block|{
name|List
argument_list|<
name|Operator
argument_list|<
name|?
argument_list|>
argument_list|>
name|parents
init|=
name|op
operator|.
name|getParentOperators
argument_list|()
decl_stmt|;
if|if
condition|(
name|parents
operator|==
literal|null
operator|||
name|parents
operator|.
name|isEmpty
argument_list|()
condition|)
block|{
name|ops
operator|.
name|add
argument_list|(
name|op
argument_list|)
expr_stmt|;
return|return;
block|}
for|for
control|(
name|Operator
argument_list|<
name|?
argument_list|>
name|p
range|:
name|parents
control|)
block|{
name|findRoots
argument_list|(
name|p
argument_list|,
name|ops
argument_list|)
expr_stmt|;
block|}
block|}
comment|/** * Remove an operator branch. When we see a fork, we know it's time to do the removal. * @param event the leaf node of which branch to be removed */
specifier|public
specifier|static
name|Operator
argument_list|<
name|?
argument_list|>
name|removeBranch
parameter_list|(
name|Operator
argument_list|<
name|?
argument_list|>
name|event
parameter_list|)
block|{
name|Operator
argument_list|<
name|?
argument_list|>
name|child
init|=
name|event
decl_stmt|;
name|Operator
argument_list|<
name|?
argument_list|>
name|curr
init|=
name|event
decl_stmt|;
while|while
condition|(
name|curr
operator|.
name|getChildOperators
argument_list|()
operator|.
name|size
argument_list|()
operator|<=
literal|1
condition|)
block|{
name|child
operator|=
name|curr
expr_stmt|;
name|curr
operator|=
name|curr
operator|.
name|getParentOperators
argument_list|()
operator|.
name|get
argument_list|(
literal|0
argument_list|)
expr_stmt|;
block|}
name|curr
operator|.
name|removeChild
argument_list|(
name|child
argument_list|)
expr_stmt|;
return|return
name|child
return|;
block|}
/**
 * Chooses the Tez edge type between two works based on the connecting
 * ReduceSinkOperator: 1-1 for unordered forwarding sinks, a custom simple edge
 * when the following reducer is a dynamically partitioned hash join or when no
 * ordering is required, otherwise the default sorted simple edge.
 *
 * @param preceedingWork the upstream work (currently not consulted here)
 * @param followingWork  the downstream work; inspected for a DPHJ reducer
 * @param reduceSinkOperator the sink whose conf (forwarding/ordering) drives the choice
 * @return the EdgeType to use for this connection
 */
public static EdgeType determineEdgeType(BaseWork preceedingWork,
    BaseWork followingWork, ReduceSinkOperator reduceSinkOperator) {
  // The 1-1 edge should also work for sorted cases, however depending on the details of the shuffle
  // this might end up writing multiple compressed files or end up using an in-memory partitioned kv writer
  // the condition about ordering = false can be removed at some point with a tweak to the unordered writer
  // to never split a single output across multiple files (and never attempt a final merge)
  if (reduceSinkOperator.getConf().isForwarding()
      && !reduceSinkOperator.getConf().isOrdering()) {
    return EdgeType.ONE_TO_ONE_EDGE;
  }
  if (followingWork instanceof ReduceWork) {
    // Ideally there should be a better way to determine that the followingWork contains
    // a dynamic partitioned hash join, but in some cases (createReduceWork()) it looks like
    // the work must be created/connected first, before the GenTezProcContext can be updated
    // with the mapjoin/work relationship.
    ReduceWork reduceWork = (ReduceWork) followingWork;
    if (reduceWork.getReducer() instanceof MapJoinOperator) {
      MapJoinOperator joinOp = (MapJoinOperator) reduceWork.getReducer();
      if (joinOp.getConf().isDynamicPartitionHashJoin()) {
        return EdgeType.CUSTOM_SIMPLE_EDGE;
      }
    }
  }
  if (!reduceSinkOperator.getConf().isOrdering()) {
    //if no sort keys are specified, use an edge that does not sort
    return EdgeType.CUSTOM_SIMPLE_EDGE;
  }
  return EdgeType.SIMPLE_EDGE;
}
specifier|public
specifier|static
name|void
name|processDynamicSemiJoinPushDownOperator
parameter_list|(
name|GenTezProcContext
name|procCtx
parameter_list|,
name|RuntimeValuesInfo
name|runtimeValuesInfo
parameter_list|,
name|ReduceSinkOperator
name|rs
parameter_list|)
throws|throws
name|SemanticException
block|{
name|SemiJoinBranchInfo
name|sjInfo
init|=
name|procCtx
operator|.
name|parseContext
operator|.
name|getRsToSemiJoinBranchInfo
argument_list|()
operator|.
name|get
argument_list|(
name|rs
argument_list|)
decl_stmt|;
name|List
argument_list|<
name|BaseWork
argument_list|>
name|rsWorkList
init|=
name|procCtx
operator|.
name|childToWorkMap
operator|.
name|get
argument_list|(
name|rs
argument_list|)
decl_stmt|;
if|if
condition|(
name|sjInfo
operator|==
literal|null
operator|||
name|rsWorkList
operator|==
literal|null
condition|)
block|{
comment|// This happens when the ReduceSink's edge has been removed by cycle
comment|// detection logic. Nothing to do here.
return|return;
block|}
if|if
condition|(
name|rsWorkList
operator|.
name|size
argument_list|()
operator|!=
literal|1
condition|)
block|{
name|StringBuilder
name|sb
init|=
operator|new
name|StringBuilder
argument_list|()
decl_stmt|;
for|for
control|(
name|BaseWork
name|curWork
range|:
name|rsWorkList
control|)
block|{
if|if
condition|(
name|sb
operator|.
name|length
argument_list|()
operator|>
literal|0
condition|)
block|{
name|sb
operator|.
name|append
argument_list|(
literal|", "
argument_list|)
expr_stmt|;
block|}
name|sb
operator|.
name|append
argument_list|(
name|curWork
operator|.
name|getName
argument_list|()
argument_list|)
expr_stmt|;
block|}
throw|throw
operator|new
name|SemanticException
argument_list|(
name|rs
operator|+
literal|" belongs to multiple BaseWorks: "
operator|+
name|sb
operator|.
name|toString
argument_list|()
argument_list|)
throw|;
block|}
name|TableScanOperator
name|ts
init|=
name|sjInfo
operator|.
name|getTsOp
argument_list|()
decl_stmt|;
name|LOG
operator|.
name|debug
argument_list|(
literal|"ResduceSink "
operator|+
name|rs
operator|+
literal|" to TableScan "
operator|+
name|ts
argument_list|)
expr_stmt|;
name|BaseWork
name|parentWork
init|=
name|rsWorkList
operator|.
name|get
argument_list|(
literal|0
argument_list|)
decl_stmt|;
name|BaseWork
name|childWork
init|=
name|procCtx
operator|.
name|rootToWorkMap
operator|.
name|get
argument_list|(
name|ts
argument_list|)
decl_stmt|;
comment|// Connect parent/child work with a brodacast edge.
name|LOG
operator|.
name|debug
argument_list|(
literal|"Connecting Baswork - "
operator|+
name|parentWork
operator|.
name|getName
argument_list|()
operator|+
literal|" to "
operator|+
name|childWork
operator|.
name|getName
argument_list|()
argument_list|)
expr_stmt|;
name|TezEdgeProperty
name|edgeProperty
init|=
operator|new
name|TezEdgeProperty
argument_list|(
name|EdgeType
operator|.
name|BROADCAST_EDGE
argument_list|)
decl_stmt|;
name|TezWork
name|tezWork
init|=
name|procCtx
operator|.
name|currentTask
operator|.
name|getWork
argument_list|()
decl_stmt|;
name|tezWork
operator|.
name|connect
argument_list|(
name|parentWork
argument_list|,
name|childWork
argument_list|,
name|edgeProperty
argument_list|)
expr_stmt|;
comment|// Set output names in ReduceSink
name|rs
operator|.
name|getConf
argument_list|()
operator|.
name|setOutputName
argument_list|(
name|childWork
operator|.
name|getName
argument_list|()
argument_list|)
expr_stmt|;
comment|// Set up the dynamic values in the childWork.
name|RuntimeValuesInfo
name|childRuntimeValuesInfo
init|=
operator|new
name|RuntimeValuesInfo
argument_list|()
decl_stmt|;
name|childRuntimeValuesInfo
operator|.
name|setTableDesc
argument_list|(
name|runtimeValuesInfo
operator|.
name|getTableDesc
argument_list|()
argument_list|)
expr_stmt|;
name|childRuntimeValuesInfo
operator|.
name|setDynamicValueIDs
argument_list|(
name|runtimeValuesInfo
operator|.
name|getDynamicValueIDs
argument_list|()
argument_list|)
expr_stmt|;
name|childRuntimeValuesInfo
operator|.
name|setColExprs
argument_list|(
name|runtimeValuesInfo
operator|.
name|getColExprs
argument_list|()
argument_list|)
expr_stmt|;
name|childWork
operator|.
name|setInputSourceToRuntimeValuesInfo
argument_list|(
name|parentWork
operator|.
name|getName
argument_list|()
argument_list|,
name|childRuntimeValuesInfo
argument_list|)
expr_stmt|;
block|}
comment|// Functionality to remove semi-join optimization
specifier|public
specifier|static
name|void
name|removeSemiJoinOperator
parameter_list|(
name|ParseContext
name|context
parameter_list|,
name|ReduceSinkOperator
name|rs
parameter_list|,
name|TableScanOperator
name|ts
parameter_list|)
throws|throws
name|SemanticException
block|{
comment|// Cleanup the synthetic predicate in the tablescan operator by
comment|// replacing it with "true"
name|LOG
operator|.
name|debug
argument_list|(
literal|"Removing ReduceSink "
operator|+
name|rs
operator|+
literal|" and TableScan "
operator|+
name|ts
argument_list|)
expr_stmt|;
name|ExprNodeDesc
name|constNode
init|=
operator|new
name|ExprNodeConstantDesc
argument_list|(
name|TypeInfoFactory
operator|.
name|booleanTypeInfo
argument_list|,
name|Boolean
operator|.
name|TRUE
argument_list|)
decl_stmt|;
comment|// TS operator
name|DynamicValuePredicateContext
name|filterDynamicValuePredicatesCollection
init|=
operator|new
name|DynamicValuePredicateContext
argument_list|()
decl_stmt|;
if|if
condition|(
name|ts
operator|.
name|getConf
argument_list|()
operator|.
name|getFilterExpr
argument_list|()
operator|!=
literal|null
condition|)
block|{
name|collectDynamicValuePredicates
argument_list|(
name|ts
operator|.
name|getConf
argument_list|()
operator|.
name|getFilterExpr
argument_list|()
argument_list|,
name|filterDynamicValuePredicatesCollection
argument_list|)
expr_stmt|;
for|for
control|(
name|ExprNodeDesc
name|nodeToRemove
range|:
name|filterDynamicValuePredicatesCollection
operator|.
name|childParentMapping
operator|.
name|keySet
argument_list|()
control|)
block|{
comment|// Find out if this synthetic predicate belongs to the current cycle
if|if
condition|(
name|removeSemiJoinPredicate
argument_list|(
name|context
argument_list|,
name|rs
argument_list|,
name|nodeToRemove
argument_list|)
condition|)
block|{
name|ExprNodeDesc
name|nodeParent
init|=
name|filterDynamicValuePredicatesCollection
operator|.
name|childParentMapping
operator|.
name|get
argument_list|(
name|nodeToRemove
argument_list|)
decl_stmt|;
if|if
condition|(
name|nodeParent
operator|==
literal|null
condition|)
block|{
comment|// This was the only predicate, set filter expression to null
name|ts
operator|.
name|getConf
argument_list|()
operator|.
name|setFilterExpr
argument_list|(
literal|null
argument_list|)
expr_stmt|;
block|}
else|else
block|{
name|int
name|i
init|=
name|nodeParent
operator|.
name|getChildren
argument_list|()
operator|.
name|indexOf
argument_list|(
name|nodeToRemove
argument_list|)
decl_stmt|;
name|nodeParent
operator|.
name|getChildren
argument_list|()
operator|.
name|remove
argument_list|(
name|i
argument_list|)
expr_stmt|;
name|nodeParent
operator|.
name|getChildren
argument_list|()
operator|.
name|add
argument_list|(
name|i
argument_list|,
name|constNode
argument_list|)
expr_stmt|;
block|}
block|}
block|}
block|}
comment|// Filter operator
for|for
control|(
name|Operator
argument_list|<
name|?
argument_list|>
name|op
range|:
name|ts
operator|.
name|getChildOperators
argument_list|()
control|)
block|{
if|if
condition|(
operator|!
operator|(
name|op
operator|instanceof
name|FilterOperator
operator|)
condition|)
block|{
continue|continue;
block|}
name|FilterDesc
name|filterDesc
init|=
operator|(
operator|(
name|FilterOperator
operator|)
name|op
operator|)
operator|.
name|getConf
argument_list|()
decl_stmt|;
name|filterDynamicValuePredicatesCollection
operator|=
operator|new
name|DynamicValuePredicateContext
argument_list|()
expr_stmt|;
name|collectDynamicValuePredicates
argument_list|(
name|filterDesc
operator|.
name|getPredicate
argument_list|()
argument_list|,
name|filterDynamicValuePredicatesCollection
argument_list|)
expr_stmt|;
for|for
control|(
name|ExprNodeDesc
name|nodeToRemove
range|:
name|filterDynamicValuePredicatesCollection
operator|.
name|childParentMapping
operator|.
name|keySet
argument_list|()
control|)
block|{
comment|// Find out if this synthetic predicate belongs to the current cycle
if|if
condition|(
name|removeSemiJoinPredicate
argument_list|(
name|context
argument_list|,
name|rs
argument_list|,
name|nodeToRemove
argument_list|)
condition|)
block|{
name|ExprNodeDesc
name|nodeParent
init|=
name|filterDynamicValuePredicatesCollection
operator|.
name|childParentMapping
operator|.
name|get
argument_list|(
name|nodeToRemove
argument_list|)
decl_stmt|;
if|if
condition|(
name|nodeParent
operator|==
literal|null
condition|)
block|{
comment|// This was the only predicate, set filter expression to const
name|filterDesc
operator|.
name|setPredicate
argument_list|(
name|constNode
argument_list|)
expr_stmt|;
block|}
else|else
block|{
name|int
name|i
init|=
name|nodeParent
operator|.
name|getChildren
argument_list|()
operator|.
name|indexOf
argument_list|(
name|nodeToRemove
argument_list|)
decl_stmt|;
name|nodeParent
operator|.
name|getChildren
argument_list|()
operator|.
name|remove
argument_list|(
name|i
argument_list|)
expr_stmt|;
name|nodeParent
operator|.
name|getChildren
argument_list|()
operator|.
name|add
argument_list|(
name|i
argument_list|,
name|constNode
argument_list|)
expr_stmt|;
block|}
block|}
block|}
block|}
name|context
operator|.
name|getRsToSemiJoinBranchInfo
argument_list|()
operator|.
name|remove
argument_list|(
name|rs
argument_list|)
expr_stmt|;
block|}
comment|/** Find out if this predicate constains the synthetic predicate to be removed */
specifier|private
specifier|static
name|boolean
name|removeSemiJoinPredicate
parameter_list|(
name|ParseContext
name|context
parameter_list|,
name|ReduceSinkOperator
name|rs
parameter_list|,
name|ExprNodeDesc
name|nodeToRemove
parameter_list|)
block|{
name|boolean
name|remove
init|=
literal|false
decl_stmt|;
for|for
control|(
name|ExprNodeDesc
name|expr
range|:
name|nodeToRemove
operator|.
name|getChildren
argument_list|()
control|)
block|{
if|if
condition|(
name|expr
operator|instanceof
name|ExprNodeDynamicValueDesc
condition|)
block|{
name|String
name|dynamicValueIdFromExpr
init|=
operator|(
operator|(
name|ExprNodeDynamicValueDesc
operator|)
name|expr
operator|)
operator|.
name|getDynamicValue
argument_list|()
operator|.
name|getId
argument_list|()
decl_stmt|;
name|List
argument_list|<
name|String
argument_list|>
name|dynamicValueIdsFromMap
init|=
name|context
operator|.
name|getRsToRuntimeValuesInfoMap
argument_list|()
operator|.
name|get
argument_list|(
name|rs
argument_list|)
operator|.
name|getDynamicValueIDs
argument_list|()
decl_stmt|;
for|for
control|(
name|String
name|dynamicValueIdFromMap
range|:
name|dynamicValueIdsFromMap
control|)
block|{
if|if
condition|(
name|dynamicValueIdFromExpr
operator|.
name|equals
argument_list|(
name|dynamicValueIdFromMap
argument_list|)
condition|)
block|{
comment|// Intended predicate to be removed
name|remove
operator|=
literal|true
expr_stmt|;
break|break;
block|}
block|}
block|}
block|}
return|return
name|remove
return|;
block|}
comment|// Functionality to remove semi-join optimization
specifier|public
specifier|static
name|void
name|removeSemiJoinOperator
parameter_list|(
name|ParseContext
name|context
parameter_list|,
name|AppMasterEventOperator
name|eventOp
parameter_list|,
name|TableScanOperator
name|ts
parameter_list|)
throws|throws
name|SemanticException
block|{
comment|// Cleanup the synthetic predicate in the tablescan operator and filter by
comment|// replacing it with "true"
name|LOG
operator|.
name|debug
argument_list|(
literal|"Removing AppMasterEventOperator "
operator|+
name|eventOp
operator|+
literal|" and TableScan "
operator|+
name|ts
argument_list|)
expr_stmt|;
name|ExprNodeDesc
name|constNode
init|=
operator|new
name|ExprNodeConstantDesc
argument_list|(
name|TypeInfoFactory
operator|.
name|booleanTypeInfo
argument_list|,
name|Boolean
operator|.
name|TRUE
argument_list|)
decl_stmt|;
comment|// Retrieve generator
name|DynamicPruningEventDesc
name|dped
init|=
operator|(
name|DynamicPruningEventDesc
operator|)
name|eventOp
operator|.
name|getConf
argument_list|()
decl_stmt|;
comment|// TS operator
name|DynamicPartitionPrunerContext
name|filterDynamicListPredicatesCollection
init|=
operator|new
name|DynamicPartitionPrunerContext
argument_list|()
decl_stmt|;
if|if
condition|(
name|ts
operator|.
name|getConf
argument_list|()
operator|.
name|getFilterExpr
argument_list|()
operator|!=
literal|null
condition|)
block|{
name|collectDynamicPruningConditions
argument_list|(
name|ts
operator|.
name|getConf
argument_list|()
operator|.
name|getFilterExpr
argument_list|()
argument_list|,
name|filterDynamicListPredicatesCollection
argument_list|)
expr_stmt|;
for|for
control|(
name|DynamicListContext
name|ctx
range|:
name|filterDynamicListPredicatesCollection
control|)
block|{
if|if
condition|(
name|ctx
operator|.
name|generator
operator|!=
name|dped
operator|.
name|getGenerator
argument_list|()
condition|)
block|{
continue|continue;
block|}
comment|// remove the condition by replacing it with "true"
if|if
condition|(
name|ctx
operator|.
name|grandParent
operator|==
literal|null
condition|)
block|{
comment|// This was the only predicate, set filter expression to const
name|ts
operator|.
name|getConf
argument_list|()
operator|.
name|setFilterExpr
argument_list|(
literal|null
argument_list|)
expr_stmt|;
block|}
else|else
block|{
name|int
name|i
init|=
name|ctx
operator|.
name|grandParent
operator|.
name|getChildren
argument_list|()
operator|.
name|indexOf
argument_list|(
name|ctx
operator|.
name|parent
argument_list|)
decl_stmt|;
name|ctx
operator|.
name|grandParent
operator|.
name|getChildren
argument_list|()
operator|.
name|remove
argument_list|(
name|i
argument_list|)
expr_stmt|;
name|ctx
operator|.
name|grandParent
operator|.
name|getChildren
argument_list|()
operator|.
name|add
argument_list|(
name|i
argument_list|,
name|constNode
argument_list|)
expr_stmt|;
block|}
block|}
block|}
comment|// Filter operator
name|filterDynamicListPredicatesCollection
operator|.
name|dynLists
operator|.
name|clear
argument_list|()
expr_stmt|;
for|for
control|(
name|Operator
argument_list|<
name|?
argument_list|>
name|op
range|:
name|ts
operator|.
name|getChildOperators
argument_list|()
control|)
block|{
if|if
condition|(
operator|!
operator|(
name|op
operator|instanceof
name|FilterOperator
operator|)
condition|)
block|{
continue|continue;
block|}
name|FilterDesc
name|filterDesc
init|=
operator|(
operator|(
name|FilterOperator
operator|)
name|op
operator|)
operator|.
name|getConf
argument_list|()
decl_stmt|;
name|collectDynamicPruningConditions
argument_list|(
name|filterDesc
operator|.
name|getPredicate
argument_list|()
argument_list|,
name|filterDynamicListPredicatesCollection
argument_list|)
expr_stmt|;
for|for
control|(
name|DynamicListContext
name|ctx
range|:
name|filterDynamicListPredicatesCollection
control|)
block|{
if|if
condition|(
name|ctx
operator|.
name|generator
operator|!=
name|dped
operator|.
name|getGenerator
argument_list|()
condition|)
block|{
continue|continue;
block|}
comment|// remove the condition by replacing it with "true"
if|if
condition|(
name|ctx
operator|.
name|grandParent
operator|==
literal|null
condition|)
block|{
comment|// This was the only predicate, set filter expression to const
name|filterDesc
operator|.
name|setPredicate
argument_list|(
name|constNode
argument_list|)
expr_stmt|;
block|}
else|else
block|{
name|int
name|i
init|=
name|ctx
operator|.
name|grandParent
operator|.
name|getChildren
argument_list|()
operator|.
name|indexOf
argument_list|(
name|ctx
operator|.
name|parent
argument_list|)
decl_stmt|;
name|ctx
operator|.
name|grandParent
operator|.
name|getChildren
argument_list|()
operator|.
name|remove
argument_list|(
name|i
argument_list|)
expr_stmt|;
name|ctx
operator|.
name|grandParent
operator|.
name|getChildren
argument_list|()
operator|.
name|add
argument_list|(
name|i
argument_list|,
name|constNode
argument_list|)
expr_stmt|;
block|}
block|}
block|}
block|}
specifier|private
specifier|static
class|class
name|DynamicValuePredicateContext
implements|implements
name|NodeProcessorCtx
block|{
name|HashMap
argument_list|<
name|ExprNodeDesc
argument_list|,
name|ExprNodeDesc
argument_list|>
name|childParentMapping
init|=
operator|new
name|HashMap
argument_list|<
name|ExprNodeDesc
argument_list|,
name|ExprNodeDesc
argument_list|>
argument_list|()
decl_stmt|;
block|}
specifier|private
specifier|static
class|class
name|DynamicValuePredicateProc
implements|implements
name|SemanticNodeProcessor
block|{
annotation|@
name|Override
specifier|public
name|Object
name|process
parameter_list|(
name|Node
name|nd
parameter_list|,
name|Stack
argument_list|<
name|Node
argument_list|>
name|stack
parameter_list|,
name|NodeProcessorCtx
name|procCtx
parameter_list|,
name|Object
modifier|...
name|nodeOutputs
parameter_list|)
throws|throws
name|SemanticException
block|{
name|DynamicValuePredicateContext
name|ctx
init|=
operator|(
name|DynamicValuePredicateContext
operator|)
name|procCtx
decl_stmt|;
name|ExprNodeDesc
name|parent
init|=
operator|(
name|ExprNodeDesc
operator|)
name|stack
operator|.
name|get
argument_list|(
name|stack
operator|.
name|size
argument_list|()
operator|-
literal|2
argument_list|)
decl_stmt|;
if|if
condition|(
name|parent
operator|instanceof
name|ExprNodeGenericFuncDesc
condition|)
block|{
name|ExprNodeGenericFuncDesc
name|parentFunc
init|=
operator|(
name|ExprNodeGenericFuncDesc
operator|)
name|parent
decl_stmt|;
if|if
condition|(
name|parentFunc
operator|.
name|getGenericUDF
argument_list|()
operator|instanceof
name|GenericUDFBetween
operator|||
name|parentFunc
operator|.
name|getGenericUDF
argument_list|()
operator|instanceof
name|GenericUDFInBloomFilter
condition|)
block|{
name|ExprNodeDesc
name|grandParent
init|=
name|stack
operator|.
name|size
argument_list|()
operator|>=
literal|3
condition|?
operator|(
name|ExprNodeDesc
operator|)
name|stack
operator|.
name|get
argument_list|(
name|stack
operator|.
name|size
argument_list|()
operator|-
literal|3
argument_list|)
else|:
literal|null
decl_stmt|;
name|ctx
operator|.
name|childParentMapping
operator|.
name|put
argument_list|(
name|parentFunc
argument_list|,
name|grandParent
argument_list|)
expr_stmt|;
block|}
block|}
return|return
literal|null
return|;
block|}
block|}
specifier|private
specifier|static
name|void
name|collectDynamicValuePredicates
parameter_list|(
name|ExprNodeDesc
name|pred
parameter_list|,
name|NodeProcessorCtx
name|ctx
parameter_list|)
throws|throws
name|SemanticException
block|{
comment|// create a walker which walks the tree in a DFS manner while maintaining
comment|// the operator stack. The dispatcher
comment|// generates the plan from the operator tree
name|Map
argument_list|<
name|SemanticRule
argument_list|,
name|SemanticNodeProcessor
argument_list|>
name|exprRules
init|=
operator|new
name|LinkedHashMap
argument_list|<
name|SemanticRule
argument_list|,
name|SemanticNodeProcessor
argument_list|>
argument_list|()
decl_stmt|;
name|exprRules
operator|.
name|put
argument_list|(
operator|new
name|RuleRegExp
argument_list|(
literal|"R1"
argument_list|,
name|ExprNodeDynamicValueDesc
operator|.
name|class
operator|.
name|getName
argument_list|()
operator|+
literal|"%"
argument_list|)
argument_list|,
operator|new
name|DynamicValuePredicateProc
argument_list|()
argument_list|)
expr_stmt|;
name|SemanticDispatcher
name|disp
init|=
operator|new
name|DefaultRuleDispatcher
argument_list|(
literal|null
argument_list|,
name|exprRules
argument_list|,
name|ctx
argument_list|)
decl_stmt|;
name|SemanticGraphWalker
name|egw
init|=
operator|new
name|DefaultGraphWalker
argument_list|(
name|disp
argument_list|)
decl_stmt|;
name|List
argument_list|<
name|Node
argument_list|>
name|startNodes
init|=
operator|new
name|ArrayList
argument_list|<
name|Node
argument_list|>
argument_list|()
decl_stmt|;
name|startNodes
operator|.
name|add
argument_list|(
name|pred
argument_list|)
expr_stmt|;
name|egw
operator|.
name|startWalking
argument_list|(
name|startNodes
argument_list|,
literal|null
argument_list|)
expr_stmt|;
block|}
specifier|public
specifier|static
class|class
name|DynamicListContext
block|{
specifier|public
name|ExprNodeDynamicListDesc
name|desc
decl_stmt|;
specifier|public
name|ExprNodeDesc
name|parent
decl_stmt|;
specifier|public
name|ExprNodeDesc
name|grandParent
decl_stmt|;
specifier|public
name|ReduceSinkOperator
name|generator
decl_stmt|;
specifier|public
name|DynamicListContext
parameter_list|(
name|ExprNodeDynamicListDesc
name|desc
parameter_list|,
name|ExprNodeDesc
name|parent
parameter_list|,
name|ExprNodeDesc
name|grandParent
parameter_list|,
name|ReduceSinkOperator
name|generator
parameter_list|)
block|{
name|this
operator|.
name|desc
operator|=
name|desc
expr_stmt|;
name|this
operator|.
name|parent
operator|=
name|parent
expr_stmt|;
name|this
operator|.
name|grandParent
operator|=
name|grandParent
expr_stmt|;
name|this
operator|.
name|generator
operator|=
name|generator
expr_stmt|;
block|}
specifier|public
name|ExprNodeDesc
name|getKeyCol
parameter_list|()
block|{
name|ExprNodeDesc
name|keyCol
init|=
name|desc
operator|.
name|getTarget
argument_list|()
decl_stmt|;
if|if
condition|(
name|keyCol
operator|!=
literal|null
condition|)
block|{
return|return
name|keyCol
return|;
block|}
return|return
name|generator
operator|.
name|getConf
argument_list|()
operator|.
name|getKeyCols
argument_list|()
operator|.
name|get
argument_list|(
name|desc
operator|.
name|getKeyIndex
argument_list|()
argument_list|)
return|;
block|}
block|}
specifier|public
specifier|static
class|class
name|DynamicPartitionPrunerContext
implements|implements
name|NodeProcessorCtx
implements|,
name|Iterable
argument_list|<
name|DynamicListContext
argument_list|>
block|{
specifier|public
name|List
argument_list|<
name|DynamicListContext
argument_list|>
name|dynLists
init|=
operator|new
name|ArrayList
argument_list|<
name|DynamicListContext
argument_list|>
argument_list|()
decl_stmt|;
specifier|public
name|void
name|addDynamicList
parameter_list|(
name|ExprNodeDynamicListDesc
name|desc
parameter_list|,
name|ExprNodeDesc
name|parent
parameter_list|,
name|ExprNodeDesc
name|grandParent
parameter_list|,
name|ReduceSinkOperator
name|generator
parameter_list|)
block|{
name|dynLists
operator|.
name|add
argument_list|(
operator|new
name|DynamicListContext
argument_list|(
name|desc
argument_list|,
name|parent
argument_list|,
name|grandParent
argument_list|,
name|generator
argument_list|)
argument_list|)
expr_stmt|;
block|}
annotation|@
name|Override
specifier|public
name|Iterator
argument_list|<
name|DynamicListContext
argument_list|>
name|iterator
parameter_list|()
block|{
return|return
name|dynLists
operator|.
name|iterator
argument_list|()
return|;
block|}
block|}
specifier|public
specifier|static
class|class
name|DynamicPartitionPrunerProc
implements|implements
name|SemanticNodeProcessor
block|{
comment|/** * process simply remembers all the dynamic partition pruning expressions * found */
annotation|@
name|Override
specifier|public
name|Object
name|process
parameter_list|(
name|Node
name|nd
parameter_list|,
name|Stack
argument_list|<
name|Node
argument_list|>
name|stack
parameter_list|,
name|NodeProcessorCtx
name|procCtx
parameter_list|,
name|Object
modifier|...
name|nodeOutputs
parameter_list|)
throws|throws
name|SemanticException
block|{
name|ExprNodeDynamicListDesc
name|desc
init|=
operator|(
name|ExprNodeDynamicListDesc
operator|)
name|nd
decl_stmt|;
name|DynamicPartitionPrunerContext
name|context
init|=
operator|(
name|DynamicPartitionPrunerContext
operator|)
name|procCtx
decl_stmt|;
comment|// Rule is searching for dynamic pruning expr. There's at least an IN
comment|// expression wrapping it.
name|ExprNodeDesc
name|parent
init|=
operator|(
name|ExprNodeDesc
operator|)
name|stack
operator|.
name|get
argument_list|(
name|stack
operator|.
name|size
argument_list|()
operator|-
literal|2
argument_list|)
decl_stmt|;
name|ExprNodeDesc
name|grandParent
init|=
name|stack
operator|.
name|size
argument_list|()
operator|>=
literal|3
condition|?
operator|(
name|ExprNodeDesc
operator|)
name|stack
operator|.
name|get
argument_list|(
name|stack
operator|.
name|size
argument_list|()
operator|-
literal|3
argument_list|)
else|:
literal|null
decl_stmt|;
name|context
operator|.
name|addDynamicList
argument_list|(
name|desc
argument_list|,
name|parent
argument_list|,
name|grandParent
argument_list|,
operator|(
name|ReduceSinkOperator
operator|)
name|desc
operator|.
name|getSource
argument_list|()
argument_list|)
expr_stmt|;
return|return
name|context
return|;
block|}
block|}
specifier|public
specifier|static
name|Map
argument_list|<
name|Node
argument_list|,
name|Object
argument_list|>
name|collectDynamicPruningConditions
parameter_list|(
name|ExprNodeDesc
name|pred
parameter_list|,
name|NodeProcessorCtx
name|ctx
parameter_list|)
throws|throws
name|SemanticException
block|{
comment|// create a walker which walks the tree in a DFS manner while maintaining
comment|// the operator stack. The dispatcher
comment|// generates the plan from the operator tree
name|Map
argument_list|<
name|SemanticRule
argument_list|,
name|SemanticNodeProcessor
argument_list|>
name|exprRules
init|=
operator|new
name|LinkedHashMap
argument_list|<
name|SemanticRule
argument_list|,
name|SemanticNodeProcessor
argument_list|>
argument_list|()
decl_stmt|;
name|exprRules
operator|.
name|put
argument_list|(
operator|new
name|RuleRegExp
argument_list|(
literal|"R1"
argument_list|,
name|ExprNodeDynamicListDesc
operator|.
name|class
operator|.
name|getName
argument_list|()
operator|+
literal|"%"
argument_list|)
argument_list|,
operator|new
name|DynamicPartitionPrunerProc
argument_list|()
argument_list|)
expr_stmt|;
comment|// The dispatcher fires the processor corresponding to the closest matching
comment|// rule and passes the context along
name|SemanticDispatcher
name|disp
init|=
operator|new
name|DefaultRuleDispatcher
argument_list|(
literal|null
argument_list|,
name|exprRules
argument_list|,
name|ctx
argument_list|)
decl_stmt|;
name|SemanticGraphWalker
name|egw
init|=
operator|new
name|DefaultGraphWalker
argument_list|(
name|disp
argument_list|)
decl_stmt|;
name|List
argument_list|<
name|Node
argument_list|>
name|startNodes
init|=
operator|new
name|ArrayList
argument_list|<
name|Node
argument_list|>
argument_list|()
decl_stmt|;
name|startNodes
operator|.
name|add
argument_list|(
name|pred
argument_list|)
expr_stmt|;
name|HashMap
argument_list|<
name|Node
argument_list|,
name|Object
argument_list|>
name|outputMap
init|=
operator|new
name|HashMap
argument_list|<
name|Node
argument_list|,
name|Object
argument_list|>
argument_list|()
decl_stmt|;
name|egw
operator|.
name|startWalking
argument_list|(
name|startNodes
argument_list|,
name|outputMap
argument_list|)
expr_stmt|;
return|return
name|outputMap
return|;
block|}
specifier|private
specifier|static
name|Integer
name|obtainBufferSize
parameter_list|(
name|Operator
argument_list|<
name|?
argument_list|>
name|op
parameter_list|,
name|ReduceSinkOperator
name|rsOp
parameter_list|,
name|int
name|defaultTinyBufferSize
parameter_list|)
block|{
if|if
condition|(
name|op
operator|instanceof
name|GroupByOperator
condition|)
block|{
name|GroupByOperator
name|groupByOperator
init|=
operator|(
name|GroupByOperator
operator|)
name|op
decl_stmt|;
if|if
condition|(
name|groupByOperator
operator|.
name|getConf
argument_list|()
operator|.
name|getKeys
argument_list|()
operator|.
name|isEmpty
argument_list|()
operator|&&
name|groupByOperator
operator|.
name|getConf
argument_list|()
operator|.
name|getMode
argument_list|()
operator|==
name|GroupByDesc
operator|.
name|Mode
operator|.
name|MERGEPARTIAL
condition|)
block|{
comment|// Check configuration and value is -1, infer value
name|int
name|result
init|=
name|defaultTinyBufferSize
operator|==
operator|-
literal|1
condition|?
operator|(
name|int
operator|)
name|Math
operator|.
name|ceil
argument_list|(
operator|(
name|double
operator|)
name|groupByOperator
operator|.
name|getStatistics
argument_list|()
operator|.
name|getDataSize
argument_list|()
operator|/
literal|1E6
argument_list|)
else|:
name|defaultTinyBufferSize
decl_stmt|;
if|if
condition|(
name|LOG
operator|.
name|isDebugEnabled
argument_list|()
condition|)
block|{
name|LOG
operator|.
name|debug
argument_list|(
literal|"Buffer size for output from operator {} can be set to {}Mb"
argument_list|,
name|rsOp
argument_list|,
name|result
argument_list|)
expr_stmt|;
block|}
return|return
name|result
return|;
block|}
block|}
return|return
literal|null
return|;
block|}
block|}
end_class
end_unit
|
akhilgupta01/tool.codoogle | codoogle-calltrace-indexer/src/main/java/gupta/akhil/tools/indexer/CallTraceCollector.java | package gupta.akhil.tools.indexer;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import gupta.akhil.tools.common.config.SystemConfig;
import gupta.akhil.tools.common.config.TraceCollectionGroup;
import gupta.akhil.tools.wls.tda.ThreadSnapshot;
public class CallTraceCollector {
public static void main(String[] args) throws Exception{
Map<String, ArrayBlockingQueue<ThreadSnapshot>> threadSnapshotQueueMap = new HashMap<String,ArrayBlockingQueue<ThreadSnapshot>>();
List<TraceCollectionGroup> traceCollectionGroups = SystemConfig.getTraceCollectionGroups();
ExecutorService executorService = Executors.newFixedThreadPool(traceCollectionGroups.size() * 2);
for(TraceCollectionGroup traceCollectionGroup : traceCollectionGroups){
ArrayBlockingQueue<ThreadSnapshot> threadDumpsQueue = threadSnapshotQueueMap.get(traceCollectionGroup.getIndexGroup());
if(threadDumpsQueue == null){
threadDumpsQueue = new ArrayBlockingQueue<ThreadSnapshot>(5000);
}
ServerRuntimeProvider serverRuntimeProvider = new ServerRuntimeProvider(traceCollectionGroup.getDomainConfigurations());
executorService.execute(new ThreadSnapshotRetriever(threadDumpsQueue, serverRuntimeProvider, traceCollectionGroup));
executorService.execute(new ThreadSnapshotProcessor(threadDumpsQueue, traceCollectionGroup.getIndexGroup()));
}
executorService.awaitTermination(24, TimeUnit.HOURS);
}
} |
Cyanss/JavaToolkit | Toolkit/Rest-Wrapper/Chief-Wrapper/Chief-Rooter/chief-toolkit-starter/src/main/java/cyan/toolkit/chief/ChiefService.java | <gh_stars>1-10
package cyan.toolkit.chief;
import cyan.toolkit.chief.service.BuilderAdvice;
import cyan.toolkit.chief.service.InfoService;
import java.util.Date;
/**
* <p>RestService</p>
* @author Cyan (<EMAIL>)
* @version V.0.0.1
* @group cyan.tool.kit
* @date 16:00 2020/11/3
*/
public abstract class ChiefService<M extends ChiefModel,E extends ChiefEntity,F extends ChiefFilter> extends InfoService<Long, Date, M, E, F> implements BuilderAdvice<Long,Date,M, E> {
@Override
protected E createEntity(M model, Boolean isInsert) {
return model.toEntity(isInsert);
}
@Override
protected M createModel(E entity) {
return entity.toModel();
}
}
|
Kentik/telegraf | plugins/outputs/kentik/kentik.go | <reponame>Kentik/telegraf<filename>plugins/outputs/kentik/kentik.go
package kentik
import (
"fmt"
"log"
"net"
"regexp"
"strings"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/outputs"
"github.com/kentik/libkflow"
)
var (
allowedChars = regexp.MustCompile(`[^a-zA-Z0-9-_./\p{L}]`)
hypenChars = strings.NewReplacer(
"@", "-",
"*", "-",
`%`, "-",
"#", "-",
"$", "-")
)
const (
PLUGIN_NAME = "telegraph"
PLUGIN_VERSION = "1.0.0"
DEFAULT_CIDR = "10.0.0.0/8"
)
type Kentik struct {
Prefix string
Email string
Token string
DeviceID int
FlowDest string
Cidr string
Debug bool
IgnoreField string
sendingIP net.IP
client *libkflow.Sender
customIdStrings map[string]uint32
customIdInts map[string]uint32
}
var sampleConfig = `
## prefix for metrics keys
prefix = "my.specific.prefix."
## Kentik user email
email = ""
## Kentik user api token
token = ""
## Kentik device id
deviceID = 0
## DNS name of the Kentik server. Defaults to flow.kentik.com
flowDest = ""
## Debug true - Prints Kentik communication
debug = false
## IPRange to use
cidr = "10.0.0.0/8"
## IgnoreField "" - If fieldName matches this, don't add the field name to the metric passed to TSDB.
ignoreField = ""
`
// Connect establishes the libkflow sender used by Write. Two paths exist:
// an explicit DeviceID, or discovery of a local interface IP inside o.Cidr
// (defaulting to DEFAULT_CIDR). If no device can be resolved, the plugin
// logs and disables itself (o.client stays nil) rather than failing telegraf.
// On success it caches the device's custom column ids and starts a goroutine
// that drains libkflow's error channel.
func (o *Kentik) Connect() error {
	config := libkflow.NewConfig(o.Email, o.Token, PLUGIN_NAME, PLUGIN_VERSION)
	if o.FlowDest != "" {
		config.SetFlow(o.FlowDest)
	}
	if o.Debug {
		config.SetVerbose(1)
	}
	// Unbuffered channel, drained for the plugin's lifetime by handleErrors.
	errors := make(chan error, 0)
	var client *libkflow.Sender
	var err error
	if o.DeviceID != 0 {
		client, err = libkflow.NewSenderWithDeviceID(o.DeviceID, errors, config)
		if err != nil {
			return fmt.Errorf("Cannot start client: %v", err)
		}
	} else {
		if strings.TrimSpace(o.Cidr) == "" {
			o.Cidr = DEFAULT_CIDR
		}
		// NOTE(review): ":=" shadows the outer err here; harmless because it
		// is checked immediately.
		_, ipr, err := net.ParseCIDR(o.Cidr)
		if err != nil {
			return fmt.Errorf("Invalid CIDR: %s %v", o.Cidr, err)
		}
		// Try to find device based on ip
		ifaces, err := net.Interfaces()
		if err != nil {
			return fmt.Errorf("Cannot find client ip address: %v", err)
		}
	outer:
		for _, i := range ifaces {
			addrs, err := i.Addrs()
			if err != nil {
				return fmt.Errorf("Cannot find client ip address from if: %v", err)
			}
			// handle err
			for _, addr := range addrs {
				var ip net.IP
				switch v := addr.(type) {
				case *net.IPNet:
					ip = v.IP
				case *net.IPAddr:
					ip = v.IP
				}
				// First interface address inside the configured range wins.
				if ipr.Contains(ip) {
					o.sendingIP = ip
					client, err = libkflow.NewSenderWithDeviceIP(ip, errors, config)
					if err != nil {
						// No registered device for this IP: disable quietly.
						log.Printf("Kentik: No Device found for this IP, turning off, %s", ip)
						return nil
					} else {
						log.Printf("Kentik, Using IP %s", ip)
					}
					break outer
				}
			}
		}
	}
	if client == nil {
		log.Printf("Kentik: No DeviceID found, turning off")
		return nil
	}
	go o.handleErrors(errors)
	o.client = client
	// Cache the device's custom column ids, split by column type, so each
	// flow record can be annotated without further API lookups.
	o.customIdStrings = map[string]uint32{}
	o.customIdInts = map[string]uint32{}
	for _, c := range client.Device.Customs {
		if c.Type == "string" {
			o.customIdStrings[c.Name] = uint32(c.ID)
		} else {
			o.customIdInts[c.Name] = uint32(c.ID)
		}
	}
	log.Printf("Kentik, connected using %s. %d custom strs found, %d ints", o.Email, len(o.customIdStrings), len(o.customIdInts))
	return nil
}
// handleErrors drains the libkflow error channel for the lifetime of the
// sender, logging each error it receives. It runs on its own goroutine
// (started in Connect).
//
// The original for/select over a single channel is equivalent to a plain
// range; range is the idiom and additionally terminates cleanly if the
// channel is ever closed, where the select version would spin forever on
// zero values.
func (o *Kentik) handleErrors(errors chan error) {
	for msg := range errors {
		log.Printf("LibError: %v", msg)
	}
}
// Write forwards a batch of metrics to Kentik. It is a no-op (returning nil)
// when the batch is empty or when Connect disabled the plugin (nil client).
func (o *Kentik) Write(metrics []telegraf.Metric) error {
	if o.client == nil || len(metrics) == 0 {
		return nil
	}
	return o.WriteHttp(metrics)
}
// WriteHttp converts every field of every metric into a KentikMetric and
// ships it as a flow record through the libkflow client. Fields whose values
// cannot be represented as uint64 are skipped with a debug log line. The
// field name is appended to the metric name unless it matches IgnoreField.
func (o *Kentik) WriteHttp(metrics []telegraf.Metric) error {
	for _, m := range metrics {
		timestamp := m.UnixNano() / 1000000000 // nanoseconds -> seconds
		tagSet := cleanTags(m.Tags())
		for field, raw := range m.Fields() {
			value, err := buildValue(raw)
			if err != nil {
				log.Printf("D! Kentik does not support metric value: [%s] of type [%T]. %v\n", raw, raw, err)
				continue
			}
			name := o.Prefix + m.Name()
			if field != o.IgnoreField {
				name = name + "_" + field
			}
			metric := &KentikMetric{
				Metric:    sanitize(name),
				Tags:      tagSet,
				Timestamp: timestamp,
				Value:     value,
			}
			o.client.Send(ToFlow(o.customIdStrings, o.customIdInts, metric, o.sendingIP))
			if o.Debug {
				metric.Print()
			}
		}
	}
	return nil
}
// cleanTags returns a fresh map in which every tag key and value has been
// passed through sanitize; the input map is left untouched.
func cleanTags(tags map[string]string) map[string]string {
	out := make(map[string]string, len(tags))
	for key, value := range tags {
		out[sanitize(key)] = sanitize(value)
	}
	return out
}
// buildValue coerces a telegraf field value into the uint64 Kentik expects.
// Supported input types are int64, uint64 and float64 (floats are truncated
// toward zero); any other type yields an error. Note that negative inputs
// wrap (int64) or are unspecified (float64) under the uint64 conversion —
// callers are expected to feed non-negative counter values.
//
// Fixes vs original: the uint64 case no longer performs a redundant
// uint64(uint64) conversion, and the mutable accumulator is replaced by
// direct returns per case.
func buildValue(v interface{}) (uint64, error) {
	switch p := v.(type) {
	case int64:
		return uint64(p), nil
	case uint64:
		return p, nil // already the target type
	case float64:
		return uint64(p), nil
	default:
		return 0, fmt.Errorf("unexpected type %T with value %v for Kentik", v, v)
	}
}
// SampleConfig returns the example configuration snippet telegraf embeds in
// generated config files.
func (o *Kentik) SampleConfig() string {
	return sampleConfig
}
// Description returns the one-line summary telegraf shows for this output.
func (o *Kentik) Description() string {
	return "Configuration for Kentik server to send metrics to"
}
// Close satisfies the telegraf.Output interface. The libkflow sender is not
// shut down here; this plugin keeps it (and its error goroutine) alive for
// the process lifetime.
func (o *Kentik) Close() error {
	return nil
}
// sanitize rewrites a metric name/tag into Kentik's allowed character set:
// selected punctuation becomes "-" first, then anything still outside
// allowedChars becomes "_".
func sanitize(value string) string {
	// Apply special hyphenation rules to preserve backwards compatibility
	value = hypenChars.Replace(value)
	// Replace any remaining illegal chars
	return allowedChars.ReplaceAllLiteralString(value, "_")
}
// init registers this plugin with telegraf's output registry under "kentik".
func init() {
	outputs.Add("kentik", func() telegraf.Output {
		return &Kentik{}
	})
}
|
aldernero/genart | visual_tests/kdtree_mouse/kdtree_mouse.go | <filename>visual_tests/kdtree_mouse/kdtree_mouse.go
package main
import (
"flag"
"github.com/aldernero/sketchy"
"github.com/hajimehoshi/ebiten/v2"
"github.com/hajimehoshi/ebiten/v2/inpututil"
"github.com/tdewolff/canvas"
"image/color"
"log"
"strconv"
)
// Shared sketch state: the k-d tree under test, the latest nearest-neighbor
// query result (recomputed every frame), and a monotonically increasing id
// assigned to inserted points.
var kdtree *sketchy.KDTree
var nearestPoints []sketchy.IndexPoint
var count int
// update is the per-frame logic callback. The "Clear" toggle empties the
// tree; a just-released left click inside the sketch area inserts the cursor
// position as a new point; otherwise the cursor drives a nearest-neighbor
// query whose result is stored for draw.
func update(s *sketchy.Sketch) {
	if s.Toggle("Clear") {
		kdtree.Clear()
		count = 0
	}
	nearestPoints = []sketchy.IndexPoint{}
	cx, cy := ebiten.CursorPosition()
	inside := s.PointInSketchArea(float64(cx), float64(cy))
	if inpututil.IsMouseButtonJustReleased(ebiten.MouseButtonLeft) {
		if inside {
			p := s.CanvasCoords(float64(cx), float64(cy))
			kdtree.Insert(p.ToIndexPoint(count))
			count++
		}
	} else if inside {
		p := s.CanvasCoords(float64(cx), float64(cy))
		k := int(s.Slider("Closest Neighbors"))
		nearestPoints = kdtree.NearestNeighbors(p.ToIndexPoint(-1), k)
	}
}
// draw is the per-frame render callback: it renders the tree (optionally
// with its points), a fixed blue query rectangle in the canvas center with
// the points it contains, the current nearest-neighbor set in magenta, and a
// red counter of how many points fell inside the query rectangle.
func draw(s *sketchy.Sketch, c *canvas.Context) {
	// Drawing code goes here
	c.SetStrokeColor(color.White)
	c.SetFillColor(color.Transparent)
	c.SetStrokeCapper(canvas.ButtCap)
	c.SetStrokeWidth(s.Slider("Line Thickness"))
	pointSize := s.Slider("Point Size")
	if s.Toggle("Show Points") {
		kdtree.DrawWithPoints(pointSize, c)
	} else {
		kdtree.Draw(c)
	}
	// Fixed query region: the central 20% x 20% of the canvas.
	queryRect := sketchy.Rect{
		X: 0.4 * c.Width(),
		Y: 0.4 * c.Height(),
		W: 0.2 * c.Width(),
		H: 0.2 * c.Height(),
	}
	foundPoints := kdtree.Query(queryRect)
	c.SetStrokeColor(canvas.Blue)
	c.DrawPath(queryRect.X, queryRect.Y, canvas.Rectangle(queryRect.W, queryRect.H))
	for _, p := range foundPoints {
		p.Draw(pointSize, c)
	}
	// Highlight the nearest neighbors computed in update.
	c.SetStrokeColor(canvas.Magenta)
	if len(nearestPoints) > 0 {
		for _, p := range nearestPoints {
			p.Draw(pointSize, c)
		}
	}
	// Text readout: number of points inside the query rectangle.
	ff := s.FontFamily.Face(14, canvas.Red, canvas.FontRegular, canvas.FontNormal)
	textBox := canvas.NewTextBox(ff, strconv.FormatInt(int64(len(foundPoints)), 10), 100, 20, canvas.Left, canvas.Bottom, 0, 0)
	c.DrawText(0.1*c.Width(), 0.95*c.Height(), textBox)
}
// main wires the sketch together: parse CLI flags, load the sketch config,
// install the update/draw callbacks, size the k-d tree to the sketch canvas,
// configure the ebiten window, and run the game loop. The setup order
// matters: s.Init() must run before Width()/Height() are queried and before
// RunGame starts.
func main() {
	var configFile string
	var prefix string
	var randomSeed int64
	flag.StringVar(&configFile, "c", "sketch.json", "Sketch config file")
	flag.StringVar(&prefix, "p", "", "Output file prefix")
	flag.Int64Var(&randomSeed, "s", 0, "Random number generator seed")
	flag.Parse()
	s, err := sketchy.NewSketchFromFile(configFile)
	if err != nil {
		log.Fatal(err)
	}
	// CLI prefix overrides the config file's prefix only when non-empty.
	if prefix != "" {
		s.Prefix = prefix
	}
	s.RandomSeed = randomSeed
	s.Updater = update
	s.Drawer = draw
	s.Init()
	// The tree covers the full canvas area.
	w := s.Width()
	h := s.Height()
	rect := sketchy.Rect{
		X: 0,
		Y: 0,
		W: w,
		H: h,
	}
	kdtree = sketchy.NewKDTree(rect)
	ebiten.SetWindowSize(int(s.ControlWidth+s.SketchWidth), int(s.SketchHeight))
	ebiten.SetWindowTitle("Sketchy - " + s.Title)
	ebiten.SetWindowResizable(false)
	// Uncapped rendering: draw as fast as possible, TPS follows FPS.
	ebiten.SetFPSMode(ebiten.FPSModeVsyncOffMaximum)
	ebiten.SetMaxTPS(ebiten.SyncWithFPS)
	if err := ebiten.RunGame(s); err != nil {
		log.Fatal(err)
	}
}
|
HongZeBin98/HZB_ONE | app/src/main/java/com/hongzebin/bean/VideoDetail.java | <reponame>HongZeBin98/HZB_ONE
package com.hongzebin.bean;
/**
 * DTO for a single video/film list entry.
 * (Original header, translated from Chinese: "Video list. Created by Hong Zebin.")
 */
public class VideoDetail {
    private String title;     // video title
    private User user;        // publishing user
    private String summary;   // short summary text
    private String content;   // body content; format defined by the API — confirm upstream
    private String praisenum; // praise (like) count, kept as the raw string from the API

    public VideoDetail(String title, User user, String summary, String content, String praisenum) {
        this.title = title;
        this.user = user;
        this.summary = summary;
        this.content = content;
        this.praisenum = praisenum;
    }

    public String getTitle() {
        return title;
    }

    public void setTitle(String title) {
        this.title = title;
    }

    public User getUser() {
        return user;
    }

    public void setUser(User user) {
        this.user = user;
    }

    public String getSummary() {
        return summary;
    }

    public void setSummary(String summary) {
        this.summary = summary;
    }

    public String getContent() {
        return content;
    }

    public void setContent(String content) {
        this.content = content;
    }

    public String getPraisenum() {
        return praisenum;
    }

    public void setPraisenum(String praisenum) {
        this.praisenum = praisenum;
    }
}
|
barak/talks-cam.railsapp | vendor/rails/activesupport/lib/active_support/core_ext/blank.rb | <filename>vendor/rails/activesupport/lib/active_support/core_ext/blank.rb<gh_stars>10-100
class Object #:nodoc:
  # An object is blank when it is nil, false, empty, or a whitespace-only
  # string: "", "   ", nil, [], and {} are all blank.
  #
  # Uses || rather than the low-precedence keyword `or` (idiomatic for value
  # expressions; `or` is reserved for control flow).
  def blank?
    if respond_to?(:empty?) && respond_to?(:strip)
      empty? || strip.empty?
    elsif respond_to?(:empty?)
      empty?
    else
      !self
    end
  end
end
class NilClass #:nodoc:
  # nil is always blank.
  def blank?
    true
  end
end
class FalseClass #:nodoc:
  # false is always blank.
  def blank?
    true
  end
end
class TrueClass #:nodoc:
  # true is never blank.
  def blank?
    false
  end
end
class Array #:nodoc:
  # An array is blank exactly when it has no elements.
  alias_method :blank?, :empty?
end
class Hash #:nodoc:
  # A hash is blank exactly when it has no entries.
  alias_method :blank?, :empty?
end
class String #:nodoc:
  # A string is blank when it is empty or consists solely of whitespace.
  # (An empty string strips to an empty string, so the single strip check
  # covers both cases the original spelled out.)
  def blank?
    strip.empty?
  end
end
class Numeric #:nodoc:
  # Numbers are never blank, including 0.
  def blank?
    false
  end
end
vedhavyas/go-centrifuge | protobufs/gen/go/nft/service.pb.go | <filename>protobufs/gen/go/nft/service.pb.go
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: nft/service.proto
package nftpb
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
import _ "github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger/options"
import _ "google.golang.org/genproto/googleapis/api/annotations"
import (
context "golang.org/x/net/context"
grpc "google.golang.org/grpc"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
type ResponseHeader struct {
TransactionId string `protobuf:"bytes,5,opt,name=transaction_id,json=transactionId,proto3" json:"transaction_id,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ResponseHeader) Reset() { *m = ResponseHeader{} }
func (m *ResponseHeader) String() string { return proto.CompactTextString(m) }
func (*ResponseHeader) ProtoMessage() {}
func (*ResponseHeader) Descriptor() ([]byte, []int) {
return fileDescriptor_service_10e4a4ecba67c7da, []int{0}
}
func (m *ResponseHeader) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ResponseHeader.Unmarshal(m, b)
}
func (m *ResponseHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_ResponseHeader.Marshal(b, m, deterministic)
}
func (dst *ResponseHeader) XXX_Merge(src proto.Message) {
xxx_messageInfo_ResponseHeader.Merge(dst, src)
}
func (m *ResponseHeader) XXX_Size() int {
return xxx_messageInfo_ResponseHeader.Size(m)
}
func (m *ResponseHeader) XXX_DiscardUnknown() {
xxx_messageInfo_ResponseHeader.DiscardUnknown(m)
}
var xxx_messageInfo_ResponseHeader proto.InternalMessageInfo
func (m *ResponseHeader) GetTransactionId() string {
if m != nil {
return m.TransactionId
}
return ""
}
type NFTMintRequest struct {
// Document identifier
Identifier string `protobuf:"bytes,1,opt,name=identifier,proto3" json:"identifier,omitempty"`
// The contract address of the registry where the token should be minted
RegistryAddress string `protobuf:"bytes,2,opt,name=registry_address,json=registryAddress,proto3" json:"registry_address,omitempty"`
DepositAddress string `protobuf:"bytes,3,opt,name=deposit_address,json=depositAddress,proto3" json:"deposit_address,omitempty"`
ProofFields []string `protobuf:"bytes,4,rep,name=proof_fields,json=proofFields,proto3" json:"proof_fields,omitempty"`
// proof that nft is part of document
SubmitTokenProof bool `protobuf:"varint,5,opt,name=submit_token_proof,json=submitTokenProof,proto3" json:"submit_token_proof,omitempty"`
// proof that nft owner can access the document if nft_grant_access is true
SubmitNftOwnerAccessProof bool `protobuf:"varint,7,opt,name=submit_nft_owner_access_proof,json=submitNftOwnerAccessProof,proto3" json:"submit_nft_owner_access_proof,omitempty"`
// grant nft read access to the document
GrantNftAccess bool `protobuf:"varint,8,opt,name=grant_nft_access,json=grantNftAccess,proto3" json:"grant_nft_access,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *NFTMintRequest) Reset() { *m = NFTMintRequest{} }
func (m *NFTMintRequest) String() string { return proto.CompactTextString(m) }
func (*NFTMintRequest) ProtoMessage() {}
func (*NFTMintRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_service_10e4a4ecba67c7da, []int{1}
}
func (m *NFTMintRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_NFTMintRequest.Unmarshal(m, b)
}
func (m *NFTMintRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_NFTMintRequest.Marshal(b, m, deterministic)
}
func (dst *NFTMintRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_NFTMintRequest.Merge(dst, src)
}
func (m *NFTMintRequest) XXX_Size() int {
return xxx_messageInfo_NFTMintRequest.Size(m)
}
func (m *NFTMintRequest) XXX_DiscardUnknown() {
xxx_messageInfo_NFTMintRequest.DiscardUnknown(m)
}
var xxx_messageInfo_NFTMintRequest proto.InternalMessageInfo
func (m *NFTMintRequest) GetIdentifier() string {
if m != nil {
return m.Identifier
}
return ""
}
func (m *NFTMintRequest) GetRegistryAddress() string {
if m != nil {
return m.RegistryAddress
}
return ""
}
func (m *NFTMintRequest) GetDepositAddress() string {
if m != nil {
return m.DepositAddress
}
return ""
}
func (m *NFTMintRequest) GetProofFields() []string {
if m != nil {
return m.ProofFields
}
return nil
}
func (m *NFTMintRequest) GetSubmitTokenProof() bool {
if m != nil {
return m.SubmitTokenProof
}
return false
}
func (m *NFTMintRequest) GetSubmitNftOwnerAccessProof() bool {
if m != nil {
return m.SubmitNftOwnerAccessProof
}
return false
}
func (m *NFTMintRequest) GetGrantNftAccess() bool {
if m != nil {
return m.GrantNftAccess
}
return false
}
type NFTMintResponse struct {
Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
TokenId string `protobuf:"bytes,2,opt,name=token_id,json=tokenId,proto3" json:"token_id,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *NFTMintResponse) Reset() { *m = NFTMintResponse{} }
func (m *NFTMintResponse) String() string { return proto.CompactTextString(m) }
func (*NFTMintResponse) ProtoMessage() {}
func (*NFTMintResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_service_10e4a4ecba67c7da, []int{2}
}
func (m *NFTMintResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_NFTMintResponse.Unmarshal(m, b)
}
func (m *NFTMintResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_NFTMintResponse.Marshal(b, m, deterministic)
}
func (dst *NFTMintResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_NFTMintResponse.Merge(dst, src)
}
func (m *NFTMintResponse) XXX_Size() int {
return xxx_messageInfo_NFTMintResponse.Size(m)
}
func (m *NFTMintResponse) XXX_DiscardUnknown() {
xxx_messageInfo_NFTMintResponse.DiscardUnknown(m)
}
var xxx_messageInfo_NFTMintResponse proto.InternalMessageInfo
func (m *NFTMintResponse) GetHeader() *ResponseHeader {
if m != nil {
return m.Header
}
return nil
}
func (m *NFTMintResponse) GetTokenId() string {
if m != nil {
return m.TokenId
}
return ""
}
func init() {
proto.RegisterType((*ResponseHeader)(nil), "nft.ResponseHeader")
proto.RegisterType((*NFTMintRequest)(nil), "nft.NFTMintRequest")
proto.RegisterType((*NFTMintResponse)(nil), "nft.NFTMintResponse")
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion4
// NFTServiceClient is the client API for NFTService service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type NFTServiceClient interface {
MintNFT(ctx context.Context, in *NFTMintRequest, opts ...grpc.CallOption) (*NFTMintResponse, error)
}
type nFTServiceClient struct {
cc *grpc.ClientConn
}
func NewNFTServiceClient(cc *grpc.ClientConn) NFTServiceClient {
return &nFTServiceClient{cc}
}
func (c *nFTServiceClient) MintNFT(ctx context.Context, in *NFTMintRequest, opts ...grpc.CallOption) (*NFTMintResponse, error) {
out := new(NFTMintResponse)
err := c.cc.Invoke(ctx, "/nft.NFTService/MintNFT", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// NFTServiceServer is the server API for NFTService service.
type NFTServiceServer interface {
MintNFT(context.Context, *NFTMintRequest) (*NFTMintResponse, error)
}
func RegisterNFTServiceServer(s *grpc.Server, srv NFTServiceServer) {
s.RegisterService(&_NFTService_serviceDesc, srv)
}
func _NFTService_MintNFT_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(NFTMintRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(NFTServiceServer).MintNFT(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/nft.NFTService/MintNFT",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(NFTServiceServer).MintNFT(ctx, req.(*NFTMintRequest))
}
return interceptor(ctx, in, info, handler)
}
var _NFTService_serviceDesc = grpc.ServiceDesc{
ServiceName: "nft.NFTService",
HandlerType: (*NFTServiceServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "MintNFT",
Handler: _NFTService_MintNFT_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "nft/service.proto",
}
func init() { proto.RegisterFile("nft/service.proto", fileDescriptor_service_10e4a4ecba67c7da) }
var fileDescriptor_service_10e4a4ecba67c7da = []byte{
// 484 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x64, 0x92, 0xc1, 0x6e, 0x13, 0x3d,
0x10, 0x80, 0x95, 0xe4, 0x6f, 0x93, 0x3a, 0xfd, 0x93, 0x60, 0x38, 0xa4, 0x11, 0xa0, 0x25, 0x12,
0x10, 0xa0, 0xcd, 0x4a, 0xe5, 0x80, 0xc4, 0x89, 0x14, 0x14, 0xd1, 0x03, 0x4b, 0xb4, 0xe4, 0x02,
0x97, 0x95, 0xb3, 0x1e, 0x2f, 0x16, 0xcd, 0x78, 0xb1, 0x27, 0x44, 0x5c, 0x91, 0x78, 0x01, 0x78,
0x22, 0x9e, 0x81, 0x57, 0xe0, 0x41, 0xd0, 0xda, 0xdb, 0xaa, 0x11, 0xa7, 0xd5, 0x7e, 0xf3, 0xcd,
0xd8, 0x9e, 0x19, 0x76, 0x03, 0x15, 0xc5, 0x0e, 0xec, 0x17, 0x9d, 0xc3, 0xb4, 0xb4, 0x86, 0x0c,
0x6f, 0xa1, 0xa2, 0xd1, 0xed, 0xc2, 0x98, 0xe2, 0x02, 0x62, 0x51, 0xea, 0x58, 0x20, 0x1a, 0x12,
0xa4, 0x0d, 0xba, 0xa0, 0x8c, 0x8e, 0xfd, 0x27, 0x3f, 0x29, 0x00, 0x4f, 0xdc, 0x56, 0x14, 0x05,
0xd8, 0xd8, 0x94, 0xde, 0xf8, 0xd7, 0x1e, 0x3f, 0x63, 0xbd, 0x14, 0x5c, 0x69, 0xd0, 0xc1, 0x6b,
0x10, 0x12, 0x2c, 0xbf, 0xcf, 0x7a, 0x64, 0x05, 0x3a, 0x91, 0x57, 0x5e, 0xa6, 0xe5, 0x70, 0x2f,
0x6a, 0x4c, 0x0e, 0xd2, 0xff, 0xaf, 0xd1, 0x73, 0x39, 0xfe, 0xd5, 0x64, 0xbd, 0x64, 0xbe, 0x7c,
0xa3, 0x91, 0x52, 0xf8, 0xbc, 0x01, 0x47, 0xfc, 0x2e, 0x63, 0x5a, 0x02, 0x92, 0x56, 0x1a, 0xec,
0xb0, 0xe1, 0xb3, 0xae, 0x11, 0xfe, 0x88, 0x0d, 0x2c, 0x14, 0xda, 0x91, 0xfd, 0x9a, 0x09, 0x29,
0x2d, 0x38, 0x37, 0x6c, 0x7a, 0xab, 0x7f, 0xc9, 0x67, 0x01, 0xf3, 0x87, 0xac, 0x2f, 0xa1, 0x34,
0x4e, 0xd3, 0x95, 0xd9, 0xf2, 0x66, 0xaf, 0xc6, 0x97, 0xe2, 0x3d, 0x76, 0x58, 0x5a, 0x63, 0x54,
0xa6, 0x34, 0x5c, 0x48, 0x37, 0xfc, 0x2f, 0x6a, 0x4d, 0x0e, 0xd2, 0xae, 0x67, 0x73, 0x8f, 0xf8,
0x31, 0xe3, 0x6e, 0xb3, 0x5a, 0x6b, 0xca, 0xc8, 0x7c, 0x02, 0xcc, 0x7c, 0xcc, 0x3f, 0xaa, 0x93,
0x0e, 0x42, 0x64, 0x59, 0x05, 0x16, 0x15, 0xe7, 0x2f, 0xd8, 0x9d, 0xda, 0x46, 0x45, 0x99, 0xd9,
0x22, 0xd8, 0x4c, 0xe4, 0x39, 0x38, 0x57, 0x27, 0xb6, 0x7d, 0xe2, 0x51, 0x90, 0x12, 0x45, 0x6f,
0x2b, 0x65, 0xe6, 0x8d, 0x50, 0x61, 0xc2, 0x06, 0x85, 0x15, 0x18, 0x0a, 0x84, 0xd4, 0x61, 0xc7,
0x27, 0xf5, 0x3c, 0x4f, 0x14, 0x05, 0x7d, 0xfc, 0x9e, 0xf5, 0xaf, 0x5a, 0x18, 0x66, 0xc0, 0x9f,
0xb0, 0xfd, 0x8f, 0x7e, 0x0e, 0xbe, 0x7f, 0xdd, 0xd3, 0x9b, 0x53, 0x54, 0x34, 0xdd, 0x1d, 0x51,
0x5a, 0x2b, 0xfc, 0x88, 0x75, 0xc2, 0x93, 0xb4, 0xac, 0x1b, 0xd9, 0xf6, 0xff, 0xe7, 0xf2, 0xf4,
0x7b, 0x83, 0xb1, 0x64, 0xbe, 0x7c, 0x17, 0xb6, 0x87, 0x6f, 0x59, 0xbb, 0x3a, 0x26, 0x99, 0x2f,
0x79, 0xa8, 0xb8, 0x3b, 0xba, 0xd1, 0xad, 0x5d, 0x18, 0x4e, 0x1b, 0xcf, 0x7e, 0xcc, 0x26, 0xa3,
0x07, 0x15, 0x8a, 0x04, 0x46, 0xc9, 0x7c, 0x19, 0x29, 0x6b, 0xd6, 0x91, 0x88, 0x5e, 0x02, 0x92,
0xd5, 0x6a, 0x53, 0x40, 0xf4, 0xca, 0xe4, 0x9b, 0x35, 0x20, 0x7d, 0xfb, 0xfd, 0xe7, 0x67, 0x73,
0x30, 0xee, 0xc6, 0xfe, 0x06, 0xf1, 0x5a, 0x23, 0x3d, 0x6f, 0x3c, 0x3e, 0x8b, 0x58, 0x3b, 0x37,
0xeb, 0xaa, 0xfa, 0xd9, 0x61, 0x7d, 0x99, 0x45, 0xb5, 0x78, 0x8b, 0xc6, 0x87, 0x3d, 0x54, 0x54,
0xae, 0x56, 0xfb, 0x7e, 0x11, 0x9f, 0xfe, 0x0d, 0x00, 0x00, 0xff, 0xff, 0x7e, 0xdb, 0x53, 0x5a,
0xee, 0x02, 0x00, 0x00,
}
|
grantmontgomery/seknd | src/components/Date-Parts-Piece/index.js | <reponame>grantmontgomery/seknd<filename>src/components/Date-Parts-Piece/index.js
// Barrel file: re-export the component's default export under its name so
// consumers can `import { DatePartsPiece } from "components/Date-Parts-Piece"`.
export { default as DatePartsPiece } from "./DatePartsPiece";
|
Papabyte/odex-frontend | src/types/orders.js | <reponame>Papabyte/odex-frontend
// Parameters required to build a new order before signing/submission.
export type NewOrderParams = {
  userAddress: string,
  exchangeAddress: string,
  pair: TokenPair,
  amount: number,
  price: number,
  side: 'BUY' | 'SELL'
}

// Order shape as exchanged with the backend API. Note price is a string
// here, while the UI-facing Order below holds a number.
export type RawOrder = {
  exchangeAddress: string,
  userAddress: string,
  baseToken: string,
  quoteToken: string,
  amount: number,
  price: string,
  side: 'BUY' | 'SELL',
  nonce: string,
  status: string,
  hash: string,
}

// UI-facing order model consumed by components and the orders reducer.
export type Order = {
  time: number,
  amount: number,
  filled: number,
  price: number,
  hash: string,
  side: 'BUY' | 'SELL',
  pair: string,
  type: 'MARKET' | 'LIMIT',
  status: 'NEW' | 'OPEN' | 'CANCELLED' | 'AUTO_CANCELLED' | 'FILLED' | 'PARTIAL_FILLED'
}

// eslint-disable-next-line
type Orders = Array<Order>

// Redux state slice: orders keyed by hash.
// NOTE(review): `{ number: Order }` declares a literal property named
// "number"; an indexer like `{ [hash: string]: Order }` was probably
// intended — confirm against the reducer.
// eslint-disable-next-line
type OrdersState = {
  byHash: { number: Order }
}
|
TomasHofman/infinispan | server/integration/testsuite/src/test/java/org/infinispan/server/test/cs/custom/CustomCacheStoreIT.java | <reponame>TomasHofman/infinispan
package org.infinispan.server.test.cs.custom;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNull;
import java.io.File;
import javax.management.ObjectName;
import org.infinispan.arquillian.core.InfinispanResource;
import org.infinispan.arquillian.core.RemoteInfinispanServer;
import org.infinispan.arquillian.core.RunningServer;
import org.infinispan.arquillian.core.WithRunningServer;
import org.infinispan.arquillian.utils.MBeanServerConnectionProvider;
import org.infinispan.client.hotrod.RemoteCache;
import org.infinispan.client.hotrod.RemoteCacheManager;
import org.infinispan.commons.logging.Log;
import org.infinispan.commons.logging.LogFactory;
import org.infinispan.persistence.cluster.MyCustomCacheStore;
import org.infinispan.persistence.spi.ExternalStore;
import org.infinispan.server.infinispan.spi.InfinispanSubsystem;
import org.infinispan.server.test.category.CacheStore;
import org.infinispan.server.test.util.ITestUtils;
import org.jboss.arquillian.junit.Arquillian;
import org.jboss.shrinkwrap.api.ShrinkWrap;
import org.jboss.shrinkwrap.api.exporter.ZipExporter;
import org.jboss.shrinkwrap.api.spec.JavaArchive;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.runner.RunWith;
/**
 * Tests deployable cache stores that are placed into the server's
 * deployments directory: a custom store implementation is packaged as a JAR,
 * dropped into standalone/deployments, and then verified both functionally
 * (via HotRod put/get) and via JMX (the CacheLoader component must report
 * the custom store class).
 */
@RunWith(Arquillian.class)
@Category(CacheStore.class)
public class CustomCacheStoreIT {

    private static final Log log = LogFactory.getLog(CustomCacheStoreIT.class);

    // Injected handle to the "standalone-customcs" server configuration.
    @InfinispanResource("standalone-customcs")
    RemoteInfinispanServer server;

    // WildFly management port used for the JMX-over-management connection.
    final int managementPort = 9990;
    // ObjectName of the default cache's CacheLoader MBean.
    final String cacheLoaderMBean = "jboss." + InfinispanSubsystem.SUBSYSTEM_NAME + ":type=Cache,name=\"default(local)\",manager=\"local\",component=CacheLoader";

    /**
     * Packages MyCustomCacheStore (with its ExternalStore service descriptor)
     * into a JAR and copies it into the server's deployments directory before
     * the server is started.
     */
    @BeforeClass
    public static void before() throws Exception {
        String serverDir = System.getProperty("server1.dist");
        JavaArchive deployedCacheStore = ShrinkWrap.create(JavaArchive.class);
        deployedCacheStore.addPackage(MyCustomCacheStore.class.getPackage());
        deployedCacheStore.addAsServiceProvider(ExternalStore.class, MyCustomCacheStore.class);
        deployedCacheStore.as(ZipExporter.class).exportTo(
                new File(serverDir, "/standalone/deployments/custom-store.jar"), true);
    }

    @Test
    @WithRunningServer({@RunningServer(name = "standalone-customcs")})
    public void testIfDeployedCacheContainsProperValues() throws Exception {
        RemoteCacheManager rcm = ITestUtils.createCacheManager(server);
        RemoteCache<String, String> rc = rcm.getCache();
        assertNull(rc.get("key1"));
        rc.put("key1", "value1");
        assertEquals("value1", rc.get("key1"));
        // check via jmx that MyCustomCacheStore is indeed used
        MBeanServerConnectionProvider provider = new MBeanServerConnectionProvider(server.getHotrodEndpoint().getInetAddress().getHostName(), managementPort);
        assertEquals("[org.infinispan.persistence.cluster.MyCustomCacheStore]", getAttribute(provider, cacheLoaderMBean, "stores"));
    }

    /** Reads a single JMX attribute from the given MBean and returns its string form. */
    private String getAttribute(MBeanServerConnectionProvider provider, String mbean, String attr) throws Exception {
        return provider.getConnection().getAttribute(new ObjectName(mbean), attr).toString();
    }
}
|
eregnier/kestra | runner-kafka/src/main/java/io/kestra/runner/kafka/KafkaTemplateExecutor.java | <filename>runner-kafka/src/main/java/io/kestra/runner/kafka/KafkaTemplateExecutor.java
package io.kestra.runner.kafka;
import io.kestra.runner.kafka.services.SafeKeyValueStore;
import io.micronaut.context.annotation.Replaces;
import io.micronaut.context.annotation.Requires;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.streams.state.ReadOnlyKeyValueStore;
import io.kestra.core.models.templates.Template;
@Slf4j
@KafkaQueueEnabled
@Replaces(io.kestra.core.tasks.flows.Template.MemoryTemplateExecutor.class)
@Requires(property = "kestra.server-type", value = "EXECUTOR")
public class KafkaTemplateExecutor implements io.kestra.core.tasks.flows.Template.TemplateExecutorInterface {

    // Read-only view over the Kafka Streams state store holding templates,
    // wrapped in SafeKeyValueStore to tolerate store-access failures.
    private final SafeKeyValueStore<String, Template> store;

    public KafkaTemplateExecutor(ReadOnlyKeyValueStore<String, Template> store, String name) {
        this.store = new SafeKeyValueStore<>(store, name);
    }

    // Looks up a template by its namespace/id pair. Returns null when absent,
    // matching the in-memory executor this bean replaces.
    public Template findById(String namespace, String templateId) {
        return this.store.get(Template.uid(namespace, templateId)).orElse(null);
    }
}
|
Dixa-public/browser-extension | src/integrations/sprintly.js | <reponame>Dixa-public/browser-extension<filename>src/integrations/sprintly.js
// Inject a Clockify timer button into each Sprintly card modal that does not
// already have one, using the card's title link text as the time entry
// description. `{ observe: true }` keeps watching for cards added later.
clockifyButton.render(
  '.modal-content .card_container:not(.clockify)',
  { observe: true },
  (card) => {
    const description = $('.card_container .body a.title', card).textContent.trim();
    const button = clockifyButton.createButton(description);
    $('.card_container .card .top', card).appendChild(button);
  }
);
|
NyteCore/Senior_School_Projects | CCC C++/CCC C++/main.cpp | //
// main.cpp
// CCC C++
//
// Created by - on 2017/02/14.
// Copyright © 2017 <NAME> House of Ren. All rights reserved.
//
#include <iostream>
#include <valarray>
using namespace std;
// Reads n integers; a 0 cancels the most recent not-yet-cancelled value.
// Prints the sum of the surviving values.
//
// Fix vs original: when the very first value read was 0, the original
// executed A[i-1] with i == 0, i.e. an out-of-bounds write to A[-1]
// (undefined behavior). The stack index is now guarded against underflow.
int main() {
    int n = 0;
    std::cin >> n;

    // A[0..top) is a stack of surviving values.
    int A[100001];
    for (int i = 0; i < 100001; i++) {
        A[i] = 0;
    }
    int top = 0;
    for (int x = 0; x < n; x++) {
        int s = 0;
        std::cin >> s;
        if (s == 0) {
            // Cancel the previous surviving value, if any.
            if (top > 0) {
                A[--top] = 0;
            }
        } else {
            A[top++] = s;
        }
    }

    // Summing only the live prefix is equivalent to the original full-array
    // sum, since cancelled slots are reset to 0.
    int sum = 0;
    for (int i = 0; i < top; i++) {
        sum += A[i];
    }
    std::cout << sum << std::endl;
}
|
rpatil524/datacollector | sdc-solr_6-lib/src/test/java/com/streamsets/pipeline/solr/impl/Solr06ServerUtil.java | <filename>sdc-solr_6-lib/src/test/java/com/streamsets/pipeline/solr/impl/Solr06ServerUtil.java
/**
* Copyright 2016 StreamSets Inc.
*
* Licensed under the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.streamsets.pipeline.solr.impl;
import org.apache.solr.SolrJettyTestBase;
import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.SolrServerException;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.response.QueryResponse;
import java.io.IOException;
/**
 * Test utility wrapping an {@link HttpSolrClient} pointed at a Solr 6 server,
 * with convenience methods for querying and deleting documents.
 */
public class Solr06ServerUtil extends SolrJettyTestBase {

    /** Client for the server under test; created in the constructor, released by {@link #destroy()}. */
    private HttpSolrClient client;

    /**
     * Creates the utility and eagerly configures its HTTP client.
     *
     * @param url base URL of the Solr server
     * @throws Exception declared for API compatibility; setup failures are in fact
     *         rethrown as {@link RuntimeException} with the original cause preserved
     */
    public Solr06ServerUtil(String url) throws Exception {
        try {
            // setup the client...
            client = getHttpSolrClient(url);
            client.setConnectionTimeout(DEFAULT_CONNECTION_TIMEOUT);
            client.setDefaultMaxConnectionsPerHost(100);
            client.setMaxTotalConnections(100);
        } catch (Exception ex) {
            throw new RuntimeException(ex);
        }
    }

    /**
     * Closes the underlying client if it was created. Close failures are
     * deliberately ignored: this runs in test teardown, where a failed close
     * must not mask the actual test result. (Original swallowed the exception
     * in an empty, uncommented catch block.)
     */
    public void destroy() {
        if (client != null) {
            try {
                client.close();
            } catch (IOException ignored) {
                // best-effort cleanup; nothing useful to do with a failed close in teardown
            }
        }
    }

    /** Deletes every document matching the given query. */
    public void deleteByQuery(String q) throws SolrServerException, IOException {
        client.deleteByQuery(q);
    }

    /** Executes the given query against the wrapped client. */
    public QueryResponse query(SolrQuery q) throws SolrServerException, IOException {
        return client.query(q);
    }
}
|
onap/aai-aai-service | ajsc-aai/src/main/java/org/openecomp/aai/parsers/query/QueryParser.java | <filename>ajsc-aai/src/main/java/org/openecomp/aai/parsers/query/QueryParser.java
/*-
* ============LICENSE_START=======================================================
* org.openecomp.aai
* ================================================================================
* Copyright (C) 2017 AT&T Intellectual Property. All rights reserved.
* ================================================================================
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* ============LICENSE_END=========================================================
*/
package org.openecomp.aai.parsers.query;
import java.net.URI;
import org.openecomp.aai.introspection.Loader;
import org.openecomp.aai.logging.LogLineBuilder;
import org.openecomp.aai.query.builder.QueryBuilder;
/**
 * Base class for parsers that translate a resource URI into a graph query.
 * Subclasses populate the protected fields ({@code resultResource},
 * {@code parentResourceType}, {@code containerResource}, and the query
 * builders) while parsing; this class exposes read-only accessors over them.
 *
 * Fix vs original: removed a stale commented-out call to {@code init(...)}
 * left in the URI constructor.
 */
public abstract class QueryParser {

    protected Loader loader = null;
    protected QueryBuilder queryBuilder = null;
    protected QueryBuilder parentQueryBuilder = null;
    protected URI uri = null;
    protected String resultResource = "";
    protected String parentResourceType = "";
    protected String containerResource = "";
    protected final LogLineBuilder llBuilder;

    /**
     * Instantiates a new query parser for a specific URI.
     *
     * @param loader the loader supplying model metadata (and the log line builder)
     * @param queryBuilder the query builder the parser populates
     * @param uri the uri being parsed
     */
    protected QueryParser(Loader loader, QueryBuilder queryBuilder, URI uri) {
        this.uri = uri;
        this.queryBuilder = queryBuilder;
        this.loader = loader;
        this.llBuilder = loader.getLogLineBuilder();
    }

    /**
     * Instantiates a new query parser without a URI (for subclasses that
     * derive the query from other input).
     *
     * @param loader the loader supplying model metadata (and the log line builder)
     * @param queryBuilder the query builder the parser populates
     */
    protected QueryParser(Loader loader, QueryBuilder queryBuilder) {
        this.queryBuilder = queryBuilder;
        this.loader = loader;
        this.llBuilder = loader.getLogLineBuilder();
    }

    /**
     * Gets the container resource type determined while parsing.
     *
     * @return the container type; empty string if none was set
     */
    public String getContainerType() {
        return this.containerResource;
    }

    /**
     * Gets the resource type of the parent of the result.
     *
     * @return the parent result type; empty string if none was set
     */
    public String getParentResultType() {
        return this.parentResourceType;
    }

    /**
     * Gets the resource type of the query result.
     *
     * @return the result type; empty string if none was set
     */
    public String getResultType() {
        return this.resultResource;
    }

    /**
     * Gets the query builder populated by this parser.
     *
     * @return the query builder
     */
    public QueryBuilder getQueryBuilder() {
        return this.queryBuilder;
    }

    /**
     * Gets the URI this parser was created for.
     *
     * @return the uri; null for the no-URI constructor
     */
    public URI getUri() {
        return this.uri;
    }

    /**
     * Gets the parent query builder, falling back to the main query builder
     * when no distinct parent builder was set.
     *
     * @return the parent query builder, never null when queryBuilder is set
     */
    public QueryBuilder getParentQueryBuilder() {
        if (this.parentQueryBuilder != null) {
            return this.parentQueryBuilder;
        } else {
            return this.queryBuilder;
        }
    }

    /**
     * Checks whether the parsed resource is dependent on a parent, detected
     * by comparing the rendered query against its parent query: they differ
     * exactly when the result hangs off a parent resource.
     *
     * @return true if the resource is dependent on a parent
     */
    public boolean isDependent() {
        return !this.queryBuilder.getQuery().toString().equals(this.queryBuilder.getParentQuery().toString());
    }
}
|
sameerjj/WCF-android | app/src/main/java/com/android/wcf/settings/TeamMembershipFragment.java | <filename>app/src/main/java/com/android/wcf/settings/TeamMembershipFragment.java
package com.android.wcf.settings;
import android.app.AlertDialog;
import android.content.Context;
import android.os.Bundle;
import android.text.Editable;
import android.text.TextWatcher;
import android.util.Log;
import android.view.LayoutInflater;
import android.view.Menu;
import android.view.MenuInflater;
import android.view.MenuItem;
import android.view.View;
import android.view.ViewGroup;
import android.widget.Button;
import android.widget.ImageView;
import android.widget.TextView;
import androidx.annotation.NonNull;
import androidx.annotation.Nullable;
import androidx.appcompat.widget.SwitchCompat;
import androidx.recyclerview.widget.DividerItemDecoration;
import androidx.recyclerview.widget.LinearLayoutManager;
import androidx.recyclerview.widget.RecyclerView;
import com.android.wcf.R;
import com.android.wcf.application.DataHolder;
import com.android.wcf.base.BaseFragment;
import com.android.wcf.helper.SharedPreferencesUtil;
import com.android.wcf.helper.view.ListPaddingDecoration;
import com.android.wcf.model.Constants;
import com.android.wcf.model.Event;
import com.android.wcf.model.Participant;
import com.android.wcf.model.Team;
import com.bumptech.glide.Glide;
import com.bumptech.glide.request.RequestOptions;
import com.google.android.material.textfield.TextInputEditText;
import com.google.android.material.textfield.TextInputLayout;
import org.jetbrains.annotations.NotNull;
import java.io.IOException;
import java.text.SimpleDateFormat;
import java.util.Iterator;
import java.util.List;
import static com.android.wcf.application.WCFApplication.isProdBackend;
public class TeamMembershipFragment extends BaseFragment implements TeamMembershipMvp.View, TeamMembershipAdapterMvp.Host {
private static final String TAG = TeamMembershipFragment.class.getSimpleName();
private static final String IS_TEAM_LEAD_ARG = "is_team_lead";
private TeamMembershipMvp.Host host;
private Team team;
private Event event;
private boolean isTeamLead = false;
private TeamMembershipMvp.Presenter presenter;
private RecyclerView teamMembershipRecyclerView = null;
private TeamMembershipAdapter teamMembershipAdapter = null;
private View settingsTeamProfileContainer;
private View settingsTeamMembershipContainer;
private View settingsTeamInviteContainer;
private View deleteTeamContainer;
private MenuItem teamEditMenuEtem;
private boolean inEditMode = false;
AlertDialog editTeamNameDialogBuilder;
View editTeamDialogView = null;
TextView teamNameTv;
EditTextDialogListener editTeamNameDialogListener = new EditTextDialogListener() {
@Override
public void onDialogDone(@NotNull String newName) {
TextView editTeamteamNameTv = teamNameTv;
editTeamteamNameTv.setText(newName);
DataHolder.updateParticipantTeamName(newName);
}
@Override
public void onDialogCancel() {
}
};
private View.OnClickListener onClickListener = new View.OnClickListener() {
@Override
public void onClick(View view) {
switch (view.getId()) {
case R.id.team_invite_chevron:
inviteTeamMembers();
break;
case R.id.delete_team:
confirmDeleteTeam();
break;
case R.id.team_name:
editTeamName();
break;
}
}
};
public static TeamMembershipFragment getInstance(boolean isTeamLead) {
TeamMembershipFragment fragment = new TeamMembershipFragment();
Bundle args = new Bundle();
args.putBoolean(IS_TEAM_LEAD_ARG, isTeamLead);
fragment.setArguments(args);
return fragment;
}
@Override
public void onAttach(Context context) {
super.onAttach(context);
if (context instanceof TeamMembershipMvp.Host) {
host = (TeamMembershipMvp.Host) context;
} else {
throw new RuntimeException(context.toString()
+ " must implement TeamChallengeProgressMvp.Host");
}
}
@Override
public void onDetach() {
super.onDetach();
host = null;
}
@Override
public void onCreate(@Nullable Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setHasOptionsMenu(true);
editTeamNameDialogBuilder = new AlertDialog.Builder(getContext()).create();
}
@Nullable
@Override
public View onCreateView(@NonNull LayoutInflater inflater, @Nullable ViewGroup container, @Nullable Bundle savedInstanceState) {
Bundle bundle = getArguments();
if (bundle != null) {
isTeamLead = bundle.getBoolean(IS_TEAM_LEAD_ARG);
}
View fragmentView = inflater.inflate(R.layout.fragment_team_membership, container, false);
return fragmentView;
}
@Override
public void onViewCreated(@NonNull View view, @Nullable Bundle savedInstanceState) {
super.onViewCreated(view, savedInstanceState);
team = getParticipantTeam();
event = getEvent();
host.setToolbarTitle(getString(R.string.settings_team_membership_title), true);
presenter = new TeamMembershipPresenter(this);
setupView(view);
}
@Override
public void onCreateOptionsMenu(@NonNull Menu menu, @NonNull MenuInflater menuInflater) {
menuInflater.inflate(R.menu.menu_team_edit, menu);
teamEditMenuEtem = menu.findItem(R.id.menu_item_team_edit);
if (team != null) {
if (teamEditMenuEtem != null) teamEditMenuEtem.setVisible(isTeamLead);
}
super.onCreateOptionsMenu(menu, menuInflater);
}
@Override
public boolean onOptionsItemSelected(MenuItem item) {
boolean handled = super.onOptionsItemSelected(item);
if (!handled) {
switch (item.getItemId()) {
case android.R.id.home:
closeView();
handled = true;
break;
case R.id.menu_item_team_edit:
inEditMode = !inEditMode;
teamEditMenuEtem.setTitle(inEditMode ?
getString(R.string.team_edit_done_title) : getString(R.string.team_edit_start_title));
teamMembershipAdapter.updateEditMode(inEditMode);
default:
break;
}
}
return handled;
}
@Override
public void onStart() {
super.onStart();
refreshTeamParticipantsList();
}
@Override
public void onStop() {
super.onStop();
presenter.onStop();
editTeamNameDialogBuilder = null;
}
void refreshTeamParticipantsList() {
teamMembershipAdapter.clearSelectionPosition();
teamMembershipRecyclerView.scrollToPosition(0);
teamMembershipAdapter.updateParticipantsData(team.getParticipants());
}
void setupView(View fragmentView) {
settingsTeamProfileContainer = fragmentView.findViewById(R.id.settings_team_profile_container);
settingsTeamMembershipContainer = fragmentView.findViewById(R.id.settings_team_membership_container);
settingsTeamInviteContainer = fragmentView.findViewById(R.id.settings_team_invite_container);
deleteTeamContainer = fragmentView.findViewById(R.id.settings_delete_team_container);
setupSettingsTeamProfileContainer(settingsTeamProfileContainer);
setupSettingsTeamMembershipContainer(settingsTeamMembershipContainer);
setupChallengeTeamInviteContainer(settingsTeamInviteContainer);
setupDeleteTeamContainer(deleteTeamContainer);
}
void setupSettingsTeamProfileContainer(View container) {
ImageView teamProfileImage = container.findViewById(R.id.team_image);
teamNameTv = container.findViewById(R.id.team_name);
TextView challengeNameTv = container.findViewById(R.id.challenge_name);
TextView challengeDatesTv = container.findViewById(R.id.challenge_dates);
Event event = getEvent();
challengeNameTv.setText(event.getName());
SimpleDateFormat sdf = new SimpleDateFormat("MMM d, yyyy");
String startDate = sdf.format(event.getStartDate());
String endDate = sdf.format(event.getEndDate());
challengeDatesTv.setText(startDate + " to " + endDate);
//TODO: remove this when new date for challenge is decided
if (isProdBackend() && Constants.getChallengeStartSoonMessage()) {
challengeDatesTv.setText(getString(R.string.message_journey_starting_soon));
}
Team team = getParticipantTeam();
teamNameTv.setText(team.getName());
if (isTeamLead) {
teamNameTv.setOnClickListener(onClickListener);
} else {
teamNameTv.setOnClickListener(null);
}
String teamImageUrl = team.getImage();
if (teamImageUrl != null && !teamImageUrl.isEmpty()) {
Log.d(TAG, "teamImageUrl=" + teamImageUrl);
Glide.with(getContext())
.load(teamImageUrl)
.apply(RequestOptions.circleCropTransform())
.into(teamProfileImage);
}
}
void setupSettingsTeamMembershipContainer(View container) {
if (teamMembershipAdapter == null) {
String teamLeadParticipantId = "";
if (team != null) {
isTeamLead = team.isTeamLeader(SharedPreferencesUtil.getMyParticipantId());
teamLeadParticipantId = team.getLeaderId();
}
teamMembershipAdapter = new TeamMembershipAdapter(this,
event.getTeamLimit(),
SharedPreferencesUtil.getMyParticipantId(),
teamLeadParticipantId,
isTeamLead,
event.hasChallengeStarted()
);
}
teamMembershipRecyclerView = container.findViewById(R.id.team_members_list);
teamMembershipRecyclerView.setLayoutManager(new LinearLayoutManager(getContext()));
teamMembershipRecyclerView.addItemDecoration(new DividerItemDecoration(getContext(),
DividerItemDecoration.VERTICAL));
teamMembershipRecyclerView.addItemDecoration(new ListPaddingDecoration(getContext()));
teamMembershipRecyclerView.setAdapter(teamMembershipAdapter);
}
void setupChallengeTeamInviteContainer(View container) {
TextView inviteLabel = container.findViewById(R.id.team_invite_label);
boolean showTeamInvite = false;
if (isTeamLead && event != null) {
if (event.daysToStartEvent() >= 0 && !event.hasTeamBuildingEnded()) {
List<Participant> participants = team.getParticipants();
if (participants != null) {
int openSlots = event.getTeamLimit() - participants.size();
if (openSlots > 0) {
showTeamInvite = true;
String openSlotMessage = getResources().getQuantityString(R.plurals.team_invite_more_members_message, openSlots, openSlots);
inviteLabel.setText(openSlotMessage);
}
}
}
}
if (showTeamInvite) {
View image = container.findViewById(R.id.team_invite_chevron);
expandViewHitArea(image, container);
image.setOnClickListener(onClickListener);
}
container.setVisibility(showTeamInvite ? View.VISIBLE : View.GONE);
}
void setupDeleteTeamContainer(View container) {
if (isTeamLead) {
container.setVisibility(View.VISIBLE);
container.findViewById(R.id.delete_team).setOnClickListener(onClickListener);
} else {
container.setVisibility(View.GONE);
container.findViewById(R.id.delete_team).setOnClickListener(null);
}
}
void closeView() {
getActivity().onBackPressed();
}
@Override
public void removeMemberFromTeam(String participantName, String participantId) {
confirmToRemoveTeamMember(participantName, participantId);
}
@Override
public void participantRemovedFromTeam(String participantId) {
Iterator participantIterator = team.getParticipants().iterator();
while (participantIterator.hasNext()) {
Participant participant = (Participant) participantIterator.next();
if (participant.getParticipantId().equals(participantId)) {
participantIterator.remove();
refreshTeamParticipantsList();
setupChallengeTeamInviteContainer(settingsTeamInviteContainer);
break;
}
}
}
public void confirmToRemoveTeamMember(String participantName, final String participantId) {
final AlertDialog dialogBuilder = new AlertDialog.Builder(getContext()).create();
LayoutInflater inflater = this.getLayoutInflater();
View dialogView = inflater.inflate(R.layout.view_remove_team_member, null);
TextView title = dialogView.findViewById(R.id.remove_team_member_title);
title.setText(getString(R.string.remove_team_member_title, participantName));
TextView message = dialogView.findViewById(R.id.remove_team_member_message);
message.setText(getString(R.string.remove_team_member_message, participantName));
Button removeBtn = dialogView.findViewById(R.id.remove_team_member_button);
Button cancelBtn = dialogView.findViewById(R.id.cancel_remove_team_member_button);
cancelBtn.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View view) {
dialogBuilder.dismiss();
}
});
removeBtn.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View view) {
presenter.removeMemberFromTeam(participantId);
dialogBuilder.dismiss();
}
});
dialogBuilder.setView(dialogView);
dialogBuilder.show();
}
void confirmDeleteTeam() {
final AlertDialog dialogBuilder = new AlertDialog.Builder(getContext()).create();
LayoutInflater inflater = this.getLayoutInflater();
View dialogView = inflater.inflate(R.layout.view_confirm_delete_team, null);
Button deleteBtn = dialogView.findViewById(R.id.delete_team_button);
Button cancelBtn = dialogView.findViewById(R.id.cancel_delete_team_button);
cancelBtn.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View view) {
dialogBuilder.dismiss();
}
});
deleteBtn.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View view) {
presenter.deleteTeam(team.getId());
dialogBuilder.dismiss();
}
});
dialogBuilder.setView(dialogView);
dialogBuilder.show();
}
@Override
public void onTeamDeleteSuccess() {
SharedPreferencesUtil.clearMyTeamId();
clearCacheTeamList();
clearCachedParticipantTeam();
clearCachedParticipant();
host.restartHomeActivity();
}
@Override
public void onTeamDeleteError(Throwable error) {
showError("Team Delete Error", error.getMessage(), null);
}
public void editTeamName() {
final TextView teamNameTV = settingsTeamProfileContainer.findViewById(R.id.team_name);
final String currentTeamName = teamNameTV.getText().toString();
presenter.onEditTeamName(presenter, currentTeamName, editTeamNameDialogListener);
}
@Override
public void showTeamNameEditDialog(final TeamMembershipMvp.Presenter presenter,
final String currentName,
final EditTextDialogListener editTextDialogListener) {
LayoutInflater inflater = this.getLayoutInflater();
editTeamDialogView = inflater.inflate(R.layout.view_team_name_edit, null);
final Button saveBtn = editTeamDialogView.findViewById(R.id.save);
final Button cancelBtn = editTeamDialogView.findViewById(R.id.cancel);
final TextInputLayout teamNameInputLayout = editTeamDialogView.findViewById(R.id.team_name_input_layout);
final TextInputEditText teamNameEditText = editTeamDialogView.findViewById(R.id.team_name);
TextWatcher editTeamNameWatcher = new TextWatcher() {
@Override
public void beforeTextChanged(CharSequence charSequence, int start, int before, int count) {
}
@Override
public void onTextChanged(CharSequence charSequence, int start, int before, int count) {
boolean enabled = false;
String teamName = teamNameEditText.getText().toString();
if (teamName.trim().length() >= Constants.MIN_TEAM_NAME_CHAR_LENGTH) {
if (!teamName.equals(currentName)) {
enabled = true;
}
}
saveBtn.setEnabled(enabled);
}
@Override
public void afterTextChanged(Editable editable) {
}
};
teamNameEditText.setText(currentName);
teamNameEditText.addTextChangedListener(editTeamNameWatcher);
//disable the saveBtn initially. It will be enabled when team name entered
boolean enabled = false;
String teamName = teamNameEditText.getText().toString();
if (teamName.trim().length() >= Constants.MIN_TEAM_NAME_CHAR_LENGTH) {
if (!teamName.equals(currentName)) {
enabled = true;
}
}
saveBtn.setEnabled(enabled);
cancelBtn.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View view) {
editTeamNameDialogBuilder.dismiss();
editTeamNameDialogBuilder.setView(null);
if (editTextDialogListener != null) {
editTextDialogListener.onDialogCancel();
}
}
});
saveBtn.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View view) {
String newName = teamNameEditText.getText().toString();
if (!newName.isEmpty() && !newName.equals(currentName)) {
presenter.updateTeamName(team.getId(), newName);
}
}
});
editTeamNameDialogBuilder.setView(editTeamDialogView);
editTeamNameDialogBuilder.show();
}
@Override
public void onTeamNameUpdateSuccess(String teamName) {
editTeamNameDialogBuilder.dismiss();
if (editTeamNameDialogListener != null) {
editTeamNameDialogListener.onDialogDone(teamName);
}
}
@Override
public void onTeamNameUpdateConstraintError(String teamName) {
if (editTeamDialogView != null) {
TextInputLayout teamNameInputLayout = editTeamDialogView.findViewById(R.id.team_name_input_layout);
if (teamNameInputLayout != null) {
teamNameInputLayout.setError(getString(R.string.duplicate_team_name_error));
}
}
}
@Override
public void onTeamNameUpdateError(@NotNull Throwable error) {
if (error instanceof IOException) {
showNetworkErrorMessage(R.string.events_data_error);
}
else {
TextInputLayout teamNameInputLayout = editTeamDialogView.findViewById(R.id.team_name_input_layout);
if (teamNameInputLayout != null) {
teamNameInputLayout.setError(error.getMessage());
}
}
}
} |
ThePantsThief/SnapchatKit | Pod/Classes/Model/SKErrorPacket.h | <filename>Pod/Classes/Model/SKErrorPacket.h
//
//  SKErrorPacket.h
//  Pods
//
//  Created by Tanner on 1/3/16.
//
//
#import "SKPacket.h"
/// A packet carrying an error received over the connection.
/// NOTE(review): semantics inferred from the class name and SKPacket base —
/// confirm against the SKPacket factory/parsing code.
@interface SKErrorPacket : SKPacket
/// Identifier naming the kind of error. (assumption: parsed from the packet
/// payload by SKPacket machinery — verify)
@property (nonatomic, readonly) NSString *errorIdentifier;
/// Human-readable error message associated with this packet.
@property (nonatomic, readonly) NSString *message;
@end
|
iambus/xquery-b | src/main/java/org/libj/xquery/compiler/Symbol.java | package org.libj.xquery.compiler;
/**
 * An immutable entry in the compiler's symbol table: a name bound to an
 * integer index and a static type.
 *
 * <p>The index is presumably a local-variable slot used during code
 * generation — TODO confirm with callers in the compiler package.
 */
public class Symbol {
    // Fields are final: the class exposes no setters, so make immutability explicit.
    private final String name;
    private final int index;
    private final Class<?> type;

    /**
     * Creates a symbol with an explicit static type.
     *
     * @param name  the symbol's name
     * @param index the symbol's index
     * @param type  the static type associated with the symbol
     */
    public Symbol(String name, int index, Class<?> type) {
        this.name = name;
        this.index = index;
        this.type = type;
    }

    /**
     * Creates a symbol whose type defaults to {@link Object}.
     *
     * @param name  the symbol's name
     * @param index the symbol's index
     */
    public Symbol(String name, int index) {
        this(name, index, Object.class);
    }

    /** @return the symbol's name */
    public String getName() {
        return name;
    }

    /** @return the symbol's index */
    public int getIndex() {
        return index;
    }

    /** @return the static type associated with this symbol */
    public Class<?> getType() {
        return type;
    }
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.