answer
stringlengths
17
10.2M
package cx2x.xcodeml.xnode;

import org.w3c.dom.Element;
import org.w3c.dom.Node;

import java.util.*;

/**
 * The XtypeTable represents the typeTable (3.1) element in XcodeML intermediate
 * representation.
 *
 * Elements: ( FbasicType | FfunctionType | FstructType )
 *
 * - Optional:
 *   - FbasicType (XbasicType)
 *   - FfunctionType (XfunctionType)
 *   - FstructType (XstructType)
 *
 * @author clementval
 */
public class XtypeTable extends Xnode {

  private static final int HASH_LENGTH = 12;
  private static final String ARRAY_HASH_PREFIX = "A";
  private static final String CHAR_HASH_PREFIX = "C";
  private static final String COMPLEX_HASH_PREFIX = "P";
  private static final String FCT_HASH_PREFIX = "F";
  private static final String INT_HASH_PREFIX = "I";
  private static final String LOGICAL_HASH_PREFIX = "L";
  private static final String REAL_HASH_PREFIX = "R";

  // Shared RNG for hash generation. A single instance avoids re-seeding on
  // every call (the previous per-call "new Random()" was wasteful and, on
  // older JDKs, could yield identical seeds on fast successive calls).
  private static final Random RANDOM = new Random();

  // Insertion-ordered map from type hash to its Xtype element.
  private final Map<String, Xtype> _table;

  /**
   * Element standard ctor. Pass the base element to the base class and read
   * inner information (elements and attributes).
   * @param baseElement The root of the element.
   */
  public XtypeTable(Element baseElement){
    super(baseElement);
    _table = new LinkedHashMap<>();
    readTable();
  }

  /**
   * Read the type table. Walks the direct children of the base element and
   * indexes each recognized type element by its type hash.
   */
  private void readTable(){
    Node crtNode = _baseElement.getFirstChild();
    while(crtNode != null){
      if(crtNode.getNodeType() == Node.ELEMENT_NODE){
        Element el = (Element)crtNode;
        switch(el.getTagName()){
          case Xname.BASIC_TYPE:
            XbasicType bt = new XbasicType(el);
            _table.put(bt.getType(), bt);
            break;
          case Xname.F_FUNCTION_TYPE:
            XfunctionType ft = new XfunctionType(el);
            _table.put(ft.getType(), ft);
            break;
          case Xname.F_STRUCT_TYPE:
            // TODO create XstructType object and insert it in the table
            break;
        }
      }
      crtNode = crtNode.getNextSibling();
    }
  }

  /**
   * Get number of elements in the type table.
   * @return Number of elements in the table.
   */
  public int count(){
    return _table.size();
  }

  /**
   * Add a new element in the type table.
   * @param type The new type to be added.
   */
  public void add(Xtype type){
    _baseElement.appendChild(type.cloneNode());
    _table.put(type.getAttribute(Xattr.TYPE), type);
  }

  /**
   * Get an element from the type table.
   * @param type Type of the element to be returned.
   * @return Xtype object if found in the table. Null otherwise.
   */
  public Xtype get(String type) {
    // Map.get already returns null for absent keys; no need for a
    // containsKey pre-check (which would perform a second lookup).
    return _table.get(type);
  }

  /**
   * Check if a type is present in the type table
   * @param type Type of the element to be checked.
   * @return True if the element is present. False otherwise.
   */
  public boolean hasType(String type){
    return _table.containsKey(type);
  }

  /**
   * Get a new unique function hash for the type table.
   * @return New unique fct type hash.
   */
  public String generateFctTypeHash(){
    return generateHash(FCT_HASH_PREFIX);
  }

  /**
   * Get a new unique integer hash for the type table.
   * @return New unique integer type hash.
   */
  public String generateIntegerTypeHash(){
    return generateHash(INT_HASH_PREFIX);
  }

  /**
   * Get a new unique real hash for the type table.
   * @return New unique real type hash.
   */
  public String generateRealTypeHash(){
    return generateHash(REAL_HASH_PREFIX);
  }

  /**
   * Get a new unique array hash for the type table.
   * @return New unique array type hash.
   */
  public String generateArrayTypeHash(){
    return generateHash(ARRAY_HASH_PREFIX);
  }

  /**
   * Get a new unique character hash for the type table.
   * @return New unique character type hash.
   */
  public String generateCharTypeHash(){
    return generateHash(CHAR_HASH_PREFIX);
  }

  /**
   * Get a new unique logical hash for the type table.
   * @return New unique logical type hash.
   */
  public String generateLogicalTypeHash(){
    return generateHash(LOGICAL_HASH_PREFIX);
  }

  /**
   * Get a new unique complex hash for the type table.
   * @return New unique complex type hash.
   */
  public String generateComplexTypeHash(){
    return generateHash(COMPLEX_HASH_PREFIX);
  }

  /**
   * Get a new unique hash for the type table with the given prefix.
   * Retries until the generated hash does not collide with an existing key.
   * @param prefix Prefix added to the hash string.
   * @return New unique hash.
   */
  private String generateHash(String prefix){
    String hash;
    do {
      hash = prefix + generateHash(HASH_LENGTH);
    } while(_table.containsKey(hash));
    return hash;
  }

  /**
   * Generate a new random hex string for the table.
   * @param length Length of the hash string to be generated.
   * @return The new hash string.
   */
  private String generateHash(int length){
    StringBuilder sb = new StringBuilder();
    while(sb.length() < length){
      sb.append(Integer.toHexString(RANDOM.nextInt()));
    }
    return sb.toString().substring(0, length);
  }

  /**
   * Returns a Collection view of the values contained in this XtypeTable.
   * @return A view of the values contained in this map
   */
  public Collection<Xtype> values(){
    return _table.values();
  }

  @Override
  public XtypeTable cloneObject() {
    Element clone = (Element)cloneNode();
    return new XtypeTable(clone);
  }
}
package net.md_5.bungee;

import com.google.common.base.Preconditions;
import com.google.common.base.Predicate;
import com.google.common.collect.Iterables;
import com.google.common.collect.Sets;
import com.google.gson.GsonBuilder;
import net.md_5.bungee.api.Favicon;
import net.md_5.bungee.api.ServerPing;
import net.md_5.bungee.module.ModuleManager;
import com.google.common.io.ByteStreams;
import net.md_5.bungee.api.chat.BaseComponent;
import net.md_5.bungee.api.chat.TextComponent;
import net.md_5.bungee.chat.ComponentSerializer;
import net.md_5.bungee.log.BungeeLogger;
import net.md_5.bungee.scheduler.BungeeScheduler;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
import com.google.gson.Gson;
import io.netty.bootstrap.ServerBootstrap;
import io.netty.channel.Channel;
import io.netty.channel.ChannelException;
import io.netty.channel.ChannelFuture;
import io.netty.channel.ChannelFutureListener;
import io.netty.channel.ChannelOption;
import io.netty.channel.EventLoopGroup;
import io.netty.util.ResourceLeakDetector;
import net.md_5.bungee.conf.Configuration;
import java.io.File;
import java.io.IOException;
import java.io.PrintStream;
import java.net.InetSocketAddress;
import java.text.MessageFormat;
import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
import java.util.Locale;
import java.util.Map;
import java.util.MissingResourceException;
import java.util.ResourceBundle;
import java.util.Timer;
import java.util.TimerTask;
import java.util.UUID;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.logging.Handler;
import java.util.logging.Level;
import java.util.logging.Logger;
import jline.UnsupportedTerminal;
import jline.console.ConsoleReader;
import jline.internal.Log;
import lombok.Getter;
import lombok.Setter;
import lombok.Synchronized;
import net.md_5.bungee.api.CommandSender;
import net.md_5.bungee.api.ProxyServer;
import net.md_5.bungee.api.ReconnectHandler;
import net.md_5.bungee.api.config.ConfigurationAdapter;
import net.md_5.bungee.api.config.ListenerInfo;
import net.md_5.bungee.api.config.ServerInfo;
import net.md_5.bungee.api.connection.ProxiedPlayer;
import net.md_5.bungee.api.plugin.Plugin;
import net.md_5.bungee.api.plugin.PluginManager;
import net.md_5.bungee.api.tab.CustomTabList;
import net.md_5.bungee.command.*;
import net.md_5.bungee.conf.YamlConfig;
import net.md_5.bungee.log.LoggingOutputStream;
import net.md_5.bungee.netty.PipelineUtils;
import net.md_5.bungee.protocol.DefinedPacket;
import net.md_5.bungee.protocol.Protocol;
import net.md_5.bungee.protocol.ProtocolConstants;
import net.md_5.bungee.protocol.packet.Chat;
import net.md_5.bungee.protocol.packet.PluginMessage;
import net.md_5.bungee.query.RemoteQuery;
import net.md_5.bungee.tab.Custom;
import net.md_5.bungee.util.CaseInsensitiveMap;
import org.fusesource.jansi.AnsiConsole;

/**
 * Main BungeeCord proxy class. Concrete {@link ProxyServer} implementation:
 * owns the netty event loops, listener channels, the player-connection map
 * (guarded by {@code connectionLock}), and the plugin/module lifecycle.
 */
public class BungeeCord extends ProxyServer
{

    /**
     * Current operation state.
     */
    public volatile boolean isRunning;
    /**
     * Configuration.
     */
    @Getter
    public final Configuration config = new Configuration();
    /**
     * Localization bundle.
     */
    public ResourceBundle bundle;
    public EventLoopGroup eventLoops;
    /**
     * locations.yml save thread.
     */
    private final Timer saveThread = new Timer( "Reconnect Saver" );
    private final Timer metricsThread = new Timer( "Metrics Thread" );
    /**
     * Server socket listener.
     */
    private Collection<Channel> listeners = new HashSet<>();
    /**
     * Fully qualified connections.
     */
    private final Map<String, UserConnection> connections = new CaseInsensitiveMap<>();
    // Read lock for iteration/lookup, write lock for add/remove of connections.
    private final ReadWriteLock connectionLock = new ReentrantReadWriteLock();
    /**
     * Plugin manager.
     */
    @Getter
    public final PluginManager pluginManager = new PluginManager( this );
    @Getter
    @Setter
    private ReconnectHandler reconnectHandler;
    @Getter
    @Setter
    private ConfigurationAdapter configurationAdapter = new YamlConfig();
    private final Collection<String> pluginChannels = new HashSet<>();
    @Getter
    private final File pluginsFolder = new File( "plugins" );
    @Getter
    private final BungeeScheduler scheduler = new BungeeScheduler();
    @Getter
    private ConsoleReader consoleReader;
    @Getter
    private final Logger logger;
    // Two Gson instances: the serializer differs per protocol version (1.7.6 vs 1.7.2).
    public final Gson gson = new GsonBuilder()
            .registerTypeAdapter( ServerPing.PlayerInfo.class, new PlayerInfoSerializer( ProtocolConstants.MINECRAFT_1_7_6 ) )
            .registerTypeAdapter( Favicon.class, Favicon.getFaviconTypeAdapter() ).create();
    public final Gson gsonLegacy = new GsonBuilder()
            .registerTypeAdapter( ServerPing.PlayerInfo.class, new PlayerInfoSerializer( ProtocolConstants.MINECRAFT_1_7_2 ) )
            .registerTypeAdapter( Favicon.class, Favicon.getFaviconTypeAdapter() ).create();
    @Getter
    private ConnectionThrottle connectionThrottle;
    private final ModuleManager moduleManager = new ModuleManager();

    // Instance initializer: runs before the constructor body for every instance.
    {
        // TODO: Proper fallback when we interface the manager
        getPluginManager().registerCommand( null, new CommandReload() );
        getPluginManager().registerCommand( null, new CommandEnd() );
        getPluginManager().registerCommand( null, new CommandIP() );
        getPluginManager().registerCommand( null, new CommandBungee() );
        getPluginManager().registerCommand( null, new CommandPerms() );

        registerChannel( "BungeeCord" );
    }

    public static BungeeCord getInstance()
    {
        return (BungeeCord) ProxyServer.getInstance();
    }

    /**
     * Constructor: loads the message bundle, installs the console reader and
     * redirects System.out/err through the Bungee logger (a global side effect).
     */
    public BungeeCord() throws IOException
    {
        try
        {
            bundle = ResourceBundle.getBundle( "messages" );
        } catch ( MissingResourceException ex )
        {
            // Fall back to the English bundle when the default locale has none.
            bundle = ResourceBundle.getBundle( "messages", Locale.ENGLISH );
        }

        Log.setOutput( new PrintStream( ByteStreams.nullOutputStream() ) ); // TODO: Bug JLine
        AnsiConsole.systemInstall();
        consoleReader = new ConsoleReader();
        consoleReader.setExpandEvents( false );

        logger = new BungeeLogger( this );
        System.setErr( new PrintStream( new LoggingOutputStream( logger, Level.SEVERE ), true ) );
        System.setOut( new PrintStream( new LoggingOutputStream( logger, Level.INFO ), true ) );

        if ( consoleReader.getTerminal() instanceof UnsupportedTerminal )
        {
            logger.info( "Unable to initialize fancy terminal. To fix this on Windows, install the correct Microsoft Visual C++ 2008 Runtime" );
            logger.info( "NOTE: This error is non crucial, and BungeeCord will still function correctly! Do not bug the author about it unless you are still unable to get it working" );
        }

        if ( NativeCipher.load() )
        {
            logger.info( "Using OpenSSL based native cipher." );
        } else
        {
            logger.info( "Using standard Java JCE cipher. To enable the OpenSSL based native cipher, please make sure you are using 64 bit Ubuntu or Debian with libssl installed." );
        }
    }

    /**
     * Start this proxy instance by loading the configuration, plugins and
     * starting the connect thread.
     *
     * @throws Exception
     */
    @Override
    public void start() throws Exception
    {
        System.setProperty( "java.net.preferIPv4Stack", "true" ); // Minecraft does not support IPv6
        System.setProperty( "io.netty.selectorAutoRebuildThreshold", "0" ); // Seems to cause Bungee to stop accepting connections
        ResourceLeakDetector.setEnabled( false ); // Eats performance

        eventLoops = PipelineUtils.newEventLoopGroup( 0, new ThreadFactoryBuilder().setNameFormat( "Netty IO Thread #%1$d" ).build() );

        File moduleDirectory = new File( "modules" );
        moduleManager.load( this, moduleDirectory );
        pluginManager.detectPlugins( moduleDirectory );

        pluginsFolder.mkdir();
        pluginManager.detectPlugins( pluginsFolder );

        // Order matters: plugins are loaded before config, enabled after.
        pluginManager.loadPlugins();
        config.load();

        isRunning = true;

        pluginManager.enablePlugins();

        connectionThrottle = new ConnectionThrottle( config.getThrottle() );
        startListeners();

        // Persist reconnect locations every 5 minutes.
        saveThread.scheduleAtFixedRate( new TimerTask()
        {
            @Override
            public void run()
            {
                if ( getReconnectHandler() != null )
                {
                    getReconnectHandler().save();
                }
            }
        }, 0, TimeUnit.MINUTES.toMillis( 5 ) );
        metricsThread.scheduleAtFixedRate( new Metrics(), 0, TimeUnit.MINUTES.toMillis( Metrics.PING_INTERVAL ) );
    }

    /**
     * Binds one server channel per configured listener (plus an optional
     * query channel); successful binds are tracked in {@code listeners}.
     */
    public void startListeners()
    {
        for ( final ListenerInfo info : config.getListeners() )
        {
            ChannelFutureListener listener = new ChannelFutureListener()
            {
                @Override
                public void operationComplete(ChannelFuture future) throws Exception
                {
                    if ( future.isSuccess() )
                    {
                        listeners.add( future.channel() );
                        getLogger().info( "Listening on " + info.getHost() );
                    } else
                    {
                        getLogger().log( Level.WARNING, "Could not bind to host " + info.getHost(), future.cause() );
                    }
                }
            };
            new ServerBootstrap()
                    .channel( PipelineUtils.getServerChannel() )
                    .option( ChannelOption.SO_REUSEADDR, true )
                    .childAttr( PipelineUtils.LISTENER, info )
                    .childHandler( PipelineUtils.SERVER_CHILD )
                    .group( eventLoops )
                    .localAddress( info.getHost() )
                    .bind().addListener( listener );

            if ( info.isQueryEnabled() )
            {
                ChannelFutureListener bindListener = new ChannelFutureListener()
                {
                    @Override
                    public void operationComplete(ChannelFuture future) throws Exception
                    {
                        if ( future.isSuccess() )
                        {
                            listeners.add( future.channel() );
                            getLogger().info( "Started query on " + future.channel().localAddress() );
                        } else
                        {
                            getLogger().log( Level.WARNING, "Could not bind to host " + info.getHost(), future.cause() );
                        }
                    }
                };
                new RemoteQuery( this, info ).start( PipelineUtils.getDatagramChannel(), new InetSocketAddress( info.getHost().getAddress(), info.getQueryPort() ), eventLoops, bindListener );
            }
        }
    }

    /**
     * Synchronously closes every bound listener channel and clears the set.
     */
    public void stopListeners()
    {
        for ( Channel listener : listeners )
        {
            getLogger().log( Level.INFO, "Closing listener {0}", listener );
            try
            {
                listener.close().syncUninterruptibly();
            } catch ( ChannelException ex )
            {
                getLogger().severe( "Could not close listen thread" );
            }
        }
        listeners.clear();
    }

    /**
     * Asynchronous shutdown: spawns a "Shutdown Thread" that closes listeners,
     * disconnects players, stops the IO threads, saves reconnect data, disables
     * plugins and finally calls System.exit(0).
     */
    @Override
    public void stop()
    {
        new Thread( "Shutdown Thread" )
        {
            @Override
            public void run()
            {
                BungeeCord.this.isRunning = false;

                stopListeners();
                getLogger().info( "Closing pending connections" );

                connectionLock.readLock().lock();
                try
                {
                    getLogger().info( "Disconnecting " + connections.size() + " connections" );
                    for ( UserConnection user : connections.values() )
                    {
                        user.disconnect( getTranslation( "restart" ) );
                    }
                } finally
                {
                    connectionLock.readLock().unlock();
                }

                getLogger().info( "Closing IO threads" );
                eventLoops.shutdownGracefully();
                try
                {
                    eventLoops.awaitTermination( Long.MAX_VALUE, TimeUnit.NANOSECONDS );
                } catch ( InterruptedException ex )
                {
                    // NOTE(review): interrupt is swallowed without re-interrupting the
                    // thread; acceptable here only because the process exits below.
                }

                if ( reconnectHandler != null )
                {
                    getLogger().info( "Saving reconnect locations" );
                    reconnectHandler.save();
                    reconnectHandler.close();
                }
                saveThread.cancel();
                metricsThread.cancel();

                // TODO: Fix this shit
                getLogger().info( "Disabling plugins" );
                for ( Plugin plugin : pluginManager.getPlugins() )
                {
                    try
                    {
                        plugin.onDisable();
                        for ( Handler handler : plugin.getLogger().getHandlers() )
                        {
                            handler.close();
                        }
                    } catch ( Throwable t )
                    {
                        getLogger().severe( "Exception disabling plugin " + plugin.getDescription().getName() );
                        t.printStackTrace();
                    }
                    getScheduler().cancel( plugin );
                }
                scheduler.shutdown();

                getLogger().info( "Thank you and goodbye" );
                // Need to close loggers after last message!
                for ( Handler handler : getLogger().getHandlers() )
                {
                    handler.close();
                }
                System.exit( 0 );
            }
        }.start();
    }

    /**
     * Broadcasts a packet to all clients that are connected to this instance.
     *
     * @param packet the packet to send
     */
    public void broadcast(DefinedPacket packet)
    {
        connectionLock.readLock().lock();
        try
        {
            for ( UserConnection con : connections.values() )
            {
                con.unsafe().sendPacket( packet );
            }
        } finally
        {
            connectionLock.readLock().unlock();
        }
    }

    @Override
    public String getName()
    {
        return "BungeeCord";
    }

    @Override
    public String getVersion()
    {
        // Falls back to "unknown" when run outside a packaged jar (no manifest).
        return ( BungeeCord.class.getPackage().getImplementationVersion() == null ) ? "unknown" : BungeeCord.class.getPackage().getImplementationVersion();
    }

    @Override
    public String getTranslation(String name, Object... args)
    {
        // Missing keys yield a visible "<translation '...' missing>" marker
        // rather than an exception.
        String translation = "<translation '" + name + "' missing>";
        try
        {
            translation = MessageFormat.format( bundle.getString( name ), args );
        } catch ( MissingResourceException ex )
        {
        }
        return translation;
    }

    @Override
    @SuppressWarnings("unchecked")
    public Collection<ProxiedPlayer> getPlayers()
    {
        connectionLock.readLock().lock();
        try
        {
            // Defensive snapshot: callers get an unmodifiable copy, not a live view.
            return Collections.unmodifiableCollection( new HashSet( connections.values() ) );
        } finally
        {
            connectionLock.readLock().unlock();
        }
    }

    @Override
    public int getOnlineCount()
    {
        return connections.size();
    }

    @Override
    public ProxiedPlayer getPlayer(String name)
    {
        connectionLock.readLock().lock();
        try
        {
            return connections.get( name );
        } finally
        {
            connectionLock.readLock().unlock();
        }
    }

    // Linear scan: the connection map is keyed by name, not UUID.
    public ProxiedPlayer getPlayer(UUID uuid)
    {
        connectionLock.readLock().lock();
        try
        {
            for ( ProxiedPlayer proxiedPlayer : connections.values() )
            {
                if ( proxiedPlayer.getUniqueId().equals( uuid ) )
                {
                    return proxiedPlayer;
                }
            }
            return null;
        } finally
        {
            connectionLock.readLock().unlock();
        }
    }

    @Override
    public Map<String, ServerInfo> getServers()
    {
        return config.getServers();
    }

    @Override
    public ServerInfo getServerInfo(String name)
    {
        return getServers().get( name );
    }

    @Override
    @Synchronized("pluginChannels")
    public void registerChannel(String channel)
    {
        pluginChannels.add( channel );
    }

    @Override
    @Synchronized("pluginChannels")
    public void unregisterChannel(String channel)
    {
        pluginChannels.remove( channel );
    }

    @Override
    @Synchronized("pluginChannels")
    public Collection<String> getChannels()
    {
        return Collections.unmodifiableCollection( pluginChannels );
    }

    public PluginMessage registerChannels()
    {
        return new PluginMessage( "REGISTER", Util.format( pluginChannels, "\00" ).getBytes() );
    }

    @Override
    public int getProtocolVersion()
    {
        return Protocol.supportedVersions.get( Protocol.supportedVersions.size() - 1 );
    }

    @Override
    public String getGameVersion()
    {
        return "1.7.9";
    }

    @Override
    public ServerInfo constructServerInfo(String name, InetSocketAddress address, String motd, boolean restricted)
    {
        return new BungeeServerInfo( name, address, motd, restricted );
    }

    @Override
    public CommandSender getConsole()
    {
        return ConsoleCommandSender.getInstance();
    }

    @Override
    public void broadcast(String message)
    {
        broadcast( TextComponent.fromLegacyText( message ) );
    }

    @Override
    public void broadcast(BaseComponent... message)
    {
        getConsole().sendMessage( BaseComponent.toLegacyText( message ) );
        broadcast( new Chat( ComponentSerializer.toString( message ) ) );
    }

    @Override
    public void broadcast(BaseComponent message)
    {
        getConsole().sendMessage( message.toLegacyText() );
        broadcast( new Chat( ComponentSerializer.toString( message ) ) );
    }

    public void addConnection(UserConnection con)
    {
        connectionLock.writeLock().lock();
        try
        {
            connections.put( con.getName(), con );
        } finally
        {
            connectionLock.writeLock().unlock();
        }
    }

    public void removeConnection(UserConnection con)
    {
        connectionLock.writeLock().lock();
        try
        {
            connections.remove( con.getName() );
        } finally
        {
            connectionLock.writeLock().unlock();
        }
    }

    @Override
    public CustomTabList customTabList(ProxiedPlayer player)
    {
        return new Custom( player );
    }

    @Override
    public Collection<String> getDisabledCommands()
    {
        return config.getDisabledCommands();
    }

    @Override
    public Collection<ProxiedPlayer> matchPlayer(final String partialName)
    {
        Preconditions.checkNotNull( partialName, "partialName" );

        ProxiedPlayer exactMatch = getPlayer( partialName );
        if ( exactMatch != null )
        {
            return Collections.singleton( exactMatch );
        }

        // NOTE(review): Guava Iterables.find returns a SINGLE element and throws
        // NoSuchElementException when no player matches; Iterables.filter may
        // have been intended here — confirm against callers before changing.
        return Sets.newHashSet( Iterables.find( getPlayers(), new Predicate<ProxiedPlayer>()
        {
            @Override
            public boolean apply(ProxiedPlayer input)
            {
                return input.getName().toLowerCase().contains( partialName.toLowerCase() );
            }
        } ) );
    }
}
package api.web.gw2.mapping.v2.wvw.objectives;

import api.web.gw2.mapping.core.Coord2DValue;
import api.web.gw2.mapping.core.Coord3DValue;
import api.web.gw2.mapping.v2.wvw.MapType;
import api.web.gw2.mapping.core.IdValue;
import api.web.gw2.mapping.core.LocalizedResource;
import api.web.gw2.mapping.core.OptionalValue;
import api.web.gw2.mapping.core.Point2D;
import api.web.gw2.mapping.core.Point3D;
import api.web.gw2.mapping.core.URLReference;
import api.web.gw2.mapping.core.URLValue;
import api.web.gw2.mapping.v2.APIv2;

/**
 * Defines a WvW objective as returned by the {@code v2/wvw/objectives} endpoint.
 */
@APIv2(endpoint = "v2/wvw/objectives") // NOI18N.
public interface Objective {

    /**
     * Gets the id of this objective.
     * @return A {@code String} instance, never {@code null}.
     */
    @IdValue(flavor = IdValue.Flavor.STRING)
    String getId();

    /**
     * Gets the i18n abstract name of this objective.
     * <br>Note: this may not be the same as the name in game.
     * @return A {@code String} instance, never {@code null}.
     */
    @LocalizedResource
    String getName();

    /**
     * Gets the id of the sector containing this objective.
     * @return An {@code int} &gt; 0.
     */
    @IdValue
    int getSectorId();

    /**
     * Gets the type of this objective.
     * @return An {@code ObjectiveType} instance, never {@code null}.
     */
    ObjectiveType getType();

    /**
     * Gets the map type of this objective.
     * @return A {@code MapType} instance, never {@code null}.
     */
    MapType getMapType();

    /**
     * Gets the id of the map of this objective.
     * @return An {@code int}.
     */
    @IdValue
    int getMapId();

    /**
     * Gets the coordinates of this objective.
     * @return A {@code Point3D} instance, never {@code null}.
     */
    @Coord3DValue
    Point3D getCoord();

    /**
     * Gets the coordinates of the label for this objective.
     * @return A {@code Point2D} instance, never {@code null}.
     */
    @Coord2DValue
    Point2D getLabelCoord();

    /**
     * Gets the URL to the marker icon for this objective.
     * @return A {@code URLReference} instance, never {@code null}.
     */
    @URLValue
    URLReference getMarker();

    /**
     * Gets the chat link needed to pass this objective to other players.
     * @return A {@code String} instance, never {@code null}.
     * <br>Old JSON files from earlier versions of the API or objectives that
     * cannot be linked may return an empty {@code string}.
     */
    @OptionalValue
    String getChatLink();
}
package backupbuddies.gui;

import java.awt.*;
import java.awt.event.*;
import javax.swing.*;
import java.util.Map;
import java.util.HashMap;
import java.lang.*;
//do not import util.*
//there is a Timer class in util and swing that conflict
//currently using swing timer

import backupbuddies.shared.Interface;
import static backupbuddies.Debug.*;

/**
 * Main BackupBuddies GUI window: login, upload/download controls and the
 * user/file status lists. All state is static; the window is built on the
 * Swing event-dispatch thread via {@link #startGui()}.
 */
@SuppressWarnings("serial")
public class GuiMain extends JFrame {

	//load assets, lists etc before creating the gui
	static JFrame frame;
	static JTextField saveDir = new JTextField();

	static final DefaultListModel<String> userModel = new DefaultListModel<String>();
	static final DefaultListModel<String> fileModel = new DefaultListModel<String>();

	// Status icons must be initialized BEFORE the maps below, because
	// fetchAndProcess references them during class initialization.
	static ImageIcon statusRed = new ImageIcon("gui/assets/RedCircle.png");
	static ImageIcon statusYellow = new ImageIcon("gui/assets/YellowCircle.png");
	static ImageIcon statusGreen = new ImageIcon("gui/assets/GreenCircle.png");

	static Map<String, ImageIcon> userMap = fetchAndProcess("users");
	static Map<String, ImageIcon> fileMap = fetchAndProcess("files");

	/**
	 * Fetches the user or file status list from the network layer and maps
	 * each entry's integer status to its icon (0=red, 1=green, 2=yellow,
	 * anything else=red).
	 *
	 * @param type "users" or "files"; any other value yields an empty map
	 * @return map from entry name to its status icon
	 */
	//process lists returned from networking
	//NOTE: to speed this up we can just do it in the interface methods
	//iteration already occurs there
	public static Map<String, ImageIcon> fetchAndProcess(String type) {
		//get data
		Map<String, Integer> map = new HashMap<String, Integer>();
		if (type.equals("users"))
			map = Interface.fetchUserList();
		else if (type.equals("files"))
			map = Interface.fetchFileList();

		//replace int with img
		Map<String, ImageIcon> iconMap = new HashMap<String, ImageIcon>();
		for (Map.Entry<String, Integer> entry : map.entrySet()) {
			switch (entry.getValue()) {
			case 0:
				iconMap.put(entry.getKey(), statusRed);
				break;
			case 1:
				iconMap.put(entry.getKey(), statusGreen);
				break;
			case 2:
				iconMap.put(entry.getKey(), statusYellow);
				break;
			default:
				iconMap.put(entry.getKey(), statusRed);
				break;
			}
		}
		return iconMap;
	}

	/**
	 * Refreshes the cached user and file maps on a repeating Swing timer.
	 *
	 * @param interval refresh period in milliseconds
	 */
	//updates ui on interval
	public static void startIntervals(int interval) {
		ActionListener updateUI = new ActionListener() {
			public void actionPerformed(ActionEvent e) {
				userMap = fetchAndProcess("users");
				// FIX: the "files" result was previously assigned to userMap,
				// clobbering the user list and never refreshing the file list.
				fileMap = fetchAndProcess("files");
			}
		};
		Timer timer = new Timer(interval, updateUI);
		timer.setRepeats(true);
		timer.start();
	}

	/**
	 * Lets the user pick a save directory; stores it in the saveDir field and
	 * notifies the network layer.
	 */
	//user chooses directory to save to
	public static void setSaveDir() {
		JFileChooser browser = new JFileChooser();
		browser.setDialogTitle("choose save location");
		browser.setFileSelectionMode(JFileChooser.DIRECTORIES_ONLY);
		browser.setAcceptAllFileFilterUsed(false);

		if (browser.showOpenDialog(frame) == JFileChooser.APPROVE_OPTION) {
			saveDir.setText(browser.getSelectedFile().toString());
			Interface.testFile(saveDir.getText());
		}
	}

	/**
	 * Lets the user pick a file and uploads it to the network.
	 */
	//user selects a file and it uploads to network
	public static void chooseAndUpload() {
		JFileChooser browser = new JFileChooser();
		browser.setDialogTitle("choose file to upload");

		if (browser.showOpenDialog(frame) == JFileChooser.APPROVE_OPTION) {
			//browser.getSelectedFile().toString() for full path w/filename
			//since download will be separate name and directory
			//might be easier to keep separate
			Interface.uploadFile(browser.getSelectedFile().getName(),
					browser.getCurrentDirectory().toString());
		}
	}

	/**
	 * Downloads a file to the save directory, prompting for the directory
	 * first if it has not been chosen yet.
	 */
	//user downloads a file to save directory (and chooses if not set)
	public static void setDirAndDownload() {
		//FIXME: need to have a list of uploaded files to choose from
		String fileToGet = "test.txt";
		if (saveDir.getText().equals("")) {
			setSaveDir();
		}
		Interface.downloadFile(fileToGet, saveDir.getText());
	}

	/**
	 * Builds the panel holding the upload/download/save-location buttons.
	 *
	 * @return the assembled control panel
	 */
	//upload, download, save control buttons
	public static JPanel controlPanel() {
		//create panel
		JPanel controlPanel = new JPanel();

		//create components
		JLabel fileLabel = new JLabel("backup your files");
		JButton uploadButton = new JButton("upload");
		JButton downloadButton = new JButton("download");
		JButton pathButton = new JButton("save to...");

		//bind methods to buttons
		uploadButton.addActionListener(new ActionListener() {
			@Override
			public void actionPerformed(ActionEvent e) {
				chooseAndUpload();
			}
		});
		pathButton.addActionListener(new ActionListener() {
			@Override
			public void actionPerformed(ActionEvent e) {
				setSaveDir();
			}
		});
		downloadButton.addActionListener(new ActionListener() {
			@Override
			public void actionPerformed(ActionEvent e) {
				setDirAndDownload();
			}
		});

		//add components to panel and specify orientation
		controlPanel.add(fileLabel);
		controlPanel.add(pathButton);
		controlPanel.add(uploadButton);
		controlPanel.add(downloadButton);
		controlPanel.setComponentOrientation(
				ComponentOrientation.LEFT_TO_RIGHT);

		return controlPanel;
	}

	/**
	 * Builds the panel that lets the user enter a network IP/password and join.
	 *
	 * @return the assembled login panel
	 */
	//allows user to input ip and pass and connect to network
	public static JPanel loginPanel() {
		//create panel
		final JPanel loginPanel = new JPanel();

		//create components
		final JLabel loginLabel = new JLabel("join a network:");
		final JButton loginButton = new JButton("join");
		final JTextField ipField = new JTextField("network ip");
		final JTextField passField = new JTextField("network password");

		//bind methods to buttons
		loginButton.addActionListener(new ActionListener() {
			@Override
			public void actionPerformed(ActionEvent e) {
				Interface.login(ipField.getText(), passField.getText());
			}
		});
		//clear the placeholder text on first click
		ipField.addMouseListener(new MouseAdapter() {
			@Override
			public void mouseClicked(MouseEvent e) {
				ipField.setText("");
			}
		});
		passField.addMouseListener(new MouseAdapter() {
			@Override
			public void mouseClicked(MouseEvent e) {
				passField.setText("");
			}
		});

		//add components to panel and specify orientation
		loginPanel.add(loginLabel);
		loginPanel.add(ipField);
		loginPanel.add(passField);
		loginPanel.add(loginButton);
		loginPanel.setComponentOrientation(
				ComponentOrientation.LEFT_TO_RIGHT);

		return loginPanel;
	}

	/** Renders a user-list entry with its status icon from userMap. */
	public static class UserListRenderer extends DefaultListCellRenderer {
		@Override
		public Component getListCellRendererComponent(JList list, Object value,
				int index, boolean isSelected, boolean cellHasFocus) {
			JLabel label = (JLabel)super.getListCellRendererComponent(
					list, value, index, isSelected, cellHasFocus);
			label.setIcon(userMap.get((String)value));
			label.setHorizontalTextPosition(JLabel.RIGHT);
			return label;
		}
	}

	/** Renders a file-list entry with its status icon from fileMap. */
	public static class FileListRenderer extends DefaultListCellRenderer {
		@Override
		public Component getListCellRendererComponent(JList list, Object value,
				int index, boolean isSelected, boolean cellHasFocus) {
			JLabel label = (JLabel)super.getListCellRendererComponent(
					list, value, index, isSelected, cellHasFocus);
			label.setIcon(fileMap.get((String)value));
			label.setHorizontalTextPosition(JLabel.RIGHT);
			return label;
		}
	}

	/**
	 * Builds the scrollable list of peers in the network.
	 *
	 * @return scroll pane wrapping the user list
	 */
	//list of peers in the network
	//TODO: multiple selection
	//TODO: renders images
	public static JScrollPane userListPanel() {
		userMap = fetchAndProcess("users");
		JList list = new JList(userMap.keySet().toArray());
		list.setCellRenderer(new UserListRenderer());
		JScrollPane pane = new JScrollPane(list);
		pane.setPreferredSize(new Dimension(300, 100));
		return pane;
	}

	/**
	 * Builds the scrollable list of files that can be recovered.
	 *
	 * @return scroll pane wrapping the file list
	 */
	//list of files you can recover
	//TODO: multiple selection
	//TODO: renders images
	public static JScrollPane fileListPanel() {
		fileMap = fetchAndProcess("files");
		JList list = new JList(fileMap.keySet().toArray());
		list.setCellRenderer(new FileListRenderer());
		JScrollPane pane = new JScrollPane(list);
		pane.setPreferredSize(new Dimension(300, 100));
		return pane;
	}

	/**
	 * Builds the window on the EDT, wires the panels together with a
	 * SpringLayout and shows it.
	 */
	//bind panels to frame and display the gui
	public static void startGui() {
		javax.swing.SwingUtilities.invokeLater(new Runnable() {
			public void run() {
				//start those intervals
				startIntervals(1000);

				//create the window and center it on screen
				frame = new JFrame("BackupBuddies");
				frame.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE);
				frame.setResizable(false);
				Container contentPane = frame.getContentPane();
				SpringLayout layout = new SpringLayout();
				contentPane.setLayout(layout);
				//these values are used to center despite pack() overriding
				frame.setSize(700, 300);
				//frame.setLocationRelativeTo(null);

				//FIXME: migrate to SpringLayout
				//this uses the easy yet terrible BorderLayout to
				//prototype each panel

				//populate the window (the throwaway "new JPanel()"
				//placeholders that were immediately overwritten are gone)
				JPanel loginPanel = loginPanel();
				JPanel controlPanel = controlPanel();
				JScrollPane userListPanel = userListPanel();
				JScrollPane fileListPanel = fileListPanel();

				contentPane.add(loginPanel);
				contentPane.add(controlPanel);
				contentPane.add(userListPanel);
				contentPane.add(fileListPanel);

				layout.putConstraint(SpringLayout.NORTH, loginPanel, 5,
						SpringLayout.NORTH, contentPane);
				layout.putConstraint(SpringLayout.WEST, loginPanel, 5,
						SpringLayout.WEST, contentPane);
				layout.putConstraint(SpringLayout.WEST, userListPanel, 5,
						SpringLayout.WEST, contentPane);
				layout.putConstraint(SpringLayout.NORTH, userListPanel, 5,
						SpringLayout.SOUTH, loginPanel);
				layout.putConstraint(SpringLayout.NORTH, controlPanel, 5,
						SpringLayout.SOUTH, userListPanel);
				layout.putConstraint(SpringLayout.WEST, controlPanel, 5,
						SpringLayout.WEST, contentPane);
				layout.putConstraint(SpringLayout.WEST, fileListPanel, 20,
						SpringLayout.EAST, userListPanel);
				layout.putConstraint(SpringLayout.NORTH, fileListPanel, 5,
						SpringLayout.SOUTH, loginPanel);

				//display the window
				//pack - layout manager auto sizes and auto locates
				//fixes size issue with insets/border of frame
				//aka use minimum frame size to display the content
				//frame.pack();
				frame.setVisible(true);
			}
		});
	}
}
package be.ibridge.kettle.cluster;

import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Hashtable;
import java.util.Properties;

import be.ibridge.kettle.core.SharedObjects;
import be.ibridge.kettle.core.database.DatabaseMeta;
import be.ibridge.kettle.core.database.PartitionDatabaseMeta;
import be.ibridge.kettle.core.exception.KettleXMLException;

/**
 * The program generates a piece of XML that defines a (shared) Cluster Schema
 *
 * Reads slave-server host/port pairs from a properties file (keys
 * SLAVE_SERVER_&lt;n&gt;_IP / SLAVE_SERVER_&lt;n&gt;_PORT, n starting at 1),
 * builds a ClusterSchema (first server is the master) plus a partitioned
 * MySQL DatabaseMeta, and stores both in a shared-objects file.
 *
 * @author Matt
 */
public class GenerateClusterSchema
{
    /** Property-key prefix for slave-server entries. */
    public static final String PREFIX = "SLAVE_SERVER_";
    /** Property-key suffix for a server's port. */
    public static final String PORT = "_PORT";
    /** Property-key suffix for a server's IP address. */
    public static final String IP = "_IP";

    /**
     * @param args <br>
     *  - the properties file to read
     *  - the shared file to write to
     *  - the name of the cluster schema
     *
     * @throws IOException
     * @throws FileNotFoundException
     * @throws KettleXMLException
     */
    public static void main(String[] args) throws FileNotFoundException, IOException, KettleXMLException
    {
        Properties properties = new Properties();
        properties.load(new FileInputStream(new File(args[0])));

        SharedObjects sharedObjects = new SharedObjects(args[1], new ArrayList(), new Hashtable());

        // Template connection; the real hostname is filled in from the master below.
        DatabaseMeta mysql = new DatabaseMeta("MySQL EC2", "MySQL", "JDBC", null, "test", "3306", "matt", "abcd");

        ClusterSchema clusterSchema = new ClusterSchema();
        clusterSchema.setName(args[2]);
        clusterSchema.setBasePort("40000");
        clusterSchema.setSocketsBufferSize("100000");
        clusterSchema.setSocketsFlushInterval("0");
        clusterSchema.setSocketsCompressed(true);

        // Count the configured servers: scan SLAVE_SERVER_<n>_IP until a gap.
        int max = 1;
        while (properties.getProperty(PREFIX + max + IP) != null)
            max++;
        // FIX: the scan overshoots by one (it stops on the first missing key);
        // the original source had a stray "max" token here (syntax error) where
        // this decrement belongs. After this, max == number of slave servers.
        max--;

        mysql.setPartitioned(max > 1);
        // One DB partition per slave (every server except the master).
        PartitionDatabaseMeta[] partDbMeta = new PartitionDatabaseMeta[max - 1];

        for (int i = 1; i <= max; i++)
        {
            String serverIp = properties.getProperty(PREFIX + i + IP);
            String serverPort = properties.getProperty(PREFIX + i + PORT);

            if (i == 1) // use the first as the master
            {
                // add the master
                clusterSchema.getSlaveServers().add(new SlaveServer(serverIp, serverPort, "cluster", "cluster", null, null, null, true));
                mysql.setHostname(serverIp);
                if (max == 1) // if there is just one server here, so we add a slave too besides the master
                {
                    clusterSchema.getSlaveServers().add(new SlaveServer(serverIp, serverPort, "cluster", "cluster"));
                }
            }
            else
            {
                // Add a slave server
                clusterSchema.getSlaveServers().add(new SlaveServer(serverIp, serverPort, "cluster", "cluster"));

                // Add a db partition
                partDbMeta[i - 2] = new PartitionDatabaseMeta("P" + i, serverIp, "3306", "test");
            }
        }

        sharedObjects.storeObject(clusterSchema);

        mysql.setPartitioningInformation(partDbMeta);
        sharedObjects.storeObject(mysql);

        sharedObjects.saveToFile();
    }
}
package bzh.plealog.dbmirror.util.runner;

import java.io.BufferedReader;
import java.io.File;
import java.io.FileInputStream;
import java.io.InputStreamReader;
import java.text.MessageFormat;
import java.util.ArrayList;
import java.util.Hashtable;
import java.util.List;
import java.util.Map;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

import bzh.plealog.dbmirror.fetcher.LoaderEngine;
import bzh.plealog.dbmirror.lucenedico.DicoTermQuerySystem;
import bzh.plealog.dbmirror.task.PAbstractTask;
import bzh.plealog.dbmirror.task.PTask;
import bzh.plealog.dbmirror.task.PTaskEngineAbortException;
import bzh.plealog.dbmirror.task.PTaskFormatDB;
import bzh.plealog.dbmirror.task.PTaskMakeBlastAlias;
import bzh.plealog.dbmirror.util.Utils;
import bzh.plealog.dbmirror.util.conf.DBMSAbstractConfig;
import bzh.plealog.dbmirror.util.log.LoggerCentral;
import bzh.plealog.dbmirror.util.sequence.SeqIOUtils;
import bzh.plealog.dbmirror.util.sequence.TaxonMatcherHelper;

/**
 * Background worker that builds a BLAST databank from a set of sequence files:
 * it checks/converts each input to Fasta, slices the result into volumes,
 * runs formatdb or makeblastdb on the volumes and finally writes an alias
 * file. Progress and errors are reported through a {@link FormatDBMonitor}.
 */
public class FormatDBRunner extends Thread {
  private String              _formatDBCmd;
  // 06/06/2014 new KLib => use blast databank instead of volume files
  // private String _makePlastDBCmd;
  private String              _dbxrefsConfig;
  private String              _dbPath;
  private String              _dbName;
  private List<String>        _files;
  private FormatDBMonitor     _monitor;
  private TaxonMatcherHelper  _taxMatcher;
  private DicoTermQuerySystem _dico;
  private boolean             _useNcbiIdFormat;
  private boolean             _isProteic;
  private boolean             _checkInputFiles;
  private boolean             _success = false;
  private int                 _headerFormat;
  private int                 _volumeSize;
  private int                 _blastVersion;

  // Return codes of prepareTmpFastaFile().
  private static final int    REFORMAT_SEQ_FILE_ERROR  = 0;
  private static final int    REFORMAT_SEQ_FILE_OK     = 1;
  private static final int    REFORMAT_SEQ_FILE_NON_NR = 2;

  // Suffix appended to converted (non-redundant) Fasta files.
  private static final String NR_EXT                   = "_kb";
  public static final String  Plast_INFO_EXT           = ".info";

  private static final Log    LOGGER                   = LogFactory
      .getLog(DBMSAbstractConfig.KDMS_ROOTLOG_CATEGORY + ".FormatDBRunner");

  // User-facing progress messages.
  private static final MessageFormat FORMAT_MSG1_HEADER = new MessageFormat(
      "Reading file ({0}/{1}) {2}...");
  private static final MessageFormat FORMAT_MSG2_HEADER = new MessageFormat(
      "Formatting {0}...");
  private static final String        FORMAT_MSG3_HEADER = "Creating Fasta volumes...";
  private static final String        FORMAT_MSG_OK      = "done.";
  private static final MessageFormat FORMAT_NON_NR_MSG  = new MessageFormat(
      "Found {0} redundant sequence IDs: only first occurrences have been retained.");
  private static final MessageFormat FORMAT_TOT_SEQ_MSG = new MessageFormat(
      "File contains {0} sequences.");
  private static final MessageFormat FORMAT_GUESS_FORMAT_MSG = new MessageFormat(
      "format is {0}...");

  public FormatDBRunner() {
  }

  /**
   * @param monitor          progress/error sink; must not be null
   * @param formatDBCmd      path of the formatdb or makeblastdb executable
   * @param dbxrefsConfig    optional db-xref tag configuration file
   * @param dbPath           directory where the databank is installed
   * @param dbName           databank name
   * @param files            input sequence files
   * @param taxMatcher       optional taxon filter
   * @param dico             optional dictionary query system
   * @param checkForNrID     whether to detect redundant sequence IDs
   * @param useNcbiIdFormat  whether IDs follow NCBI conventions (-o / -parse_seqids)
   * @param isProteic        protein (true) vs nucleotide (false) databank
   * @param checkInputFiles  whether input files are checked/converted
   * @param headerFormat     Fasta header format code (see SeqIOUtils)
   * @param volumeSize       volume size in GB
   * @param blastVer         BLAST db version (makeblastdb -blastdb_version)
   */
  public FormatDBRunner(FormatDBMonitor monitor, String formatDBCmd,
      String dbxrefsConfig, String dbPath, String dbName, List<String> files,
      TaxonMatcherHelper taxMatcher, DicoTermQuerySystem dico,
      boolean checkForNrID, boolean useNcbiIdFormat, boolean isProteic,
      boolean checkInputFiles, int headerFormat, int volumeSize, int blastVer) {
    _monitor = monitor;
    _monitor.setCheckNR(checkForNrID);
    _formatDBCmd = formatDBCmd;
    _dbxrefsConfig = dbxrefsConfig;
    _dbPath = dbPath;
    _dbName = dbName;
    _files = files;
    _taxMatcher = taxMatcher;
    _dico = dico;
    _useNcbiIdFormat = useNcbiIdFormat;
    _checkInputFiles = checkInputFiles;
    _isProteic = isProteic;
    _headerFormat = headerFormat;
    _volumeSize = volumeSize;
    _blastVersion = blastVer;
  }

  /**
   * Deletes stale databank files from a previous run. Keeps archives,
   * numbered volume files and task/loader marker files.
   */
  private void removeOldFiles(String path, boolean isProteic) {
    File[] files;
    File f;
    String fName, fDbName1, fDbName2;
    int i;

    // construct the name of the file that were (or will) be created during
    // formatdb step
    // depending if the user chosen to check or not the provided files
    // dbFileName may contain the '_kb' or not. So we have to locate
    // both types of files
    fDbName1 = new File(path).getName() + ".";
    fDbName2 = new File(path).getName() + "_";
    // get an array of all the files contained in the directory where to
    // install the blast database
    files = new File(path).getParentFile().listFiles();
    // remove all files prefixed with fDbName
    for (i = 0; i < files.length; i++) {
      f = files[i];
      if (!f.isFile())
        continue;
      fName = f.getName();
      if ((fName.startsWith(fDbName1) || fName.startsWith(fDbName2))
          && !(fName.endsWith(DBMSAbstractConfig.FEXT_NUM)
              || fName.endsWith("gz") || fName.endsWith("tgz")
              || fName.endsWith("zip") || fName.endsWith(PTask.TASK_OK_FEXT)
              || fName.endsWith(LoaderEngine.LOAD_OK_FEXT))) {
        f.delete();
      }
    }
  }

  /**
   * Prepares the formatdb command-line parameters.
   */
  private Map<String, CommandArgument> prepareParamsFormatDB(String dbPath,
      boolean isProt, boolean useNcbiIdFormat, String dbName,
      List<String> dbFileNames) {
    Hashtable<String, CommandArgument> params;
    StringBuffer buf;
    String p;
    int i, size;

    params = new Hashtable<String, CommandArgument>();
    // isProteic ?
    params.put("-p", new CommandArgument((isProt ? "T" : "F"), false));
    // db File to format: comma-separated list of input files
    buf = new StringBuffer();
    size = dbFileNames.size();
    i = 0;
    for (String dataFile : dbFileNames) {
      buf.append(dataFile);
      if ((i + 1) < size) {
        buf.append(",");
      }
      i++;
    }
    p = buf.toString();
    params.put("-i", new CommandArgument(p, true, true, true));
    // parse gi (required to use NCBI recommendations)
    params.put("-o", new CommandArgument((useNcbiIdFormat ? "T" : "F"), false));
    // log file
    params.put("-l", new CommandArgument("formatdb.log", false));
    // db name; only when we have multiple input files
    params.put("-n", new CommandArgument(dbName, false));
    return params;
  }

  /**
   * Prepares the makeblastdb command-line parameters.
   */
  private Map<String, CommandArgument> prepareParamsMakeBlastDB(String dbPath,
      boolean isProt, boolean useNcbiIdFormat, String dbName,
      List<String> dbFileNames) {
    Hashtable<String, CommandArgument> params;
    StringBuffer buf;
    String p;
    int i, size;

    params = new Hashtable<String, CommandArgument>();
    // isProteic ?
    params.put("-dbtype", new CommandArgument((isProt ? "prot" : "nucl"), false));
    // db File to format: comma-separated list of input files
    buf = new StringBuffer();
    size = dbFileNames.size();
    i = 0;
    for (String dataFile : dbFileNames) {
      buf.append(dataFile);
      if ((i + 1) < size) {
        buf.append(",");
      }
      i++;
    }
    p = buf.toString();
    params.put("-in", new CommandArgument(p, true, true, true));
    // parse gi (required to use NCBI recommendations)
    if (useNcbiIdFormat)
      params.put("-parse_seqids", new CommandArgument("", false));
    // log file
    params.put("-logfile", new CommandArgument("makeblastdb.log", false));
    // db name; only when we have multiple input files
    params.put("-title", new CommandArgument(dbName, false));
    params.put("-out", new CommandArgument(dbName, false));
    // blast version; default is 5
    params.put("-blastdb_version",
        new CommandArgument(String.valueOf(_blastVersion), false));
    return params;
  }

  /**
   * Checks a single input file and, unless it is already Fasta and checking
   * is off, converts it to a temporary Fasta file (si.preparedDbName).
   * Updates si's counters/flags and returns one of the REFORMAT_SEQ_FILE_*
   * codes.
   *
   * @param si              in/out descriptor of the file being processed
   * @param checkInputFiles whether the user asked for input checking
   * @param isProt          protein vs nucleotide (currently unused here)
   */
  private int prepareTmpFastaFile(SeqInfo si, boolean checkInputFiles,
      boolean isProt) {
    String msg;
    File f1;
    int[] rets;
    int ret = REFORMAT_SEQ_FILE_ERROR, format, totSeq = 0;

    try {
      format = SeqIOUtils.guessFileFormat(si.dbFileName);
      si.msg += FORMAT_GUESS_FORMAT_MSG
          .format(new Object[] { SeqIOUtils.FILE_TYPES[format] });
      _monitor.setTxtMessage(si.msg);
      si.fileType = format;
      if (format == SeqIOUtils.UNKNOWN) {
        throw new Exception("Unknown file format.");
      }
      if (!checkInputFiles) // user does not want to check input file
      {
        // but in fasta format, it is mandatory to do just a little things
        if (format == SeqIOUtils.FASTADNA || format == SeqIOUtils.FASTAPROT
            || format == SeqIOUtils.FASTARNA) {
          // remove old files and set the fasta file created = the fasta file
          // downloaded
          f1 = new File(si.dbFileName);
          removeOldFiles(si.dbFileName, _isProteic);
          si.preparedDbName = si.dbFileName;
          // it is required to count sequences to get the grand total
          // number of sequences available
          countSequence(f1, _monitor);
          si.converted = false;
          // it is ok !
          return REFORMAT_SEQ_FILE_OK;
        }
      }
      removeOldFiles(si.dbFileName, _isProteic);
      si.converted = true;
      si.msg += "converting to Fasta...";
      _monitor.setTxtMessage(si.msg);
      rets = SeqIOUtils.convertToFasta(si.dbFileName, si.preparedDbName,
          format, _monitor, _taxMatcher, _dico, _headerFormat);
      if (rets != null) {
        totSeq = rets[0];
      }
      ret = REFORMAT_SEQ_FILE_OK;
    } catch (PTaskEngineAbortException ex) {
      si.msg = ex.getMessage();
      ret = REFORMAT_SEQ_FILE_ERROR;
    } catch (Exception e) {
      msg = "Unable to read sequence file.\n" + si.dbFileName + ": "
          + e.getMessage() + ".\nCheck your file for sequence ID: "
          + _monitor.getLastID();
      si.msg = msg;
      ret = REFORMAT_SEQ_FILE_ERROR;
    }
    si.discardSeq = 0;
    si.totSeq = totSeq;
    return ret;
  }

  /**
   * Runs a formatdb job: check/convert inputs, create Fasta volumes, run the
   * external formatter, then write the alias file. Sets _success accordingly.
   */
  private void doJob() {
    DBMSExecNativeCommand executor;
    Process proc = null;
    int exitCode = -1;
    boolean formatdbrunning, stopped;
    Map<String, CommandArgument> params;
    String fName, dbLocation, dbPath, msg;
    List<String> dbList;
    ArrayList<String> formattedDbList;
    Object[] values = new Object[3]; // needed to format the messages
    File volumeOkFile;
    List<String> volumes;
    SeqInfo si;
    int ret, totSeq, curFile = 0, totFiles;
    long tim;
    boolean runOk = true;

    // Skip everything when the alias file was already produced by a past run.
    fName = PTaskMakeBlastAlias.getBlastAliasFilePath(_dbPath, _dbName,
        _isProteic);
    if (PAbstractTask.testTaskOkForFileExists(fName)) {
      LoggerCentral.info(LOGGER,
          "skip BLAST db creation: alias file already exists");
      if (_taxMatcher != null)
        _taxMatcher.closeTaxonMatcher();
      _success = true;
      return;
    }
    dbLocation = Utils.terminatePath(_dbPath);
    dbPath = dbLocation + _dbName;
    executor = new DBMSExecNativeCommand();
    dbList = _files;
    formattedDbList = new ArrayList<String>();
    totSeq = 0;
    try {
      // get or create the volumes ?
      // it depends of the volumes.ok file
      volumeOkFile = new File(dbLocation, PTaskFormatDB.VOL_FILES_OK);
      if (volumeOkFile.exists()) {
        volumes = Utils.getFileVolumes(dbLocation, _dbName);
      } else {
        si = new SeqInfo();
        // required
        PTaskMakeBlastAlias.removeOldAlias(_dbPath, _dbName, _isProteic);
        // required
        totFiles = dbList.size();
        // first pass: check each source file for its format and for redundant
        // sequences
        for (String dbFileName : dbList) {
          curFile++;
          if (!new File(dbFileName).exists()) {
            msg = "File not found: " + dbFileName;
            _monitor.setErrMsg(msg);
            throw new Exception(msg);
          }
          si.dbFileName = dbFileName;
          fName = new File(dbFileName).getName();
          // fasta converted filename
          si.preparedDbName = dbLocation + fName + NR_EXT;
          values[0] = curFile;
          values[1] = totFiles;
          values[2] = fName;
          msg = FORMAT_MSG1_HEADER.format(values);
          _monitor.setTxtMessage(msg);
          tim = System.currentTimeMillis();
          LoggerCentral.info(LOGGER, "Checking: " + dbFileName);
          si.msg = msg;
          ret = prepareTmpFastaFile(si, _checkInputFiles, _isProteic);
          LoggerCentral.info(LOGGER, "checking time: "
              + ((System.currentTimeMillis() - tim) / 1000) + " s");
          if (ret == REFORMAT_SEQ_FILE_ERROR) {
            _monitor.setErrMsg(si.msg);
            runOk = false;
            break;
          }
          msg = si.msg;
          msg += FORMAT_MSG_OK;
          if (si.converted) {
            msg += " ";
            msg += FORMAT_TOT_SEQ_MSG.format(new Object[] { si.totSeq });
            if (ret == REFORMAT_SEQ_FILE_NON_NR) {
              msg += " ";
              msg += FORMAT_NON_NR_MSG.format(new Object[] { si.discardSeq });
            }
            totSeq += si.totSeq;
          }
          formattedDbList.add(si.preparedDbName);
          _monitor.setTxtMessage(msg);
          System.gc();
          try {
            sleep(1000); // give the UI a chance to refresh between files
          } catch (InterruptedException e) {
          }
        }
        if (_taxMatcher != null) {
          _taxMatcher.dumpTaxonNotFound(LOGGER);
        }
        // second pass: create data volumes
        if (!runOk) {
          throw new Exception(si.msg);
        }
        if (_checkInputFiles && totSeq == 0) {
          throw new Exception("Fasta files do not contain any sequences.");
        }
        msg = FORMAT_MSG3_HEADER;
        _monitor.setTxtMessage(msg);
        volumes = Utils.createFileVolumes(formattedDbList, dbLocation, _dbName,
            (long) _volumeSize * Utils.GIGA, true);
        if (volumes == null) {
          throw new Exception("Unable to create data volumes.");
        }
        // create the volumes.ok file
        try {
          volumeOkFile.createNewFile();
        } catch (Exception ex) {
          // Do not stop the process if the volumes.ok file is not created
          LOGGER.warn("Unable to create the " + PTaskFormatDB.VOL_FILES_OK
              + " file for " + volumeOkFile.getAbsolutePath(), ex);
        }
      } // end test the volumes.ok file

      // third pass: start formatdb process
      values[0] = volumes.toString();
      values[1] = null;
      values[2] = null;
      msg = FORMAT_MSG2_HEADER.format(values);
      _monitor.setTxtMessage(msg);
      // NOTE(review): "> 1" only matches "formatdb" at index >= 2 (e.g. inside
      // a path); a bare "formatdb" command (index 0) would get makeblastdb
      // params. Looks like it should be "!= -1" — confirm before changing.
      if (_formatDBCmd.indexOf("formatdb") > 1) {
        params = prepareParamsFormatDB(dbPath, _isProteic, _useNcbiIdFormat,
            _dbName, volumes);
      } else {
        params = prepareParamsMakeBlastDB(dbPath, _isProteic, _useNcbiIdFormat,
            _dbName, volumes);
      }
      tim = System.currentTimeMillis();
      proc = executor.executeAndReturn(_formatDBCmd, params);
      // Poll the external process once a second until it exits or the user aborts.
      formatdbrunning = true;
      stopped = false;
      while (formatdbrunning && (!stopped)) {
        formatdbrunning = false;
        try {
          exitCode = proc.exitValue();
        } catch (IllegalThreadStateException ex) {
          formatdbrunning = true; // still running
        }
        stopped = (LoggerCentral.processAborted());
        Thread.sleep(1000);
      }
      proc.destroy();
      if (stopped) {
        exitCode = DBMSExecNativeCommand.EXEC_INTERRUPTED;
      }
      LoggerCentral.info(LOGGER, _formatDBCmd + " exit code: " + exitCode);
      // figures out if something wrong occurs
      if (exitCode == 0) {
        msg += FORMAT_MSG_OK;
        _monitor.setTxtMessage(msg);
        // LoggerCentral.info(LOGGER,
        // "formatting time: "+((System.currentTimeMillis()-tim)/1000)+" s");
      } else if (exitCode == DBMSExecNativeCommand.EXEC_INTERRUPTED) {
        _monitor.setErrMsg("Job cancelled.");
        runOk = false;
      } else {
        _monitor.setErrMsg("Unable to format sequence file (FormatDB error).");
        runOk = false;
      }
    } catch (Exception e) {
      LoggerCentral.error(LOGGER, "Unable to run formatdb: " + e);
      _monitor.setErrMsg(e.getMessage());
      runOk = false;
    } finally {
      // Always release the process streams (best effort).
      if (proc != null) {
        try {
          proc.getErrorStream().close();
        } catch (Exception e) {
        } // not bad
        try {
          proc.getInputStream().close();
        } catch (Exception e) {
        } // not bad
        try {
          proc.getOutputStream().close();
        } catch (Exception e) {
        } // not bad
      }
    }
    System.gc();
    if (runOk) {
      // NOTE(review): "!_isProteic" disagrees with the "_isProteic" passed to
      // getBlastAliasFilePath just below — possibly a latent bug; preserved
      // as-is, confirm against PTaskMakeBlastAlias's parameter meaning.
      runOk = PTaskMakeBlastAlias.prepareAliasFile(_dbPath, _dbName,
          !_isProteic);
      fName = PTaskMakeBlastAlias.getBlastAliasFilePath(_dbPath, _dbName,
          _isProteic);
      PAbstractTask.setTaskOkForFile(fName);
    }
    if (_taxMatcher != null)
      _taxMatcher.closeTaxonMatcher();
    _success = runOk;
    _monitor.setTxtMessage("");
  }

  /** Installs the optional db-xref tag configuration, when provided. */
  private void setDbXrefTagConfiguration() {
    if (_dbxrefsConfig != null) {
      SeqIOUtils.setDbXrefTagManager(_dbxrefsConfig);
    }
  }

  public void run() {
    LoggerCentral.info(LOGGER, "Start making BLAST db processing");
    if (_monitor.setJobRunnig(true)) {
      setDbXrefTagConfiguration();
      doJob();
      DicoTermQuerySystem.closeDicoTermQuerySystem();
    }
    _monitor.setJobRunnig(false);
    _monitor.jobDone(_success);
    LoggerCentral.info(LOGGER, "Done making BLAST db processing");
  }

  /**
   * Scans a Fasta file and reports every sequence ID (the first
   * whitespace-free token after '&gt;') to the monitor.
   *
   * @throws Exception on I/O error reading the file
   */
  private void countSequence(File f, FormatDBMonitor monitor) throws Exception {
    FileInputStream fis = null;
    BufferedReader reader = null;
    String line, id;
    int i, size, idx;

    try {
      fis = new FileInputStream(f);
      reader = new BufferedReader(new InputStreamReader(fis));
      while ((line = reader.readLine()) != null) {
        // FIX: skip empty lines; the original called line.charAt(0)
        // unconditionally and threw StringIndexOutOfBoundsException on any
        // blank line in the file.
        if (line.length() == 0) {
          continue;
        }
        if (line.charAt(0) == '>') {
          i = 1;
          size = line.length();
          // some Fasta may contains space between > and the seqId
          for (i = 1; i < size; i++) {
            if (line.charAt(i) != ' ') {
              break;
            }
          }
          idx = line.indexOf(' ', i);
          if (idx == -1)
            idx = line.length();
          id = line.substring(i, idx);
          monitor.seqFound(id);
        }
      }
    } finally {
      // Closing the underlying stream is enough; the reader wraps it.
      if (fis != null) {
        try {
          fis.close();
        } catch (Exception ex) {
        }
      }
    }
  }

  /** Per-input-file state shared between the check and volume passes. */
  private class SeqInfo {
    int     totSeq;          // sequences found in the file
    int     discardSeq;      // redundant IDs discarded
    @SuppressWarnings("unused")
    int     fileType;        // SeqIOUtils format code
    boolean converted;       // true when a Fasta conversion was done
    String  msg;             // progress message accumulated for this file
    String  dbFileName;      // source file path
    String  preparedDbName;  // converted Fasta file path
  }
}
package cn.shuto.maximo.tool.migration.app;

import java.io.File;
import java.io.IOException;
import java.io.Writer;
import java.sql.Clob;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.ArrayList;
import java.util.List;
import java.util.logging.Logger;

import cn.shuto.maximo.tool.migration.app.bean.MaxApps;
import cn.shuto.maximo.tool.migration.app.bean.MaxMenu;
import cn.shuto.maximo.tool.migration.app.bean.MaxModules;
import cn.shuto.maximo.tool.migration.app.bean.MaxPresentation;
import cn.shuto.maximo.tool.migration.app.bean.Maxlabels;
import cn.shuto.maximo.tool.migration.app.bean.Sigoption;
import cn.shuto.maximo.tool.system.SystemEnvironmental;
import cn.shuto.maximo.tool.util.CommonUtil;
import cn.shuto.maximo.tool.util.DBUtil;
import cn.shuto.maximo.tool.util.SerializeUtil;

/**
 * Exports Maximo application/module definitions (maxapps, maxmodules, maxmenu,
 * maxlabels, sigoption, maxpresentation) to serialized package files, and
 * imports them back into a target Maximo database.
 *
 * FIX: every {@code _log.info("} literal in the original source was truncated
 * (the message text — presumably non-ASCII — was stripped), leaving
 * unterminated string literals that did not compile. The log messages below
 * are reconstructed in English; the surrounding logic is unchanged.
 * NOTE(review): confirm the reconstructed message wording against VCS history
 * if the original texts matter.
 */
public class AppMigration {
    private static Logger _log = Logger.getLogger(AppMigration.class.getName());

    // Package-relative locations of the serialized export files.
    private String APPFILEPATH = "\\package\\app\\app.mtep";
    private String MODLESFILEPATH = "\\package\\app\\modules.mtep";

    private static final String SELECTMAXMODULES = "select MODULE, DESCRIPTION, MAXMODULESID from maxmodules where module = ?";
    private static final String SELECTMAXMODULESMENUS = "select MENUTYPE, MODULEAPP, POSITION, SUBPOSITION, ELEMENTTYPE, KEYVALUE, HEADERDESCRIPTION, URL, VISIBLE, IMAGE, ACCESSKEY, TABDISPLAY, MAXMENUID from maxmenu where menutype = 'MODULE' and elementtype in ( 'HEADER','MODULE' ) and moduleapp = ?";
    private static final String SELECTMAXAPPS = "select APP, DESCRIPTION, APPTYPE, RESTRICTIONS, ORDERBY, ORIGINALAPP, CUSTAPPTYPE, MAINTBNAME, MAXAPPSID, ISMOBILE from maxapps where app = ?";
    private static final String SELECTMAXAPPSMENUS = "select MENUTYPE, MODULEAPP, POSITION, SUBPOSITION, ELEMENTTYPE, KEYVALUE, HEADERDESCRIPTION, URL, VISIBLE, IMAGE, ACCESSKEY, TABDISPLAY, MAXMENUID from maxmenu where KEYVALUE=? or MODULEAPP=?";
    private static final String SELECTSIGOPTION = "select APP, OPTIONNAME, DESCRIPTION, ESIGENABLED, VISIBLE, ALSOGRANTS, ALSOREVOKES, PREREQUISITE, SIGOPTIONID, LANGCODE, HASLD from sigoption where app = ?";
    private static final String SELECTMAXLABELS = "select APP, ID, PROPERTY, VALUE, MAXLABELSID from maxlabels where app = ?";
    private static final String SELECTMAXPRESENTATION = "select app, maxpresentationid, presentation from maxpresentation where app = ?";

    private Connection conn = null;
    PreparedStatement maxmodulesST = null;
    PreparedStatement maxmodulesmenusST = null;
    PreparedStatement maxappsST = null;
    PreparedStatement maxappsmenusST = null;
    PreparedStatement sigoptionST = null;
    PreparedStatement maxlabelsST = null;
    PreparedStatement maxpresentationST = null;
    Statement importST = null;

    public AppMigration() {
        conn = DBUtil.getInstance()
                .getMaximoConnection(SystemEnvironmental.getInstance().getStringParam("-maximopath"));
        if (conn != null) {
            try {
                maxmodulesST = conn.prepareStatement(SELECTMAXMODULES);
                maxmodulesmenusST = conn.prepareStatement(SELECTMAXMODULESMENUS);
                maxappsST = conn.prepareStatement(SELECTMAXAPPS);
                maxappsmenusST = conn.prepareStatement(SELECTMAXAPPSMENUS);
                sigoptionST = conn.prepareStatement(SELECTSIGOPTION);
                maxlabelsST = conn.prepareStatement(SELECTMAXLABELS);
                maxpresentationST = conn.prepareStatement(SELECTMAXPRESENTATION);
                importST = conn.createStatement();
            } catch (SQLException e) {
                e.printStackTrace();
            }
        }
    }

    /**
     * Imports modules then apps from the serialized package files; rolls the
     * connection back on any failure and always closes the JDBC resources.
     */
    public void importApp() {
        try {
            _log.info("importing modules ...");
            importModules();
            _log.info("importing applications ...");
            importApps();
        } catch (SQLException e) {
            try {
                conn.rollback();
            } catch (SQLException e1) {
                e1.printStackTrace();
            }
            e.printStackTrace();
        } catch (IOException e) {
            try {
                conn.rollback();
            } catch (SQLException e1) {
                e1.printStackTrace();
            }
            e.printStackTrace();
        } finally {
            closeResource();
        }
    }

    /**
     * Imports every MaxApps bean from the app package file: clears existing
     * rows, batch-inserts the app and its children, commits, then streams the
     * presentation XML into the CLOB column.
     *
     * @throws SQLException on any database error
     * @throws IOException  on CLOB write error
     */
    private void importApps() throws SQLException, IOException {
        List<MaxApps> applist = SerializeUtil.readObjectForList(
                new File(SystemEnvironmental.getInstance().getStringParam("-importpath") + APPFILEPATH));
        if (applist != null && applist.size() > 0) {
            for (MaxApps app : applist) {
                clearMaxApps(app);
                _log.info("importing app: " + app.getApp());
                _log.info("inserting maxapps row ...");
                importST.addBatch(app.toInsertSql());
                _log.info("inserting maxpresentation row ...");
                importST.addBatch(app.getMaxPresentation().toInsertSql());
                _log.info("inserting maxlabels rows ...");
                List<Maxlabels> maxlabels = app.getMaxlabels();
                if (maxlabels != null && maxlabels.size() > 0) {
                    for (Maxlabels maxlabel : maxlabels) {
                        importST.addBatch(maxlabel.toInsertSql());
                    }
                }
                _log.info("inserting maxmenu rows ...");
                List<MaxMenu> maxMenus = app.getMaxMenus();
                if (maxMenus != null && maxMenus.size() > 0) {
                    for (MaxMenu maxMenu : maxMenus) {
                        importST.addBatch(maxMenu.toInsertSql());
                    }
                }
                _log.info("inserting sigoption rows ...");
                List<Sigoption> Sigoptions = app.getSigoptions();
                if (Sigoptions != null && Sigoptions.size() > 0) {
                    for (Sigoption sigoption : Sigoptions) {
                        importST.addBatch(sigoption.toInsertSql());
                    }
                }
                importST.executeBatch();
                conn.commit();
                // write the presentation XML into the CLOB column
                insertXMLToMaxPresentation(app);
            }
        }
    }

    /**
     * Streams the presentation XML into the maxpresentation CLOB using a
     * SELECT ... FOR UPDATE cursor (Oracle-style CLOB update).
     *
     * @param app the app whose presentation is written
     * @throws SQLException on any database error
     * @throws IOException  on CLOB write error
     */
    private void insertXMLToMaxPresentation(MaxApps app) throws SQLException, IOException {
        _log.info("updating presentation XML for app: " + app.getApp());
        String updatePresentationSql = "SELECT presentation from maxpresentation where app = ? for update";
        PreparedStatement pstmt = conn.prepareStatement(updatePresentationSql);
        pstmt.setString(1, app.getApp());
        ResultSet rs = pstmt.executeQuery();
        while (rs.next()) {
            // oracle.sql.CLOB
            Clob clob = rs.getClob(1);
            Writer writer = clob.setCharacterStream(1);
            writer.write(app.getMaxPresentation().getPresentation());
            writer.close();
        }
        rs.close();
        pstmt.close();
    }

    private static final String DELETEMAXAPPS = "delete maxapps where app = '%s'";
    private static final String DELETEMAXPRESENTATION = "delete from maxpresentation where APP= '%s'";
    private static final String DELETEMAXLABELS = "delete from maxlabels where app='%s'";
    private static final String DELETEMAXMENU = "delete from Maxmenu where KEYVALUE='%s' or MODULEAPP='%s'";
    private static final String DELETESIGOPTION = "delete from Sigoption where APP='%s'";

    /**
     * Deletes all existing rows belonging to the given app before re-import.
     * NOTE(review): app names are interpolated into the DELETE statements via
     * String.format — acceptable only because they come from trusted package
     * files; prefer PreparedStatement if that ever changes.
     */
    private void clearMaxApps(MaxApps app) {
        _log.info("clearing existing rows for app: " + app.getApp());
        try {
            importST.addBatch(String.format(DELETEMAXAPPS, app.getApp()));
            importST.addBatch(String.format(DELETEMAXPRESENTATION, app.getApp()));
            importST.addBatch(String.format(DELETEMAXLABELS, app.getApp()));
            importST.addBatch(String.format(DELETEMAXMENU, app.getApp(), app.getApp()));
            importST.addBatch(String.format(DELETESIGOPTION, app.getApp()));
            importST.executeBatch();
            conn.commit();
        } catch (SQLException e) {
            try {
                conn.rollback();
            } catch (SQLException e1) {
                e1.printStackTrace();
            }
            e.printStackTrace();
        }
        _log.info("cleared existing rows for app: " + app.getApp());
    }

    /**
     * Imports every MaxModules bean (and its menus) from the module package
     * file; one batch/commit covers all modules.
     *
     * @throws SQLException on any database error
     */
    private void importModules() throws SQLException {
        List<MaxModules> modulelist = SerializeUtil.readObjectForList(
                new File(SystemEnvironmental.getInstance().getStringParam("-importpath") + MODLESFILEPATH));
        if (modulelist != null && modulelist.size() > 0) {
            for (MaxModules module : modulelist) {
                clearMaxModules(module);
                _log.info("inserting maxmodules row ...");
                importST.addBatch(module.toInsertSql());
                _log.info("inserting module menu rows ...");
                List<MaxMenu> maxMenus = module.getMaxMenus();
                if (maxMenus != null && maxMenus.size() > 0) {
                    for (MaxMenu maxMenu : maxMenus) {
                        importST.addBatch(maxMenu.toInsertSql());
                    }
                }
            }
            importST.executeBatch();
            conn.commit();
        }
    }

    private static final String DELETEMAXMODULES = "delete maxmodules where module = '%s'";
    private static final String DELETEMAXMODULESMENUS = "delete maxmenu where menutype = 'MODULE' and elementtype in ( 'HEADER','MODULE' ) and moduleapp = '%s'";

    /** Deletes existing rows belonging to the given module before re-import. */
    private void clearMaxModules(MaxModules module) {
        _log.info("clearing existing rows for module: " + module.getModule());
        try {
            importST.addBatch(String.format(DELETEMAXMODULES, module.getModule()));
            importST.addBatch(String.format(DELETEMAXMODULESMENUS, module.getModule()));
            importST.executeBatch();
            conn.commit();
        } catch (SQLException e) {
            try {
                conn.rollback();
            } catch (SQLException e1) {
                e1.printStackTrace();
            }
            e.printStackTrace();
        }
        _log.info("cleared existing rows for module: " + module.getModule());
    }

    /**
     * Exports the requested modules and/or apps to the package files.
     *
     * @param modules comma-separated module names (may be null/empty)
     * @param apps    comma-separated app names (may be null/empty)
     */
    public void exportApp(String modules, String apps) {
        try {
            if (modules != null && !"".equals(modules)) {
                _log.info("exporting modules: " + modules);
                List<MaxModules> moduleList = exportModules(CommonUtil.buildExportObjects(modules));
                SerializeUtil.writeObject(moduleList,
                        new File(SystemEnvironmental.getInstance().getStringParam("-packagepath") + MODLESFILEPATH));
            }
            if (apps != null && !"".equals(apps)) {
                _log.info("exporting apps: " + apps);
                List<MaxApps> appList = exportApps(CommonUtil.buildExportObjects(apps));
                SerializeUtil.writeObject(appList,
                        new File(SystemEnvironmental.getInstance().getStringParam("-packagepath") + APPFILEPATH));
            }
        } catch (SQLException e) {
            e.printStackTrace();
        } finally {
            closeResource();
        }
    }

    /**
     * Reads the given apps and their children (menus, sigoptions, labels,
     * presentation) from the database.
     *
     * @param apps app names to export
     * @return list of populated MaxApps beans
     * @throws SQLException on any database error
     */
    private List<MaxApps> exportApps(String[] apps) throws SQLException {
        List<MaxApps> list = new ArrayList<MaxApps>();
        for (String app : apps) {
            _log.info("exporting app: " + app);
            maxappsST.setString(1, app);
            ResultSet rs = maxappsST.executeQuery();
            if (rs.next()) {
                // Column 9 (MAXAPPSID) is intentionally skipped — it is
                // regenerated on import.
                MaxApps maxApp = new MaxApps(CommonUtil.NULLTOEMPTY(rs.getString(1)),
                        CommonUtil.NULLTOEMPTY(rs.getString(2)), CommonUtil.NULLTOEMPTY(rs.getString(3)),
                        CommonUtil.NULLTOEMPTY(rs.getString(4)), CommonUtil.NULLTOEMPTY(rs.getString(5)),
                        CommonUtil.NULLTOEMPTY(rs.getString(6)), CommonUtil.NULLTOEMPTY(rs.getString(7)),
                        CommonUtil.NULLTOEMPTY(rs.getString(8)), CommonUtil.NULLTOEMPTY(rs.getString(10)));
                _log.info("exporting app menus ...");
                maxApp.setMaxMenus(exportAppMenus(app));
                _log.info("exporting app sigoptions ...");
                maxApp.setSigoptions(exportAppSigoptions(app));
                _log.info("exporting app labels ...");
                maxApp.setMaxlabels(exportAppMaxLabels(app));
                _log.info("exporting app presentation ...");
                maxApp.setMaxPresentation(exportAppMaxPresentation(app));
                list.add(maxApp);
            }
            rs.close();
        }
        return list;
    }

    /**
     * Reads the presentation row for an app.
     *
     * @param app app name
     * @return populated MaxPresentation, or null when no row exists
     * @throws SQLException on any database error
     */
    private MaxPresentation exportAppMaxPresentation(String app) throws SQLException {
        maxpresentationST.setString(1, app);
        ResultSet rs = maxpresentationST.executeQuery();
        try {
            if (rs.next()) {
                return new MaxPresentation(CommonUtil.NULLTOEMPTY(rs.getString(1)),
                        CommonUtil.NULLTOEMPTY(rs.getString(3)));
            }
            return null;
        } finally {
            // FIX: the original placed rs.close() after the return statement,
            // leaking the ResultSet whenever a row was found.
            rs.close();
        }
    }

    /**
     * Reads the maxlabels rows for an app.
     *
     * @param app app name
     * @return list of Maxlabels beans (possibly empty)
     * @throws SQLException on any database error
     */
    private List<Maxlabels> exportAppMaxLabels(String app) throws SQLException {
        List<Maxlabels> list = new ArrayList<Maxlabels>();
        maxlabelsST.setString(1, app);
        ResultSet rs = maxlabelsST.executeQuery();
        while (rs.next()) {
            list.add(new Maxlabels(CommonUtil.NULLTOEMPTY(rs.getString(1)), CommonUtil.NULLTOEMPTY(rs.getString(2)),
                    CommonUtil.NULLTOEMPTY(rs.getString(3)), CommonUtil.NULLTOEMPTY(rs.getString(4))));
        }
        rs.close();
        return list;
    }

    /**
     * Reads the sigoption rows for an app.
     *
     * @param app app name
     * @return list of Sigoption beans (possibly empty)
     * @throws SQLException on any database error
     */
    private List<Sigoption> exportAppSigoptions(String app) throws SQLException {
        List<Sigoption> list = new ArrayList<Sigoption>();
        sigoptionST.setString(1, app);
        ResultSet rs = sigoptionST.executeQuery();
        while (rs.next()) {
            // Column 9 (SIGOPTIONID) is intentionally skipped — regenerated on import.
            list.add(new Sigoption(CommonUtil.NULLTOEMPTY(rs.getString(1)), CommonUtil.NULLTOEMPTY(rs.getString(2)),
                    CommonUtil.NULLTOEMPTY(rs.getString(3)), rs.getInt(4), rs.getInt(5),
                    CommonUtil.NULLTOEMPTY(rs.getString(6)), CommonUtil.NULLTOEMPTY(rs.getString(7)),
                    CommonUtil.NULLTOEMPTY(rs.getString(8)), CommonUtil.NULLTOEMPTY(rs.getString(10)),
                    rs.getInt(11)));
        }
        rs.close();
        return list;
    }

    /**
     * Reads the menu rows referring to an app (by KEYVALUE or MODULEAPP).
     *
     * @param app app name
     * @return list of MaxMenu beans (possibly empty)
     * @throws SQLException on any database error
     */
    private List<MaxMenu> exportAppMenus(String app) throws SQLException {
        List<MaxMenu> list = new ArrayList<MaxMenu>();
        maxappsmenusST.setString(1, app);
        maxappsmenusST.setString(2, app);
        ResultSet rs = maxappsmenusST.executeQuery();
        while (rs.next()) {
            list.add(new MaxMenu(CommonUtil.NULLTOEMPTY(rs.getString(1)), CommonUtil.NULLTOEMPTY(rs.getString(2)),
                    rs.getInt(3), rs.getInt(4), CommonUtil.NULLTOEMPTY(rs.getString(5)),
                    CommonUtil.NULLTOEMPTY(rs.getString(6)), CommonUtil.NULLTOEMPTY(rs.getString(7)),
                    CommonUtil.NULLTOEMPTY(rs.getString(8)), rs.getInt(9), CommonUtil.NULLTOEMPTY(rs.getString(10)),
                    CommonUtil.NULLTOEMPTY(rs.getString(11)), CommonUtil.NULLTOEMPTY(rs.getString(12))));
        }
        rs.close();
        return list;
    }

    /**
     * Reads the given modules and their menus from the database.
     *
     * @param modules module names to export
     * @return list of populated MaxModules beans
     * @throws SQLException on any database error
     */
    private List<MaxModules> exportModules(String[] modules) throws SQLException {
        List<MaxModules> list = new ArrayList<MaxModules>();
        for (String module : modules) {
            _log.info("--" + module);
            maxmodulesST.setString(1, module);
            ResultSet rs = maxmodulesST.executeQuery();
            if (rs.next()) {
                MaxModules maxModules = new MaxModules(CommonUtil.NULLTOEMPTY(rs.getString(1)),
                        CommonUtil.NULLTOEMPTY(rs.getString(2)));
                _log.info("exporting module menus ...");
                maxModules.setMaxMenus(exportModuleMenus(module));
                list.add(maxModules);
            }
            rs.close();
        }
        return list;
    }

    /**
     * Reads the module-level menu rows for a module.
     *
     * @param module module name
     * @return list of MaxMenu beans (possibly empty)
     * @throws SQLException on any database error
     */
    private List<MaxMenu> exportModuleMenus(String module) throws SQLException {
        List<MaxMenu> list = new ArrayList<MaxMenu>();
        maxmodulesmenusST.setString(1, module);
        ResultSet rs = maxmodulesmenusST.executeQuery();
        while (rs.next()) {
            list.add(new MaxMenu(CommonUtil.NULLTOEMPTY(rs.getString(1)), CommonUtil.NULLTOEMPTY(rs.getString(2)),
                    rs.getInt(3), rs.getInt(4), CommonUtil.NULLTOEMPTY(rs.getString(5)),
                    CommonUtil.NULLTOEMPTY(rs.getString(6)), CommonUtil.NULLTOEMPTY(rs.getString(7)),
                    CommonUtil.NULLTOEMPTY(rs.getString(8)), rs.getInt(9), CommonUtil.NULLTOEMPTY(rs.getString(10)),
                    CommonUtil.NULLTOEMPTY(rs.getString(11)), CommonUtil.NULLTOEMPTY(rs.getString(12))));
        }
        rs.close();
        return list;
    }

    /** Best-effort close of every prepared statement and the batch statement. */
    private void closeResource() {
        try {
            if (maxmodulesST != null)
                maxmodulesST.close();
            if (maxmodulesmenusST != null)
                maxmodulesmenusST.close();
            if (maxappsST != null)
                maxappsST.close();
            if (maxappsmenusST != null)
                maxappsmenusST.close();
            if (sigoptionST != null)
                sigoptionST.close();
            if (maxlabelsST != null)
                maxlabelsST.close();
            if (maxpresentationST != null)
                maxpresentationST.close();
            if (importST != null)
                importST.close();
        } catch (SQLException e) {
            e.printStackTrace();
        }
    }
}
package com.adsdk.sdk.customevents; import com.chartboost.sdk.Chartboost; import com.chartboost.sdk.ChartboostDelegate; import com.chartboost.sdk.Chartboost.CBAgeGateConfirmation; import com.chartboost.sdk.Model.CBError.CBClickError; import com.chartboost.sdk.Model.CBError.CBImpressionError; import android.app.Activity; public class ChartboostFullscreen extends CustomEventFullscreen { private Chartboost chartboost; private Activity activity; private boolean shouldDisplay; @Override public void loadFullscreen(Activity activity, CustomEventFullscreenListener customEventFullscreenListener, String optionalParameters, String trackingPixel) { this.activity = activity; String[] adIdParts = optionalParameters.split(";"); String appId = adIdParts[0]; String appSignature = adIdParts[1]; listener = customEventFullscreenListener; this.trackingPixel = trackingPixel; try { Class.forName("com.chartboost.sdk.Chartboost"); Class.forName("com.chartboost.sdk.ChartboostDelegate"); Class.forName("com.chartboost.sdk.Model.CBError"); } catch (ClassNotFoundException e) { if (listener != null) { listener.onFullscreenFailed(); } return; } chartboost = Chartboost.sharedChartboost(); chartboost.onCreate(activity, appId, appSignature, createListener()); chartboost.onStart(activity); chartboost.cacheInterstitial(); } private ChartboostDelegate createListener() { return new ChartboostDelegate() { @Override public boolean shouldRequestMoreApps() { return false; } @Override public boolean shouldRequestInterstitialsInFirstSession() { return true; } @Override public boolean shouldRequestInterstitial(String arg0) { return true; } @Override public boolean shouldPauseClickForConfirmation(CBAgeGateConfirmation arg0) { return false; } @Override public boolean shouldDisplayMoreApps() { return false; } @Override public boolean shouldDisplayLoadingViewForMoreApps() { return false; } @Override public boolean shouldDisplayInterstitial(String arg0) { return shouldDisplay; } @Override public void 
didShowMoreApps() { } @Override public void didShowInterstitial(String arg0) { reportImpression(); if (listener != null) { listener.onFullscreenOpened(); } shouldDisplay = false; } @Override public void didFailToRecordClick(String arg0, CBClickError arg1) { } @Override public void didFailToLoadMoreApps(CBImpressionError arg0) { } @Override public void didFailToLoadInterstitial(String arg0, CBImpressionError arg1) { if (listener != null) { listener.onFullscreenFailed(); } } @Override public void didDismissMoreApps() { } @Override public void didDismissInterstitial(String arg0) { if (listener != null) { listener.onFullscreenClosed(); } } @Override public void didCloseMoreApps() { } @Override public void didCloseInterstitial(String arg0) { } @Override public void didClickMoreApps() { } @Override public void didClickInterstitial(String arg0) { if (listener != null) { listener.onFullscreenLeftApplication(); } } @Override public void didCacheMoreApps() { } @Override public void didCacheInterstitial(String arg0) { if (listener != null) { listener.onFullscreenLoaded(ChartboostFullscreen.this); } } }; } @Override public void showFullscreen() { if(chartboost!=null && chartboost.hasCachedInterstitial()) { shouldDisplay = true; chartboost.showInterstitial(); } } @Override public void finish() { if(chartboost != null) { chartboost.onStop(activity); chartboost.onDestroy(activity); } chartboost = null; activity = null; super.finish(); } }
package com.android.email.mail.store;

import java.io.IOException;
import java.io.InputStream;
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Date;
import java.util.Locale;

import android.util.Config;
import android.util.Log;

import com.android.email.Email;
import com.android.email.FixedLengthInputStream;
import com.android.email.PeekableInputStream;
import com.android.email.mail.MessagingException;

/**
 * Incremental parser for IMAP server responses. Reads one response at a time
 * from a PeekableInputStream and tokenizes it into atoms, quoted strings,
 * numbers, parenthesized/bracketed lists and literals.
 */
public class ImapResponseParser {
    // SimpleDateFormat is not thread-safe; both instances are guarded by
    // synchronized blocks in ImapList.parseDate() below.
    SimpleDateFormat mDateTimeFormat = new SimpleDateFormat("dd-MMM-yyyy HH:mm:ss Z", Locale.US);
    SimpleDateFormat badDateTimeFormat = new SimpleDateFormat("dd MMM yyyy HH:mm:ss Z", Locale.US);

    PeekableInputStream mIn;
    // Pending literal: while non-null, token parsing is suspended until the
    // caller (or the next parse) drains the literal's bytes.
    InputStream mActiveLiteral;

    public ImapResponseParser(PeekableInputStream in) {
        this.mIn = in;
    }

    /**
     * Reads the next response available on the stream and returns an
     * ImapResponse object that represents it.
     *
     * @return the parsed response; tagged, untagged or continuation request
     * @throws IOException on stream errors or protocol garbage
     */
    public ImapResponse readResponse() throws IOException {
        ImapResponse response = new ImapResponse();
        // Drain any literal left over from a previous response.
        if (mActiveLiteral != null) {
            while (mActiveLiteral.read() != -1)
                ;
            mActiveLiteral = null;
        }
        int ch = mIn.peek();
        if (ch == '*') {
            parseUntaggedResponse();
            readTokens(response);
        } else if (ch == '+') {
            response.mCommandContinuationRequested = parseCommandContinuationRequest();
            readTokens(response);
        } else {
            response.mTag = parseTaggedResponse();
            readTokens(response);
        }
        if (Config.LOGD) {
            if (Email.DEBUG) {
                Log.d(Email.LOG_TAG, "<<< " + response.toString());
            }
        }
        return response;
    }

    /**
     * Fills the response with tokens until end-of-line or until a literal is
     * encountered (literals suspend parsing; see ImapResponse.more()).
     */
    private void readTokens(ImapResponse response) throws IOException {
        response.clear();
        Object token;
        while ((token = readToken()) != null) {
            response.add(token);
            if (mActiveLiteral != null) {
                break;
            }
        }
        // Completed only when we hit end-of-line, not a literal.
        response.mCompleted = token == null;
    }

    /**
     * Reads the next token of the response. The token can be one of: String -
     * for NIL, QUOTED, NUMBER, ATOM. InputStream - for LITERAL.
     * InputStream.available() returns the total length of the stream.
     * ImapResponseList - for PARENTHESIZED LIST. Can contain any of the above
     * elements including List.
     *
     * @return The next token in the response or null if there are no more
     *         tokens.
     * @throws IOException on stream errors
     */
    public Object readToken() throws IOException {
        while (true) {
            Object token = parseToken();
            // Skip stray ")" and "]" tokens, return everything else.
            // Fixed: the original condition used `!a || !b`, which is true for
            // every non-null token, so closing markers were returned instead
            // of skipped.
            if (token == null || !(token.equals(")") || token.equals("]"))) {
                return token;
            }
        }
    }

    private Object parseToken() throws IOException {
        // A pending literal must be fully consumed before tokenizing resumes.
        if (mActiveLiteral != null) {
            while (mActiveLiteral.read() != -1)
                ;
            mActiveLiteral = null;
        }
        while (true) {
            int ch = mIn.peek();
            if (ch == '(') {
                return parseList();
            } else if (ch == '[') {
                return parseSequence();
            } else if (ch == ')') {
                expect(')');
                return ")";
            } else if (ch == ']') {
                expect(']');
                return "]";
            } else if (ch == '"') {
                return parseQuoted();
            } else if (ch == '{') {
                mActiveLiteral = parseLiteral();
                return mActiveLiteral;
            } else if (ch == ' ') {
                expect(' ');
            } else if (ch == '\r') {
                expect('\r');
                expect('\n');
                return null; // end of response line
            } else if (ch == '\n') {
                expect('\n');
                return null; // end of response line (bare LF)
            } else if (ch == '\t') {
                expect('\t');
            } else {
                return parseAtom();
            }
        }
    }

    private boolean parseCommandContinuationRequest() throws IOException {
        expect('+');
        expect(' ');
        return true;
    }

    // * OK [UIDNEXT 175] Predicted next UID
    private void parseUntaggedResponse() throws IOException {
        expect('*');
        expect(' ');
    }

    // 3 OK [READ-WRITE] Select completed.
    private String parseTaggedResponse() throws IOException {
        return readStringUntil(' ');
    }

    /** Parses a parenthesized list: "(" token* ")". */
    private ImapList parseList() throws IOException {
        return parseGroup('(', ")");
    }

    /** Parses a bracketed sequence: "[" token* "]". */
    private ImapList parseSequence() throws IOException {
        return parseGroup('[', "]");
    }

    /**
     * Shared body of parseList/parseSequence: reads tokens until the matching
     * close marker, end-of-line, or a literal (which terminates the group).
     */
    private ImapList parseGroup(char open, String close) throws IOException {
        expect(open);
        ImapList list = new ImapList();
        Object token;
        while (true) {
            token = parseToken();
            if (token == null) {
                break;
            } else if (token instanceof InputStream) {
                list.add(token);
                break;
            } else if (token.equals(close)) {
                break;
            } else {
                list.add(token);
            }
        }
        return list;
    }

    private String parseAtom() throws IOException {
        StringBuffer sb = new StringBuffer();
        int ch;
        while (true) {
            ch = mIn.peek();
            if (ch == -1) {
                throw new IOException("parseAtom(): end of stream reached");
            } else if (ch == '(' || ch == ')' || ch == '{' || ch == ' ' ||
                    ch == '[' || ch == ']' ||
                    // RFC atoms exclude '%', '*' and '\', but flags such as
                    // "\Seen" and "*" do occur in practice, so those characters
                    // are deliberately allowed here.
                    (ch >= 0x00 && ch <= 0x1f) || ch == 0x7f) {
                if (sb.length() == 0) {
                    throw new IOException(String.format("parseAtom(): (%04x %c)", (int)ch, ch));
                }
                return sb.toString();
            } else {
                sb.append((char)mIn.read());
            }
        }
    }

    /**
     * A { has been read; reads the rest of the size string, CRLF, and returns
     * a fixed-length view over the next {@code size} bytes of the stream.
     *
     * @return stream limited to the literal's byte count
     * @throws IOException on stream errors or a malformed size
     */
    private InputStream parseLiteral() throws IOException {
        expect('{');
        int size = Integer.parseInt(readStringUntil('}'));
        expect('\r');
        expect('\n');
        return new FixedLengthInputStream(mIn, size);
    }

    /**
     * A " has been read; reads to the closing quote and returns the content.
     *
     * @return the quoted string without the surrounding quotes
     * @throws IOException on stream errors or premature end of stream
     */
    private String parseQuoted() throws IOException {
        expect('"');
        return readStringUntil('"');
    }

    /** Reads characters up to (and consuming) {@code end}. */
    private String readStringUntil(char end) throws IOException {
        StringBuffer sb = new StringBuffer();
        int ch;
        while ((ch = mIn.read()) != -1) {
            if (ch == end) {
                return sb.toString();
            } else {
                sb.append((char)ch);
            }
        }
        throw new IOException("readQuotedString(): end of stream reached");
    }

    /** Consumes exactly {@code ch} from the stream or throws. */
    private int expect(char ch) throws IOException {
        int d;
        if ((d = mIn.read()) != ch) {
            throw new IOException(String.format("Expected %04x (%c) but got %04x (%c)",
                    (int)ch, ch, d, (char)d));
        }
        return d;
    }

    /**
     * Represents an IMAP LIST response and is also the base class for the
     * ImapResponse.
     */
    public class ImapList extends ArrayList<Object> {
        public ImapList getList(int index) {
            return (ImapList)get(index);
        }

        public Object getObject(int index) {
            return get(index);
        }

        public String getString(int index) {
            return (String)get(index);
        }

        public InputStream getLiteral(int index) {
            return (InputStream)get(index);
        }

        public int getNumber(int index) {
            return Integer.parseInt(getString(index));
        }

        public Date getDate(int index) throws MessagingException {
            try {
                return parseDate(getString(index));
            } catch (ParseException pe) {
                throw new MessagingException("Unable to parse IMAP datetime", pe);
            }
        }

        /**
         * Returns the element following the first element equal to key, or
         * null if the key is absent (keyed IMAP lists alternate key/value).
         */
        public Object getKeyedValue(Object key) {
            for (int i = 0, count = size(); i < count; i++) {
                if (get(i).equals(key)) {
                    return get(i + 1);
                }
            }
            return null;
        }

        public ImapList getKeyedList(Object key) {
            return (ImapList)getKeyedValue(key);
        }

        public String getKeyedString(Object key) {
            return (String)getKeyedValue(key);
        }

        public InputStream getKeyedLiteral(Object key) {
            return (InputStream)getKeyedValue(key);
        }

        public int getKeyedNumber(Object key) {
            return Integer.parseInt(getKeyedString(key));
        }

        public Date getKeyedDate(Object key) throws MessagingException {
            try {
                String value = getKeyedString(key);
                if (value == null) {
                    return null;
                }
                return parseDate(value);
            } catch (ParseException pe) {
                throw new MessagingException("Unable to parse IMAP datetime", pe);
            }
        }

        // Tries the standard IMAP date format first, then a lenient fallback
        // used by some servers. Synchronized: SimpleDateFormat isn't thread-safe.
        private Date parseDate(String value) throws ParseException {
            try {
                synchronized (mDateTimeFormat) {
                    return mDateTimeFormat.parse(value);
                }
            } catch (Exception e) {
                synchronized (badDateTimeFormat) {
                    return badDateTimeFormat.parse(value);
                }
            }
        }
    }

    /**
     * Represents a single response from the IMAP server. Tagged responses will
     * have a non-null tag. Untagged responses will have a null tag. The object
     * will contain all of the available tokens at the time the response is
     * received. In general, it will either contain all of the tokens of the
     * response or all of the tokens up until the first LITERAL. If the object
     * does not contain the entire response the caller must call more() to
     * continue reading the response until more returns false.
     */
    public class ImapResponse extends ImapList {
        private boolean mCompleted;

        boolean mCommandContinuationRequested;
        String mTag;

        /**
         * Continues reading a response that was suspended at a literal.
         *
         * @return false once the response is fully read
         */
        public boolean more() throws IOException {
            if (mCompleted) {
                return false;
            }
            readTokens(this);
            return true;
        }

        /** Returns the server ALERT text, or null if this is not an alert. */
        public String getAlertText() {
            if (size() > 1 && "[ALERT]".equals(get(1))) {
                StringBuffer sb = new StringBuffer();
                for (int i = 2, count = size(); i < count; i++) {
                    sb.append(get(i).toString());
                    sb.append(' ');
                }
                return sb.toString();
            } else {
                return null;
            }
        }

        public String toString() {
            return "#" + mTag + "# " + super.toString();
        }
    }
}
package com.anmipo.android.trentobus.view; import android.content.Context; import android.content.res.Resources; import android.graphics.Canvas; import android.graphics.Color; import android.graphics.Paint; import android.graphics.Paint.Align; import android.graphics.Paint.Style; import android.graphics.Point; import android.graphics.Rect; import android.graphics.Region.Op; import android.graphics.Typeface; import android.graphics.drawable.Drawable; import android.os.Bundle; import android.os.Parcelable; import android.text.TextPaint; import android.text.TextUtils; import android.text.TextUtils.TruncateAt; import android.util.AttributeSet; import android.util.TypedValue; import android.view.GestureDetector; import android.view.MotionEvent; import android.view.View; import android.widget.Scroller; import com.anmipo.android.trentobus.R; public class TimetableView extends View { static final String TAG = "Timetable"; // font size for cell text public static final int FONT_SIZE_DP = 16; // horizontal padding for all cells, in px private static final int CELL_PADDING_X = 5; // state-saving key private static final String STATE_OFFSET_X = "TimetableView.offsetX"; private static final String STATE_OFFSET_Y = "TimetableView.offsetY"; // table data private String[] fixedCol; private String[] fixedRow; private String[][] cells; // fixedCol content ellipsized to fit the column private String[] fixedColEllipsized; // table dimensions private int rowCount; private int colCount; // paints for table parts private TextPaint fixedColumnPaint; private TextPaint fixedRowPaint; private TextPaint cellPaint; // background drawables for fixed and normal cells private Drawable fixedBackgroundDrawable; private Drawable cellBackgroundDrawable; // viewport private int topRow = 0; // currently visible top row number private int leftCol = 0; // currently visible left column number private int offsetX = 0; // table shift in relation to the viewport private int offsetY = 0; // table shift in 
relation to the viewport // drawing sizes / dimensions (calculated based on View size and paints) private int width, height; // view/canvas size private int fixedColWidth; // width of the left (fixed) column private int fixedRowHeight; private int maxOffsetX; // max allowed horizontal offset private int maxOffsetY; // max allowed vertical offset private int colWidth; // cell columns width private int rowHeight; // height of all rows private GestureDetector gestureDetector; private Scroller scroller; private OnSizeChangedListener onSizeChangedListener = null; public interface OnSizeChangedListener { public void onSizeChanged(int width, int height); } private OnCellClickListener onCellClickListener = null; public interface OnCellClickListener { /** * Called when the user clicks/taps a table cell. * <code>col</code> and <code>row</code> values are between -1 and * number of columns/rows; -1 indicates the fixed column/row. * @param col * @param row */ public void onCellClick(int col, int row); } public TimetableView(Context context, AttributeSet attrs) { super(context, attrs); initResources(context); initPaints(); setSaveEnabled(true); gestureDetector = new GestureDetector(context, new GestureListener()); scroller = new Scroller(context); // TODO: remove this debug data setData(new String[]{"row1", "row2", "row3", "row4", "row5", "row6"}, new String[]{"col1", "col2", "col3", "col4"}, new String[][] {{"12:34", "56:78", "90:12", "34:56"}, {"12:34", "56:78", "*", "34:56"}, {"12:34", "56:78", "90:12", "34:56"}, {"12:34", "56:78", "90:12", "34:56"}, {"12:34", "56:78", "90:12", "34:56"}, {"12:34", "56:78", "90:12", "34:56"}}); } private void initResources(Context context) { Resources res = context.getResources(); fixedBackgroundDrawable = res.getDrawable(R.drawable.fixed_bg); cellBackgroundDrawable = res.getDrawable(R.drawable.cell_bg); } private void initPaints() { float fontSizePixels = TypedValue .applyDimension(TypedValue.COMPLEX_UNIT_DIP, FONT_SIZE_DP, 
getContext().getResources().getDisplayMetrics()); fixedColumnPaint = new TextPaint(); fixedColumnPaint.setTextAlign(Align.LEFT); fixedColumnPaint.setTextSize(fontSizePixels); fixedColumnPaint.setTypeface(Typeface.DEFAULT_BOLD); fixedColumnPaint.setAntiAlias(true); fixedRowPaint = new TextPaint(); fixedRowPaint.setTextAlign(Align.CENTER); fixedRowPaint.setTextSize(fontSizePixels); fixedRowPaint.setTypeface(Typeface.DEFAULT_BOLD); fixedRowPaint.setAntiAlias(true); cellPaint = new TextPaint(); cellPaint.setColor(Color.BLACK); cellPaint.setTextSize(fontSizePixels); cellPaint.setStyle(Style.STROKE); cellPaint.setTextAlign(Align.CENTER); cellPaint.setAntiAlias(true); updateChildrenLayout(); ellipsizeTexts(); } @Override protected void onMeasure(int widthMeasureSpec, int heightMeasureSpec) { int measuredWidth = MeasureSpec.getSize(widthMeasureSpec); int measuredHeight = MeasureSpec.getSize(heightMeasureSpec); setMeasuredDimension(measuredWidth, measuredHeight); } @Override protected void onSizeChanged(int w, int h, int oldw, int oldh) { super.onSizeChanged(w, h, oldw, oldh); width = w; height = h; updateChildrenLayout(); if (onSizeChangedListener != null) { onSizeChangedListener.onSizeChanged(w, h); } } /** * (Re)evaluates dimensions of child elements (cell width/height). */ protected void updateChildrenLayout() { setColWidth((int)cellPaint.measureText("88:88") + 2 * CELL_PADDING_X); setRowHeight((int)(1.5f * cellPaint.getTextSize())); setFixedColWidth((int) measureFixedColumnWidth()); setFixedRowHeight(rowHeight); maxOffsetX = colCount * colWidth - (width - fixedColWidth); maxOffsetY = rowCount * rowHeight - (height - fixedRowHeight); // for small tables max values can become negative, forbid this. 
if (maxOffsetX < 0) maxOffsetX = 0; if (maxOffsetY < 0) maxOffsetY = 0; } protected void ellipsizeTexts() { if (fixedCol != null) { fixedColEllipsized = new String[fixedCol.length]; for (int i = 0; i < fixedCol.length; i++) { fixedColEllipsized[i] = TextUtils.ellipsize(fixedCol[i], fixedColumnPaint, fixedColWidth, TruncateAt.END).toString(); } } } @Override protected void onLayout(boolean changed, int left, int top, int right, int bottom) { super.onLayout(changed, left, top, right, bottom); updateChildrenLayout(); } /** * Calculates the fixed column's width, so that it either * fits all entries, or occupies not more than 50% of the view; * @return */ protected float measureFixedColumnWidth() { float result = 0; for (int i = 0; i < rowCount; i++) { float w = fixedColumnPaint.measureText(fixedCol[i]); if (w > result) { result = w; } } result += 2 * CELL_PADDING_X; return (result <= width/2) ? result : width/2; } @Override protected void onDraw(Canvas canvas) { super.onDraw(canvas); drawFixedColumn(canvas); drawFixedRow(canvas); drawCells(canvas); drawCorner(canvas); } protected void drawCorner(Canvas canvas) { // draw top-left empty corner canvas.clipRect(0, 0, width, height, Op.REPLACE); fixedBackgroundDrawable.setBounds(0, 0, fixedColWidth, fixedRowHeight); fixedBackgroundDrawable.draw(canvas); } protected void drawCells(Canvas canvas) { canvas.clipRect( fixedColWidth, fixedRowHeight, width, height, Op.REPLACE); int textOffset = getTextCenterOffset(rowHeight, cellPaint); int cellCenterOffset = colWidth / 2; int x = fixedColWidth + colWidth * leftCol - offsetX; int y0 = topRow * rowHeight + fixedRowHeight - offsetY; int xIndex = leftCol; while ((x < width) && (xIndex < colCount)) { int yIndex = topRow; int y = y0; int cellRight = x + colWidth; while ((y < height) && (yIndex < rowCount)) { int cellBottom = y + rowHeight; cellBackgroundDrawable.setBounds(x, y, cellRight, cellBottom); cellBackgroundDrawable.draw(canvas); canvas.drawText(cells[yIndex][xIndex], x + 
cellCenterOffset, y + textOffset, cellPaint); y += rowHeight; yIndex++; } x += colWidth; xIndex++; } } protected void drawFixedRow(Canvas canvas) { canvas.clipRect(fixedColWidth, 0, width, fixedRowHeight, Op.REPLACE); int textOffsetX = colWidth / 2; int textOffsetY = getTextCenterOffset(fixedRowHeight, fixedRowPaint); int x = fixedColWidth + colWidth * leftCol - offsetX; int index = leftCol; while ((x < width) && (index < colCount)) { fixedBackgroundDrawable.setBounds(x, 0, x + colWidth, fixedRowHeight); fixedBackgroundDrawable.draw(canvas); canvas.drawText(fixedRow[index], x + textOffsetX, textOffsetY, fixedRowPaint); x += colWidth; index++; } } protected void drawFixedColumn(Canvas canvas) { canvas.clipRect(0, fixedRowHeight, fixedColWidth, height, Op.REPLACE); int textOffsetY = getTextCenterOffset(rowHeight, fixedColumnPaint); int y = rowHeight * topRow + fixedRowHeight - offsetY; int index = topRow; while ((y < height) && (index < rowCount)) { Rect bounds = new Rect(0, y, fixedColWidth, (y + rowHeight)); fixedBackgroundDrawable.setBounds(bounds); fixedBackgroundDrawable.draw(canvas); canvas.drawText(fixedColEllipsized[index], CELL_PADDING_X, y + textOffsetY , fixedColumnPaint); y += rowHeight; index++; } canvas.drawLine(fixedColWidth, 0, fixedColWidth, y, fixedColumnPaint); } /** * Returns the vertical offset for text to be centered in a line with * <code>lineHeight</code> height. * @param lineHeight * Height of line, in pixels. * @param paint * Paint defining text drawing parameters. 
* @return */ protected static int getTextCenterOffset(int lineHeight, Paint paint) { /* * Full form: * paint.descent() * - (paint.descent() - paint.ascent())/2 * + lineHeight/2; */ return (int)(lineHeight - paint.ascent() - paint.descent()) / 2; } public void setData(String[] fixedCol, String[] fixedRow, String[][] cells) { this.fixedCol = fixedCol; this.fixedRow = fixedRow; this.cells = cells; colCount = fixedRow.length; rowCount = fixedCol.length; if (cells.length != rowCount || cells[0].length != colCount) { throw new IllegalArgumentException("Table dimensions do not match"); } onLayout(true, 0, 0, width, height); postInvalidate(); } @Override public boolean onTouchEvent(MotionEvent event) { return gestureDetector.onTouchEvent(event); } private class GestureListener extends GestureDetector.SimpleOnGestureListener { @Override public boolean onScroll(MotionEvent e1, MotionEvent e2, float dx, float dy) { scrollBy((int) dx, (int) dy); return true; } @Override public boolean onDown(MotionEvent ev) { scroller.forceFinished(true); return true; } @Override public boolean onSingleTapUp(MotionEvent e) { Point colRow = coordsToCell((int) e.getX(), (int) e.getY()); if (colRow != null && onCellClickListener != null) { onCellClickListener.onCellClick(colRow.x, colRow.y); } return true; } @Override public boolean onFling(MotionEvent e1, MotionEvent e2, float velocityX, float velocityY) { // allow only single-axis fling, for user's convenience if (Math.abs(velocityX) < Math.abs(velocityY)) { velocityX = 0; } else { velocityY = 0; } scroller.fling(offsetX, offsetY, (int) -velocityX, (int) -velocityY, 0, maxOffsetX, 0, maxOffsetY); // It is necessary to call postInvalidate(), so that // computeScroll() will eventually get called. // (For some reason, simple invalidate() won't work.) postInvalidate(); return true; } } /** * Returns col/row number corresponding to the given graphical coords * (in this view system). For fixed row/col, the value is -1. 
* If the coords do not correspond to any cell, returns null. * @param x * X graphical coordinate within this view. * @param y * Y graphical coordinate within this view. * @return */ protected Point coordsToCell(int x, int y) { int col, row; if (y <= fixedRowHeight) { row = -1; } else { row = (y - fixedRowHeight + offsetY) / rowHeight; } if (x <= fixedColWidth) { col = -1; } else { col = (x - fixedColWidth + offsetX) / colWidth; } return new Point(col, row); } @Override public void computeScroll() { if (scroller.computeScrollOffset()) { setOffsetX(scroller.getCurrX()); setOffsetY(scroller.getCurrY()); // It is necessary to call postInvalidate(), so that // computeScroll() will be called again later. // (For some reason, simple invalidate() won't work.) postInvalidate(); } } @Override public void scrollTo(int x, int y) { setOffsetX(x); setOffsetY(y); postInvalidate(); } @Override public void scrollBy(int dx, int dy) { setOffsetX(offsetX + dx); setOffsetY(offsetY + dy); postInvalidate(); } protected void setOffsetX(int newOffsetX) { if (newOffsetX < 0) { offsetX = 0; } else if (newOffsetX > maxOffsetX) { offsetX = maxOffsetX; } else { offsetX = newOffsetX; } leftCol = offsetX / colWidth; } protected void setOffsetY(int newOffsetY) { if (newOffsetY < 0) { offsetY = 0; } else if (newOffsetY > maxOffsetY) { offsetY = maxOffsetY; } else { offsetY = newOffsetY; } topRow = offsetY / rowHeight; } @Override protected Parcelable onSaveInstanceState() { // super must be called, but always returns null super.onSaveInstanceState(); Bundle bundle = new Bundle(); bundle.putInt(STATE_OFFSET_X, offsetX); bundle.putInt(STATE_OFFSET_Y, offsetY); return bundle; } @Override protected void onRestoreInstanceState(Parcelable state) { super.onRestoreInstanceState(null); Bundle bundle = (Bundle)state; setOffsetX(bundle.getInt(STATE_OFFSET_X, 0)); setOffsetY(bundle.getInt(STATE_OFFSET_Y, 0)); } public void setOnSizeChangedListener(OnSizeChangedListener listener) { onSizeChangedListener = 
listener; } public void setOnCellClickListener(OnCellClickListener listener) { onCellClickListener = listener; } /* * Getters for descendant classes. */ protected int getRowHeight() { return rowHeight; } protected void setRowHeight(int rowHeight) { this.rowHeight = rowHeight; } protected int getFixedColWidth() { return fixedColWidth; } protected void setFixedColWidth(int width) { this.fixedColWidth = width; ellipsizeTexts(); } protected int getColWidth() { return colWidth; } protected void setColWidth(int width) { this.colWidth = width; } protected int getFixedRowHeight() { return fixedRowHeight; } protected void setFixedRowHeight(int height) { this.fixedRowHeight = height; } protected int getTopRow() { return topRow; } protected int getLeftCol() { return leftCol; } protected int getOffsetX() { return offsetX; } protected int getOffsetY() { return offsetY; } protected int getColCount() { return colCount; } protected int getRowCount() { return rowCount; } }
package com.axiastudio.zoefx.persistence;

import com.axiastudio.zoefx.core.Utilities;
import com.axiastudio.zoefx.core.beans.BeanAccess;
import com.axiastudio.zoefx.core.beans.BeanClassAccess;
import com.axiastudio.zoefx.core.db.Database;
import com.axiastudio.zoefx.core.db.Manager;

import javax.persistence.EntityManager;
import javax.persistence.TypedQuery;
import javax.persistence.criteria.*;
import java.lang.annotation.Annotation;
import java.lang.reflect.Field;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.util.*;

/**
 * JPA-backed implementation of {@link Manager}: CRUD and criteria queries for
 * a single entity class on a shared {@link EntityManager}.
 *
 * @param <E> the managed entity type
 */
public class JPAManagerImpl<E> implements Manager<E> {

    private Class<E> entityClass;
    private EntityManager entityManager;

    public JPAManagerImpl(EntityManager em, Class<E> klass) {
        entityClass = klass;
        entityManager = em;
    }

    /**
     * Instantiates a new, unmanaged entity via its no-arg constructor.
     *
     * @return the new instance, or null if instantiation fails (callers must
     *         null-check; this matches the original contract)
     */
    @Override
    public E create() {
        try {
            return entityClass.newInstance();
        } catch (InstantiationException | IllegalAccessException e) {
            e.printStackTrace();
            return null;
        }
    }

    /**
     * Creates a new element for the named one-to-many collection of the
     * entity, using the collection's generic element type.
     */
    @Override
    public Object createRow(String collectionName) {
        Database db = Utilities.queryUtility(Database.class);
        BeanClassAccess beanClassAccess = new BeanClassAccess(entityClass, collectionName);
        Class<?> genericReturnType = beanClassAccess.getGenericReturnType();
        Manager<?> manager = db.createManager(genericReturnType);
        return manager.create();
    }

    /**
     * Persists the entity (after re-linking collection children to it) in its
     * own transaction and returns the managed copy.
     */
    @Override
    public E commit(E entity) {
        parentize(entity);
        EntityManager em = getEntityManager();
        em.getTransaction().begin();
        E merged = em.merge(entity);
        em.getTransaction().commit();
        return merged;
    }

    /** Persists all entities within a single transaction. */
    @Override
    public void commit(List<E> entities) {
        EntityManager em = getEntityManager();
        em.getTransaction().begin();
        for (E entity : entities) {
            em.merge(entity);
        }
        em.getTransaction().commit();
    }

    /** Removes the entity in its own transaction. */
    @Override
    public void delete(E entity) {
        EntityManager em = getEntityManager();
        em.getTransaction().begin();
        E merged = em.merge(entity);
        em.remove(merged);
        em.getTransaction().commit();
    }

    /**
     * Removes a collection row.
     * NOTE(review): unlike delete(), this does not begin/commit a transaction;
     * it appears to rely on the caller's active transaction -- confirm that
     * this is intentional before changing it.
     */
    @Override
    public void deleteRow(Object row) {
        EntityManager em = getEntityManager();
        Object merged = em.merge(row);
        em.remove(merged);
    }

    /** Deletes all rows of the entity's table via a bulk JPQL delete. */
    @Override
    public void truncate() {
        EntityManager em = getEntityManager();
        em.getTransaction().begin();
        em.createQuery("DELETE FROM " + entityClass.getCanonicalName() + " e").executeUpdate();
        em.getTransaction().commit();
    }

    /** Finds the entity by primary key, or returns null when absent. */
    @Override
    public E get(Long id) {
        EntityManager em = getEntityManager();
        return em.find(entityClass, id);
    }

    /** Returns all entities of the managed class. */
    @Override
    public List<E> getAll() {
        EntityManager em = getEntityManager();
        CriteriaBuilder cb = em.getCriteriaBuilder();
        CriteriaQuery<E> cq = cb.createQuery(entityClass);
        Root<E> root = cq.from(entityClass);
        cq.select(root);
        TypedQuery<E> query = em.createQuery(cq);
        List<E> store = query.getResultList();
        return store;
    }

    /**
     * Runs a criteria query built from the given property/value map. Strings
     * become case-insensitive LIKE filters ('*' wildcard, implicit trailing
     * '%'), booleans become equality, two-element lists are treated as a
     * whole-day date range, and anything else becomes an equality filter.
     */
    @Override
    public List<E> query(Map<String, Object> map) {
        EntityManager em = getEntityManager();
        CriteriaBuilder cb = em.getCriteriaBuilder();
        CriteriaQuery<E> cq = cb.createQuery(entityClass);
        Root<E> root = cq.from(entityClass);
        List<Predicate> predicates = new ArrayList<>();
        for (String name : map.keySet()) {
            Predicate predicate = null;
            Path path = root.get(name);
            Object objectValue = map.get(name);
            if (objectValue instanceof String) {
                String value = (String) objectValue;
                value = value.replace("*", "%");
                if (!value.endsWith("%")) {
                    value += "%";
                }
                predicate = cb.like(cb.upper(path), value.toUpperCase());
            } else if (objectValue instanceof Boolean) {
                predicate = cb.equal(path, objectValue);
            } else if (objectValue instanceof List) {
                // NOTE(review): assumes a two-element [from, to] list of Dates.
                List<Date> range = (List<Date>) objectValue;
                Date from = zeroMilliseconds(range.get(0));
                Date to = lastMillisecond(range.get(1));
                predicate = cb.and(cb.greaterThanOrEqualTo(path, from),
                        cb.lessThanOrEqualTo(path, to));
            } else if (objectValue != null) {
                // Fixed: was `instanceof Object`, which is merely a disguised
                // null-check (always true for non-null values).
                if (objectValue.getClass().isEnum()) {
                    int value = ((Enum) objectValue).ordinal();
                    // XXX: and if EnumType.STRING??
                    predicate = cb.equal(path, value);
                } else {
                    predicate = cb.equal(path, objectValue);
                }
            }
            if (predicate != null) {
                predicates.add(predicate);
            }
        }
        cq.select(root);
        if (predicates.size() > 0) {
            cq.where(cb.and(predicates.toArray(new Predicate[predicates.size()])));
        }
        TypedQuery<E> query = em.createQuery(cq);
        List<E> store = query.getResultList();
        return store;
    }

    /** Returns the same date at 00:00:00.000 (start of day). */
    private Date zeroMilliseconds(Date date) {
        Calendar calendar = Calendar.getInstance();
        calendar.setTime(date);
        calendar.set(Calendar.HOUR_OF_DAY, 0);
        calendar.set(Calendar.MINUTE, 0);
        calendar.set(Calendar.SECOND, 0);
        calendar.set(Calendar.MILLISECOND, 0);
        return calendar.getTime();
    }

    /** Returns the same date at 23:59:59.999 (end of day). */
    private Date lastMillisecond(Date date) {
        Calendar calendar = Calendar.getInstance();
        calendar.setTime(date);
        calendar.set(Calendar.HOUR_OF_DAY, 23);
        calendar.set(Calendar.MINUTE, 59);
        calendar.set(Calendar.SECOND, 59);
        calendar.set(Calendar.MILLISECOND, 999);
        return calendar.getTime();
    }

    private EntityManager getEntityManager() {
        return entityManager;
    }

    /*
     * The parentize method hooks the items of the collections to the parent
     * entity: for every @OneToMany field it writes the parent back into each
     * child's "mappedBy" foreign-key property.
     */
    private void parentize(E entity) {
        for (Field f : entityClass.getDeclaredFields()) {
            for (Annotation a : f.getAnnotations()) {
                // discover the OneToMany
                if (a.annotationType().equals(javax.persistence.OneToMany.class)) {
                    String name = f.getName();
                    BeanAccess<Collection> collectionBeanAccess = new BeanAccess<Collection>(entity, name);
                    Collection collection = collectionBeanAccess.getValue();
                    if (collection != null && collection.size() > 0) {
                        // discover the "mapped by" foreign key
                        String foreignKey = null;
                        try {
                            Method mappedBy = a.annotationType().getDeclaredMethod("mappedBy");
                            foreignKey = (String) mappedBy.invoke(a);
                        } catch (NoSuchMethodException | InvocationTargetException | IllegalAccessException e) {
                            e.printStackTrace();
                        }
                        if (foreignKey != null) {
                            // parentize children
                            for (Iterator it = collection.iterator(); it.hasNext(); ) {
                                Object child = it.next();
                                BeanAccess<E> fkBeanAccess = new BeanAccess<>(child, foreignKey);
                                fkBeanAccess.setValue(entity);
                            }
                        }
                    }
                }
            }
        }
    }
}
package com.github.obsidianarch.gvengine.core; import java.lang.reflect.Method; import java.util.ArrayList; import java.util.Iterator; import java.util.List; import org.lwjgl.Sys; import com.github.obsidianarch.gvengine.core.options.Option; import com.github.obsidianarch.gvengine.core.options.SliderOption; import com.github.obsidianarch.gvengine.core.options.ToggleOption; /** * Maintains a schedule of events and when they need to be executed. * * @author Austin */ public class Scheduler { // Options /** The maximum number of events dispatched every tick. */ @Option( description = "Maximum events", screenName = "Maximum events per tick", x = -1, y = -1 ) @SliderOption( minimum = 8, maximum = 4096 ) public static int MaxEvents = 16; /** When true, the timed events will be restricted to the {@code MaxEvents} as well. */ @Option( description = "Timed events throttled", screenName = "Timed Events Throttled", x = -1, y = -1 ) @ToggleOption( options = { "false", "true" }, descriptions = { "Disabled (recommended)", "Enabled" } ) public static boolean TimedEventsThrottled = false; @Option( description = "Log scheduling output messages", screenName = "Log Scheduling Output", x = -1, y = -1 ) @ToggleOption( options = { "false", "true" }, descriptions = { "Disabled", "Enabled" } ) public static boolean LogOutput = false; // Fields private static List< Event > recurringEvents = new ArrayList<>(); /** The list of events which have timers attached. */ private static List< Event > timedEvents = new ArrayList<>(); /** The list of events which have just been scheduled. (FIFO) */ private static List< Event > events = new ArrayList<>(); // Schedulers /** * Schedules an event to be performed every tick, this should be used sparingly or * have large delays, as these cannot be throttled to a maximum number of events per * tick. * * @param method * The name of the method to invoke. * @param target * The object to target. * @param delay * The time (in milliseconds) between the executions. 
* @param parameters * The parameters to pass to the method. */ public static void scheduleRecurringEvent( String method, Object target, long delay, Object... parameters ) { Event event = new Event(); try { if ( parameters != null ) { Class< ? >[] paramClasses = new Class< ? >[ parameters.length ]; for ( int i = 0; i < paramClasses.length; i++ ) { paramClasses[ i ] = parameters[ i ].getClass(); } event.action = target.getClass().getMethod( method, paramClasses ); } else { event.action = target.getClass().getMethod( method ); } } catch ( NoSuchMethodException e ) { e.printStackTrace(); } catch ( SecurityException e ) { System.err.println( "Scheduler cannot get method due to SecurityException" ); e.printStackTrace(); } event.target = target; event.delay = delay; event.executionTime = MathHelper.toTicks( Sys.getTime() ) + delay; event.parameters = parameters; recurringEvents.add( event ); } /** * Schedules an event {@code time} milliseconds in the future. So * {@code scheduleEvent( "aMethod", anObject, 5000, aParameter )} will * invoke {@code anObject.aMethod( aParameter )} in 5 seconds. * * @param method * The method to execute. * @param target * The object upon which the method will be invoked. * @param time * The time (in milliseconds) until the event will be fired (Use -1 when * the event doesn't have a time it needs to run by). * @param parameters * The parameters passed to the method when executed. */ public static void scheduleEvent( String method, Object target, long time, Object... parameters ) { Event event = new Event(); try { if ( parameters != null ) { Class< ? >[] paramClasses = new Class< ? 
>[ parameters.length ]; for ( int i = 0; i < paramClasses.length; i++ ) { paramClasses[ i ] = parameters[ i ].getClass(); } event.action = target.getClass().getMethod( method, paramClasses ); } else { event.action = target.getClass().getMethod( method ); } } catch ( NoSuchMethodException | SecurityException e1 ) { e1.printStackTrace(); // hopefully will never be thrown } event.target = target; if ( time == -1 ) { event.executionTime = -1; } else { event.executionTime = Sys.getTime() + MathHelper.toTicks( time ); } event.parameters = parameters; addEvent( event ); // add the event to the list } // Actions /** * Updates the scheduler, and fires events which need to be fired. */ public static void doTick() { int firedEvents = 0; // fire all of the recurring events for ( Event e : recurringEvents ) { if ( Sys.getTime() < e.executionTime ) continue; try { e.action.invoke( e.target, e.parameters ); // invoke the method if ( TimedEventsThrottled ) firedEvents++; } catch ( Exception ex ) { System.err.println( "Scheduler failed to invoke \"" + e.action.getName() + "()\"" ); ex.printStackTrace(); } e.executionTime = Sys.getTime() + MathHelper.toTicks( e.delay ); // set the next execution time } Iterator< Event > it = timedEvents.iterator(); // get the iterator for the timed events while ( it.hasNext() ) { Event e = it.next(); // get the next event // if the timed events are throttled too, then we may have to break out of the loop if ( TimedEventsThrottled && ( firedEvents >= MaxEvents ) ) break; // everything past this point will have a higher execution time than this one as well, and we haven't gotten to this time yet if ( e.executionTime > Sys.getTime() ) break; try { e.action.invoke( e.target, e.parameters ); // invoke the method if ( TimedEventsThrottled ) firedEvents++; } catch ( Exception ex ) { System.err.println( "Scheduler failed to invoke \"" + e.action.getName() + "()\"" ); ex.printStackTrace(); } System.out.println( "> Executed timed event \"" + 
e.action.getName() + "\"" ); it.remove(); // remove the iterated objects } it = events.iterator(); // get the iterator for the untimed events while ( it.hasNext() ) { Event e = it.next(); // get the next event // we've reached the max number of events we can fire for now if ( firedEvents >= MaxEvents ) break; try { e.action.invoke( e.target, e.parameters ); // invoke the method firedEvents++; } catch ( Exception ex ) { System.err.println( "Scheduler failed to invoke \"" + e.action.getName() + "()\"" ); ex.printStackTrace(); } System.out.println( "> Executed event \"" + e.action.getName() + "\"" ); it.remove(); // remove the iterated object } } /** * Adds the event into the correct collection, and if it's timed adds it into the list * at the correct position. * * @param e * The event to schedule. */ private static void addEvent( Event e ) { try { // if there is not timed constraint if ( e.executionTime == -1 ) { events.add( e ); return; } // if there are no prescheduled timed events, then add one in if ( timedEvents.size() == 0 ) { timedEvents.add( e ); return; } // add the timed event into the list at the correct position, based on the surronding items for ( int i = 0; i < timedEvents.size(); i++ ) { Event event = timedEvents.get( i ); // get the event from the list if ( event.executionTime > e.executionTime ) { // this event is scheduled later than this one timedEvents.add( i, e ); // insert the timed event into the list return; // we've added it, no reason to continue doing so } } timedEvents.add( e ); // add the event to the very end of the queue } finally { if ( LogOutput ) { if ( e.executionTime == -1 ) { System.out.println( "> Scheduled " + e.action.getName() ); } else { System.out.println( "> Scheduled " + e.action.getName() + " for " + e.executionTime ); } } } } // Getters /** * @return The total number of events scheduled that need to be executed at this time * (timed and untimed). 
*/ public static int getEventCount() { return events.size() + timedEvents.size(); } // Nested Classes /** * A container for the method which will be executed, the class which will be * executing the method, and the time at which the method will be fired. * * @author Austin */ private static class Event { /** The action for this event. */ public Method action; /** The executor. */ public Object target; /** The delay between executions (only used for recurring events). */ public long delay; /** The time this event should be executed at. -1 if there is not timing priority. */ public long executionTime; /** The parameters passed to the methods. */ public Object[] parameters; } }
package net.acomputerdog.core.tree;

/**
 * A "leaf" on a tree: a branch node that carries a data item and has no children.
 *
 * @param <T> the type of item stored on this leaf
 */
public class Leaf<T> {

    /** The data held by this leaf; may be null. */
    private T data;

    /** The branch this leaf hangs from; never reassigned. */
    private final Branch parentBranch;

    /**
     * Creates a new Leaf holding the given item.
     *
     * @param branch The branch that this leaf is attached to
     * @param item   The item this leaf holds (may be null)
     */
    public Leaf(Branch branch, T item) {
        this.parentBranch = branch;
        this.data = item;
    }

    /**
     * Creates a new leaf with no item.
     *
     * @param branch The branch that this leaf is attached to
     */
    public Leaf(Branch branch) {
        this(branch, null);
    }

    /**
     * Gets the item held by this leaf.
     *
     * @return the item, or null when the leaf is empty
     */
    public T getItem() {
        return data;
    }

    /**
     * Sets the item held by this leaf.
     *
     * @param item the new item (may be null)
     */
    public void setItem(T item) {
        this.data = item;
    }

    /**
     * Gets the branch this leaf is on.
     *
     * @return the branch of this leaf
     */
    public Branch getBranch() {
        return parentBranch;
    }
}
package com.jcwhatever.nucleus.utils.text; import com.jcwhatever.nucleus.utils.PreCon; import java.util.ArrayList; import java.util.Collection; import java.util.HashMap; import java.util.List; import java.util.Map; /** * Replaces tags in text. * * <p>Tags consist of enclosing curly braces with tag text inside.</p> * * <p>Numbers are used to mark the index of the parameter that should be * placed where the tag is. ie {0} is parameter 1 (index 0)</p> * * <p>{@link TextColor} constant names are automatically added as tags and * replaced with the equivalent color code. ie {RED} is replaced with the * Minecraft color code for red.</p> * * <p>Tags that don't match a parameter index or other defined tag formatter * are ignored and added as is.</p> * * <p>Comments can be added to tags by inserting a colon. Useful for including * the purpose of the format tag for documentation purposes. * ie {0: This part of the tag is a comment and is ignored}</p> * */ public class TextFormatter { private static Map<String, ITagFormatter> _colors = new HashMap<>(TextColor.values().length); static { for (final TextColor color : TextColor.values()) { _colors.put(color.name(), new ITagFormatter() { @Override public String getTag() { return color.name(); } @Override public void append(StringBuilder sb, String tag) { sb.append(color.getColorCode()); } }); } } private final Map<String, ITagFormatter> _formatters = new HashMap<>(20); private final StringBuilder _textBuffer = new StringBuilder(100); private final StringBuilder _tagBuffer = new StringBuilder(25); /** * Constructor. */ public TextFormatter(){} /** * Constructor. * * @param formatters A collection of formatters to include. */ public TextFormatter(Collection<? extends ITagFormatter> formatters) { for (ITagFormatter formatter : formatters) { _formatters.put(formatter.getTag(), formatter); } } /** * Constructor. * * @param formatters The formatters to include. */ public TextFormatter(ITagFormatter... 
formatters) { for (ITagFormatter formatter : formatters) { _formatters.put(formatter.getTag(), formatter); } } /** * Get a default tag formatter by case sensitive tag. * * @param tag The tag text. */ public ITagFormatter getFormatter(String tag) { return _formatters.get(tag); } /** * Get all default tag formatters. */ public List<ITagFormatter> getFormatters() { return new ArrayList<>(_formatters.values()); } /** * Remove a default tag formatter. * * @param tag The tag to remove. */ public void removeFormatter(String tag) { _formatters.remove(tag); } /** * Add a default tag formatter. * * @param formatter The tag formatter to add. */ public void addFormatter(ITagFormatter formatter) { _formatters.put(formatter.getTag(), formatter); } /** * Format text. * * @param template The template text. * @param params The parameters to add. * * @return The formatted string. */ public String format(String template, Object... params) { PreCon.notNull(template); return format(_formatters, template, params); } /** * Format text using a custom set of formatters. * * @param formatters The formatter map to use. * @param template The template text. * @param params The parameters to add. * * @return The formatted string. */ public String format(Map<String, ITagFormatter> formatters, String template, Object... 
params) { PreCon.notNull(template); if (template.indexOf('{') == -1 && template.indexOf('\\') == -1) return template; _textBuffer.setLength(0); for (int i=0; i < template.length(); i++) { char ch = template.charAt(i); // check for tag opening if (ch == '{') { // parse tag String tag = parseTag(template, i); // update index position i += _tagBuffer.length(); // template ended before tag was closed if (tag == null) { _textBuffer.append('{'); _textBuffer.append(_tagBuffer); } // tag parsed else { i++; // add 1 for closing brace appendReplacement(_textBuffer, tag, params, formatters); } } else if (ch == '\\' && i < template.length() - 1) { // make sure the backslash isn't escaped int s = i; int bsCount = 0; while (s != 0) { if (template.charAt(s - 1) == '\\') { bsCount++; } else { break; } s } if (bsCount % 2 != 0) continue; // look at next character char next = template.charAt(i + 1); // handle new line character if (next == 'n' || next == 'r') { _textBuffer.append('\n'); i++; } // handle unicode else if (next == 'u') { i++; char unicode = parseUnicode(template, i); if (unicode == 0) { // append non unicode text _textBuffer.append("\\u"); } else { _textBuffer.append(unicode); i+= Math.min(4, template.length() - i); } } // unused backslash else { _textBuffer.append(ch); } } else { // append next character _textBuffer.append(ch); } } return _textBuffer.toString(); } /** * Parse a unicode character from the string */ private char parseUnicode(String template, int currentIndex) { _tagBuffer.setLength(0); for (int i=currentIndex + 1, readCount=0; i < template.length(); i++, readCount++) { if (readCount == 4) { break; } else { char ch = template.charAt(i); if ("01234567890abcdefABCDEF".indexOf(ch) == -1) return 0; _tagBuffer.append(ch); } } if (_tagBuffer.length() == 4) { try { return (char) Integer.parseInt(_tagBuffer.toString(), 16); } catch (NumberFormatException ignore) { return 0; } } return 0; } /* * Parse a single tag from the template */ private String 
parseTag(String template, int currentIndex) { _tagBuffer.setLength(0); for (int i=currentIndex + 1; i < template.length(); i++) { char ch = template.charAt(i); if (ch == '}') { return _tagBuffer.toString(); } else { _tagBuffer.append(ch); } } return null; } /* * Append replacement text for a tag */ private void appendReplacement(StringBuilder sb, String tag, Object[] params, Map<String, ITagFormatter> formatters) { boolean isNumber = !tag.isEmpty(); _tagBuffer.setLength(0); // parse out tag from comment section for (int i=0; i < tag.length(); i++) { char ch = tag.charAt(i); // done at comment character if (ch == ':') { break; } // append next tag character else { _tagBuffer.append(ch); // check if the character is a number if (isNumber && !Character.isDigit(ch)) { isNumber = false; } } } String parsedTag = _tagBuffer.toString(); if (isNumber) { int index = Integer.parseInt(parsedTag); // make sure number is in the range of the provided parameters. if (params.length <= index) { reappendTag(sb, tag); } // replace number with parameter argument. 
else { String toAppend = String.valueOf(params[index]); String lastColors = null; // make sure colors from inserted text do not continue // into template text if (toAppend.indexOf(TextColor.FORMAT_CHAR) != -1) { lastColors = TextColor.getEndColor(sb); } // append parameter argument sb.append(params[index]); // append template color if (lastColors != null && !lastColors.isEmpty()) { sb.append(lastColors); } } } else { // check for custom formatter ITagFormatter formatter = formatters.get(parsedTag); if (formatter == null) { // check for color formatter formatter = _colors.get(parsedTag); } if (formatter != null) { // formatter appends replacement text to format buffer formatter.append(sb, tag); } else { // no formatter, append tag to result buffer reappendTag(sb, tag); } } } /* * Append raw tag to string builder */ private void reappendTag(StringBuilder sb, String tag) { sb.append('{'); sb.append(tag); sb.append('}'); } /** * Defines a format tag. */ public static interface ITagFormatter { /** * Get the format tag. */ String getTag(); /** * Append replacement text into the provided * string builder. The parsed tag is provided for reference. * * @param sb The string builder to append to. * @param rawTag The tag that was parsed. */ void append(StringBuilder sb, String rawTag); } }
package org.jasig.portal.services; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.log4j.Priority; /** * As of uPortal 2.4, use Apache Commons Logging directly instead of using this * LogService. This LogService is retained here for backwards compatibility, * and presumably will disappear in a future release. * * The LogService is a service offered by the uPortal framework whereby messages * can be logged. Each uPortal deployment can customize exactly where how and * how much they want logged. As of uPortal 2.4, this is accomplished by this class * delegating all logging to Apache Commons Logging. The expected typical (and * default) local logging configuration is to use the Logger.properties file to configure * Log4J as the underlying logging implementation, to a file on disk. However, the * options are endless under Log4J: you can configure it to log as XML, to log to a tree * of files depending upon where the logging message is coming from and at what * logging level, to send log messages over the network to a Chainsaw instance listening to * your uPortal... Furthermore, you don't even have to use Log4J: Commons Logging supports * JDK1.4 logging as well as the ability for you to plug in a custom logging implementation if * you really want to. * * @author Ken Weiner, Bernie Durfee, Vikrant Joshi, Zed A. 
Shaw, andrew.petro@yale.edu * @version $Revision$ $Date$ * @deprecated As of uPortal 2.4, please use Apache Commons Logging directly */ public final class LogService { // Log levels, create fake ones if they don't match the Log4J standard ones public final static Priority NONE = Priority.DEBUG; public final static Priority SEVERE = Priority.FATAL; public final static Priority ERROR = Priority.ERROR; public final static Priority WARN = Priority.WARN; public final static Priority INFO = Priority.INFO; public final static Priority DEBUG = Priority.DEBUG; private static final Log log = LogFactory.getLog("org.jasig.portal"); private static final LogService m_instance = new LogService(); static{ initialize(); } protected LogService () { initialize(); } public final static LogService instance () { return (m_instance); } /** * Used to configure Log4J with the Logger.properties file. * Now does nothing, as the build.xml now copies the Logger.properties file * to the well known name (log4j.properties) and location (base of the classpath * by virtue of being in the base of the WEB-INF/classes/ directory) expected * by Log4J. * @deprecated no longer does anything */ private final static void initialize () { /* * No longer does anything. * The build.xml compile task copies the Logger.properties file * to the well known name and location Log4J expects. */ } public final static void log (Priority pLogLevel, String sMessage) { initialize(); if (pLogLevel == null){ log.fatal(sMessage); } else if (pLogLevel.equals(SEVERE)){ log.fatal(sMessage); } else if (pLogLevel.equals(ERROR)){ log.error(sMessage); } else if (pLogLevel.equals(WARN)){ log.warn(sMessage); } else if (pLogLevel.equals(INFO)){ log.info(sMessage); } else if (pLogLevel.equals(DEBUG)){ log.debug(sMessage); } else if (pLogLevel.equals(NONE)){ log.debug(sMessage); } else { // should never get here log.fatal(sMessage); } } /** * Log the given throwable at the given priority. 
* @param pLogLevel - logging level * @param ex - throwable to be logged */ public final static void log (Priority pLogLevel, Throwable ex) { initialize(); if (pLogLevel == null){ log.fatal(ex, ex); } else if (pLogLevel.equals(SEVERE)){ log.fatal(ex, ex); } else if (pLogLevel.equals(ERROR)){ log.error(ex, ex); } else if (pLogLevel.equals(WARN)){ log.warn(ex, ex); } else if (pLogLevel.equals(INFO)){ log.info(ex, ex); } else if (pLogLevel.equals(DEBUG)){ log.debug(ex, ex); } else if (pLogLevel.equals(NONE)){ log.debug(ex, ex); } else { // should never get here log.fatal("Unrecognized logging level " + pLogLevel, ex); } } public final static void log (Priority pLogLevel, String sMessage, Throwable ex) { initialize(); if (pLogLevel == null){ log.fatal(sMessage, ex); }else if (pLogLevel.equals(SEVERE)){ log.fatal(sMessage, ex); } else if (pLogLevel.equals(ERROR)){ log.error(sMessage, ex); } else if (pLogLevel.equals(WARN)){ log.warn(sMessage, ex); } else if (pLogLevel.equals(INFO)){ log.info(sMessage, ex); } else if (pLogLevel.equals(DEBUG)){ log.debug(sMessage, ex); } else if (pLogLevel.equals(NONE)){ log.debug(sMessage, ex); } else { // Should never get here. log.fatal(sMessage, ex); } } public final static void log (String sMessage) { initialize(); log.info(sMessage); } }
package com.maddyhome.idea.vim.ex.handler; import com.intellij.openapi.actionSystem.DataContext; import com.intellij.openapi.editor.Caret; import com.intellij.openapi.editor.Editor; import com.maddyhome.idea.vim.VimPlugin; import com.maddyhome.idea.vim.common.Register; import com.maddyhome.idea.vim.ex.*; import com.maddyhome.idea.vim.group.MotionGroup; import com.maddyhome.idea.vim.handler.CaretOrder; import org.jetbrains.annotations.NotNull; public class RepeatHandler extends CommandHandler { public RepeatHandler() { super(new CommandName[]{new CommandName("@", "")}, RANGE_OPTIONAL | ARGUMENT_REQUIRED | DONT_SAVE_LAST, true, CaretOrder.DECREASING_OFFSET); } public boolean execute(@NotNull Editor editor, @NotNull Caret caret, @NotNull DataContext context, @NotNull ExCommand cmd) throws ExException { char arg = cmd.getArgument().charAt(0); if (arg == '@') arg = lastArg; lastArg = arg; final int line = cmd.getLine(editor, caret, context); MotionGroup.moveCaret(editor, caret, VimPlugin.getMotion().moveCaretToLine(editor, line)); if (arg == ':') { return CommandParser.getInstance().processLastCommand(editor, context, 1); } final Register reg = VimPlugin.getRegister().getPlaybackRegister(arg); if (reg == null) return false; final String text = reg.getText(); if (text == null) return false; CommandParser.getInstance().processCommand(editor, context, text, 1); return true; } private char lastArg = ':'; }
package com.miviclin.droidengine2d.screen;

import java.util.ArrayList;

import android.util.SparseArray;

import com.miviclin.droidengine2d.Game;
import com.miviclin.droidengine2d.graphics.Graphics;

/**
 * ScreenManager. Keeps a registry of Screens by integer id, tracks the active Screen, and
 * forwards engine lifecycle calls (pause/resume/update/draw/dispose) to it.
 *
 * @author Miguel Vicente Linares
 */
public class ScreenManager {

    private SparseArray<Screen> screens;
    private Screen activeScreen;
    private ArrayList<OnScreenChangeListener> onScreenChangeListeners;

    /**
     * Constructor.
     */
    public ScreenManager() {
        this(16);
    }

    /**
     * Constructor.
     *
     * @param initialCapacity Initial capacity for Screens. If this capacity is reached, the data structure that holds
     *            the Screens will be resized automatically.
     */
    public ScreenManager(int initialCapacity) {
        this.screens = new SparseArray<Screen>(initialCapacity);
        this.activeScreen = null;
        this.onScreenChangeListeners = new ArrayList<OnScreenChangeListener>();
    }

    /**
     * Registers a Screen in this ScreenManager using the specified screenId.<br>
     * If a Screen with the specified screenId was previously registered in this ScreenManager, it will be replaced by
     * the new one.<br>
     * The active Screen will not change.
     *
     * @param screenId Identifier of the Screen. It can be used to get the Screen from this ScreenManager later.
     * @param screen Screen (can not be null).
     */
    public void registerScreen(int screenId, Screen screen) {
        registerScreen(screenId, screen, false);
    }

    /**
     * Registers a Screen in this ScreenManager using the specified screenId.<br>
     * If a Screen with the specified screenId was previously registered in this ScreenManager, it will be replaced by
     * the new one.
     *
     * @param screenId Identifier of the Screen. It can be used to get the Screen from this ScreenManager later.
     * @param screen Screen (can not be null).
     * @param activate true to make the Screen the active Screen of this ScreenManager.
     */
    public void registerScreen(int screenId, Screen screen, boolean activate) {
        if (screen == null) {
            throw new IllegalArgumentException("The Screen can not be null");
        }
        screens.put(screenId, screen);
        screen.onRegister();
        if (activate) {
            setActiveScreen(screenId);
        }
    }

    /**
     * Unregisters the specified Screen from this ScreenManager.<br>
     * If a Screen was registered with the specified screenId, the Screen is deactivated (when it is the active Screen)
     * and disposed before it is removed from this ScreenManager.
     *
     * @param screenId Identifier of the Screen.
     * @return Removed Screen or null
     */
    public Screen unregisterScreen(int screenId) {
        Screen screen = screens.get(screenId);
        // BUGFIX: the null check must come first. Previously, when no Screen was registered
        // for screenId AND there was no active Screen, (screen == activeScreen) compared two
        // nulls, evaluated to true, and screen.onDeactivation() threw a NullPointerException.
        if (screen != null) {
            if (screen == activeScreen) {
                screen.onDeactivation();
                activeScreen = null;
            }
            screen.dispose();
            screens.remove(screenId);
        }
        return screen;
    }

    /**
     * Returns the Screen associated with the specified screenId.
     *
     * @param screenId Identifier of the Screen.
     * @return Screen or null
     */
    public Screen getScreen(int screenId) {
        return screens.get(screenId);
    }

    /**
     * Returns the active Screen of this ScreenManager.
     *
     * @return Screen or null
     */
    public Screen getActiveScreen() {
        return activeScreen;
    }

    /**
     * Sets the active Screen of this ScreenManager.<br>
     * The Screen must have been previously registered with the specified screenId.
     *
     * @param screenId Identifier of the Screen we want to set as the active Screen.
     */
    public void setActiveScreen(int screenId) {
        if (activeScreen != null) {
            activeScreen.onDeactivation();
        }
        Screen screen = screens.get(screenId);
        // only notify listeners when the active Screen actually changes
        if (activeScreen != screen) {
            dispatchOnScreenChangeEvent(activeScreen, screen);
        }
        this.activeScreen = screen;
        if (activeScreen != null) {
            activeScreen.onActivation();
        }
    }

    /**
     * Notifies all listeners that the Screen has changed.
     *
     * @param previousScreen Previous Screen.
     * @param currentScreen Current Screen.
     */
    private void dispatchOnScreenChangeEvent(Screen previousScreen, Screen currentScreen) {
        for (int i = 0; i < onScreenChangeListeners.size(); i++) {
            onScreenChangeListeners.get(i).onScreenChange(previousScreen, currentScreen);
        }
    }

    /**
     * Adds an OnScreenChangeListener that will be notified when the active Screen of this ScreenManager changes.
     *
     * @param listener Listener to be added.
     */
    public void addOnScreenChangeListener(OnScreenChangeListener listener) {
        onScreenChangeListeners.add(listener);
    }

    /**
     * Removes an OnScreenChangeListener from the list of listeners. The removed listener will not be notified anymore
     * when the active Screen of this ScreenManager changes.
     *
     * @param listener Listener to be removed.
     */
    public void removeOnScreenChangeListener(OnScreenChangeListener listener) {
        onScreenChangeListeners.remove(listener);
    }

    /**
     * This method is called when the engine is paused, usually when the activity goes to background.<br>
     * Calls {@link Screen#onPause()} on the active Screen.
     */
    public void pause() {
        if (activeScreen != null) {
            activeScreen.onPause();
        }
    }

    /**
     * This method is called when the engine is resumed, usually when the activity comes to foreground.<br>
     * Calls {@link Screen#onResume()} on the active Screen.
     */
    public void resume() {
        if (activeScreen != null) {
            activeScreen.onResume();
        }
    }

    /**
     * Disposes all Screens registered in this ScreenManager and removes them from the ScreenManager.<br>
     * This ScreenManager will be left empty.
     */
    public void dispose() {
        int numScreens = screens.size();
        for (int i = 0; i < numScreens; i++) {
            Screen screen = screens.valueAt(i);
            if (screen != null) {
                screen.dispose();
            }
        }
        screens.clear();
        activeScreen = null;
    }

    /**
     * Calls {@link Screen#update(float)} on the active Screen.<br>
     * This method is called from {@link Game#update(float)}.
     *
     * @param delta Elapsed time, in milliseconds, since the last update.
     */
    public void update(float delta) {
        if (activeScreen != null) {
            // process pending input before advancing the Screen's state
            activeScreen.getInputManager().processInput();
            activeScreen.update(delta);
        }
    }

    /**
     * Calls {@link Screen#draw(Graphics)} on the active Screen.<br>
     * This method is called from {@link Game#draw(Graphics)}.<br>
     * This method is called from the rendering thread after {@link ScreenManager#update(float)} has been executed in
     * the game thread.
     */
    public void draw(Graphics graphics) {
        if (activeScreen != null) {
            activeScreen.draw(graphics);
        }
    }
}
package com.ra4king.opengl.util.render;

import static org.lwjgl.opengl.GL11.*;
import static org.lwjgl.opengl.GL15.*;
import static org.lwjgl.opengl.GL20.*;

import java.nio.FloatBuffer;
import java.util.function.Supplier;

import org.lwjgl.BufferUtils;

import com.ra4king.opengl.util.ShaderProgram;
import com.ra4king.opengl.util.Utils;
import com.ra4king.opengl.util.math.Matrix4;
import com.ra4king.opengl.util.math.Vector4;

import net.indiespot.struct.cp.Struct;
import net.indiespot.struct.cp.TakeStruct;

/**
 * A scrolling line graph: once per second it samples a value from the supplier,
 * scales it against {@code maxValue}, and renders the history as a line strip
 * next to two static axis lines, all from a single VBO.
 *
 * @author Roi Atalla
 */
public class PerformanceGraph {
	// Supplies the value sampled for each new graph step (once per second in update()).
	private Supplier<? extends Number> stepValueSupplier;
	// The value that maps to the full graph height (100%).
	private float maxValue;
	// Top-left corner of the graph, in pixels.
	private int x, y;
	// Total pixel size of the graph (width = maxSteps * stepWidth).
	private int width, height;
	// Number of history steps kept, and the horizontal pixels per step.
	private int maxSteps;
	private int stepWidth;
	// Struct-allocated color; explicitly freed in finalize() — do not replace with a plain new.
	private Vector4 color = Struct.malloc(Vector4.class);
	
	// Shader shared by every PerformanceGraph instance; compiled lazily in initProgram().
	private static ShaderProgram uiProgram;
	
	private Matrix4 projectionMatrix;
	// Single VBO layout: [axis-line vertices][step vertices]; vao captures the attrib setup.
	private int vbo, vao;
	// CPU-side copy of the step vertices (x,y pairs), re-uploaded each new sample.
	private FloatBuffer graphData;
	// Number of floats used by the static axis lines at the start of the VBO.
	private int graphOffset;
	// How many steps currently hold valid data (grows up to maxSteps).
	private int stepCount;
	
	/**
	 * Creates a PerformanceGraph at location (x,y) with size (maxSteps*stepWidth, graphHeight).
	 * The origin (0,0) is at the top left corner of the window.
	 *
	 * @param maxValue The value to represent as 100%
	 * @param x The left side of the graph
	 * @param y The top of the graph
	 * @param maxSteps The number of steps in the graph
	 * @param stepWidth The size (in pixels) of each step
	 * @param graphHeight The height of the graph
	 * @param color The color of the graph
	 * @param stepValueSupplier The Supplier of the values of each step
	 */
	public PerformanceGraph(float maxValue, int x, int y, int maxSteps, int stepWidth, int graphHeight, Vector4 color, Supplier<? extends Number> stepValueSupplier) {
		this.stepValueSupplier = stepValueSupplier;
		setMaxValue(maxValue);
		setX(x);
		setY(y);
		this.width = maxSteps * stepWidth;
		this.height = graphHeight;
		this.maxSteps = maxSteps;
		this.stepWidth = stepWidth;
		init();
		// after init() so the GL resources exist before the color is copied
		this.setColor(color);
	}
	
	@Override
	protected void finalize() throws Throwable {
		try {
			// release the struct-allocated color; it is not managed by the GC
			Struct.free(color);
		} finally {
			super.finalize();
		}
	}
	
	// Compiles the shared UI shader from the perf_graph vertex/fragment sources.
	private static void initProgram() {
		uiProgram = new ShaderProgram(Utils.readFully(PerformanceGraph.class.getResourceAsStream(RenderUtils.SHADERS_PATH + "perf_graph.vert")),
				Utils.readFully(PerformanceGraph.class.getResourceAsStream(RenderUtils.SHADERS_PATH + "perf_graph.frag")));
	}
	
	// Allocates the VAO/VBO and uploads the static axis-line vertices.
	private void init() {
		// lazily compile the shared shader on first use
		if(uiProgram == null)
			initProgram();
		
		vao = RenderUtils.glGenVertexArrays();
		RenderUtils.glBindVertexArray(vao);
		
		// two axis lines as GL_LINES pairs: the left edge and the top edge of the graph
		float[] graph = {
				getX(), getY(),
				getX(), getY() + getHeight(),
				
				getX(), getY(),
				getX() + getWidth(), getY()
		};
		
		graphOffset = graph.length;
		
		graphData = BufferUtils.createFloatBuffer(getMaxSteps() * 2);
		stepCount = 0;
		
		// one buffer sized for axis vertices + all step vertices; axis data uploaded now,
		// step data streamed in update()
		vbo = glGenBuffers();
		glBindBuffer(GL_ARRAY_BUFFER, vbo);
		glBufferData(GL_ARRAY_BUFFER, (graphOffset + graphData.capacity()) * Float.BYTES, GL_STREAM_DRAW);
		glBufferSubData(GL_ARRAY_BUFFER, 0, (FloatBuffer)BufferUtils.createFloatBuffer(graph.length).put(graph).flip());
		
		glEnableVertexAttribArray(0);
		glVertexAttribPointer(0, 2, GL_FLOAT, false, 0, 0);
		
		glBindBuffer(GL_ARRAY_BUFFER, 0);
		RenderUtils.glBindVertexArray(0);
		
		// pixel-space orthographic projection covering the current render target
		projectionMatrix = new Matrix4().clearToOrtho(0, RenderUtils.getWidth(), 0, RenderUtils.getHeight(), 0, 1);
	}
	
	public int getX() {
		return x;
	}
	
	public void setX(int x) {
		this.x = x;
	}
	
	public int getY() {
		return y;
	}
	
	public void setY(int y) {
		this.y = y;
	}
	
	public float getMaxValue() {
		return maxValue;
	}
	
	public void setMaxValue(float maxValue) {
		this.maxValue = maxValue;
	}
	
	// Returns the struct-backed color instance itself (not a copy).
	@TakeStruct
	public Vector4 getColor() {
		return color;
	}
	
	// Copies the given color into the struct-allocated instance.
	public void setColor(Vector4 color) {
		this.color.set(color);
	}
	
	public int getWidth() {
		return width;
	}
	
	public int getHeight() {
		return height;
	}
	
	public int getMaxSteps() {
		return maxSteps;
	}
	
	public int getStepWidth() {
		return stepWidth;
	}
	
	// Accumulated time since the last sample; compared against 1e9,
	// so deltaTime is assumed to be in nanoseconds — TODO confirm against caller.
	private long elapsedTime;
	
	// Samples a new value once per elapsed second, shifts the history left by one
	// step, and re-uploads the step vertices to the VBO.
	public void update(long deltaTime) {
		elapsedTime += deltaTime;
		
		while(elapsedTime >= 1e9) {
			elapsedTime -= 1e9;
			
			graphData.clear();
			
			if(stepCount < getMaxSteps()) {
				stepCount++;
			}
			
			// shift every existing point one stepWidth to the left (oldest points scroll off)
			for(int a = stepCount * 2 - 2; a >= 2; a -= 2) {
				graphData.put(a, graphData.get(a - 2) - getStepWidth());
				graphData.put(a + 1, graphData.get(a - 1));
			}
			
			// scale the sampled value into pixel height; guard against 0/0 when maxValue is 0
			float stepHeight = height * stepValueSupplier.get().floatValue() / getMaxValue();
			if(Float.isNaN(stepHeight))
				stepHeight = 0;
			
			// newest point enters at the right edge of the graph
			graphData.put(0, getX() + getWidth() - 1);
			graphData.put(1, getY() + stepHeight);
			
			// stream the step vertices into the VBO, after the axis-line vertices
			glBindBuffer(GL_ARRAY_BUFFER, vbo);
			glBufferSubData(GL_ARRAY_BUFFER, graphOffset * Float.BYTES, graphData);
			glBindBuffer(GL_ARRAY_BUFFER, 0);
		}
	}
	
	// Draws the axis lines and the history line strip with depth/cull/blend disabled,
	// restoring those states afterwards.
	public void render() {
		glDisable(GL_DEPTH_TEST);
		glDisable(GL_CULL_FACE);
		glDisable(GL_BLEND);
		
		uiProgram.begin();
		glUniform4(uiProgram.getUniformLocation("color"), color.toBuffer());
		glUniformMatrix4(uiProgram.getUniformLocation("projectionMatrix"), false, projectionMatrix.toBuffer());
		
		RenderUtils.glBindVertexArray(vao);
		// first the two static axis lines, then the step history as a connected strip
		glDrawArrays(GL_LINES, 0, graphOffset / 2);
		glDrawArrays(GL_LINE_STRIP, graphOffset / 2, stepCount);
		RenderUtils.glBindVertexArray(0);
		
		uiProgram.end();
		
		glEnable(GL_BLEND);
		glEnable(GL_CULL_FACE);
		glEnable(GL_DEPTH_TEST);
	}
}
package com.redhat.ceylon.compiler.js; import static java.lang.Character.toUpperCase; import java.io.IOException; import java.io.Writer; import java.util.List; import com.redhat.ceylon.compiler.typechecker.model.Declaration; import com.redhat.ceylon.compiler.typechecker.model.Functional; import com.redhat.ceylon.compiler.typechecker.model.Method; import com.redhat.ceylon.compiler.typechecker.tree.NaturalVisitor; import com.redhat.ceylon.compiler.typechecker.tree.Node; import com.redhat.ceylon.compiler.typechecker.tree.Tree; import com.redhat.ceylon.compiler.typechecker.tree.Tree.AnnotationList; import com.redhat.ceylon.compiler.typechecker.tree.Tree.AssignOp; import com.redhat.ceylon.compiler.typechecker.tree.Tree.AttributeDeclaration; import com.redhat.ceylon.compiler.typechecker.tree.Tree.AttributeGetterDefinition; import com.redhat.ceylon.compiler.typechecker.tree.Tree.AttributeSetterDefinition; import com.redhat.ceylon.compiler.typechecker.tree.Tree.BaseMemberExpression; import com.redhat.ceylon.compiler.typechecker.tree.Tree.BaseTypeExpression; import com.redhat.ceylon.compiler.typechecker.tree.Tree.Block; import com.redhat.ceylon.compiler.typechecker.tree.Tree.Body; import com.redhat.ceylon.compiler.typechecker.tree.Tree.CharLiteral; import com.redhat.ceylon.compiler.typechecker.tree.Tree.ClassDefinition; import com.redhat.ceylon.compiler.typechecker.tree.Tree.CompilationUnit; import com.redhat.ceylon.compiler.typechecker.tree.Tree.ExecutableStatement; import com.redhat.ceylon.compiler.typechecker.tree.Tree.Expression; import com.redhat.ceylon.compiler.typechecker.tree.Tree.FloatLiteral; import com.redhat.ceylon.compiler.typechecker.tree.Tree.Identifier; import com.redhat.ceylon.compiler.typechecker.tree.Tree.ImportPath; import com.redhat.ceylon.compiler.typechecker.tree.Tree.InterfaceDefinition; import com.redhat.ceylon.compiler.typechecker.tree.Tree.InvocationExpression; import com.redhat.ceylon.compiler.typechecker.tree.Tree.MethodDeclaration; import 
com.redhat.ceylon.compiler.typechecker.tree.Tree.MethodDefinition; import com.redhat.ceylon.compiler.typechecker.tree.Tree.NamedArgument; import com.redhat.ceylon.compiler.typechecker.tree.Tree.NamedArgumentList; import com.redhat.ceylon.compiler.typechecker.tree.Tree.NaturalLiteral; import com.redhat.ceylon.compiler.typechecker.tree.Tree.ObjectDefinition; import com.redhat.ceylon.compiler.typechecker.tree.Tree.Outer; import com.redhat.ceylon.compiler.typechecker.tree.Tree.Parameter; import com.redhat.ceylon.compiler.typechecker.tree.Tree.ParameterList; import com.redhat.ceylon.compiler.typechecker.tree.Tree.PositionalArgument; import com.redhat.ceylon.compiler.typechecker.tree.Tree.PositionalArgumentList; import com.redhat.ceylon.compiler.typechecker.tree.Tree.QualifiedMemberExpression; import com.redhat.ceylon.compiler.typechecker.tree.Tree.QualifiedTypeExpression; import com.redhat.ceylon.compiler.typechecker.tree.Tree.Return; import com.redhat.ceylon.compiler.typechecker.tree.Tree.SequencedArgument; import com.redhat.ceylon.compiler.typechecker.tree.Tree.SpecifierStatement; import com.redhat.ceylon.compiler.typechecker.tree.Tree.Statement; import com.redhat.ceylon.compiler.typechecker.tree.Tree.StringLiteral; import com.redhat.ceylon.compiler.typechecker.tree.Tree.SumOp; import com.redhat.ceylon.compiler.typechecker.tree.Tree.Super; import com.redhat.ceylon.compiler.typechecker.tree.Tree.This; import com.redhat.ceylon.compiler.typechecker.tree.Visitor; public class GenerateJsVisitor extends Visitor implements NaturalVisitor { private final Writer out; public GenerateJsVisitor(Writer out) { this.out = out; } private void out(String code) { try { out.write(code); } catch (IOException ioe) { ioe.printStackTrace(); } } int indentLevel = 0; private void indent() { for (int i=0;i<indentLevel;i++) { out(" "); } } private void endLine() { out("\n"); indent(); } private void beginBlock() { indentLevel++; out("{"); endLine(); } private void endBlock() { indentLevel 
endLine(); out("}"); endLine(); } private void location(Node node) { out(" at "); out(node.getUnit().getFilename()); out(" ("); out(node.getLocation()); out(")"); } @Override public void visit(CompilationUnit that) { out("var $ceylon$language=require('"); for (int i=0; i<that.getUnit().getPackage().getName().size(); i++) { out("../"); } //TODO fix hardcoded path! out("../runtime/ceylon.language.js');"); endLine(); super.visit(that); try { out.flush(); } catch (IOException ioe) { ioe.printStackTrace(); } } @Override public void visit(ImportPath that) { out("var "); for (Identifier id: that.getIdentifiers()) { out("$"); out(id.getText()); } out("=require('./"); for (int i=0; i<that.getUnit().getPackage().getName().size(); i++) { out("../"); } for (Identifier id: that.getIdentifiers()) { out(id.getText()); out("/"); } for (Identifier id: that.getIdentifiers()) { out(id.getText()); out("."); } out("js');"); endLine(); } @Override public void visit(Parameter that) { out(that.getDeclarationModel().getName()); } @Override public void visit(ParameterList that) { out("("); boolean first=true; for (Parameter param: that.getParameters()) { if (!first) out(","); out(param.getDeclarationModel().getName()); first = false; } out(")"); } @Override public void visit(Body that) { List<Statement> stmnts = that.getStatements(); for (int i=0; i<stmnts.size(); i++) { Statement s = stmnts.get(i); s.visit(this); if (s instanceof ExecutableStatement) { endLine(); } } } @Override public void visit(Block that) { List<Statement> stmnts = that.getStatements(); if (stmnts.isEmpty()) { out("{}"); endLine(); } else { beginBlock(); for (int i=0; i<stmnts.size(); i++) { Statement s = stmnts.get(i); s.visit(this); if (i<stmnts.size()-1 && s instanceof ExecutableStatement) { endLine(); } } endBlock(); } } @Override public void visit(InterfaceDefinition that) { endLine(); out("//interface "); out(that.getDeclarationModel().getName()); location(that); endLine(); out("function "); 
out(that.getDeclarationModel().getName()); out("()"); beginBlock(); out("var "); self(that.getDeclarationModel()); out("=new CeylonObject();"); endLine(); if (that.getSatisfiedTypes()!=null) for (Tree.SimpleType st: that.getSatisfiedTypes().getTypes()) { out("var $super"); out(st.getDeclarationModel().getName()); out("="); out(st.getDeclarationModel().getName()); out("();"); endLine(); out("for(var $m in $super"); out(st.getDeclarationModel().getName()); out("){"); self(that.getDeclarationModel()); out("[$m]=$super"); out(st.getDeclarationModel().getName()); out("[$m]}"); endLine(); } that.getInterfaceBody().visit(this); out("return "); self(that.getDeclarationModel()); out(";"); endBlock(); if (that.getDeclarationModel().isShared()) { outerSelf(that.getDeclarationModel()); out("."); out(that.getDeclarationModel().getName()); out("="); out(that.getDeclarationModel().getName()); out(";"); endLine(); } } @Override public void visit(ClassDefinition that) { endLine(); out("//class "); out(that.getDeclarationModel().getName()); location(that); endLine(); out("function "); out(that.getDeclarationModel().getName()); that.getParameterList().visit(this); beginBlock(); out("var "); self(that.getDeclarationModel()); out("=new CeylonObject();"); endLine(); if (that.getExtendedType()!=null) { out("var $super="); out(that.getExtendedType().getType() .getDeclarationModel().getName()); that.getExtendedType().getInvocationExpression().visit(this); out(";"); endLine(); out("for(var $m in $super){"); self(that.getDeclarationModel()); out("[$m]=$super[$m]}"); endLine(); } if (that.getSatisfiedTypes()!=null) for (Tree.SimpleType st: that.getSatisfiedTypes().getTypes()) { out("var $super"); out(st.getDeclarationModel().getName()); out("="); out(st.getDeclarationModel().getName()); out("();"); endLine(); out("for(var $m in $super"); out(st.getDeclarationModel().getName()); out("){"); self(that.getDeclarationModel()); out("[$m]=$super"); out(st.getDeclarationModel().getName()); 
out("[$m]}"); endLine(); } that.getClassBody().visit(this); out("return "); self(that.getDeclarationModel()); out(";"); endBlock(); if (that.getDeclarationModel().isShared()) { outerSelf(that.getDeclarationModel()); out("."); out(that.getDeclarationModel().getName()); out("="); out(that.getDeclarationModel().getName()); out(";"); endLine(); } } @Override public void visit(ObjectDefinition that) { //TODO: fix copy/paste from ClassDefinition endLine(); out("//object "); out(that.getDeclarationModel().getName()); location(that); endLine(); out("var $"); out(that.getDeclarationModel().getName()); out("="); out("function "); out(that.getDeclarationModel().getName()); out("()"); beginBlock(); out("var "); self(that.getDeclarationModel()); out("=new CeylonObject();"); endLine(); if (that.getExtendedType()!=null) { out("var $super="); out(that.getExtendedType().getType() .getDeclarationModel().getName()); that.getExtendedType().getInvocationExpression().visit(this); out(";"); endLine(); out("for(var $m in $super){"); self(that.getDeclarationModel()); out("[$m]=$super[$m]}"); endLine(); } if (that.getSatisfiedTypes()!=null) for (Tree.SimpleType st: that.getSatisfiedTypes().getTypes()) { out(st.getDeclarationModel().getName()); out("("); self(that.getDeclarationModel()); out(");"); endLine(); } that.getClassBody().visit(this); out("return "); self(that.getDeclarationModel()); out(";"); indentLevel endLine(); out("}();"); endLine(); if (that.getDeclarationModel().isShared()) { outerSelf(that.getDeclarationModel()); out("."); out(getter(that.getDeclarationModel())); out("="); } out("function "); out(getter(that.getDeclarationModel())); out("()"); beginBlock(); out("return $"); out(that.getDeclarationModel().getName()); out(";"); endBlock(); } @Override public void visit(MethodDeclaration that) {} @Override public void visit(MethodDefinition that) { endLine(); out("//function "); out(that.getDeclarationModel().getName()); location(that); endLine(); out("function "); 
out(that.getDeclarationModel().getName()); //TODO: if there are multiple parameter lists // do the inner function declarations super.visit(that); if (that.getDeclarationModel().isShared()) { outerSelf(that.getDeclarationModel()); out("."); out(that.getDeclarationModel().getName()); out("="); out(that.getDeclarationModel().getName()); out(";"); endLine(); } } @Override public void visit(AttributeGetterDefinition that) { endLine(); out("//value "); out(that.getDeclarationModel().getName()); location(that); endLine(); out("function "); out(getter(that.getDeclarationModel())); out("()"); super.visit(that); if (that.getDeclarationModel().isShared()) { outerSelf(that.getDeclarationModel()); out("."); out(getter(that.getDeclarationModel())); out("="); out(getter(that.getDeclarationModel())); out(";"); endLine(); } } @Override public void visit(AttributeSetterDefinition that) { endLine(); out("//assign "); out(that.getDeclarationModel().getName()); location(that); endLine(); out("function "); out(setter(that.getDeclarationModel())); out("("); out(that.getDeclarationModel().getName()); out(")"); super.visit(that); if (that.getDeclarationModel().isShared()) { outerSelf(that.getDeclarationModel()); out("."); out(setter(that.getDeclarationModel())); out("="); out(setter(that.getDeclarationModel())); out(";"); endLine(); } } @Override public void visit(AttributeDeclaration that) { endLine(); out("//value "); out(that.getDeclarationModel().getName()); location(that); endLine(); if (!that.getDeclarationModel().isFormal()) { out("var $"); out(that.getDeclarationModel().getName()); if (that.getSpecifierOrInitializerExpression()!=null) { out("="); } super.visit(that); out(";"); endLine(); out("function "); out(getter(that.getDeclarationModel())); out("()"); beginBlock(); out("return $"); out(that.getDeclarationModel().getName()); out(";"); endBlock(); if (that.getDeclarationModel().isShared()) { outerSelf(that.getDeclarationModel()); out("."); 
out(getter(that.getDeclarationModel())); out("="); out(getter(that.getDeclarationModel())); out(";"); endLine(); } if (that.getDeclarationModel().isVariable()) { out("function "); out(setter(that.getDeclarationModel())); out("("); out(that.getDeclarationModel().getName()); out(")"); beginBlock(); out("$"); out(that.getDeclarationModel().getName()); out("="); out(that.getDeclarationModel().getName()); out(";"); endBlock(); if (that.getDeclarationModel().isShared()) { outerSelf(that.getDeclarationModel()); out("."); out(setter(that.getDeclarationModel())); out("="); out(setter(that.getDeclarationModel())); out(";"); endLine(); } } } } @Override public void visit(CharLiteral that) { out("$ceylon$language.Character("); out(that.getText().replace('`', '"')); out(")"); } @Override public void visit(StringLiteral that) { out("$ceylon$language.String("); out(that.getText()); out(")"); } @Override public void visit(FloatLiteral that) { out("$ceylon$language.Float("); out(that.getText()); out(")"); } @Override public void visit(NaturalLiteral that) { out("$ceylon$language.Integer("); out(that.getText()); out(")"); } @Override public void visit(This that) { self(that.getTypeModel().getDeclaration()); } @Override public void visit(Super that) { out("$super"); } @Override public void visit(Outer that) { self(that.getTypeModel().getDeclaration()); } @Override public void visit(BaseMemberExpression that) { qualify(that, that.getDeclaration()); if (that.getDeclaration() instanceof com.redhat.ceylon.compiler.typechecker.model.Parameter || that.getDeclaration() instanceof Method) { out(that.getDeclaration().getName()); } else { out(getter(that.getDeclaration())); out("()"); } } @Override public void visit(QualifiedMemberExpression that) { super.visit(that); out("."); if (that.getDeclaration() instanceof com.redhat.ceylon.compiler.typechecker.model.Parameter || that.getDeclaration() instanceof Method) { out(that.getDeclaration().getName()); } else { 
out(getter(that.getDeclaration())); out("()"); } } @Override public void visit(BaseTypeExpression that) { qualify(that, that.getDeclaration()); out(that.getDeclaration().getName()); } @Override public void visit(QualifiedTypeExpression that) { super.visit(that); out("."); out(that.getDeclaration().getName()); } @Override public void visit(InvocationExpression that) { if (that.getNamedArgumentList()!=null) { out("(function (){"); that.getNamedArgumentList().visit(this); out("return "); that.getPrimary().visit(this); out("("); if (that.getPrimary().getDeclaration() instanceof Functional) { Functional f = (Functional) that.getPrimary().getDeclaration(); if (!f.getParameterLists().isEmpty()) { boolean first=true; for (com.redhat.ceylon.compiler.typechecker.model.Parameter p: f.getParameterLists().get(0).getParameters()) { if (!first) out(","); out("$"); out(p.getName()); first = false; } } } out(")}())"); } else { super.visit(that); } } @Override public void visit(PositionalArgumentList that) { out("("); boolean first=true; boolean sequenced=false; for (PositionalArgument arg: that.getPositionalArguments()) { if (!first) out(","); if (!sequenced && arg.getParameter().isSequenced()) { sequenced=true; out("$ceylon$language.ArraySequence(["); } arg.visit(this); first = false; } if (sequenced) { out("])"); } out(")"); } @Override public void visit(NamedArgumentList that) { for (NamedArgument arg: that.getNamedArguments()) { out("var $"); out(arg.getParameter().getName()); out("="); arg.visit(this); out(";"); } SequencedArgument sarg = that.getSequencedArgument(); if (sarg!=null) { out("var $"); out(sarg.getParameter().getName()); out("="); sarg.visit(this); out(";"); } } @Override public void visit(SequencedArgument that) { out("$ceylon$language.ArraySequence(["); boolean first=true; for (Expression arg: that.getExpressionList().getExpressions()) { if (!first) out(","); arg.visit(this); first = false; } out("])"); } @Override public void visit(SpecifierStatement that) { 
BaseMemberExpression bme = (Tree.BaseMemberExpression) that.getBaseMemberExpression(); qualify(that, bme.getDeclaration()); out(bme.getDeclaration().getName()); out("="); that.getSpecifierExpression().visit(this); } @Override public void visit(AssignOp that) { BaseMemberExpression bme = (Tree.BaseMemberExpression) that.getLeftTerm(); qualify(that, bme.getDeclaration()); out(setter(bme.getDeclaration())); out("("); that.getRightTerm().visit(this); if (bme.getDeclaration() instanceof com.redhat.ceylon.compiler.typechecker.model.Parameter) {} else { out(")"); } } void qualify(Node that, Declaration d) { if (isImported(that, d)) { out("$"); out(d.getUnit().getPackage().getNameAsString().replace('.', '$')); out("."); } else if (qualifyBaseMember(that, d)) { out("$this"); out(that.getScope() .getInheritingDeclaration(d).getName()); out("."); } } boolean isImported(Node that, Declaration d) { return !d.getUnit().getPackage() .equals(that.getUnit().getPackage()); } boolean qualifyBaseMember(Node that, Declaration d) { return !d.isDefinedInScope(that.getScope()); /*return d.isClassOrInterfaceMember() && d.isShared() && that.getScope().isInherited(d);*/ } @Override public void visit(ExecutableStatement that) { super.visit(that); out(";"); } @Override public void visit(Return that) { out("return "); super.visit(that); } @Override public void visit(AnnotationList that) {} private void self(Declaration d) { out("$this"); out(d.getName()); } private void outerSelf(Declaration d) { //TODO: this is broken, since the container // might not be the class if (d.isToplevel()) { out("this"); } else { out("$this"); out(((Declaration) d.getContainer()).getName()); } } private String setter(Declaration d) { return "set" + toUpperCase(d.getName().charAt(0)) + d.getName().substring(1); } private String getter(Declaration d) { return "get" + toUpperCase(d.getName().charAt(0)) + d.getName().substring(1); } @Override public void visit(SumOp that) { that.getLeftTerm().visit(this); out(".plus("); 
that.getRightTerm().visit(this); out(")"); } }
package com.sojostudios.as3.visitors; import japa.parser.ast.CompilationUnit; import japa.parser.ast.ImportDeclaration; import japa.parser.ast.Node; import japa.parser.ast.PackageDeclaration; import japa.parser.ast.body.ClassOrInterfaceDeclaration; import japa.parser.ast.body.FieldDeclaration; import japa.parser.ast.body.Parameter; import japa.parser.ast.body.VariableDeclarator; import japa.parser.ast.expr.ArrayAccessExpr; import japa.parser.ast.expr.ArrayCreationExpr; import japa.parser.ast.expr.AssignExpr; import japa.parser.ast.expr.AssignExpr.Operator; import japa.parser.ast.expr.BinaryExpr; import japa.parser.ast.expr.Expression; import japa.parser.ast.expr.FieldAccessExpr; import japa.parser.ast.expr.IntegerLiteralExpr; import japa.parser.ast.expr.MethodCallExpr; import japa.parser.ast.expr.NameExpr; import japa.parser.ast.expr.ObjectCreationExpr; import japa.parser.ast.expr.VariableDeclarationExpr; import japa.parser.ast.stmt.BlockStmt; import japa.parser.ast.type.ClassOrInterfaceType; import japa.parser.ast.type.ReferenceType; import japa.parser.ast.type.Type; import japa.parser.ast.visitor.ModifierVisitorAdapter; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; import org.apache.log4j.Logger; /** * This class mutates the AST to remove or exchange elements from Java * and replace them with the AS3 versions. * * The big mutations here relate to Arrays, Vectors, and Dictionaries. 
 *
 * @author Kurtis Kopf
 *
 */
public class AS3MutationVisitor extends ModifierVisitorAdapter<Object>
{
	// Mutation flags recorded per variable so later visits know how a
	// variable's type was rewritten.
	public static final String ARRAY_MUTATION_FLAG = "Array";
	public static final String DICTIONARY_MUTATION_FLAG = "Dictionary";
	public static final String VECTOR_MUTATION_FLAG = "Vector";

	// Default AS3 target class names for the three container rewrites.
	public static final String DEFAULT_ARRAY_CLASS = "Array";
	public static final String DEFAULT_DICTIONARY_CLASS = "Dictionary";
	public static final String DEFAULT_VECTOR_CLASS = "Vector";

	private Logger logger = Logger.getLogger(getClass());

	// Rewrite tables: regex patterns (matched with String.matches) -> replacements.
	private Map<String,String> packageToPackage = new HashMap<String,String>();
	private Map<String,String> classesToClasses = new HashMap<String,String>();
	private Map<String,String> importsToImports = new HashMap<String,String>();
	private List<String> importsToIgnore = new ArrayList<String>();
	private List<String> forcedImports = new ArrayList<String>();
	// Java types converted to AS3 Array/Dictionary/Vector respectively.
	private List<String> classesToArrays = new ArrayList<String>();
	private List<String> classesToDictionaries = new ArrayList<String>();
	private List<String> classesToVectors = new ArrayList<String>();
	// Types that already extend one of the containers (registered, not renamed).
	private List<String> classesExtendArray = new ArrayList<String>();
	private List<String> classesExtendDictionary = new ArrayList<String>();
	private List<String> classesExtendVector = new ArrayList<String>();
	private boolean forceSprite = false;
	private boolean forceMovieClip = false;
	private String arrayClass = DEFAULT_ARRAY_CLASS;
	private String dictionaryClass = DEFAULT_DICTIONARY_CLASS;
	private String vectorClass = DEFAULT_VECTOR_CLASS;
	// Tracks per-scope variable declarations and their mutation flags.
	private MutationVariableScope varScope = new MutationVariableScope();

	/**
	 * Constructor. Registers no mutations; call {@link #includeDefaults()}
	 * to install the standard Java-to-AS3 mappings.
	 */
	public AS3MutationVisitor()
	{
	}

	/**
	 * Include the default mutations.
	 */
	@Override
	public Node visit(CompilationUnit n, Object arg)
	{
		// Forced imports are requested by configuration flags.
		if (forceSprite)
		{
			forcedImports.add("flash.display.Sprite");
		}
		if (forceMovieClip)
		{
			forcedImports.add("flash.display.MovieClip");
		}
		List<ImportDeclaration> addMe = new ArrayList<ImportDeclaration>();
		if (forcedImports.size() > 0)
		{
			for (String forcedImport : forcedImports)
			{
				// Asterisk imports map to AS3 wildcard imports.
				addMe.add(new ImportDeclaration(new NameExpr(forcedImport), false, forcedImport.contains("*")));
			}
		}
		if (n.getImports() != null)
		{
			List<ImportDeclaration> removeMe = new ArrayList<ImportDeclaration>();
			for (ImportDeclaration i : n.getImports())
			{
				String imp = i.getName().toString();
				// Ignore patterns drop the import outright.
				for (String ignore : importsToIgnore)
				{
					if (imp.matches(ignore))
					{
						logger.info("removing import " + imp);
						removeMe.add(i);
					}
				}
				// Rewrite patterns queue a replacement import (original is kept
				// unless an ignore pattern also matched it).
				for (String impFrom : importsToImports.keySet())
				{
					if (imp.matches(impFrom))
					{
						String newImport = importsToImports.get(impFrom);
						logger.info("modifying import from " + imp + " to " + newImport);
						addMe.add(new ImportDeclaration(new NameExpr(newImport), false, newImport.contains("*")));
					}
				}
			}
			n.getImports().removeAll(removeMe);
		}
		// make sure only one of each new/modified import gets added.
		Map<String,Boolean> used = new HashMap<String,Boolean>();
		for(ImportDeclaration imp : addMe)
		{
			String impName = imp.getName().toString();
			if (used.get(impName) == null)
			{
				// if no other than forced imports were added, CompilationUnit.getImports will still be null
				if(n.getImports() == null)
				{
					n.setImports(new ArrayList<ImportDeclaration>());
				}
				used.put(impName, true);
				n.getImports().add(imp);
				logger.info("adding import " + impName);
			}
		}
		return super.visit(n, arg);
	}

	/**
	 * Direct class name manipulation for things like Exception -> Error.
	 */
	@Override
	public Node visit(ClassOrInterfaceType n, Object arg)
	{
		//logger.warn("class or interface type reference " + n.getName());
		// Each table is a list of regex patterns matched against the type name.
		for(String incoming : classesToClasses.keySet())
		{
			if (n.getName().matches(incoming))
			{
				String newName = classesToClasses.get(incoming);
				logger.info("changing class reference from " + n.getName() + " to " + newName);
				n.setName(newName);
			}
		}
		for(String incoming : classesToArrays)
		{
			if (n.getName().matches(incoming))
			{
				logger.info("changing class reference from " + n.getName() + " to Array [" + arrayClass + "]");
				n.setName(arrayClass);
			}
		}
		for(String incoming : classesToDictionaries)
		{
			if (n.getName().matches(incoming))
			{
				logger.info("changing class reference from " + n.getName() + " to Dictionary [" + dictionaryClass + "]");
				n.setName(dictionaryClass);
			}
		}
		for(String incoming : classesToVectors)
		{
			if (n.getName().matches(incoming))
			{
				logger.info("changing class reference from " + n.getName() + " to Vector [" + vectorClass + "]");
				n.setName(vectorClass);
			}
		}
		return super.visit(n, arg);
	}

	/**
	 * Look for variables that were modified to special types (Array, Dictionary) and
	 * modify method calls for them.
	 */
	@Override
	public Node visit(MethodCallExpr n, Object arg)
	{
		// Only direct calls on a simple name (e.g. myList.add(x)) are rewritten.
		if (n.getScope() != null && n.getScope() instanceof NameExpr)
		{
			NameExpr callObj = (NameExpr)n.getScope();
			//logger.warn("method call scope is " + callObj.getName() + "." + n.getName());
			// check scope for mutations
			VarMutation mut = varScope.getVar(callObj.getName());
			if (mut != null)
			{
				//logger.warn("found mutation");
				String method = n.getName();
				// string mutations (.equals() to == [BinaryExpr])
				if (mut.type.getName().equals("String") && method.equals("equals"))
				{
					logger.info("found a string equals method reference");
					Expression right = n.getArgs().get(0);
					BinaryExpr expr = new BinaryExpr(n.getScope(), right, BinaryExpr.Operator.equals);
					return expr;
				}
				// s.length() becomes the AS3 length property access.
				if (mut.type.getName().equals("String") && method.equals("length"))
				{
					logger.info("found a string length method reference");
					FieldAccessExpr expr = new FieldAccessExpr(n.getScope(), "length");
					return expr;
				}
				// array and dictionary mutations
				if (mut.hasFlag(ARRAY_MUTATION_FLAG) || mut.hasFlag(DICTIONARY_MUTATION_FLAG) || mut.hasFlag(VECTOR_MUTATION_FLAG))
				{
					logger.info("found mutation for " + method + " at current scope");
					// put(a, b) -> [a]=b;
					// add(a)    -> push(a);
					// get(a)    -> [a];
					// size()    -> length;
					if (method.equals("put") && n.getArgs().size() > 1)
					{
						// replace with AssignmentExpr with ArrayAccessExpr and B expression
						Expression arg1 = n.getArgs().get(0);
						Expression right = n.getArgs().get(1);
						ArrayAccessExpr left = new ArrayAccessExpr(n.getScope(), arg1);
						AssignExpr expr = new AssignExpr(left, right, Operator.assign);
						return expr;
					}
					else if (method.equals("add"))
					{
						// replace method name with "push"
						n.setName("push");
						return n;
					}
					else if (method.equals("get") && n.getArgs().size() > 0)
					{
						// replace with ArrayAccessExpr
						Expression arg1 = n.getArgs().get(0);
						ArrayAccessExpr expr = new ArrayAccessExpr(n.getScope(), arg1);
						return expr;
					}
					else if (method.equals("remove") && n.getArgs().size() > 0)
					{
						if (mut.hasFlag(ARRAY_MUTATION_FLAG) || mut.hasFlag(VECTOR_MUTATION_FLAG))
						{
							// convert to splice(arg0, 1)
							n.setName("splice");
							n.getArgs().add(new IntegerLiteralExpr("1"));
						}
						else if (mut.hasFlag(DICTIONARY_MUTATION_FLAG))
						{
							// convert to delete method; the dump visitor will have to
							// convert the "!delete" marker to `delete dict[key]`
							n.setName("!delete");
							Expression arg1 = n.getArgs().get(0);
							ArrayAccessExpr expr = new ArrayAccessExpr(n.getScope(), arg1);
							n.getArgs().clear();
							n.getArgs().add(expr);
						}
					}
					else if (method.equals("size"))
					{
						// replace with a FieldAccessExpr
						FieldAccessExpr expr = new FieldAccessExpr(n.getScope(), "length");
						return expr;
					}
					else
					{
						logger.warn("Unhandled method " + n + " on a mutated variable");
					}
				}
			}
			else if (n.getName().equals("equals"))
			{
				logger.warn("Potentially unhandled 'equals' method call, this might not be what you want to do.");
			}
		}
		return super.visit(n, arg);
	}

	/**
	 * Replace any packages as defined.
	 */
	@Override
	public Node visit(PackageDeclaration n, Object arg)
	{
		String fullPkg = n.getName().toString();
		if (packageToPackage.size() > 0)
		{
			for(String key : packageToPackage.keySet())
			{
				if (fullPkg.matches(key))
				{
					NameExpr nm = new NameExpr(packageToPackage.get(key));
					n.setName(nm);
				}
			}
		}
		return super.visit(n, arg);
	}

	/**
	 * Convert Arrays and Dictionaries for variable declarations
	 */
	@Override
	public Node visit(VariableDeclarationExpr n, Object arg)
	{
		boolean modified = modifyDecl(n.getType(), n.getVars(), arg);
		if (modified)
		{
			// Already rewritten: skip the default traversal of children.
			return n;
		}
		else
		{
			return super.visit(n, arg);
		}
	}

	/**
	 * Convert Arrays and Dictionaries for field declarations
	 */
	@Override
	public Node visit(FieldDeclaration n, Object arg)
	{
		boolean modified = modifyDecl(n.getType(), n.getVariables(), arg);
		if (modified)
		{
			return n;
		}
		else
		{
			return super.visit(n, arg);
		}
	}

	// Shared declaration rewrite for fields and locals: converts the declared
	// type to Array/Dictionary/Vector where configured, otherwise just records
	// the variable (and any container-extension flags) in the current scope.
	// Returns true when the declaration was converted.
	private boolean modifyDecl(Type type, List<VariableDeclarator> vars, Object arg)
	{
		boolean modified = false;
		if (type instanceof ReferenceType)
		{
			ReferenceType rt = (ReferenceType)type;
			if (rt.getType() instanceof ClassOrInterfaceType)
			{
				ClassOrInterfaceType ct = (ClassOrInterfaceType)rt.getType();
				//logger.warn("got variable declaration " + ct.getName() + " " + ct.getTypeArgs() + " " + rt.getArrayCount());
				String name = ct.getName();
				// Array conversions
				for(String classToArray : classesToArrays)
				{
					if (name.matches(classToArray))
					{
						modified = true;
						varDeclToArray(vars, rt, ct, arg);
					}
				}
				// Dictionary conversions
				for(String classToDict : classesToDictionaries)
				{
					if (name.matches(classToDict))
					{
						modified = true;
						varDeclToDictionary(vars, rt, ct, arg);
					}
				}
				// Vector conversions
				for(String classToVect : classesToVectors)
				{
					if (name.matches(classToVect))
					{
						modified = true;
						varDeclToVector(vars, rt, ct, arg);
					}
				}
				// register variable type even for unmodified vars
				if (!modified)
				{
					List<String> flags = new ArrayList<String>();
					// look for extensions
					// array extension
					for(String classExtArray : classesExtendArray)
					{
						if (name.matches(classExtArray))
						{
							flags.add(ARRAY_MUTATION_FLAG);
						}
					}
					// dictionary extension
					for(String classExtDict : classesExtendDictionary)
					{
						if (name.matches(classExtDict))
						{
							flags.add(DICTIONARY_MUTATION_FLAG);
						}
					}
					// vector extension
					for(String classExtVector : classesExtendVector)
					{
						if (name.matches(classExtVector))
						{
							flags.add(VECTOR_MUTATION_FLAG);
						}
					}
					// register declarators
					for(VariableDeclarator varDec : vars)
					{
						// register mutation at current scope
						//logger.info("registering scope for variable " + varDec.getId().getName());
						VarMutation mut = varScope.getVarCurScopeOnly(varDec.getId().getName());
						if (mut == null)
						{
							logger.info("registering variable " + varDec.getId().getName() + " with mutation flags: " + flags);
							varScope.addVar(varDec.getId().getName(), ct, flags);
						}
					}
				}
			}
		}
		return modified;
	}

	/**
	 * Register a variable mutation.
	 *
	 * @param name variable name to record
	 * @param flag mutation flag to attach (added to an existing record if present)
	 * @param ct   declared type of the variable
	 */
	private void registerMutation(String name, String flag, ClassOrInterfaceType ct)
	{
		VarMutation mut = varScope.getVarCurScopeOnly(name);
		if (mut != null)
		{
			if (!mut.hasFlag(flag))
			{
				mut.mutationFlags.add(flag);
			}
		}
		else
		{
			List<String> flags = new ArrayList<String>();
			flags.add(flag);
			varScope.addVar(name, ct, flags);
		}
	}

	/**
	 * Convert a variable declaration to an Array declaration.
* Rewrites the declared class type in place to an array type (the first type
* argument, if any, becomes the element type) and replaces any initializer
* with a fresh ArrayCreationExpr.
*
* @param vars declarators to convert (their initializers are replaced)
* @param rt   reference type whose array count is forced to 1
* @param ct   class type whose name and type arguments are rewritten
* @param arg  visitor argument forwarded to the new initializer
*/
private void varDeclToArray(List<VariableDeclarator> vars, ReferenceType rt, ClassOrInterfaceType ct, Object arg) {
    logger.info("Converting variable declaration " + ct + " to Array [" + arrayClass + "] declaration with typing info");
    // take the first typearg if it exists
    String newName = arrayClass;
    if (ct.getTypeArgs() != null && ct.getTypeArgs().size() > 0) {
        String typeArg = ct.getTypeArgs().get(0).toString();
        logger.info("taking TypeArg " + typeArg + " as new Array [" + arrayClass + "] class type");
        newName = typeArg;
        ct.setTypeArgs(null); // wipe out TypeArgs
    }
    ct.setName(newName);
    rt.setArrayCount(1);
    // change initializer (should only be one, but who the hell knows)
    for (VariableDeclarator varDec : vars) {
        // register mutation at current scope
        logger.info("registering Array mutation for variable " + varDec.getId().getName());
        registerMutation(varDec.getId().getName(), ARRAY_MUTATION_FLAG, ct);
        Expression init = varDec.getInit();
        if (init != null) {
            //logger.warn("initialization expression " + init.getClass());
            // just destroy this sucker, replace with ArrayCreationExpression
            varDec.setInit(new ArrayCreationExpr(rt, 1, null));
            varDec.getInit().accept(this, arg);
        }
    }
}

/**
 * Convert a variable declaration to a Vector.
 * This is the least complex of the conversions.
* Only the class name changes; type arguments are kept on the declaration and
* pushed onto any ObjectCreationExpr initializer.
*
* @param vars declarators to convert
* @param rt   enclosing reference type (unchanged here)
* @param ct   class type renamed to the configured Vector class
* @param arg  visitor argument forwarded to non-constructor initializers
*/
private void varDeclToVector(List<VariableDeclarator> vars, ReferenceType rt, ClassOrInterfaceType ct, Object arg) {
    logger.info("Converting variable declaration " + ct + " to Vector [" + vectorClass + "] declaration with typing info");
    ct.setName(vectorClass);
    for (VariableDeclarator varDec : vars) {
        // register mutation at current scope
        logger.info("registering Vector mutation for variable " + varDec.getId().getName());
        registerMutation(varDec.getId().getName(), VECTOR_MUTATION_FLAG, ct);
        Expression init = varDec.getInit();
        if (init != null) {
            //logger.warn("initialization expression " + init.getClass());
            if (init instanceof ObjectCreationExpr) {
                ObjectCreationExpr oce = (ObjectCreationExpr) init;
                oce.setType(ct);
                oce.setTypeArgs(null);
            } else {
                init.accept(this, arg);
            }
        }
    }
}

/**
 * Convert a variable declaration to a Dictionary declaration.
 * Dictionaries are untyped, so type arguments are discarded on the
 * constructor call rather than propagated.
 *
 * @param vars declarators to convert
 * @param rt   enclosing reference type (unchanged here)
 * @param ct   class type renamed to the configured Dictionary class
 * @param arg  visitor argument forwarded to non-constructor initializers
 */
private void varDeclToDictionary(List<VariableDeclarator> vars, ReferenceType rt, ClassOrInterfaceType ct, Object arg) {
    logger.info("Converting variable declaration " + ct + " to Dictionary [" + dictionaryClass + "] declaration without typing");
    // take the first typearg if it exists
    String newName = dictionaryClass;
    //ct.setTypeArgs(null);
    ct.setName(newName);
    // change initializer (should only be one, but who the hell knows)
    for (VariableDeclarator varDec : vars) {
        // register mutation at current scope
        logger.info("registering Dictionary mutation for variable " + varDec.getId().getName());
        registerMutation(varDec.getId().getName(), DICTIONARY_MUTATION_FLAG, ct);
        Expression init = varDec.getInit();
        if (init != null) {
            logger.warn("initialization expression " + init.getClass());
            if (init instanceof ObjectCreationExpr) {
                ObjectCreationExpr oce = (ObjectCreationExpr) init;
                oce.setType(new ClassOrInterfaceType(newName));
                oce.setTypeArgs(null);
            } else {
                init.accept(this, arg);
            }
        }
    }
}

/**
 * Just track variable scopes here.
*/
@Override
public Node visit(BlockStmt n, Object arg) {
    // each block opens a new variable scope for mutation tracking
    varScope.pushScopeStack();
    Node result = super.visit(n, arg);
    varScope.popScopeStack();
    return result;
}

/**
 * Track variable scopes, also force Sprite extension if enabled.
 */
@Override
public Node visit(ClassOrInterfaceDeclaration n, Object arg) {
    varScope.pushScopeStack();
    // only add the forced superclass when the class doesn't already extend one
    if (n.getExtends() == null && (forceSprite || forceMovieClip)) {
        List<ClassOrInterfaceType> ext = new ArrayList<ClassOrInterfaceType>();
        ClassOrInterfaceType sprite = new ClassOrInterfaceType(forceSprite ? "Sprite" : "MovieClip");
        ext.add(sprite);
        n.setExtends(ext);
    }
    Node result = super.visit(n, arg);
    varScope.popScopeStack();
    return result;
}

/**
 * Track parameter variable scope and mutations.
 */
@Override
public Node visit(Parameter n, Object arg) {
    //logger.error("found parameter " + n.getId().getName() + " of type " + n.getType().getClass());
    if (n.getType() instanceof ReferenceType) {
        ReferenceType rt = (ReferenceType) n.getType();
        if (rt.getType() instanceof ClassOrInterfaceType) {
            ClassOrInterfaceType ct = (ClassOrInterfaceType) rt.getType();
            String type = ct.getName();
            List<String> flags = new ArrayList<String>();
            for (String classToArray : classesToArrays) {
                if (type.matches(classToArray)) {
                    logger.info("adding Array mutation flag to parameter " + n.getId().getName());
                    flags.add(ARRAY_MUTATION_FLAG);
                }
            }
            // Dictionary conversions
            for (String classToDict : classesToDictionaries) {
                if (type.matches(classToDict)) {
                    logger.info("adding Dictionary mutation flag to parameter " + n.getId().getName());
                    flags.add(DICTIONARY_MUTATION_FLAG);
                }
            }
            // Vector conversions
            for (String classToVect : classesToVectors) {
                if (type.matches(classToVect)) {
                    logger.info("adding Vector mutation flag to parameter " + n.getId().getName());
                    flags.add(VECTOR_MUTATION_FLAG);
                }
            }
            varScope.addVar(n.getId().getName(), ct, flags);
        }
    }
    return super.visit(n, arg);
}

/**
 * @return the packageToPackage
 */
public Map<String, String> getPackageToPackage() { return
packageToPackage; }

/**
 * @param packageToPackage the packageToPackage to set
 */
public void setPackageToPackage(Map<String, String> packageToPackage) { this.packageToPackage = packageToPackage; }

/**
 * @return the classesToClasses
 */
public Map<String, String> getClassesToClasses() { return classesToClasses; }

/**
 * @param classesToClasses the classesToClasses to set
 */
public void setClassesToClasses(Map<String, String> classesToClasses) { this.classesToClasses = classesToClasses; }

/**
 * @return the importsToImports
 */
public Map<String, String> getImportsToImports() { return importsToImports; }

/**
 * @param importsToImports the importsToImports to set
 */
public void setImportsToImports(Map<String, String> importsToImports) { this.importsToImports = importsToImports; }

/**
 * @return the importsToIgnore
 */
public List<String> getImportsToIgnore() { return importsToIgnore; }

/**
 * @param importsToIgnore the importsToIgnore to set
 */
public void setImportsToIgnore(List<String> importsToIgnore) { this.importsToIgnore = importsToIgnore; }

/**
 * @return the forcedImports
 */
public List<String> getForcedImports() { return forcedImports; }

/**
 * @param forcedImports the forcedImports to set
 */
public void setForcedImports(List<String> forcedImports) { this.forcedImports = forcedImports; }

/**
 * @return the classesToArrays
 */
public List<String> getClassesToArrays() { return classesToArrays; }

/**
 * @param classesToArrays the classesToArrays to set
 */
public void setClassesToArrays(List<String> classesToArrays) { this.classesToArrays = classesToArrays; }

/**
 * @return the classesToDictionaries
 */
public List<String> getClassesToDictionaries() { return classesToDictionaries; }

/**
 * @param classesToDictionaries the classesToDictionaries to set
 */
public void setClassesToDictionaries(List<String> classesToDictionaries) { this.classesToDictionaries = classesToDictionaries; }

/**
 * @return the classesToVectors
 */
public List<String> getClassesToVectors() { return classesToVectors; }

/**
 * @param classesToVectors the classesToVectors to set
 */
public void setClassesToVectors(List<String> classesToVectors) { this.classesToVectors = classesToVectors; }

/**
 * @return the classesExtendArray
 */
public List<String> getClassesExtendArray() { return classesExtendArray; }

/**
 * @param classesExtendArray the classesExtendArray to set
 */
public void setClassesExtendArray(List<String> classesExtendArray) { this.classesExtendArray = classesExtendArray; }

/**
 * @return the classesExtendDictionary
 */
public List<String> getClassesExtendDictionary() { return classesExtendDictionary; }

/**
 * @param classesExtendDictionary the classesExtendDictionary to set
 */
public void setClassesExtendDictionary(List<String> classesExtendDictionary) { this.classesExtendDictionary = classesExtendDictionary; }

/**
 * @return the classesExtendVector
 */
public List<String> getClassesExtendVector() { return classesExtendVector; }

/**
 * @param classesExtendVector the classesExtendVector to set
 */
public void setClassesExtendVector(List<String> classesExtendVector) { this.classesExtendVector = classesExtendVector; }

/**
 * @return the forceSprite
 */
public boolean isForceSprite() { return forceSprite; }

/**
 * @param forceSprite the forceSprite to set
 */
public void setForceSprite(boolean forceSprite) { this.forceSprite = forceSprite; }

/**
 * @return the forceMovieClip
 */
public boolean isForceMovieClip() { return forceMovieClip; }

/**
 * @param forceMovieClip the forceMovieClip to set
 */
public void setForceMovieClip(boolean forceMovieClip) { this.forceMovieClip = forceMovieClip; }

/**
 * @return the arrayClass
 */
public String getArrayClass() { return arrayClass; }

/**
 * @param arrayClass the arrayClass to set
 */
public void setArrayClass(String arrayClass) { this.arrayClass = arrayClass; }

/**
 * @return the dictionaryClass
 */
public String getDictionaryClass() { return dictionaryClass; }

/**
 * @param dictionaryClass the dictionaryClass to set
 */
public void setDictionaryClass(String dictionaryClass) { this.dictionaryClass = dictionaryClass; }

/**
 * @return the vectorClass
 */
public String getVectorClass() { return vectorClass; }

/**
 * @param vectorClass the vectorClass to set
 */
public void setVectorClass(String vectorClass) { this.vectorClass = vectorClass; }
}
package com.swabunga.spell.engine;

import java.util.*;

/**
 * Container for various methods that any <code>SpellDictionary</code> based on the original
 * Jazzy aspell port will use.
 */
public abstract class SpellDictionaryASpell implements SpellDictionary {

    /** The replace list is used in the getSuggestions method. */
    protected static char[] replacelist = { 'A', 'B', 'X', 'S', 'K', 'J', 'T', 'F', 'H', 'L', 'M', 'N', 'P', 'R', '0' };

    /**
     * The reference to a Transformator, used to transform a word into its
     * phonetic code.
     */
    protected Transformator tf = new DoubleMeta();

    /**
     * Returns a list of Word objects that are the suggestions to an
     * incorrect word.
     * <p>
     * Near-miss phonetic codes are generated by the four classic edit
     * operations on the misspelt word: transposing adjacent characters,
     * replacing each character, inserting a character, and deleting a
     * character.
     *
     * @param word Suggestions for given mispelt word
     * @param threshold The lower boundary of similarity to mispelt word.
     *                  NOTE(review): currently unused; the distance cut-off is
     *                  read from Configuration.SPELL_THRESHOLD instead — confirm
     *                  whether callers expect this parameter to be honored.
     * @return Vector a List of suggestions
     */
    public Vector getSuggestions(String word, int threshold) {
        Hashtable nearmisscodes = new Hashtable();
        String code = getCode(word);

        // add all words that have the same phonetics
        nearmisscodes.put(code, code);
        Vector phoneticList = getWordsFromCode(word, nearmisscodes);

        // do some tranformations to pick up more results

        // interchange: swap each pair of adjacent characters
        nearmisscodes = new Hashtable();
        char[] charArray = word.toCharArray();
        for (int i = 0; i < word.length() - 1; i++) {
            char a = charArray[i];
            char b = charArray[i + 1];
            charArray[i] = b;
            charArray[i + 1] = a;
            String s = getCode(new String(charArray));
            nearmisscodes.put(s, s);
            charArray[i] = a;
            charArray[i + 1] = b;
        }

        // change: substitute each character with every entry of replacelist
        charArray = word.toCharArray();
        for (int i = 0; i < word.length(); i++) {
            char original = charArray[i];
            for (int j = 0; j < replacelist.length; j++) {
                charArray[i] = replacelist[j];
                String s = getCode(new String(charArray));
                nearmisscodes.put(s, s);
            }
            charArray[i] = original;
        }

        // add: insert each replacelist entry at every position (the word is
        // temporarily extended by one slot, then restored via trim below)
        charArray = (word += " ").toCharArray();
        int iy = charArray.length - 1;
        while (true) {
            for (int j = 0; j < replacelist.length; j++) {
                charArray[iy] = replacelist[j];
                String s = getCode(new String(charArray));
                nearmisscodes.put(s, s);
            }
            if (iy == 0)
                break;
            charArray[iy] = charArray[iy - 1];
            --iy;
        }

        // delete: drop one character at every position; charArray2 holds the
        // current deletion candidate (word minus one char), rotated each pass
        word = word.trim();
        charArray = word.toCharArray();
        char[] charArray2 = new char[charArray.length - 1];
        for (int ix = 0; ix < charArray2.length; ix++) {
            charArray2[ix] = charArray[ix];
        }

        char a, b;
        a = charArray[charArray.length - 1];
        int ii = charArray2.length;
        while (true) {
            // BUGFIX: code the deletion candidate (charArray2), not the full
            // word (charArray). charArray never changes inside this loop, so
            // the old getCode(new String(charArray)) produced the identical,
            // already-known code on every iteration and no single-deletion
            // variants were ever generated.
            String s = getCode(new String(charArray2));
            nearmisscodes.put(s, s);
            if (ii == 0)
                break;
            b = a;
            a = charArray2[ii - 1];
            charArray2[ii - 1] = b;
            --ii;
        }

        nearmisscodes.remove(code); //already accounted for in phoneticList

        Vector wordlist = getWordsFromCode(word, nearmisscodes);

        // We sort a Vector at the end instead of maintaining a
        // continously sorted TreeSet because everytime you add a collection
        // to a treeset it has to be resorted. It's better to do this operation
        // once at the end.
        if (wordlist.size() == 0 && phoneticList.size() == 0)
            addBestGuess(word, phoneticList);

        // Collections.sort( phoneticList, new Word()); //always sort phonetic matches along the top
        // Collections.sort( wordlist, new Word()); //the non-phonetic matches can be listed below

        phoneticList.addAll(wordlist);
        return phoneticList;
    }

    /**
     * When we don't come up with any suggestions (probably because the threshold was too strict),
     * then pick the best guesses from the those words that have the same phonetic code.
     *
     * @param word - the word we are trying spell correct
     * @param wordList - the list that receives the best guesses
     */
    private void addBestGuess(String word, Vector wordList) {
        assert wordList.size() == 0;
        int bestScore = Integer.MAX_VALUE;

        String code = getCode(word);
        List simwordlist = getWords(code);

        LinkedList candidates = new LinkedList();

        for (Iterator j = simwordlist.iterator(); j.hasNext();) {
            String similar = (String) j.next();
            int distance = EditDistance.getDistance(word, similar);
            if (distance <= bestScore) {
                bestScore = distance;
                Word goodGuess = new Word(similar, distance);
                candidates.add(goodGuess);
            }
        }

        //now, only pull out the guesses that had the best score
        for (Iterator iter = candidates.iterator(); iter.hasNext();) {
            Word candidate = (Word) iter.next();
            if (candidate.getScore() == bestScore)
                wordList.add(candidate);
        }
    }

    /**
     * Expands each phonetic code to its dictionary words and keeps those whose
     * edit distance to the misspelt word is below the configured threshold.
     */
    private Vector getWordsFromCode(String word, Hashtable codes) {
        Configuration config = Configuration.getConfiguration();
        Vector result = new Vector();
        final int configDistance = config.getInteger(Configuration.SPELL_THRESHOLD);
        for (Enumeration i = codes.keys(); i.hasMoreElements();) {
            String code = (String) i.nextElement();
            Vector simwordlist = getWords(code);
            for (Enumeration j = simwordlist.elements(); j.hasMoreElements();) {
                String similar = (String) j.nextElement();
                int distance = EditDistance.getDistance(word, similar);
                if (distance < configDistance) {
                    Word w = new Word(similar, distance);
                    result.addElement(w);
                }
            }
        }
        return result;
    }

    /**
     * Returns the phonetic code representing the word.
     */
    public String getCode(String word) {
        return tf.transform(word);
    }

    /**
     * Returns a list of words that have the same phonetic code.
     */
    protected abstract Vector getWords(String phoneticCode);
}
package com.trendrr.oss.networking;

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.SocketChannel;
import java.nio.charset.Charset;
import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

import com.trendrr.oss.concurrent.TrendrrLock;
import com.trendrr.oss.exceptions.TrendrrDisconnectedException;
import com.trendrr.oss.exceptions.TrendrrException;
import com.trendrr.oss.exceptions.TrendrrNoCallbackException;

/**
 * Wraps a SocketChannel with asynchronous (callback based) and synchronous
 * read/write helpers. Reads are buffered through an AsynchBuffer; writes are
 * queued and flushed by a shared SelectorThread that is started lazily on the
 * first read or write.
 *
 * @author Dustin Norlander
 * @created Mar 11, 2011
 */
public class SocketChannelWrapper {

    protected Log log = LogFactory.getLog(SocketChannelWrapper.class);

    SocketChannel channel = null;
    AsynchBuffer buffer = null;
    SelectorThread thread = null;
    TrendrrLock threadInit = new TrendrrLock();
    ConcurrentLinkedQueue<ByteBuffer> writes = new ConcurrentLinkedQueue<ByteBuffer>();
    boolean closed = false;

    public SocketChannelWrapper(SocketChannel channel) {
        this.channel = channel;
        this.buffer = new AsynchBuffer();
    }

    /**
     * Asynchronously reads until the delimiter is found; the callback fires
     * once with the decoded string.
     */
    public void readUntil(String delimiter, Charset charset, StringReadCallback callback) {
        this.buffer.readUntil(delimiter, charset, callback);
        this.buffer.process(); //attempt to read directly from the already buffered bytes
        this.notifyChange();
    }

    /**
     * reads until the requested string is found.
     * Blocks until the delimiter arrives or the read fails.
     *
     * @param delimiter
     * @param charset
     * @return the string read (exclusive of the delimiter handling done by the buffer)
     * @throws TrendrrException
     */
    public String readUntil(String delimiter, Charset charset) throws TrendrrException {
        SynchronousReadCallback callback = new SynchronousReadCallback();
        this.readUntil(delimiter, charset, callback);
        callback.awaitResponse();
        if (callback.exception != null) {
            throw callback.exception;
        }
        return callback.stringResult;
    }

    /**
     * Asynchronously reads exactly numBytes; the callback fires once.
     */
    public void readBytes(int numBytes, ByteReadCallback callback) {
        this.buffer.readBytes(numBytes, callback);
        this.buffer.process(); //attempt to read from the already buffered bytes
        this.notifyChange();
    }

    /**
     * Blocking variant of {@link #readBytes(int, ByteReadCallback)}.
     */
    public byte[] readBytes(int numBytes) throws TrendrrException {
        SynchronousReadCallback callback = new SynchronousReadCallback();
        this.readBytes(numBytes, callback);
        callback.awaitResponse();
        if (callback.exception != null) {
            throw callback.exception;
        }
        return callback.byteResult;
    }

    /**
     * Queues a buffer for writing; the selector thread performs the actual write.
     */
    public void write(ByteBuffer buf) {
        this.writes.add(buf);
        this.notifyChange();
    }

    public void write(byte[] bytes) {
        this.write(ByteBuffer.wrap(bytes));
    }

    /**
     * Alerts the selector thread that we want to read or write, starting it on
     * first use.
     */
    private void notifyChange() {
        try {
            this.startThreadIfNeeded();
        } catch (IOException e) {
            // BUGFIX: previously this only printed the stack trace and fell
            // through to thread.registerChange(this), which throws a
            // NullPointerException when the selector thread failed to start.
            log.error("Unable to start selector thread", e);
            return;
        }
        SelectorThread t = this.thread;
        if (t == null) {
            // The channel was closed (close() nulls the thread) or
            // registration never completed; there is nothing to notify.
            log.warn("notifyChange with no selector thread; channel closed?");
            return;
        }
        t.registerChange(this);
    }

    /**
     * returns true if there are any writes waiting.
     * @return
     */
    public boolean hasWrites() {
        return !this.writes.isEmpty();
    }

    /**
     * returns true if any reads are waiting.
     * @return
     */
    public boolean hasReads() {
        if (this.buffer == null)
            return false;
        return this.buffer.hasCallbacksWaiting();
    }

    public SocketChannel getChannel() {
        return this.channel;
    }

    public Queue<ByteBuffer> getWrites() {
        return this.writes;
    }

    /**
     * Attempts to read from the network, and process any callbacks.
     * does nothing if no callbacks have been registered.
     *
     * @throws TrendrrNoCallbackException
     * @throws TrendrrDisconnectedException
     * @throws TrendrrException
     */
    public void doRead() throws TrendrrNoCallbackException, TrendrrDisconnectedException, TrendrrException {
        int numRead = 1;
        while (numRead > 0 && this.buffer.hasCallbacksWaiting()) {
            numRead = this.buffer.read(this.channel);
            this.buffer.process();
        }
    }

    /**
     * attempts to process the remaining callbacks, then closes the channel and
     * cleans up any resources. Safe to call more than once.
     */
    public synchronized void close() {
        if (closed) {
            log.warn("Already closed!");
            return;
        }
        // best-effort: flush pending callbacks, then release everything
        try { this.buffer.process(); } catch (Exception x) { log.debug("Caught", x); }
        try { this.channel.close(); } catch (Exception x) { log.debug("Caught", x); }
        try { this.buffer.close(); } catch (Exception x) { log.debug("Caught", x); }
        if (this.thread != null) {
            this.thread.unregister(this);
        }
        this.buffer = null;
        this.thread = null;
        this.closed = true;
    }

    public boolean isClosed() {
        return this.closed;
    }

    /**
     * Starts the selector thread exactly once (guarded by TrendrrLock.lockOnce).
     * NOTE(review): if registerChannel throws here, the lock is released but
     * lockOnce will presumably not fire again, leaving this.thread null — the
     * null guard in notifyChange covers that case; confirm TrendrrLock semantics.
     */
    private void startThreadIfNeeded() throws IOException {
        if (threadInit.lockOnce()) {
            try {
                this.thread = SelectorThread.registerChannel(this);
            } finally {
                threadInit.unlock();
            }
        }
    }
}
package de.wak_sh.client.fragments;

import java.io.IOException;

import android.app.Activity;
import android.os.Bundle;
import android.support.v4.app.Fragment;
import android.view.LayoutInflater;
import android.view.View;
import android.view.ViewGroup;
import android.widget.TextView;
import de.wak_sh.client.R;
import de.wak_sh.client.backend.ProgressDialogTask;
import de.wak_sh.client.backend.model.UserInformation;
import de.wak_sh.client.backend.service.UserInformationService;

/**
 * Fragment showing the logged-in user's information (name, course of study,
 * study group, matriculation number). The data is fetched once in the
 * background and cached in {@link #userInformation} across view re-creation.
 */
public class BenutzerinfoFragment extends Fragment {
    // cached result of the background fetch; non-null once loaded
    protected UserInformation userInformation;

    private TextView benutzername;
    private TextView studiengang;
    private TextView studiengruppe;
    private TextView matrikelnummer;

    @Override
    public View onCreateView(LayoutInflater inflater, ViewGroup container, Bundle savedInstanceState) {
        View rootView = inflater.inflate(R.layout.fragment_benutzerinfo, container, false);

        benutzername = (TextView) rootView.findViewById(R.id.text_benutzername);
        studiengang = (TextView) rootView.findViewById(R.id.text_studiengang);
        studiengruppe = (TextView) rootView.findViewById(R.id.text_studiengruppe);
        matrikelnummer = (TextView) rootView.findViewById(R.id.text_matrikelnummer);

        // reuse cached data if available, otherwise fetch it in the background
        if (userInformation != null) {
            populateUi();
        } else {
            new UserInfoTask(getActivity()).execute();
        }
        return rootView;
    }

    // Fills the four text views from the cached userInformation.
    // Precondition: userInformation != null and the views exist.
    protected void populateUi() {
        benutzername.setText(userInformation.getBenutzername());
        studiengang.setText(userInformation.getStudiengang());
        studiengruppe.setText(userInformation.getStudiengruppe());
        matrikelnummer.setText(userInformation.getMatrikelnummer());
    }

    /**
     * Background task that fetches the user information while showing a
     * progress dialog, then updates the UI on the main thread.
     */
    private class UserInfoTask extends ProgressDialogTask<Void, Void> {
        private Activity activity;

        public UserInfoTask(Activity activity) {
            super(activity, activity.getString(R.string.fetching_user_info));
            this.activity = activity;
        }

        @Override
        protected Void doInBackground(Void... params) {
            try {
                userInformation = new UserInformationService().getUserInformation();
            } catch (IOException e) {
                // TODO Auto-generated catch block
                // NOTE(review): network failure is swallowed here; the UI is
                // simply left unpopulated — consider surfacing an error.
                e.printStackTrace();
                return null;
            }
            activity.runOnUiThread(new Runnable() {
                @Override
                public void run() {
                    populateUi();
                }
            });
            return null;
        }
    }
}
package dr.evomodel.coalescent; import dr.evolution.coalescent.TreeIntervals; import dr.evolution.tree.Tree; import dr.evomodel.tree.TreeModel; import dr.inference.model.Model; import dr.inference.model.Parameter; import dr.inference.model.Variable; import dr.xml.*; import java.util.logging.Logger; /** * @author Joseph Heled * @version $Id$ */ public class VariableDemographicModel extends DemographicModel implements MultiLociTreeSet { static final String MODEL_NAME = "variableDemographic"; public static final String POPULATION_SIZES = "populationSizes"; public static final String INDICATOR_PARAMETER = "indicators"; public static final String POPULATION_TREES = "trees"; private static final String PLOIDY = "ploidy"; public static String POP_TREE = "ptree"; public static final String LOG_SPACE = "logUnits"; public static final String USE_MIDPOINTS = "useMidpoints"; public static final String TYPE = "type"; //public static final String STEPWISE = "stepwise"; //public static final String LINEAR = "linear"; //public static final String EXPONENTIAL = "exponential"; public static final String demoElementName = "demographic"; private final Parameter popSizeParameter; private final Parameter indicatorParameter; public Type getType() { return type; } private final Type type; private final boolean logSpace; private final boolean mid; private final TreeModel[] trees; private VDdemographicFunction demoFunction = null; private VDdemographicFunction savedDemoFunction = null; private final double[] populationFactors; public Parameter getPopulationValues() { return popSizeParameter; } public enum Type { LINEAR("linear"), EXPONENTIAL("exponential"), STEPWISE("stepwise"); Type(String name) { this.name = name; } public String toString() { return name; } String name; } public VariableDemographicModel(TreeModel[] trees, double[] popFactors, Parameter popSizeParameter, Parameter indicatorParameter, Type type, boolean logSpace, boolean mid) { super(MODEL_NAME); this.popSizeParameter = 
popSizeParameter; this.indicatorParameter = indicatorParameter; this.populationFactors = popFactors; int events = 0; for (Tree t : trees) { // number of coalescent events events += t.getExternalNodeCount() - 1; // we will have to handle this I guess assert t.getUnits() == trees[0].getUnits(); } // all trees share time 0, need fixing for serial data events += type == Type.STEPWISE ? 0 : 1; final int popSizes = popSizeParameter.getDimension(); final int nIndicators = indicatorParameter.getDimension(); this.type = type; this.logSpace = logSpace; this.mid = mid; if (popSizes != events) { System.err.println("WARNING: resetting parameter size of parameter " + popSizeParameter.getParameterName() + "(size " + popSizeParameter.getSize() + ") in variable demographic model to " + events); popSizeParameter.setDimension(events); // ") must be the same as the number of internal nodes in the tree. (" + events + ")"); } if (nIndicators != events - 1) { System.err.println("WARNING: resetting parameter size of parameter " + indicatorParameter.getParameterName() + " in variable demographic model to " + (events - 1)); indicatorParameter.setDimension(events); // + nIndicators + " != " + (events - 1) + ")"); } this.trees = trees; for (TreeModel t : trees) { addModel(t); } addVariable(indicatorParameter); addVariable(popSizeParameter); } public int nLoci() { return trees.length; } public Tree getTree(int k) { return trees[k]; } public TreeIntervals getTreeIntervals(int nt) { return getDemographicFunction().getTreeIntervals(nt); } public double getPopulationFactor(int nt) { return populationFactors[nt]; } public void storeTheState() { // as a demographic model store/restore is already taken care of } public void restoreTheState() { // as a demographic model store/restore is already taken care of } public VDdemographicFunction getDemographicFunction() { if (demoFunction == null) { demoFunction = new VDdemographicFunction(trees, type, indicatorParameter.getParameterValues(), 
popSizeParameter.getParameterValues(), logSpace, mid);
    } else {
        // lazily rebuilt function already exists: just refresh its inputs
        demoFunction.setup(trees, indicatorParameter.getParameterValues(), popSizeParameter.getParameterValues(), logSpace, mid);
    }
    return demoFunction;
}

// A dependent tree changed: copy-on-write the demographic function if it is
// shared with the saved state, then mark the changed tree dirty.
protected void handleModelChangedEvent(Model model, Object object, int index) {
    // tree has changed
    //System.out.println("model changed: " + model);
    if (demoFunction != null) {
        if (demoFunction == savedDemoFunction) {
            demoFunction = new VDdemographicFunction(demoFunction);
        }
        for (int k = 0; k < trees.length; ++k) {
            if (model == trees[k]) {
                demoFunction.treeChanged(k);
                //System.out.println("tree changed: " + k + " " + Arrays.toString(demoFunction.dirtyTrees)
                // + " " + demoFunction.dirtyTrees);
                break;
            }
            // the changed model must be one of our trees
            assert k + 1 < trees.length;
        }
    }
    super.handleModelChangedEvent(model, object, index);
    fireModelChanged(this);
}

// A parameter (pop sizes or indicators) changed: copy-on-write as above and
// invalidate the cached function wholesale.
protected final void handleVariableChangedEvent(Variable variable, int index, Parameter.ChangeType type) {
    //System.out.println("parm changed: " + parameter);
    super.handleVariableChangedEvent(variable, index, type);
    if (demoFunction != null) {
        if (demoFunction == savedDemoFunction) {
            demoFunction = new VDdemographicFunction(demoFunction);
        }
        demoFunction.setDirty();
    }
    fireModelChanged(this);
}

protected void storeState() {
    savedDemoFunction = demoFunction;
}

protected void restoreState() {
    //System.out.println("restore");
    demoFunction = savedDemoFunction;
    savedDemoFunction = null;
}

// XML parser producing a VariableDemographicModel from a <variableDemographic>
// element (population sizes, indicators, trees with optional ploidy, type,
// log-space and midpoint flags).
public static XMLObjectParser PARSER = new AbstractXMLObjectParser() {

    public String getParserName() {
        return VariableDemographicModel.MODEL_NAME;
    }

    public Object parseXMLObject(XMLObject xo) throws XMLParseException {

        XMLObject cxo = xo.getChild(VariableSkylineLikelihood.POPULATION_SIZES);
        Parameter popParam = (Parameter) cxo.getChild(Parameter.class);

        cxo = xo.getChild(VariableSkylineLikelihood.INDICATOR_PARAMETER);
        Parameter indicatorParam = (Parameter) cxo.getChild(Parameter.class);

        cxo = xo.getChild(POPULATION_TREES);

        final int nc = cxo.getChildCount();
        TreeModel[] treeModels = new TreeModel[nc];
        double[] populationFactor = new double[nc];

        for (int k = 0; k < treeModels.length; ++k) {
            final XMLObject child = (XMLObject) cxo.getChild(k);
            // per-tree ploidy defaults to 1.0 when unspecified
            populationFactor[k] = child.hasAttribute(PLOIDY) ? child.getDoubleAttribute(PLOIDY) : 1.0;
            treeModels[k] = (TreeModel) child.getChild(TreeModel.class);
        }

        Type type = Type.STEPWISE;

        if (xo.hasAttribute(TYPE)) {
            final String s = xo.getStringAttribute(TYPE);
            if (s.equalsIgnoreCase(Type.STEPWISE.toString())) {
                type = Type.STEPWISE;
            } else if (s.equalsIgnoreCase(Type.LINEAR.toString())) {
                type = Type.LINEAR;
            } else if (s.equalsIgnoreCase(Type.EXPONENTIAL.toString())) {
                type = Type.EXPONENTIAL;
            } else {
                throw new XMLParseException("Unknown Bayesian Skyline type: " + s);
            }
        }

        // exponential interpolation implies log-space population sizes
        final boolean logSpace = xo.getAttribute(LOG_SPACE, false) || type == Type.EXPONENTIAL;
        final boolean useMid = xo.getAttribute(USE_MIDPOINTS, false);

        Logger.getLogger("dr.evomodel").info("Variable demographic: " + type.toString() + " control points");

        return new VariableDemographicModel(treeModels, populationFactor, popParam, indicatorParam, type, logSpace, useMid);
    }
package dr.evomodel.continuous; import dr.inference.model.*; import dr.math.distributions.NormalDistribution; import dr.util.DataTable; import dr.xml.*; import java.io.FileReader; import java.io.IOException; import java.util.*; /** * @author Andrew Rambaut * @author Marc Suchard * @version $Id$ */ public class AntigenicTraitLikelihood extends AbstractModelLikelihood { public final static String ANTIGENIC_TRAIT_LIKELIHOOD = "antigenicTraitLikelihood"; public AntigenicTraitLikelihood( int mdsDimension, Parameter mdsPrecision, CompoundParameter tipTraitParameter, MatrixParameter virusLocationsParameter, MatrixParameter serumLocationsParameter, DataTable<double[]> dataTable, final boolean log2Transform) { super(ANTIGENIC_TRAIT_LIKELIHOOD); this.mdsDimension = mdsDimension; String[] virusNames = dataTable.getRowLabels(); String[] serumNames = dataTable.getColumnLabels(); // mdsDimension = virusLocationsParameter.getColumnDimension(); // the total number of viruses is the number of rows in the table int virusCount = dataTable.getRowCount(); // the number of sera is the number of columns int serumCount = dataTable.getColumnCount(); tipCount = virusCount; Map<String, Integer> tipNameMap = null; if (tipTraitParameter != null) { if (tipCount != tipTraitParameter.getNumberOfParameters()) { System.err.println("Tree has different number of tips than the number of viruses"); } // the tip -> virus map tipIndices = new int[tipCount]; tipNameMap = new HashMap<String, Integer>(); for (int i = 0; i < tipCount; i++) { String label = tipTraitParameter.getParameter(i).getParameterName(); tipNameMap.put(label, i); tipIndices[i] = -1; } } else { tipIndices = null; } // the virus -> tip map virusIndices = new int[virusCount]; // a set of vectors for each virus giving serum indices for which assay data is available measuredSerumIndices = new int[virusCount][]; // a compressed (no missing values) set of measured assay values between virus and sera. 
this.assayTable = new double[virusCount][]; int totalMeasurementCount = 0; for (int i = 0; i < virusCount; i++) { virusIndices[i] = -1; double[] dataRow = dataTable.getRow(i); if (tipIndices != null) { // if the virus is in the tree then add a entry to map tip to virus Integer tipIndex = tipNameMap.get(virusNames[i]); if (tipIndex != null) { tipIndices[tipIndex] = i; virusIndices[i] = tipIndex; } else { System.err.println("Virus, " + virusNames[i] + ", not found in tree"); } } int measuredCount = 0; for (int j = 0; j < serumCount; j++) { if (!Double.isNaN(dataRow[j]) && dataRow[j] > 0) { measuredCount ++; } } assayTable[i] = new double[measuredCount]; measuredSerumIndices[i] = new int[measuredCount]; int k = 0; for (int j = 0; j < serumCount; j++) { if (!Double.isNaN(dataRow[j]) && dataRow[j] > 0) { if (log2Transform) { this.assayTable[i][k] = transform(dataRow[j]); } else { this.assayTable[i][k] = dataRow[j]; } measuredSerumIndices[i][k] = j; k ++; } } totalMeasurementCount += measuredCount; } this.totalMeasurementCount = totalMeasurementCount; // a cache of virus to serum distances (serum indices given by array above). 
distances = new double[totalMeasurementCount]; storedDistances = new double[totalMeasurementCount]; virusUpdates = new boolean[virusCount]; serumUpdates = new boolean[serumCount]; distanceUpdate = new boolean[totalMeasurementCount]; // a cache of individual truncations truncations = new double[totalMeasurementCount]; storedTruncations = new double[totalMeasurementCount]; if (tipIndices != null) { for (int i = 0; i < tipCount; i++) { if (tipIndices[i] == -1) { String label = tipTraitParameter.getParameter(i).getParameterName(); System.err.println("Tree tip, " + label + ", not found in virus assay table"); } } } // add tipTraitParameter to enable store / restore this.tipTraitParameter = tipTraitParameter; if (tipTraitParameter != null) { addVariable(tipTraitParameter); } this.virusLocationsParameter = virusLocationsParameter; virusLocationsParameter.setColumnDimension(mdsDimension); virusLocationsParameter.setRowDimension(virusCount); addVariable(virusLocationsParameter); this.serumLocationsParameter = serumLocationsParameter; if (virusLocationsParameter != serumLocationsParameter) { serumLocationsParameter.setColumnDimension(mdsDimension); serumLocationsParameter.setRowDimension(serumCount); addVariable(serumLocationsParameter); } this.mdsParameter = mdsPrecision; addVariable(mdsPrecision); this.isLeftTruncated = true; // Re-normalize likelihood for strictly positive distances } private double transform(final double value) { // transform to log_2 return Math.log(value) / Math.log(2.0); } @Override protected void handleModelChangedEvent(Model model, Object object, int index) { } @Override protected void handleVariableChangedEvent(Variable variable, int index, Variable.ChangeType type) { // TODO Flag which cachedDistances or mdsPrecision need to be updated if (variable == virusLocationsParameter) { if (tipTraitParameter != null) { // the virus locations have changed so update the tipTraitParameter int k = 0; for (int i = 0; i < tipCount; i++) { if (tipIndices[i] != 
-1) { Parameter virusLoc = virusLocationsParameter.getParameter(tipIndices[i]); for (int j = 0; j < mdsDimension; j++) { tipTraitParameter.setParameterValue(k, virusLoc.getValue(j)); k++; } } else { k += mdsDimension; } } } virusUpdates[index / mdsDimension] = true; distancesKnown = false; } else if (variable == serumLocationsParameter) { serumUpdates[index / mdsDimension] = true; distancesKnown = false; } else if (variable == mdsParameter) { for (int i = 0; i < distanceUpdate.length; i++) { distanceUpdate[i] = true; } } else { throw new IllegalArgumentException("Unknown parameter"); } truncationKnown = false; likelihoodKnown = false; } @Override protected void storeState() { System.arraycopy(distances, 0, storedDistances, 0, distances.length); System.arraycopy(truncations, 0, storedTruncations, 0, truncations.length); storedLogLikelihood = logLikelihood; storedTruncationSum = truncationSum; storedSumOfSquaredResiduals = sumOfSquaredResiduals; } @Override protected void restoreState() { double[] tmp = storedDistances; storedDistances = distances; distances = tmp; distancesKnown = true; tmp = storedTruncations; storedTruncations = truncations; truncations = tmp; logLikelihood = storedLogLikelihood; likelihoodKnown = true; truncationSum = storedTruncationSum; truncationKnown = true; sumOfSquaredResiduals = storedSumOfSquaredResiduals; } @Override protected void acceptState() { // do nothing } public void makeDirty() { distancesKnown = false; likelihoodKnown = false; truncationKnown = false; for (int i = 0; i < virusUpdates.length; i++) { virusUpdates[i] = true; } for (int i = 0; i < serumUpdates.length; i++) { serumUpdates[i] = true; } for (int i = 0; i < distanceUpdate.length; i++) { distanceUpdate[i] = true; } } public Model getModel() { return this; } public double getLogLikelihood() { makeDirty(); if (!likelihoodKnown) { if (!distancesKnown) { calculateDistances(); sumOfSquaredResiduals = calculateSumOfSquaredResiduals(); distancesKnown = true; } logLikelihood = 
computeLogLikelihood(); likelihoodKnown = true; } for (int i = 0; i < virusUpdates.length; i++) { virusUpdates[i] = false; } for (int i = 0; i < serumUpdates.length; i++) { serumUpdates[i] = false; } for (int i = 0; i < distanceUpdate.length; i++) { distanceUpdate[i] = false; } return logLikelihood; } // This function can be overwritten to implement other sampling densities, i.e. discrete ranks protected double computeLogLikelihood() { double precision = mdsParameter.getParameterValue(0); double logLikelihood = (totalMeasurementCount / 2) * Math.log(precision) - 0.5 * precision * sumOfSquaredResiduals; if (isLeftTruncated) { if (!truncationKnown) { truncationSum = calculateTruncation(precision); truncationKnown = true; } logLikelihood -= truncationSum; } return logLikelihood; } private double calculateTruncation(double precision) { double sum = 0.0; double sd = 1.0 / Math.sqrt(precision); int k = 0; for (int i = 0; i < assayTable.length; i++) { for (int j = 0; j < assayTable[i].length; j++) { if (distanceUpdate[k]) { truncations[k] = Math.log(NormalDistribution.cdf(distances[k], 0.0, sd)); } k++; } } for ( k = 0; k < truncations.length; k++) { sum += truncations[k]; } return sum; } private double calculateSumOfSquaredResiduals() { double sum = 0.0; int k = 0; for (int i = 0; i < assayTable.length; i++) { for (int j = 0; j < assayTable[i].length; j++) { double residual = distances[k] - assayTable[i][j]; sum += residual * residual; k++; } } return sum; } private void calculateDistances() { int k = 0; for (int i = 0; i < assayTable.length; i++) { for (int j = 0; j < assayTable[i].length; j++) { if (virusUpdates[i] || serumUpdates[measuredSerumIndices[i][j]]) { distances[k] = calculateDistance(virusLocationsParameter.getParameter(i), serumLocationsParameter.getParameter(measuredSerumIndices[i][j])); distanceUpdate[k] = true; } k++; } } } private double calculateDistance(Parameter X, Parameter Y) { double sum = 0.0; for (int i = 0; i < mdsDimension; i++) { double 
difference = X.getParameterValue(i) - Y.getParameterValue(i); sum += difference * difference; } return Math.sqrt(sum); } // XMLObjectParser public static XMLObjectParser PARSER = new AbstractXMLObjectParser() { public final static String FILE_NAME = "fileName"; public final static String TIP_TRAIT = "tipTrait"; public final static String VIRUS_LOCATIONS = "virusLocations"; public final static String SERUM_LOCATIONS = "serumLocations"; public static final String MDS_DIMENSION = "mdsDimension"; public static final String MDS_PRECISION = "mdsPrecision"; public static final String LOG_2_TRANSFORM = "log2Transform"; public String getParserName() { return ANTIGENIC_TRAIT_LIKELIHOOD; } public Object parseXMLObject(XMLObject xo) throws XMLParseException { String fileName = xo.getStringAttribute(FILE_NAME); DataTable<double[]> assayTable; try { assayTable = DataTable.Double.parse(new FileReader(fileName)); } catch (IOException e) { throw new XMLParseException("Unable to read assay data from file, " + fileName); } int mdsDimension = xo.getIntegerAttribute(MDS_DIMENSION); boolean log2Transform = false; if (xo.hasAttribute(LOG_2_TRANSFORM)) { xo.getBooleanAttribute(LOG_2_TRANSFORM); } // This parameter needs to be linked to the one in the IntegratedMultivariateTreeLikelihood (I suggest that the parameter is created // here and then a reference passed to IMTL - which optionally takes the parameter of tip trait values, in which case it listens and // updates accordingly. 
CompoundParameter tipTraitParameter = null; if (xo.hasChildNamed(TIP_TRAIT)) { tipTraitParameter = (CompoundParameter) xo.getElementFirstChild(TIP_TRAIT); } MatrixParameter virusLocationsParameter = (MatrixParameter) xo.getElementFirstChild(VIRUS_LOCATIONS); MatrixParameter serumLocationsParameter = (MatrixParameter) xo.getElementFirstChild(SERUM_LOCATIONS); if (serumLocationsParameter.getColumnDimension() != virusLocationsParameter.getColumnDimension()) { throw new XMLParseException("Virus Locations parameter and Serum Locations parameter have different column dimensions"); } Parameter mdsPrecision = (Parameter) xo.getElementFirstChild(MDS_PRECISION); return new AntigenicTraitLikelihood(mdsDimension, mdsPrecision, tipTraitParameter, virusLocationsParameter, serumLocationsParameter, assayTable, log2Transform); }
package dr.inferencexml.model;

import dr.inference.model.ReciprocalStatistic;
import dr.inference.model.Statistic;
import dr.xml.*;

/**
 * XML parser that builds a {@link ReciprocalStatistic} from a single child
 * {@link Statistic} element. Registered under both "reciprocalStatistic"
 * and the shorthand alias "reciprocal".
 */
public class ReciprocalStatisticParser extends AbstractXMLObjectParser {

    public static String RECIPROCAL_STATISTIC = "reciprocalStatistic";
    public static String RECIPROCAL = "reciprocal";

    /** Primary name plus the shorthand alias. */
    public String[] getParserNames() {
        return new String[]{ getParserName(), RECIPROCAL };
    }

    public String getParserName() {
        return RECIPROCAL_STATISTIC;
    }

    /**
     * Wraps the first child element, which must be a Statistic, in a
     * ReciprocalStatistic named after this element's id.
     *
     * @throws XMLParseException if the child is not a Statistic
     */
    public Object parseXMLObject(XMLObject xo) throws XMLParseException {
        final Object element = xo.getChild(0);
        if (!(element instanceof Statistic)) {
            throw new XMLParseException("Unknown element found in " + getParserName() + " element:" + element);
        }
        return new ReciprocalStatistic(xo.getId(), (Statistic) element);
    }
package edu.mit.streamjit.impl.compiler;

import static com.google.common.base.Preconditions.*;
import edu.mit.streamjit.api.Identity;
import java.io.PrintWriter;
import java.util.Arrays;
import java.util.Map;
import org.objectweb.asm.ClassVisitor;
import org.objectweb.asm.ClassWriter;
import org.objectweb.asm.Opcodes;
import org.objectweb.asm.tree.ClassNode;
import org.objectweb.asm.tree.FieldNode;
import org.objectweb.asm.util.CheckClassAdapter;

/**
 * Builds a .class file (as a byte[]) from a Klass.
 * @author Jeffrey Bosboom <jeffreybosboom@gmail.com>
 * @since 4/17/2013
 */
public final class KlassUnresolver {
	/**
	 * Serializes the given Klass into class-file bytes.
	 * @param k the class to unresolve; must not be null
	 * @return the class-file bytes
	 */
	public static byte[] unresolve(Klass k) {
		checkNotNull(k);
		//TODO: permit this for testing
		//checkArgument(k.isMutable());
		return new KlassUnresolver(k).unresolve();
	}

	private final Klass klass;
	private final ClassNode classNode;
	private KlassUnresolver(Klass k) {
		this.klass = k;
		this.classNode = new ClassNode(Opcodes.ASM4);
	}

	@SuppressWarnings("unchecked")
	private byte[] unresolve() {
		this.classNode.version = Opcodes.V1_7;
		this.classNode.access = Modifier.toBits(klass.modifiers());
		this.classNode.name = internalName(klass);

		// Only java.lang.Object may legitimately lack a superclass.
		Klass superclass = klass.getSuperclass();
		assert superclass != null || Object.class.equals(klass.getBackingClass()) : klass;
		// FIX: the original called internalName(klass.getSuperclass())
		// unconditionally, which throws NullPointerException when unresolving
		// java.lang.Object — exactly the case the assert above anticipates.
		// ASM represents Object's superName as null.
		this.classNode.superName = (superclass != null) ? internalName(superclass) : null;

		for (Klass k : klass.interfaces())
			this.classNode.interfaces.add(internalName(k));

		for (Field f : klass.fields()) {
			FieldNode fn = new FieldNode(Opcodes.ASM4,
					Modifier.toBits(f.modifiers()),
					f.getName(),
					f.getType().getFieldType().getDescriptor(),
					null, null);
			this.classNode.fields.add(fn);
		}

		for (Method m : klass.methods())
			this.classNode.methods.add(MethodUnresolver.unresolve(m));

		ClassWriter cw = new ClassWriter(ClassWriter.COMPUTE_MAXS | ClassWriter.COMPUTE_FRAMES);
		ClassVisitor cv = cw;
		// When assertions are on, run ASM's verifier over the generated class.
		boolean assertionsEnabled = false;
		assert assertionsEnabled = true; //intentional side effect
		if (assertionsEnabled)
			cv = new CheckClassAdapter(cv, true);
		classNode.accept(cv);
		return cw.toByteArray();
	}

	/** JVM internal name (slash-separated) for the given Klass. */
	private String internalName(Klass k) {
		return k.getName().replace('.', '/');
	}

	public static void main(String[] args) {
		Module m = new Module();
		Klass k = m.getKlass(Module.class);
		for (Method method : k.methods())
			if (method.isResolvable()) {
				method.resolve();
				method.dump(new PrintWriter(System.out, true));
			}
		byte[] b = unresolve(k);
		System.out.println(Arrays.toString(b));
		System.out.println(b.length);
	}
}
package edu.mit.streamjit.test.apps;

import com.google.common.base.Stopwatch;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Iterables;
import com.google.common.util.concurrent.Uninterruptibles;
import edu.mit.streamjit.api.Filter;
import edu.mit.streamjit.api.Input;
import edu.mit.streamjit.api.OneToOneElement;
import edu.mit.streamjit.api.Pipeline;
import edu.mit.streamjit.api.StreamCompiler;
import edu.mit.streamjit.impl.compiler2.Compiler2StreamCompiler;
import edu.mit.streamjit.impl.interp.DebugStreamCompiler;
import edu.mit.streamjit.test.AbstractBenchmark;
import edu.mit.streamjit.test.Benchmarker;
import java.awt.Polygon;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import java.util.Random;
import java.util.concurrent.Semaphore;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;

/**
 * Parses string representations of triangles, then tests whether they contain
 * the origin. Result is a sequence of booleans representing whether the
 * triangle contained the origin or not. Loosely based on Project Euler problem
 * 102, "Triangle Containment".
 *
 * This file also contains an implementation using threads, for
 * comparison purposes. It simply computes a count of origin-containing
 * triangles, rather than a list of booleans.
 * @author Jeffrey Bosboom <jeffreybosboom@gmail.com>
 * @since 1/7/2014
 */
public final class TriangleContainment {
	// Each triangle has 3 vertices, serialized as 6 comma-separated ints.
	private static final int TRIANGLE_SIDES = 3;
	private TriangleContainment() {}

	/** Splits one CSV triangle string into its six integer coordinates. */
	private static final class Parser extends Filter<String, Integer> {
		private Parser() {
			// pops 1 string, pushes TRIANGLE_SIDES*2 = 6 ints
			super(1, TRIANGLE_SIDES*2);
		}
		@Override
		public void work() {
			for (String s : pop().split(","))
				push(Integer.parseInt(s));
		}
	}

	/** Consumes six coordinates, pushes whether the triangle contains the origin. */
	private static final class OriginTester extends Filter<Integer, Boolean> {
		private OriginTester() {
			super(TRIANGLE_SIDES*2, 1);
		}
		@Override
		public void work() {
			Polygon p = new Polygon();
			// Java evaluates arguments left-to-right, so this pops x before y.
			for (int i = 0; i < TRIANGLE_SIDES; ++i)
				p.addPoint(pop(), pop());
			push(p.contains(0, 0));
		}
	}

	/** Parser and OriginTester fused by hand into one filter, for comparison. */
	private static final class ManuallyFused extends Filter<String, Boolean> {
		private ManuallyFused() {
			super(1, 1);
		}
		@Override
		public void work() {
			String[] data = pop().split(",");
			Polygon p = new Polygon();
			for (int i = 0; i < TRIANGLE_SIDES*2; i += 2)
				p.addPoint(Integer.parseInt(data[i]), Integer.parseInt(data[i+1]));
			push(p.contains(0, 0));
		}
	}

	/** Benchmark wiring: generated triangle strings through the two-stage pipeline. */
	private static final class TriangleContainmentBenchmark extends AbstractBenchmark {
		private TriangleContainmentBenchmark() {
			super(new Dataset("triangles", Input.fromIterable(generateInput())));
		}
		@Override
		@SuppressWarnings("unchecked")
		public OneToOneElement<Object, Object> instantiate() {
//			return new Pipeline(new ManuallyFused());
			return new Pipeline(new Parser(), new OriginTester());
		}
	}

	private static final int NUM_TRIANGLES = 10000;
	private static final int REPETITIONS = 5000;

	/**
	 * Builds NUM_TRIANGLES random triangle strings (coordinates uniform in
	 * [-1000, 1000]) and repeats the list REPETITIONS times. Deterministic:
	 * the Random is seeded with 0.
	 */
	private static Iterable<String> generateInput() {
		Random rng = new Random(0);
		ImmutableList.Builder<String> list = ImmutableList.builder();
		StringBuilder sb = new StringBuilder();
		for (int i = 0; i < NUM_TRIANGLES; ++i) {
			sb.append(rng.nextInt(2001)-1000).append(",").append(rng.nextInt(2001)-1000);
			for (int j = 1; j < TRIANGLE_SIDES; ++j)
				sb.append(",").append(rng.nextInt(2001)-1000).append(",").append(rng.nextInt(2001)-1000);
			list.add(sb.toString());
			// reuse the builder for the next triangle
			sb.delete(0, sb.length());
		}
		return Iterables.concat(Collections.nCopies(REPETITIONS, list.build()));
	}

	private static final int NUM_THREADS = Runtime.getRuntime().availableProcessors();

	/**
	 * Thread-based comparison implementation: NUM_THREADS workers pass
	 * read/write "tokens" around a ring of semaphores so batches are claimed
	 * and retired in order. Returns the count of origin-containing triangles
	 * and prints the elapsed wall time.
	 */
	private static int runThreads() {
		Iterator<String> taskIterator = generateInput().iterator();
		AtomicInteger result = new AtomicInteger(0);
		List<Thread> threads = new ArrayList<>(NUM_THREADS);
		List<Semaphore> readSemaphores = new ArrayList<>(NUM_THREADS),
				writeSemaphores = new ArrayList<>(NUM_THREADS);
		// Only thread 0 starts with a permit; each thread hands the token to
		// its successor ((i+1) mod NUM_THREADS) when done with its phase.
		for (int i = 0; i < NUM_THREADS; ++i) {
			readSemaphores.add(new Semaphore(i == 0 ? 1 : 0));
			writeSemaphores.add(new Semaphore(i == 0 ? 1 : 0));
		}
		for (int i = 0; i < NUM_THREADS; ++i)
			threads.add(new ComputeThread(taskIterator, result,
					readSemaphores.get(i), readSemaphores.get((i+1)%readSemaphores.size()),
					writeSemaphores.get(i), writeSemaphores.get((i+1)%writeSemaphores.size())));
		Stopwatch stopwatch = Stopwatch.createStarted();
		for (Thread t : threads)
			t.start();
		for (Thread t : threads)
			Uninterruptibles.joinUninterruptibly(t);
		System.out.println("Thread impl ran in " + stopwatch.stop().elapsed(TimeUnit.MILLISECONDS));
		return result.get();
	}

	/** Worker thread: claims a batch in order, computes, retires it in order. */
	private static final class ComputeThread extends Thread {
		private static final int STRINGS_PER_TASK = 100000;
		// shared, guarded by the read-token semaphores
		private final Iterator<String> taskIterator;
		//A token-passing scheme to ensure tasks are issued and retired in order,
		//for fair comparison against StreamJIT.
		private final Semaphore readSemaphore, nextReadSemaphore, writeSemaphore, nextWriteSemaphore;
		private final AtomicInteger result;
		private ComputeThread(Iterator<String> taskIterator, AtomicInteger result,
				Semaphore readSemaphore, Semaphore nextReadSemaphore,
				Semaphore writeSemaphore, Semaphore nextWriteSemaphore) {
			this.taskIterator = taskIterator;
			this.result = result;
			this.readSemaphore = readSemaphore;
			this.nextReadSemaphore = nextReadSemaphore;
			this.writeSemaphore = writeSemaphore;
			this.nextWriteSemaphore = nextWriteSemaphore;
		}
		@Override
		public void run() {
			List<String> tasks = new ArrayList<>(STRINGS_PER_TASK);
			while (true) {
				tasks.clear();
				// claim the read token; only one thread reads the iterator at a time
				readSemaphore.acquireUninterruptibly();
				for (int i = 0; i < STRINGS_PER_TASK && taskIterator.hasNext(); ++i)
					tasks.add(taskIterator.next());
				nextReadSemaphore.release();
				// input exhausted: exit without touching the write token
				// (threads that still hold batches retire them; exiting
				// threads never block on writeSemaphore, so no deadlock)
				if (tasks.isEmpty())
					return;
				int accum = 0;
				for (String task : tasks) {
					String[] data = task.split(",");
					Polygon p = new Polygon();
					for (int i = 0; i < TRIANGLE_SIDES*2; i += 2)
						p.addPoint(Integer.parseInt(data[i]), Integer.parseInt(data[i+1]));
					if (p.contains(0, 0))
						++accum;
				}
				// retire in the same order batches were claimed
				writeSemaphore.acquireUninterruptibly();
				result.addAndGet(accum);
				nextWriteSemaphore.release();
			}
		}
	}

	public static void main(String[] args) {
//		StreamCompiler sc = new DebugStreamCompiler();
		StreamCompiler sc = new Compiler2StreamCompiler().maxNumCores(4).multiplier(Short.MAX_VALUE);
		Benchmarker.runBenchmark(new TriangleContainmentBenchmark(), sc).get(0).print(System.out);
		runThreads();
	}
}
package edu.ucsb.cs56.projects.games.poker;

import java.awt.*;
import java.awt.event.ActionListener;
import java.awt.event.ActionEvent;
import javax.swing.*;
import java.lang.String;
import java.lang.System;
import java.net.URL;
import java.util.ArrayList;

/**
 * Swing front end for PokerGame: lays out the table (opponent on top, player
 * on the bottom, community cards in the center, messages on the right, rules
 * on the left) and wires the bet/check/call/fold/showdown buttons to the
 * game-flow methods inherited from PokerGame.
 */
public class PokerGameGui extends PokerGame{

    // Top-level windows: the main table and the end-of-game dialog.
    protected JFrame mainFrame, gameOverFrame;
    protected JTextField betTextField;
    protected JButton foldButton, betButton, checkButton, callButton,
        showdownButton, rulesButton, // mainFrame
        overviewRulesButton, gameplayRulesButton, exampleRulesButton, //Rules Panel
        gameOverButton; // gameOverFrame
    protected JLabel rulesExampleImg, rulesOverviewImg, rulesGameplayImg,
        playerWinsLabel, opponentWinsLabel, // Possibly don't need these
        playerChipsLabel, opponentChipsLabel, potLabel, gameMessage,
        playerPrompt, // Possibly don't need these
        backCardLabel1, backCardLabel2, // Not sure what these are used for
        gameOverLabel; // gameOverFrame
    protected JPanel rulesPanel, rulesPart1, rulesPart2, rulesPart3,
        opponentPanel, playerPanel, centerPanel, messagePanel, optionArea,
        oSubPane1, oSubPane2, oSubPane3, pSubPane1, pSubPane2, pSubPane3,
        flopPane, turnPane, riverPane, betPane,
        gameOverMessage, gameOverPanel, gameOverButtonPanel; // gameOverFrame

    public PokerGameGui(){
        super();
    }

    /**
     * Builds the entire main window: action buttons, the three rules pages,
     * opponent/player panels, the community-card panes (hidden until their
     * street is reached) and the message column, then shows the frame.
     * Does nothing once the game is over.
     */
    public void layoutSubViews() {
        if (!gameOver) {
            Color pokerGreen = new Color(83, 157, 89);

            // Action buttons start disabled; controlButtons() enables them per turn.
            foldButton = new JButton("FOLD");
            foldButton.setEnabled(false);
            foldButton.addActionListener(new foldButtonHandler());
            betButton = new JButton("BET");
            betButton.setEnabled(false);
            betButton.addActionListener(new betButtonHandler());
            betTextField = new JTextField(4);
            checkButton = new JButton("CHECK");
            checkButton.setEnabled(false);
            checkButton.addActionListener(new checkButtonHandler());
            callButton = new JButton("CALL");
            callButton.setEnabled(false);
            callButton.addActionListener(new callButtonHandler());
            showdownButton = new JButton("SHOWDOWN");
            showdownButton.addActionListener(new showdownButtonHandler());

            /*putting the rules pictures into the game without adding a new window */
            rulesButton = new JButton("RULES");
            rulesButton.setEnabled(true);
            rulesButton.addActionListener(new rulesButtonHandler());
            // Three rules "pages"; rulesPanel aliases whichever page is current.
            rulesPart1 = new JPanel();
            rulesPart2 = new JPanel();
            rulesPart3 = new JPanel();
            rulesPart1.setLayout(new BoxLayout(rulesPart1, BoxLayout.Y_AXIS));
            rulesPart2.setLayout(new BoxLayout(rulesPart2, BoxLayout.Y_AXIS));
            rulesPart3.setLayout(new BoxLayout(rulesPart3, BoxLayout.Y_AXIS));
            rulesOverviewImg = new JLabel();
            rulesGameplayImg = new JLabel();
            rulesExampleImg = new JLabel();
            // NOTE(review): image paths are relative to the working directory —
            // the icons silently fail to load when launched from elsewhere.
            rulesOverviewImg.setIcon(new ImageIcon("src/edu/ucsb/cs56/projects/games/poker/rules/rulesOverview.png"));
            rulesGameplayImg.setIcon(new ImageIcon("src/edu/ucsb/cs56/projects/games/poker/rules/rulesGamePlay.png"));
            rulesExampleImg.setIcon(new ImageIcon("src/edu/ucsb/cs56/projects/games/poker/rules/rulesExamples.png"));
            rulesPart1.add(rulesOverviewImg);
            rulesPart2.add(rulesGameplayImg);
            rulesPart3.add(rulesExampleImg);
            overviewRulesButton = new JButton("Overview");
            overviewRulesButton.setEnabled(true);
            overviewRulesButton.addActionListener(new overviewButtonHandler() );
            gameplayRulesButton = new JButton("Gameplay");
            gameplayRulesButton.setEnabled(true);
            gameplayRulesButton.addActionListener(new gameplayButtonHandler() );
            exampleRulesButton = new JButton("Example Hands");
            exampleRulesButton.setEnabled(true);
            exampleRulesButton.addActionListener(new exampleButtonHandler() );
            // NOTE(review): each navigation button instance is added to two
            // panels below. A Swing component can have only one parent, so each
            // add() reparents the button — only its LAST container shows it.
            // Separate button instances per page are likely intended; confirm.
            rulesPart1.add(gameplayRulesButton);
            rulesPart1.add(exampleRulesButton);
            rulesPart2.add(overviewRulesButton);
            rulesPart2.add(exampleRulesButton);
            rulesPart3.add(overviewRulesButton);
            rulesPart3.add(gameplayRulesButton);
            rulesPanel = rulesPart1;

            // Opponent strip across the top of the window.
            opponentPanel = new JPanel();
            opponentPanel.setLayout(new BorderLayout());
            oSubPane1 = new JPanel();
            oSubPane1.setLayout(new BoxLayout(oSubPane1, BoxLayout.Y_AXIS));
            oSubPane2 = new JPanel();
            oSubPane3 = new JPanel();
            oSubPane3.setLayout(new BorderLayout());
            opponentChipsLabel = new JLabel(String.format("Chips: %d", opponent.getChips()));
            opponentWinsLabel = new JLabel();
            opponentWinsLabel.setText(String.format("Opponent wins: %d", opponent.getWins()));
            playerWinsLabel = new JLabel();
            playerWinsLabel.setText(String.format("Player wins: %d", player.getWins()));
            oSubPane1.add(new JLabel("OPPONENT"));
            oSubPane1.add(opponentChipsLabel);
            oSubPane3.add(BorderLayout.NORTH, playerWinsLabel);
            oSubPane3.add(BorderLayout.SOUTH, opponentWinsLabel);
            opponentPanel.add(BorderLayout.WEST, oSubPane1);
            opponentPanel.add(BorderLayout.CENTER, oSubPane2);
            opponentPanel.add(BorderLayout.EAST, oSubPane3);

            // Column of action controls shown at the player's side.
            optionArea = new JPanel();
            optionArea.setLayout(new BoxLayout(optionArea, BoxLayout.Y_AXIS));
            optionArea.add(betButton);
            optionArea.add(betTextField);
            optionArea.add(callButton);
            optionArea.add(checkButton);
            optionArea.add(foldButton);
            optionArea.add(rulesButton);

            // Player strip across the bottom of the window.
            playerPanel = new JPanel();
            playerPanel.setLayout(new BorderLayout());
            pSubPane1 = new JPanel();
            pSubPane1.setLayout(new BoxLayout(pSubPane1, BoxLayout.Y_AXIS));
            pSubPane2 = new JPanel();
            pSubPane3 = new JPanel();
            playerChipsLabel = new JLabel(String.format("Chips: %d", player.getChips()));
            pSubPane1.add(new JLabel("PLAYER"));
            pSubPane1.add(playerChipsLabel);
            pSubPane3.add(optionArea);
            playerPanel.add(BorderLayout.WEST, pSubPane1);
            playerPanel.add(BorderLayout.CENTER, pSubPane2);
            playerPanel.add(BorderLayout.EAST, pSubPane3);

            // Opponent's cards shown face down; player's two hole cards face up.
            backCardLabel1 = new JLabel(backCardImage);
            backCardLabel2 = new JLabel(backCardImage);
            oSubPane2.add(backCardLabel1);
            oSubPane2.add(backCardLabel2);
            for (int i = 0; i < 2; i++) {
                pSubPane2.add(new JLabel(getCardImage((player.getHand()).get(i))));
            }

            // Community cards: panes stay hidden until updateFrame() reveals
            // them as the corresponding street is reached.
            centerPanel = new JPanel();
            centerPanel.setLayout(new BoxLayout(centerPanel, BoxLayout.X_AXIS));
            flopPane = new JPanel();
            flopPane.add(new JLabel("Flop:"));
            for (int i = 0; i < 3; i++) {
                flopPane.add(new JLabel(getCardImage((table.getFlopCards()).get(i))));
            }
            flopPane.setVisible(false);
            turnPane = new JPanel();
            turnPane.add(new JLabel("Turn:"));
            turnPane.add(new JLabel(getCardImage(table.getTurnCard())));
            turnPane.setVisible(false);
            riverPane = new JPanel();
            riverPane.add(new JLabel("River:"));
            riverPane.add(new JLabel(getCardImage(table.getRiverCard())));
            riverPane.setVisible(false);
            centerPanel.add(flopPane);
            centerPanel.add(turnPane);
            centerPanel.add(riverPane);

            // Right-hand column: pot, status message, prompt and showdown button.
            messagePanel = new JPanel();
            messagePanel.setLayout(new BoxLayout(messagePanel, BoxLayout.Y_AXIS));
            messagePanel.add(Box.createRigidArea(new Dimension(0, 20)));
            potLabel = new JLabel();
            potLabel.setText(String.format("Pot: %d", pot));
            messagePanel.add(potLabel);
            messagePanel.add(Box.createRigidArea(new Dimension(10, 20)));
            gameMessage = new JLabel(message);
            messagePanel.add(Box.createRigidArea(new Dimension(10, 20)));
            messagePanel.add(gameMessage);
            playerPrompt = new JLabel(prompt);
            messagePanel.add(playerPrompt);
            messagePanel.add(Box.createRigidArea(new Dimension(10, 0)));
            showdownButton.setVisible(false);
            messagePanel.add(showdownButton);
            messagePanel.add(Box.createRigidArea(new Dimension(0, 20)));

            // Felt-green background for every table panel.
            oSubPane1.setBackground(pokerGreen);
            oSubPane2.setBackground(pokerGreen);
            oSubPane3.setBackground(pokerGreen);
            pSubPane1.setBackground(pokerGreen);
            pSubPane2.setBackground(pokerGreen);
            pSubPane3.setBackground(pokerGreen);
            messagePanel.setBackground(pokerGreen);
            centerPanel.setBackground(pokerGreen);
            optionArea.setBackground(pokerGreen);
            flopPane.setBackground(pokerGreen);
            turnPane.setBackground(pokerGreen);
            riverPane.setBackground(pokerGreen);

            mainFrame = new JFrame("Poker Game");
            mainFrame.setSize(new Dimension(1000, 600));
            mainFrame.setLayout(new BorderLayout() );
            mainFrame.setResizable(false);
            mainFrame.setLocation(250, 250);
            mainFrame.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE);
            mainFrame.getContentPane().add(BorderLayout.NORTH, opponentPanel);
            mainFrame.getContentPane().add(BorderLayout.SOUTH, playerPanel);
            mainFrame.getContentPane().add(BorderLayout.CENTER, centerPanel);
            mainFrame.getContentPane().add(BorderLayout.EAST, messagePanel);
            mainFrame.getContentPane().add(BorderLayout.WEST, rulesPanel);
            mainFrame.setVisible(true);
            // mainFramee.add(BorderLayout.CENTER, mainFrame);
            // mainFramee.getContentPane().add(BorderLayout.WEST, rulesPanel);
        }
    }

    /**
     * Method that updates the panels in the frame based on step
     */
    public void updateFrame() {
        // Reveal community cards up to the current street.
        if (step == Step.FLOP) {
            flopPane.setVisible(true);
        } else if (step == Step.TURN) {
            turnPane.setVisible(true);
        } else if (step == Step.RIVER) {
            riverPane.setVisible(true);
        }
        gameMessage.setText(message);
        playerPrompt.setText(prompt);
        potLabel.setText(String.format("Pot: %d", pot));
        opponentChipsLabel.setText(String.format("Chips: %d", opponent.getChips()));
        playerChipsLabel.setText(String.format("Chips: %d", player.getChips()));
        opponentPanel.revalidate();
        playerPanel.revalidate();
        centerPanel.revalidate();
    }

    /**
     * Enables and disables buttons on the screen depending on turn and step
     */
    protected void controlButtons() {
        if (step == Step.SHOWDOWN) {
            // Only the showdown (and rules) controls are available.
            betButton.setEnabled(false);
            betTextField.setEnabled(false);
            checkButton.setEnabled(false);
            callButton.setEnabled(false);
            foldButton.setEnabled(false);
            showdownButton.setVisible(true);
            rulesButton.setEnabled(true);
        } else if (turn == Turn.PLAYER && responding) {
            // Player is facing a bet: only call or fold.
            betButton.setEnabled(false);
            betTextField.setEnabled(false);
            checkButton.setEnabled(false);
            callButton.setEnabled(true);
            foldButton.setEnabled(true);
            rulesButton.setEnabled(true);
        } else if (turn == Turn.PLAYER) {
            // Player acts first: bet, check or fold.
            betButton.setEnabled(true);
            betTextField.setEnabled(true);
            checkButton.setEnabled(true);
            callButton.setEnabled(false);
            foldButton.setEnabled(true);
            rulesButton.setEnabled(true);
        } else {
            // Opponent's turn: everything but the rules is locked.
            betButton.setEnabled(false);
            betTextField.setEnabled(false);
            checkButton.setEnabled(false);
            callButton.setEnabled(false);
            foldButton.setEnabled(false);
            rulesButton.setEnabled(true);
        }
        updateFrame();
    }

    /**
     * Inner class that handles the betButton using ActionListener
     */
    protected class betButtonHandler implements ActionListener {
        public void actionPerformed(ActionEvent e) {
            String inputText = betTextField.getText();
            if (!inputText.equals("")) {
                // NOTE(review): Integer.parseInt throws an uncaught
                // NumberFormatException if the field holds non-numeric text.
                bet = Integer.parseInt(inputText);
                if (bet<=0) {
                    prompt = "Enter a valid bet!";
                    updateFrame();
                } else if ((player.getChips()-bet>=0) && (opponent.getChips()-bet>=0)) {
                    // Valid bet: move chips to the pot and hand control over.
                    betTextField.setText("");
                    pot += bet;
                    player.bet(bet);
                    message = "Opponent waiting for turn.";
                    prompt = "Player bets " + bet + ".";
                    betButton.setEnabled(false);
                    betTextField.setEnabled(false);
                    checkButton.setEnabled(false);
                    callButton.setEnabled(false);
                    foldButton.setEnabled(false);
                    responding = true; // opponent must now call or fold
                    checkPassTurnUpdate();
                    updateFrame();
                } else {
                    prompt = "Not enough chips!";
                    updateFrame();
                }
            } else {
                prompt = "Enter a number of chips to bet!";
                updateFrame();
            }
        }
    }

    /**
     * Inner class that handles the checkButton using ActionListener
     */
    protected class checkButtonHandler implements ActionListener {
        public void actionPerformed(ActionEvent e) {
            bet = 0;
            message = "Opponent waiting to deal.";
            prompt = "Player checks.";
            betButton.setEnabled(false);
            betTextField.setEnabled(false);
            checkButton.setEnabled(false);
            callButton.setEnabled(false);
            foldButton.setEnabled(false);
            rulesButton.setEnabled(true);
            checkPassTurnUpdate();
            updateFrame();
        }
    }

    /**
     * Inner class that handles the foldButton using ActionListener
     */
    protected class foldButtonHandler implements ActionListener {
        public void actionPerformed(ActionEvent e) {
            message = "Opponent waiting for turn.";
            prompt = "You fold.";
            player.foldHand();
        }
    }

    /**
     * Inner class that handles the callButton using ActionListener
     */
    protected class callButtonHandler implements ActionListener {
        public void actionPerformed(ActionEvent e) {
            // Match the outstanding bet and return control to the game flow.
            pot += bet;
            player.bet(bet);
            message = "You call.";
            prompt = "Next turn: ";
            responding = false;
            callButton.setEnabled(false);
            foldButton.setEnabled(false);
            changeTurn();
            updateFrame();
        }
    }

    /**
     * Inner class that handles the showdownButton using ActionListener
     */
    protected class showdownButtonHandler implements ActionListener {
        public void actionPerformed(ActionEvent e) {
            determineWinner();
            collectPot();
            showWinnerAlert();
        }
    }

    // Toggles the rules sidebar; reopening always returns to the overview page.
    protected class rulesButtonHandler implements ActionListener {// rules
        public void actionPerformed(ActionEvent e) {
            // mainFrame.setSize(1000, 1000);
            //rulesPanel.setVisible(!rulesPanel.isVisible() );
            if(!rulesPanel.isVisible()){
                rulesPanel.setVisible(true);
                rulesPart1.setVisible(true);
                rulesPanel = rulesPart1;
            }
            else
                rulesPanel.setVisible(false);
        }
    }

    // Shows the "Overview" rules page.
    protected class overviewButtonHandler implements ActionListener {// rules
        public void actionPerformed(ActionEvent e) {
            rulesPart1.setVisible(true);
            rulesPart2.setVisible(false);
            rulesPart3.setVisible(false);
            rulesPanel = rulesPart1;
        }
    }

    // Shows the "Gameplay" rules page.
    protected class gameplayButtonHandler implements ActionListener {// rules
        public void actionPerformed(ActionEvent e) {
            rulesPart1.setVisible(false);
            rulesPart2.setVisible(true);
            rulesPart3.setVisible(false);
            rulesPanel = rulesPart2;
        }
    }

    // Shows the "Example Hands" rules page.
    protected class exampleButtonHandler implements ActionListener {// rules
        public void actionPerformed(ActionEvent e) {
            // mainFrame.setSize(1000, 1000);
            rulesPart1.setVisible(false);
            rulesPart2.setVisible(false);
            rulesPart3.setVisible(true);
            rulesPanel = rulesPart3;
        }
    }

    /**
     * Function that puts up a Game Over Frame that can take us back to the Main Screen
     */
    protected void gameOver(String label) {
        gameOverFrame = new JFrame();
        // NOTE(review): EXIT_ON_CLOSE here terminates the whole application if
        // the user closes this dialog instead of clicking the menu button —
        // confirm that is intended (DISPOSE_ON_CLOSE may be more appropriate).
        gameOverFrame.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE);
        gameOverFrame.setBackground(Color.red);
        gameOverMessage = new JPanel();
        gameOverMessage.setBackground(Color.red);
        gameOverButtonPanel = new JPanel();
        gameOverButtonPanel.setBackground(Color.red);
        gameOverLabel = new JLabel(label);
        gameOverButton = new JButton("Back to Main Menu");
        gameOverButton.addActionListener(new ActionListener() {
            public void actionPerformed(ActionEvent e){
                gameOverFrame.setVisible(false);
                PokerMain restart = new PokerMain();
                restart.go();
            }
        });
        gameOverMessage.add(gameOverLabel);
        gameOverButtonPanel.add(gameOverButton);
        gameOverFrame.setSize(300, 200);
        gameOverFrame.setResizable(false);
        gameOverFrame.setLocation(250, 250);
        gameOverFrame.getContentPane().add(BorderLayout.NORTH, gameOverMessage);
        gameOverFrame.getContentPane().add(BorderLayout.SOUTH, gameOverButtonPanel);
        gameOverFrame.pack();
        gameOverFrame.setVisible(true);
        mainFrame.dispose();
    }
}
package fi.iki.joker.sevedroid;

import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Calendar;
import java.util.List;

import android.app.Activity;
import android.app.AlertDialog;
import android.app.DatePickerDialog;
import android.app.Dialog;
import android.content.DialogInterface;
import android.content.Intent;
import android.os.AsyncTask;
import android.os.Bundle;
import android.util.Log;
import android.view.Menu;
import android.view.MenuInflater;
import android.view.MenuItem;
import android.view.View;
import android.view.View.OnClickListener;
import android.widget.AdapterView;
import android.widget.AdapterView.OnItemSelectedListener;
import android.widget.ArrayAdapter;
import android.widget.DatePicker;
import android.widget.EditText;
import android.widget.PopupWindow;
import android.widget.ProgressBar;
import android.widget.Spinner;
import android.widget.TextView;
import android.widget.Toast;

/**
 * The main activity for the application: lets the user pick a case, a phase
 * under that case and a work type, enter an hour amount and a description,
 * and publish the hour entry to the Severa S3 SOAP service.
 *
 * TODO: Split this into multiple source files, too long now
 * Logcat: logcat Sevedroid:V *:S
 * @author juha
 */
public class SevedroidProjectActivity extends Activity implements OnItemSelectedListener, OnClickListener {

    /**
     * Key under which the project list is saved to spawned Activities' extras
     */
    public static final String PROJECTLIST_BUNDLE_KEY = "pListInMyStack";

    private static final String TAG = "Sevedroid";
    private static final int optionsMenuId = 1;
    private SeveraCommsUtils mScu = null;
    /** Request code used when launching the config activity for a result. */
    private static final int requestCode = 1;

    // IDs for various dialog views
    private static final int DATE_PICKER_DIALOG_ID = 0;
    private static final int NOT_CONNECTED_DIALOG_ID = 1;
    private static final int DIALOG_ID_MISSING_CASEGUID = 2;
    private static final int DIALOG_ID_MISSING_PHASE_GUID = 3;
    private static final int DIALOG_ID_MISSING_DESCRIPTION = 4;
    private static final int DIALOG_ID_BAD_WORKTYPE_GUID = 5;
    private static final int DIALOG_ID_BAD_USER_GUID = 6;
    private static final int DIALOG_ID_BAD_HOURS_QUANTITY = 7;
    private static final int DIALOG_ID_BAD_EVENT_DATE = 8;

    // true when the lists were restored from a saved instance state; used to
    // suppress the spurious onItemSelected events that setAdapter() fires
    private boolean stateRestored = false;
    private boolean fullyCreated = false;
    private static final String FULLY_CREATED_BOOL_KEY = "fullyCreatedBoolean";
    // date the user is claiming hours for (defaults to "now")
    private Calendar claimDate = null;

    /**
     * The list containing the Cases this user id has access to and the parcel identifier
     */
    protected ArrayList<S3CaseItem> projectList = null;
    protected static final String CASEITEMLIST_PARCEL_ID = "caseItemParcelID";

    /**
     * The list containing the phases under some particular case this user has access to and the parcel identifier
     */
    protected ArrayList<S3PhaseItem> phaseList = null;
    protected static final String PHASEITEMLIST_PARCEL_ID = "phaseItemParcelID";

    /**
     * The list containing the work type items under some particular phase and the parcel identifier
     */
    protected ArrayList<S3WorkTypeItem> workTypeList = null;
    protected static final String WORKTYPEITEMLIST_PARCEL_ID = "workTypeParcelID";

    protected String currentWorkTypeGUID = null;
    protected String currentPhaseGUID = null;

    ProgressBar projectsProgress = null;
    ProgressBar phasesProgress = null;
    ProgressBar workTypeProgress = null;

    ArrayAdapter<CharSequence> phaseAdapter = null;
    Spinner projectPhaseSpinner = null;
    ArrayAdapter<CharSequence> projectAdapter = null;
    Spinner projectNameSpinner = null;
    ArrayAdapter<CharSequence> workTypeAdapter = null;
    Spinner workTypeSpinner = null;

    // result of the most recent hour-entry publish attempt
    boolean hourEntryStatus = false;

    /**
     * This callback is called when user has selected a date from the change_data_button
     */
    private DatePickerDialog.OnDateSetListener mDateSetListener = new DatePickerDialog.OnDateSetListener() {
        public void onDateSet(DatePicker view, int year, int monthOfYear, int dayOfMonth) {
            claimDate.set(year, monthOfYear, dayOfMonth);
        }
    };

    @Override
    public void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        Log.d(TAG,"OnCreate called!");
        if(savedInstanceState == null) {
            Log.d(TAG, "Saved instance state is null, recreating Activity state.");
            stateRestored = false;
        } else if (savedInstanceState != null && savedInstanceState.getBoolean(FULLY_CREATED_BOOL_KEY) == false) {
            //this is the case that the activity was not fully constructed since user started it the first
            //time and we needed to ask for the API key.
            Log.d(TAG, "Saved instance state is not null, but still restoring Activity state (not fully created)...");
            stateRestored = false;
        } else {
            Log.d(TAG, "Saved instance state is not null, restoring Activity state...");
            projectList = savedInstanceState.getParcelableArrayList(CASEITEMLIST_PARCEL_ID);
            phaseList = savedInstanceState.getParcelableArrayList(PHASEITEMLIST_PARCEL_ID);
            workTypeList = savedInstanceState.getParcelableArrayList(WORKTYPEITEMLIST_PARCEL_ID);
            stateRestored = true;
        }
        if(SeveraCommsUtils.checkIfConnected(this) == false) {
            // no network — this app is useless offline, tell the user and bail out
            showDialog(NOT_CONNECTED_DIALOG_ID);
            return;
        }
        setContentView(R.layout.main);
        // test if the user has set the api key, if yes, then follow on to load up the projects
        // if not, then auto start the config activity
        SevedroidContentStore contentStore = new SevedroidContentStore(this);
        if(contentStore.fetchApiKey() == null) {
            Toast.makeText(this,"Please input your API key to use this app!",Toast.LENGTH_LONG).show();
            Intent intent = new Intent();
            intent.setClass(this, SevedroidConfig.class);
            this.startActivityForResult(intent, requestCode);
            return;
        } else {
            runOnCreate();
        }
    }

    @Override
    protected void onSaveInstanceState(Bundle outState) {
        Log.d(TAG,"onSaveInstanceState called!");
        outState.putParcelableArrayList(CASEITEMLIST_PARCEL_ID, projectList);
        outState.putParcelableArrayList(PHASEITEMLIST_PARCEL_ID, phaseList);
        outState.putParcelableArrayList(WORKTYPEITEMLIST_PARCEL_ID, workTypeList);
        outState.putBoolean(FULLY_CREATED_BOOL_KEY,fullyCreated);
        super.onSaveInstanceState(outState);
    }

    /**
     * Second half of activity construction; only run once an API key exists.
     * Wires up progress bars, buttons and the three spinners, and kicks off
     * the asynchronous project (case) load if no list was restored.
     */
    private void runOnCreate() {
        // make progress indicators global, hide before use
        projectsProgress = (ProgressBar)findViewById(R.id.projectsProgressBar);
        phasesProgress = (ProgressBar)findViewById(R.id.phasesProgressBar);
        workTypeProgress = (ProgressBar)findViewById(R.id.workTypeProgressBar);
        projectsProgress.setVisibility(View.GONE);
        phasesProgress.setVisibility(View.GONE);
        workTypeProgress.setVisibility(View.GONE);
        // the buttons are all handled by the OnClickListener implementation of this class)
        (findViewById(R.id.button1hour)).setOnClickListener(this);
        (findViewById(R.id.button2hour)).setOnClickListener(this);
        (findViewById(R.id.button3hour)).setOnClickListener(this);
        (findViewById(R.id.button4hour)).setOnClickListener(this);
        (findViewById(R.id.button5hour)).setOnClickListener(this);
        (findViewById(R.id.button6hour)).setOnClickListener(this);
        (findViewById(R.id.button7hour)).setOnClickListener(this);
        (findViewById(R.id.button8hour)).setOnClickListener(this);
        (findViewById(R.id.button9hour)).setOnClickListener(this);
        (findViewById(R.id.button10hour)).setOnClickListener(this);
        (findViewById(R.id.buttonplus30min)).setOnClickListener(this);
        (findViewById(R.id.button_claim)).setOnClickListener(this);
        (findViewById(R.id.button_claim_overtime)).setOnClickListener(this);
        (findViewById(R.id.change_date_button)).setOnClickListener(this);
        projectNameSpinner = (Spinner)findViewById(R.id.projectnamespinner);
        projectPhaseSpinner = (Spinner)findViewById(R.id.phasenamespinner);
        workTypeSpinner = (Spinner)findViewById(R.id.worktypespinner);
        if(projectList == null) {
            projectList = new ArrayList<S3CaseItem>();
            projectsProgress.setVisibility(View.VISIBLE);
            new LoadCasesXMLTask(this).execute();
            Toast.makeText(this, "Started to load projects... they will be available once loaded...", Toast.LENGTH_SHORT).show();
        }
        if(phaseList == null) {
            phaseList = new ArrayList<S3PhaseItem>();
        }
        if(workTypeList == null) {
            workTypeList = new ArrayList<S3WorkTypeItem>();
        }
        //as the spinner gets enabled and disabled from the asynctask, make sure it's not null
        // NOTE(review): raw ArrayAdapter - the element type of the backing lists is not
        // CharSequence, so these adapters rely on toString(); generics can't be tightened
        // without changing the field declarations.
        projectAdapter = new ArrayAdapter(this, android.R.layout.simple_spinner_dropdown_item, projectList);
        projectAdapter.setDropDownViewResource(android.R.layout.simple_spinner_dropdown_item);
        projectNameSpinner.setAdapter(projectAdapter);
        projectNameSpinner.setOnItemSelectedListener(this);
        phaseAdapter = new ArrayAdapter(this, android.R.layout.simple_spinner_dropdown_item,phaseList);
        phaseAdapter.setDropDownViewResource(android.R.layout.simple_spinner_dropdown_item);
        projectPhaseSpinner.setOnItemSelectedListener(this);
        projectPhaseSpinner.setAdapter(phaseAdapter);
        workTypeAdapter = new ArrayAdapter(this, android.R.layout.simple_spinner_dropdown_item,workTypeList);
        workTypeAdapter.setDropDownViewResource(android.R.layout.simple_spinner_dropdown_item);
        workTypeSpinner.setOnItemSelectedListener(this);
        workTypeSpinner.setAdapter(workTypeAdapter);
        claimDate = Calendar.getInstance();
        fullyCreated = true;
        Log.d(TAG,"OnCreate for main activity done.");
    }

    @Override
    protected void onActivityResult(int requestCode, int resultCode, Intent data) {
        // Result from the API-key config activity.
        if(requestCode == SevedroidProjectActivity.requestCode && resultCode == Activity.RESULT_OK) {
            Log.d(TAG,"API KEY input finished with ok result... re-create!");
            //TODO: Some happy day, change this to Activity.reCreate(), api level 11
            stateRestored = false;
            Log.d(TAG, "Here we relie on onCreate for being called when the sevedroidConfig finishes.");
        } else {
            Log.d(TAG,"Received activity result:"+resultCode+ " with owner requestCode of "+requestCode+", but don't know what to do with it");
        }
    }

    @Override
    public boolean onCreateOptionsMenu(Menu menu) {
        MenuInflater inflater = getMenuInflater();
        inflater.inflate(R.menu.mainmenu, menu);
        return true;
    }

    /** @return the list of cases (projects) loaded for this user */
    public List<S3CaseItem> getProjectList() {
        return this.projectList;
    }

    @Override
    public boolean onOptionsItemSelected(MenuItem item) {
        Log.d(TAG,"OnOptionsItemSelected...");
        switch (item.getItemId()) {
        case R.id.input_api_key_option:
            Intent intent = new Intent();
            intent.setClass(this, SevedroidConfig.class);
            Log.d(TAG,"SevedroidConfig Activity starting...");
            this.startActivity(intent);
            break;
        case R.id.query_claimed_hours:
            // Pass the loaded project list along so the query screen need not reload it.
            Intent queryIntent = new Intent();
            queryIntent.setClass(this, QueryHourEntries.class);
            Bundle projectsListBundle = new Bundle();
            projectsListBundle.putParcelableArrayList(PROJECTLIST_BUNDLE_KEY, projectList);
            queryIntent.putExtras(projectsListBundle);
            Log.d(TAG,"QueryHourEntries Activity starting...");
            this.startActivity(queryIntent);
            break;
        case R.id.refresh:
            // Drop all cached lists and rebuild the UI from scratch.
            this.projectList = null;
            this.phaseList = null;
            this.workTypeList = null;
            runOnCreate();
            break;
        }
        return super.onOptionsItemSelected(item);
    }

    /**
     * For some reason, I need to recreate the whole spinner, adapter, listener deal in order to
     * get the Spinner to refresh. Please debug & optimize.
     * DO NOT CALL FROM ASyncTask threads, will fail!
     * These also do have the side-effect that the onItemSelected will get called. Thus, the
     * binary switch there on that method.
     */
    private void projectSpinnerRefreshHack() {
        this.projectAdapter.notifyDataSetChanged();
        projectNameSpinner = (Spinner)findViewById(R.id.projectnamespinner);
        projectAdapter = new ArrayAdapter(this, android.R.layout.simple_spinner_dropdown_item,projectList);
        projectAdapter.setDropDownViewResource(android.R.layout.simple_spinner_dropdown_item);
        projectNameSpinner.setOnItemSelectedListener(this);
        projectNameSpinner.setAdapter(projectAdapter);
    }

    /** Same refresh hack as {@link #projectSpinnerRefreshHack()}, for the phase spinner. */
    private void phaseSpinnerRefreshHack() {
        this.phaseAdapter.notifyDataSetChanged();
        projectPhaseSpinner = (Spinner)findViewById(R.id.phasenamespinner);
        phaseAdapter = new ArrayAdapter(this, android.R.layout.simple_spinner_dropdown_item,phaseList);
        phaseAdapter.setDropDownViewResource(android.R.layout.simple_spinner_dropdown_item);
        projectPhaseSpinner.setOnItemSelectedListener(this);
        projectPhaseSpinner.setAdapter(phaseAdapter);
    }

    /** Same refresh hack as {@link #projectSpinnerRefreshHack()}, for the work type spinner. */
    private void workTypeSpinnerRefreshHack() {
        this.workTypeAdapter.notifyDataSetChanged();
        workTypeSpinner = (Spinner)findViewById(R.id.worktypespinner);
        workTypeAdapter = new ArrayAdapter(this, android.R.layout.simple_spinner_dropdown_item,workTypeList);
        workTypeAdapter.setDropDownViewResource(android.R.layout.simple_spinner_dropdown_item);
        workTypeSpinner.setOnItemSelectedListener(this);
        workTypeSpinner.setAdapter(workTypeAdapter);
    }

    /**
     * Cascading selection: picking a case loads its phases, picking a phase loads
     * its work types, picking a work type just remembers its GUID for claiming.
     */
    @Override
    public void onItemSelected(AdapterView<?> adapterView, View view, int position, long id) {
        Log.d(TAG,"OnItemSelected called: adapterViewID:"+adapterView.getId()+"view ID:"+view.getId()+", pos:"+position+" id:"+id);
        if(adapterView.getId() == R.id.projectnamespinner) {
            /* user selected new case, look up case phases */
            if(stateRestored) {
                Log.d(TAG,"Because state is just restored, ignoring this onItemSelected event as a side effect of that...");
                return;
            } else {
                String caseGuid = projectList.get(position).getCaseGuid();
                Log.d(TAG,"User selected case pos: "+position+" with GUID: "+caseGuid);
                if(caseGuid == null || caseGuid.isEmpty()) {
                    showDialog(DIALOG_ID_MISSING_CASEGUID);
                    return;
                }
                phasesProgress.setVisibility(View.VISIBLE);
                new LoadPhasesXMLTask(this).execute(caseGuid);
            }
        } else if(adapterView.getId() == R.id.phasenamespinner) {
            if(stateRestored) {
                Log.d(TAG,"Because state is just restored, ignoring this onItemSelected event as a side effect of that...");
                return;
            } else {
                Log.d(TAG,"User selected phase pos: "+position);
                Log.d(TAG,"Phase GUID for claiming:"+phaseList.get(position).getPhaseGUID());
                Log.d(TAG,"Phase Name for claiming:"+phaseList.get(position).getPhaseName());
                String phaseGuid = phaseList.get(position).getPhaseGUID();
                this.currentPhaseGUID = phaseGuid;
                Log.d(TAG,"Setting currentPhaseGUID:"+this.currentPhaseGUID);
                if(phaseGuid == null || phaseGuid.isEmpty()) {
                    showDialog(DIALOG_ID_MISSING_PHASE_GUID);
                    return;
                }
                new LoadWorkTypesXMLTask(this).execute(phaseGuid);
            }
        } else if(adapterView.getId() == R.id.worktypespinner) {
            if(stateRestored) {
                Log.d(TAG,"Because state is just restored, ignoring this onItemSelected event as a side effect of that...");
                // last spinner in the restore cascade: stop suppressing events now
                stateRestored = false;
                return;
            } else {
                String workTypeGuid = workTypeList.get(position).getWorkTypeGUID();
                Log.d(TAG,"User selected work type pos: "+position);
                Log.d(TAG,"WorkType Guid for claiming:"+workTypeList.get(position).getWorkTypeGUID());
                Log.d(TAG,"WorkType Name for claiming:"+workTypeList.get(position).getWorkTypeName());
                this.currentWorkTypeGUID = workTypeGuid;
            }
        }
    }

    @Override
    public void onNothingSelected(AdapterView<?> arg0) {
        Log.d(TAG,"OnNothingSelected called.");
    }

    /**
     * Single click handler for all buttons: the hour shortcut buttons fill the
     * hours/minutes fields, +30min increments them, and the claim buttons
     * validate everything and fire off a PublishHourEntryTask.
     */
    @Override
    public void onClick(View v) {
        Log.d(TAG,"OnClick Called for view ID: "+v.getId());
        int id = v.getId();
        EditText hoursDisplay = (EditText)this.findViewById(R.id.hours_amount);
        EditText minutesDisplay = (EditText)this.findViewById(R.id.minutes_amount);
        //check if hours display and minutes display are numeric. If not, reset to zero
        try {
            Integer.parseInt(hoursDisplay.getText().toString());
        } catch (NumberFormatException e) {
            hoursDisplay.setText("0");
        }
        try {
            Integer.parseInt(minutesDisplay.getText().toString());
        } catch (NumberFormatException e) {
            minutesDisplay.setText("0");
        }
        switch(id) {
        case R.id.button1hour:
            hoursDisplay.setText("1");
            minutesDisplay.setText("0");
            Log.d(TAG, "Adding 1 to hours...");
            break;
        case R.id.button2hour:
            hoursDisplay.setText("2");
            minutesDisplay.setText("0");
            Log.d(TAG, "Adding 2 to hours...");
            break;
        case R.id.button3hour:
            hoursDisplay.setText("3");
            minutesDisplay.setText("0");
            break;
        case R.id.button4hour:
            hoursDisplay.setText("4");
            minutesDisplay.setText("0");
            break;
        case R.id.button5hour:
            hoursDisplay.setText("5");
            minutesDisplay.setText("0");
            break;
        case R.id.button6hour:
            hoursDisplay.setText("6");
            minutesDisplay.setText("0");
            break;
        case R.id.button7hour:
            hoursDisplay.setText("7");
            minutesDisplay.setText("0");
            break;
        case R.id.button8hour:
            hoursDisplay.setText("8");
            minutesDisplay.setText("0");
            break;
        case R.id.button9hour:
            hoursDisplay.setText("9");
            minutesDisplay.setText("0");
            break;
        case R.id.button10hour:
            hoursDisplay.setText("10");
            minutesDisplay.setText("0");
            break;
        case R.id.buttonplus30min:
            // add 30 minutes to minutes display and try to be smart about it
            Log.d(TAG,"Minutesdisplay is: ["+minutesDisplay.getText()+"]");
            if(minutesDisplay.getText() == null
                    || minutesDisplay.getText().toString().equals("")
                    || minutesDisplay.getText().toString().equals("0")) {
                Log.d(TAG,"Since it seems null, setting minutes display to 30.");
                minutesDisplay.setText("30");
            } else {
                Log.d(TAG,"Since it is not null, incrementing minutes display by 30.");
                int realAmount = Integer.parseInt(minutesDisplay.getText().toString());
                realAmount += 30;
                if (realAmount > 59) {
                    // carry the overflow into the hours field
                    int realHours = Integer.parseInt(hoursDisplay.getText().toString());
                    hoursDisplay.setText(String.valueOf(realHours+1));
                    minutesDisplay.setText(String.valueOf(realAmount-60));
                } else {
                    minutesDisplay.setText(String.valueOf(realAmount));
                }
            }
            break;
        case R.id.button_claim_overtime:
        case R.id.button_claim:
            Log.d(TAG,"Started to claim...");
            String description = ((EditText)findViewById(R.id.explanation_text)).getText().toString();
            if(description == null || description.isEmpty()) {
                showDialog(DIALOG_ID_MISSING_DESCRIPTION);
                return;
            }
            if(claimDate == null) {
                claimDate = Calendar.getInstance();
            }
            String eventDate = SevedroidConstants.S3_DATE_FORMATTER.format(claimDate.getTime());
            if(eventDate == null || eventDate.isEmpty()) {
                showDialog(DIALOG_ID_BAD_EVENT_DATE);
                return;
            }
            String phaseGuid = this.currentPhaseGUID;
            if(phaseGuid == null || phaseGuid.isEmpty()) {
                showDialog(DIALOG_ID_MISSING_PHASE_GUID);
                return;
            }
            String hours = ((EditText)findViewById(R.id.hours_amount)).getText().toString();
            String minutes = ((EditText)findViewById(R.id.minutes_amount)).getText().toString();
            // Convert minutes to hundredths of an hour (minutes/0.6 == minutes*100/60).
            // BUG FIX: the fraction must always be two digits — without padding,
            // e.g. 3 minutes rounded to "5" and was read back as .5 hours, not .05.
            String quantity = hours+"."+String.format("%02d", Math.round((Integer.parseInt(minutes))/0.6));
            if(quantity == null || quantity.isEmpty()) {
                showDialog(DIALOG_ID_BAD_HOURS_QUANTITY);
                return;
            }
            SevedroidContentStore scs = new SevedroidContentStore(this);
            String userGuid = scs.fetchUserGUID();
            if(userGuid == null || userGuid.isEmpty()) {
                showDialog(DIALOG_ID_BAD_USER_GUID);
                return;
            }
            String workTypeGuid = this.currentWorkTypeGUID;
            if(workTypeGuid == null || workTypeGuid.isEmpty()) {
                showDialog(DIALOG_ID_BAD_WORKTYPE_GUID);
                return;
            }
            String [] params = {description, eventDate, phaseGuid, quantity, userGuid, workTypeGuid};
            new PublishHourEntryTask(this).execute(params);
            break;
        case R.id.change_date_button:
            Log.d(TAG, "Change claim date...");
            showDialog(DATE_PICKER_DIALOG_ID);
            break;
        default:
            Log.d(TAG, "Click event detected but no action taken...");
            break;
        }
        return;
    }

    /** Called on the UI thread when the case (project) list has finished loading. */
    protected void receiveProjectLoadingReadyEvent() {
        Log.d(TAG,"Received project loading ready event on UI thread...");
        Log.d(TAG,"Here, length of loaded projects list: "+projectList.size());
        projectSpinnerRefreshHack();
        Toast.makeText(this, "Projects are now loaded, you can now make your selection.", Toast.LENGTH_SHORT).show();
        projectsProgress.setVisibility(View.GONE);
    }

    /** Called on the UI thread when the phase list has finished loading. */
    protected void receivePhasesLoadingReadyEvent() {
        Log.d(TAG,"Received phases loading ready event on UI thread...");
        Log.d(TAG,"Here, length of loaded phases list: "+phaseList.size());
        phaseSpinnerRefreshHack();
        Toast.makeText(this, "Phases are now loaded, you can now make your selection.", Toast.LENGTH_SHORT).show();
        phasesProgress.setVisibility(View.GONE);
    }

    /** Called on the UI thread when the work type list has finished loading. */
    protected void receiveWorkTypesLoadingReadyEvent() {
        Log.d(TAG,"Received work types loading ready event on UI thread...");
        Log.d(TAG,"Here, length of loaded worktypes list: "+workTypeList.size());
        workTypeSpinnerRefreshHack();
        Toast.makeText(this, "Work types are now loaded, you can now make your selection.", Toast.LENGTH_SHORT).show();
        // BUG FIX: was hiding phasesProgress (copy-paste), leaving the work type
        // progress indicator spinning forever.
        workTypeProgress.setVisibility(View.GONE);
    }

    /** Called on the UI thread when the hour-entry publish attempt has finished. */
    protected void receivePublishHourEntryReadyEvent() {
        Log.d(TAG,"Received hour entry ready event on UI thread...");
        Log.d(TAG,"Result was: "+hourEntryStatus);
        if(hourEntryStatus) {
            Toast.makeText(this, "Your work hours have been saved!", Toast.LENGTH_SHORT).show();
        } else {
            Toast.makeText(this, "Failed to save your work hours! Please try again later!", Toast.LENGTH_LONG).show();
        }
    }

    /**
     * Builds all dialogs used by this activity: the claim-date picker, the fatal
     * "no network" alert, and a family of validation alerts keyed by dialog id.
     */
    @Override
    protected Dialog onCreateDialog(int id) {
        if(id == DATE_PICKER_DIALOG_ID) {
            return new DatePickerDialog(this, mDateSetListener,
                    claimDate.get(Calendar.YEAR),
                    claimDate.get(Calendar.MONTH),
                    claimDate.get(Calendar.DAY_OF_MONTH));
        }
        // Critical Alert Dialogs
        if(id == NOT_CONNECTED_DIALOG_ID) {
            AlertDialog.Builder builder = new AlertDialog.Builder(this);
            builder.setMessage("This application requires a network connection.")
                .setCancelable(false)
                .setPositiveButton("OK", new DialogInterface.OnClickListener() {
                    public void onClick(DialogInterface dialog, int id) {
                        // no network: the app cannot do anything useful, so quit
                        SevedroidProjectActivity.this.finish();
                    }
                });
            AlertDialog alert = builder.create();
            return alert;
        }
        // Non-critical but blocking dialogs
        String alertMessage = "(UNSPECIFIED)";
        switch(id) {
        case DIALOG_ID_MISSING_CASEGUID:
            alertMessage = "Missing GUID For Case. Most likely this is a bug. Please report bugs to:"+
                SevedroidConstants.DF_SUPPORT_EMAIL;
            break;
        case DIALOG_ID_MISSING_PHASE_GUID:
            alertMessage = "Missing GUID for Phase. Most likely this is a bug. Please report bugs to:"+
                SevedroidConstants.DF_SUPPORT_EMAIL;
            break;
        case DIALOG_ID_MISSING_DESCRIPTION:
            alertMessage = "You hours claim is missing description.";
            break;
        case DIALOG_ID_BAD_WORKTYPE_GUID:
            alertMessage = "Work type for this claim is missing.";
            break;
        case DIALOG_ID_BAD_USER_GUID:
            alertMessage = "User's unique ID is missing. Most likely this is a bug. Please report bugs to:"+
                SevedroidConstants.DF_SUPPORT_EMAIL;
            break;
        case DIALOG_ID_BAD_HOURS_QUANTITY:
            alertMessage = "The hours quantity is not proper.";
            break;
        case DIALOG_ID_BAD_EVENT_DATE:
            alertMessage = "The date for this claim is empty or missing.";
            break;
        }
        AlertDialog.Builder builder = new AlertDialog.Builder(this);
        builder.setMessage(alertMessage)
            .setCancelable(false)
            .setPositiveButton("OK", new DialogInterface.OnClickListener() {
                public void onClick(DialogInterface dialog, int id) {
                    return;
                }
            });
        AlertDialog alert = builder.create();
        return alert;
    }

    //TODO: Make these asynctasks go away from this already bloated class
    /**
     * AsyncTask for loading the cases XML from S3 SOAP service
     */
    private class LoadCasesXMLTask extends AsyncTask<Void, Integer, Boolean> {
        private SevedroidProjectActivity mParent;
        public static final int STATUS_INIT = 1;
        public static final int STATUS_TRANSFERRING = 2;
        public static final int STATUS_PARSING = 3;
        public static final int STATUS_RETURNING = 4;

        public LoadCasesXMLTask(SevedroidProjectActivity parent) {
            mParent = parent;
        }

        @Override
        protected Boolean doInBackground(Void... nullArgs) {
            Log.d(TAG,"Started doInBackground for LoadCasesXMLTask!");
            // NOTE(review): setClickable is a UI call made from a background
            // thread here; it should be moved to onPreExecute/onPostExecute.
            mParent.projectNameSpinner.setClickable(false);
            publishProgress(STATUS_INIT);
            SeveraCommsUtils scu = new SeveraCommsUtils();
            S3CaseContainer S3Cases = S3CaseContainer.getInstance();
            //The next invocation is the one that takes time
            publishProgress(STATUS_TRANSFERRING);
            S3Cases.setCasesXML(scu.getAllCasesXml(mParent));
            publishProgress(STATUS_PARSING);
            mParent.projectList = S3Cases.getCases();
            publishProgress(STATUS_RETURNING);
            if(mParent.projectList.isEmpty()) {
                Log.e(TAG,"Project list is empty! Returning with nothing to tell.");
                return Boolean.FALSE;
            } else {
                Log.d(TAG,"Project list is not empty, enabling projectspinner...");
                mParent.projectNameSpinner.setClickable(true);
                return Boolean.TRUE;
            }
        }

        protected void onProgressUpdate(Integer... progress) {
            Log.d(TAG,"Setting progress to: "+progress[0].toString());
            setProgress(progress[0]);
        }

        @Override
        protected void onPostExecute(Boolean result) {
            Log.d(TAG,"onPostExecute on LoadCasesXMLTask firing.");
            mParent.receiveProjectLoadingReadyEvent();
        }
    }

    /**
     * AsyncTask for loading the phases XML from S3 SOAP service
     */
    private class LoadPhasesXMLTask extends AsyncTask<String, Integer, Boolean> {
        private SevedroidProjectActivity mParent;
        public static final int STATUS_INIT = 1;
        public static final int STATUS_TRANSFERRING = 2;
        public static final int STATUS_PARSING = 3;
        public static final int STATUS_RETURNING = 4;

        public LoadPhasesXMLTask(SevedroidProjectActivity parent) {
            mParent = parent;
        }

        @Override
        protected Boolean doInBackground(String... caseGuid) {
            Log.d(TAG,"Started doInBackground for LoadPhasesXMLTask!");
            if(caseGuid != null && !caseGuid[0].isEmpty()) {
                Log.d(TAG, "Parameters checked OK.");
            }
            // NOTE(review): UI call from a background thread, see LoadCasesXMLTask.
            mParent.projectPhaseSpinner.setClickable(false);
            publishProgress(STATUS_INIT);
            SeveraCommsUtils scu = new SeveraCommsUtils();
            S3PhaseContainer S3Phases = S3PhaseContainer.getInstance();
            //The next invocation is the one that takes time
            publishProgress(STATUS_TRANSFERRING);
            S3Phases.setPhasesXML(scu.getPhasesXMLByCaseGUID(mParent, caseGuid[0]));
            publishProgress(STATUS_PARSING);
            mParent.phaseList = S3Phases.getPhases();
            publishProgress(STATUS_RETURNING);
            if(mParent.phaseList.isEmpty()) {
                Log.e(TAG,"Phase list is empty! Returning with nothing to tell.");
                return Boolean.FALSE;
            } else {
                Log.d(TAG,"Project list is not empty, enabling projectspinner...");
                mParent.projectPhaseSpinner.setClickable(true);
                return Boolean.TRUE;
            }
        }

        protected void onProgressUpdate(Integer... progress) {
            Log.d(TAG,"Setting progress to: "+progress[0].toString());
            setProgress(progress[0]);
        }

        @Override
        protected void onPostExecute(Boolean result) {
            // BUG FIX: log message named the wrong task (LoadCasesXMLTask).
            Log.d(TAG,"onPostExecute on LoadPhasesXMLTask firing.");
            if(result == null) {
            } else {
                mParent.receivePhasesLoadingReadyEvent();
            }
        }
    }

    /**
     * AsyncTask for loading the worktypes XML from S3 SOAP service
     */
    private class LoadWorkTypesXMLTask extends AsyncTask<String, Integer, Boolean> {
        private SevedroidProjectActivity mParent;
        public static final int STATUS_INIT = 1;
        public static final int STATUS_TRANSFERRING = 2;
        public static final int STATUS_PARSING = 3;
        public static final int STATUS_RETURNING = 4;

        public LoadWorkTypesXMLTask(SevedroidProjectActivity parent) {
            mParent = parent;
        }

        @Override
        protected Boolean doInBackground(String... phaseGuid) {
            // BUG FIX: log message named the wrong task (LoadPhasesXMLTask).
            Log.d(TAG,"Started doInBackground for LoadWorkTypesXMLTask!");
            // BUG FIX: the emptiness check was inverted ("isEmpty()" instead of
            // "!isEmpty()"), unlike the identical check in LoadPhasesXMLTask.
            if(phaseGuid != null && !phaseGuid[0].isEmpty()) {
                Log.d(TAG,"Paramters checked OK.");
            }
            // NOTE(review): UI call from a background thread, see LoadCasesXMLTask.
            mParent.workTypeSpinner.setClickable(false);
            publishProgress(STATUS_INIT);
            SeveraCommsUtils scu = new SeveraCommsUtils();
            S3WorkTypeContainer S3WorkTypes = S3WorkTypeContainer.getInstance();
            //The next invocation is the one that takes time
            publishProgress(STATUS_TRANSFERRING);
            S3WorkTypes.setWorkTypesXML(scu.getWorkTypesXMLByPhaseGUID(mParent, phaseGuid[0]));
            publishProgress(STATUS_PARSING);
            mParent.workTypeList = S3WorkTypes.getWorkTypes();
            publishProgress(STATUS_RETURNING);
            if(mParent.workTypeList.isEmpty()) {
                Log.e(TAG,"Worktype list is empty! Returning with nothing to tell.");
                return Boolean.FALSE;
            } else {
                Log.d(TAG,"Work type list is not empty, enabling worktypespinner...");
                mParent.workTypeSpinner.setClickable(true);
                return Boolean.TRUE;
            }
        }

        protected void onProgressUpdate(Integer... progress) {
            Log.d(TAG,"Setting progress to: "+progress[0].toString());
            setProgress(progress[0]);
        }

        @Override
        protected void onPostExecute(Boolean result) {
            Log.d(TAG,"onPostExecute on LoadWorkTypesXMLTask firing.");
            mParent.receiveWorkTypesLoadingReadyEvent();
        }
    }

    /**
     * AsyncTask for calling the IHourEntry
     */
    private class PublishHourEntryTask extends AsyncTask<String, Integer, Boolean> {
        SevedroidProjectActivity mParent = null;

        PublishHourEntryTask(SevedroidProjectActivity activity) {
            mParent = activity;
        }

        /**
         * Publish this hour entry
         * @params String Description
         * @params String EventDate - the date formatted YYYY-MM-DD
         * @params String Phase's guid
         * @params String quantity amount of hours formatted as "1.5" (that's DOT, not comma)
         * @params String user guid as obtained from app config
         * @params String work type GUID
         */
        @Override
        protected Boolean doInBackground(String... params) {
            //gather necessary parameters
            String description = params[0];
            String eventDate = params[1];
            String phaseGuid = params[2];
            String quantity = params[3];
            String userGuid = params[4];
            String workTypeGuid = params[5];
            SeveraCommsUtils scu = new SeveraCommsUtils();
            boolean res = scu.publishHourEntry(mParent, description, eventDate, phaseGuid,
                    quantity, userGuid, workTypeGuid);
            return Boolean.valueOf(res);
        }

        @Override
        protected void onPostExecute(Boolean result) {
            Log.d(TAG,"onPostExecute on PublishHourEntryTask firing.");
            if(result) {
                mParent.hourEntryStatus = true;
            } else {
                mParent.hourEntryStatus = false;
            }
            mParent.receivePublishHourEntryReadyEvent();
        }

        @Override
        protected void onProgressUpdate(Integer... values) {
            super.onProgressUpdate(values);
        }
    }
}
package org.voltdb.compiler;

import java.io.Serializable;
import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Timer;
import java.util.TimerTask;

import org.voltdb.common.Constants;
import org.voltdb.planner.BoundPlan;

import com.google_voltpatches.common.cache.Cache;
import com.google_voltpatches.common.cache.CacheBuilder;

/**
 * Keep a two-level cache of plans generated by the Ad Hoc
 * planner.
 *
 * First, store literals mapping to full plans that are
 * ready to execute.
 *
 * Second, store a string representation of a parameterized parsed
 * statement mapped to core parameterized plans. These parameterized
 * plans need parameter values and sql literals in order to be
 * actually used.
 */
public class AdHocCompilerCache implements Serializable {
    private static final long serialVersionUID = 1L;

    // STATIC CODE TO MANAGE CACHE LIFETIMES / GLOBALNESS

    // weak values should remove the object when the catalog version is no longer needed
    private static Cache<Integer, AdHocCompilerCache> m_catalogVersionMatch =
            CacheBuilder.newBuilder().weakValues().build();

    /** Drop every per-catalog-version cache instance. */
    public static void clearVersionCache() {
        m_catalogVersionMatch.invalidateAll();
    }

    /**
     * Get the global cache for a given version of the catalog. Note that there can be only
     * one cache per catalogVersion at a time.
     */
    public synchronized static AdHocCompilerCache getCacheForCatalogVersion(int catalogVersion) {
        AdHocCompilerCache cache = m_catalogVersionMatch.getIfPresent(catalogVersion);

        if (cache == null) {
            cache = new AdHocCompilerCache();
            m_catalogVersionMatch.put(catalogVersion, cache);
        }

        return cache;
    }

    // PER-INSTANCE CACHING CODE

    // cache sizes determined at construction time
    final int MAX_LITERAL_ENTRIES;
    final int MAX_CORE_ENTRIES;

    /** cache of literals to full plans */
    final Map<String, AdHocPlannedStatement> m_literalCache;

    /** cache of parameterized plan descriptions to one or more core parameterized plans,
     * each plan optionally has its own requirements for which parameters need to be bound
     * to what values to enable its specialized (expression-indexed) plan. */
    final Map<String, List<BoundPlan> > m_coreCache;

    // placeholder stats used during development that may/may not survive
    long m_literalHits = 0;
    long m_literalQueries = 0;
    long m_literalInsertions = 0;
    long m_literalEvictions = 0;
    long m_planHits = 0;
    long m_planQueries = 0;
    long m_planInsertions = 0;
    long m_planEvictions = 0;

    /** {@see this#startPeriodicStatsPrinting() } */
    // NOTE(review): java.util.Timer is not Serializable although this class
    // implements Serializable -- serializing an instance with an active timer
    // would fail; consider marking this field transient.
    Timer m_statsTimer = null;

    /**
     * Constructor with default cache sizes.
     */
    private AdHocCompilerCache() {
        this(1000, 1000);
    }

    /**
     * Constructor with specific cache sizes is only called directly for testing.
     *
     * @param maxLiteralEntries cache size for literals
     * @param maxCoreEntries cache size for parameterized plans
     */
    AdHocCompilerCache(int maxLiteralEntries, int maxCoreEntries) {
        MAX_LITERAL_ENTRIES = maxLiteralEntries;
        MAX_CORE_ENTRIES = maxCoreEntries;

        // an LRU cache map (access-ordered LinkedHashMap; evicts the eldest
        // entry once size exceeds MAX_LITERAL_ENTRIES)
        m_literalCache = new LinkedHashMap<String, AdHocPlannedStatement>(MAX_LITERAL_ENTRIES * 2, .75f, true) {
            private static final long serialVersionUID = 1L;

            // This method is called just after a new entry has been added
            @Override
            public boolean removeEldestEntry(Map.Entry<String, AdHocPlannedStatement> eldest) {
                if (size() > MAX_LITERAL_ENTRIES) {
                    ++m_literalEvictions;
                    return true;
                }
                return false;
            }
        };

        // an LRU cache map
        m_coreCache = new LinkedHashMap<String, List<BoundPlan> >(MAX_CORE_ENTRIES * 2, .75f, true) {
            private static final long serialVersionUID = 1L;

            // This method is called just after a new entry has been added
            @Override
            public boolean removeEldestEntry(Map.Entry<String, List<BoundPlan> > eldest) {
                if (size() > MAX_CORE_ENTRIES) {
                    ++m_planEvictions;
                    return true;
                }
                return false;
            }
        };
    }

    /**
     * Stats printing method used during development.
     * Probably shouldn't live past real stats integration.
     */
    synchronized void printStats() {
        // NOTE(review): with zero queries the hit-rate division yields NaN in
        // the formatted output; harmless for a dev-only printout.
        String line1 = String.format("CACHE STATS - Literals: Hits %d/%d (%.1f%%), Inserts %d Evictions %d\n",
                m_literalHits, m_literalQueries, (m_literalHits * 100.0) / m_literalQueries,
                m_literalInsertions, m_literalEvictions);
        String line2 = String.format("CACHE STATS - Plans: Hits %d/%d (%.1f%%), Inserts %d Evictions %d\n",
                m_planHits, m_planQueries, (m_planHits * 100.0) /m_planQueries,
                m_planInsertions, m_planEvictions);
        System.out.print(line1 + line2);
        System.out.flush();

        // reset these
        m_literalHits = 0;
        m_literalQueries = 0;
        m_literalInsertions = 0;
        m_literalEvictions = 0;
        m_planHits = 0;
        m_planQueries = 0;
        m_planInsertions = 0;
        m_planEvictions = 0;
    }

    /**
     * @param sql SQL literal
     * @return full, ready-to-go plan
     */
    public synchronized AdHocPlannedStatement getWithSQL(String sql) {
        ++m_literalQueries;

        AdHocPlannedStatement retval = m_literalCache.get(sql);
        if (retval != null) {
            ++m_literalHits;
        }
        return retval;
    }

    /**
     * @param parsedToken String representing a parameterized and parsed
     * SQL statement
     * @return A CorePlan that needs parameter values to run.
     */
    public synchronized List<BoundPlan> getWithParsedToken(String parsedToken) {
        ++m_planQueries;

        List<BoundPlan> retval = m_coreCache.get(parsedToken);
        if (retval != null) {
            ++m_planHits;
        }
        return retval;
    }

    /**
     * Called from the PlannerTool directly when it finishes planning.
     * This is the only way to populate the cache.
     *
     * Note that one goal here is to reduce the number of times two
     * separate plan instances with the same value are input for the
     * same SQL literal.
     *
     * @param sql original query text
     * @param parsedToken massaged query text, possibly with literals purged
     * @param planIn freshly planned statement for sql
     * @param extractedLiterals the basis values for any "bound parameter" restrictions to plan re-use
     */
    public synchronized void put(String sql, String parsedToken, AdHocPlannedStatement planIn, String[] extractedLiterals) {
        assert(sql != null);
        assert(parsedToken != null);
        assert(planIn != null);

        AdHocPlannedStatement plan = planIn;
        assert(new String(plan.sql, Constants.UTF8ENCODING).equals(sql));

        // uncomment this to get some raw stdout cache performance stats every 5s
        //startPeriodicStatsPrinting();

        BoundPlan matched = null;
        BoundPlan unmatched = new BoundPlan(planIn.core, planIn.parameterBindings(extractedLiterals));

        // deal with the parameterized plan cache first
        List<BoundPlan> boundVariants = m_coreCache.get(parsedToken);
        if (boundVariants == null) {
            boundVariants = new ArrayList<BoundPlan>();
            m_coreCache.put(parsedToken, boundVariants);
            // Note that there is an edge case in which more than one plan is getting counted as one
            // "plan insertion". This only happens when two different plans arose from the same parameterized
            // query (token) because one invocation used the correct constants to trigger an expression index and
            // another invocation did not. These are not counted separately (which would have to happen below
            // after each call to boundVariants.add) because they are not evicted separately.
            // It seems saner to use consistent units when counting insertions vs. evictions.
            ++m_planInsertions;
        } else {
            // look for an existing bound plan equivalent to the new one
            for (BoundPlan boundPlan : boundVariants) {
                if (boundPlan.equals(unmatched)) {
                    matched = boundPlan;
                    break;
                }
            }
            if (matched != null) {
                // if a different core is found, reuse it
                // this is useful when updating the literal cache
                if (unmatched.core != matched.core) {
                    plan = new AdHocPlannedStatement(planIn, matched.core);
                    plan.setBoundConstants(matched.constants);
                }
            }
        }
        if (matched == null) {
            // Don't count insertions (of possibly repeated tokens) here
            // -- see the comment above where only UNIQUE token insertions are being counted, instead.
            boundVariants.add(unmatched);
        }

        // then deal with the literal cache
        AdHocPlannedStatement cachedPlan = m_literalCache.get(sql);
        if (cachedPlan == null) {
            m_literalCache.put(sql, plan);
            ++m_literalInsertions;
        } else {
            assert(cachedPlan.equals(plan));
        }
    }

    /**
     * Start a timer that prints cache stats to the console every 5s.
     * Used for development until we get better stats integration.
     */
    public void startPeriodicStatsPrinting() {
        if (m_statsTimer == null) {
            m_statsTimer = new Timer();
            m_statsTimer.scheduleAtFixedRate(new TimerTask() {
                @Override
                public void run() {
                    printStats();
                }
            }, 5000, 5000);
        }
    }

    /**
     * Return the number of items in the literal cache.
     * @return literal cache size as a count
     */
    public int getLiteralCacheSize() {
        return m_literalCache.size();
    }

    /**
     * Return the number of items in the core (parameterized) cache.
     * @return core cache size as a count
     */
    public int getCoreCacheSize() {
        return m_coreCache.size();
    }
}
package gov.nih.nci.calab.ui.workflow; /** * This class shows the view aliquot page . * * @author pansu */ /* CVS $Id: ViewAliquotAction.java,v 1.3 2006-04-11 18:26:17 pansu Exp $*/ import gov.nih.nci.calab.dto.administration.AliquotBean; import gov.nih.nci.calab.service.workflow.ExecuteWorkflowService; import gov.nih.nci.calab.ui.core.AbstractBaseAction; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; import org.apache.log4j.Logger; import org.apache.struts.action.ActionForm; import org.apache.struts.action.ActionForward; import org.apache.struts.action.ActionMapping; import org.apache.struts.action.ActionMessage; import org.apache.struts.action.ActionMessages; import org.apache.struts.action.DynaActionForm; public class ViewAliquotAction extends AbstractBaseAction { private static Logger logger = Logger.getLogger(ViewAliquotAction.class); public ActionForward executeTask(ActionMapping mapping, ActionForm form, HttpServletRequest request, HttpServletResponse response) throws Exception { ActionForward forward = null; String aliquotId = null; try { DynaActionForm theForm = (DynaActionForm) form; aliquotId = (String) theForm.get("aliquotId"); ExecuteWorkflowService executeWorkflowService = new ExecuteWorkflowService(); AliquotBean aliquot = executeWorkflowService.getAliquot(aliquotId); if (aliquot != null) { request.setAttribute("aliquot", aliquot); forward = mapping.findForward("success"); } else { logger.error("Can't find an aliquot by the given ID"); ActionMessages errors = new ActionMessages(); ActionMessage error = new ActionMessage( "error.viewAliquot.noresult", aliquotId); errors.add("error", error); saveMessages(request, errors); forward = mapping.findForward("failure"); } } catch (Exception e) { logger.error("Caught exception when showing aliquot.", e); ActionMessages errors = new ActionMessages(); ActionMessage error = new ActionMessage("error.viewAliquot", aliquotId); errors.add("error", error); 
saveMessages(request, errors); forward = mapping.findForward("failure"); } return forward; } public boolean loginRequired() { return true; } }
package gov.nih.nci.cananolab.dto.particle; import gov.nih.nci.cananolab.domain.common.Keyword; import gov.nih.nci.cananolab.domain.common.Report; import gov.nih.nci.cananolab.domain.common.Source; import gov.nih.nci.cananolab.domain.particle.NanoparticleSample; import gov.nih.nci.cananolab.dto.common.ReportBean; import gov.nih.nci.cananolab.util.StringUtils; import java.util.ArrayList; import java.util.Date; import java.util.HashSet; import java.util.List; import java.util.SortedSet; import java.util.TreeSet; /** * This class represents shared properties of nanoparticle samples to be shown * in the view pages. * * @author pansu * */ public class ParticleBean { private String keywordsStr; private String[] visibilityGroups = new String[0]; private String gridNode; private NanoparticleSample domainParticleSample = new NanoparticleSample(); private String createdBy; private boolean hidden; private List<ReportBean> reports = new ArrayList<ReportBean>(); public ParticleBean() { domainParticleSample.setSource(new Source()); } public ParticleBean(NanoparticleSample particleSample) { this.domainParticleSample = particleSample; SortedSet<String> keywordStrs = new TreeSet<String>(); if (particleSample.getKeywordCollection() != null) { for (Keyword keyword : particleSample.getKeywordCollection()) { keywordStrs.add(keyword.getName()); } } keywordsStr = StringUtils.join(keywordStrs, "\r\n"); if (particleSample.getReportCollection() != null) { for (Report report : particleSample.getReportCollection()) { reports.add(new ReportBean(report, false)); } } } public String[] getVisibilityGroups() { return this.visibilityGroups; } public void setVisibilityGroups(String[] visibilityGroups) { this.visibilityGroups = visibilityGroups; } public String getGridNode() { return this.gridNode; } public void setGridNode(String gridNode) { this.gridNode = gridNode; } public String getKeywordsStr() { return this.keywordsStr; } public NanoparticleSample getDomainParticleSample() { return 
domainParticleSample; } public String getCreatedBy() { return createdBy; } public void setCreatedBy(String createdBy) { this.createdBy = createdBy; } public void setDomainParticleSample() { // always update createdBy and createdDate domainParticleSample.setCreatedBy(createdBy); domainParticleSample.setCreatedDate(new Date()); if (domainParticleSample.getKeywordCollection() != null) { domainParticleSample.getKeywordCollection().clear(); } else { domainParticleSample.setKeywordCollection(new HashSet<Keyword>()); } if (keywordsStr.length() > 0) { String[] strs = keywordsStr.split("\r\n"); for (String str : strs) { // change to upper case Keyword keyword = new Keyword(); keyword.setName(str.toUpperCase()); domainParticleSample.getKeywordCollection().add(keyword); } } } public void setKeywordsStr(String keywordsStr) { this.keywordsStr = keywordsStr; } public boolean isHidden() { return hidden; } public void setHidden(boolean hidden) { this.hidden = hidden; } public List<ReportBean> getReports() { return reports; } }
package afc.ant.modular;

import java.text.MessageFormat;
import java.util.ArrayList;
import java.util.Collection;
import java.util.IdentityHashMap;
import java.util.Iterator;
import java.util.LinkedHashSet;
import java.util.concurrent.LinkedBlockingQueue;

/**
 * Resolves module build order for concurrent consumers: after init(), worker
 * threads repeatedly call getFreeModule() to acquire a module whose
 * dependencies are all processed, and moduleProcessed() to release it, which
 * in turn frees its dependants. Modules are compared by identity throughout.
 */
public class ParallelDependencyResolver implements DependencyResolver {
    // queue of nodes whose remaining dependency count has reached zero
    private LinkedBlockingQueue<Node> shortlist;
    // modules handed out via getFreeModule() but not yet released
    private IdentityHashMap<Module, Node> modulesAcquired;
    // modules not yet handed out; getFreeModule() returns null once this hits zero
    private int remainingModuleCount;

    /**
     * Builds the dependency graph for the given root modules (and, transitively,
     * everything they depend on) and resets the resolver state.
     *
     * @param rootModules modules to start resolution from; must not be or contain null
     * @throws CyclicDependenciesDetectedException if a dependency cycle is found
     */
    public void init(final Collection<Module> rootModules) throws CyclicDependenciesDetectedException {
        if (rootModules == null) {
            throw new NullPointerException("rootModules");
        }
        for (final Module module : rootModules) {
            if (module == null) {
                throw new NullPointerException("rootModules contains null element.");
            }
        }
        synchronized (this) {
            final LinkedBlockingQueue<Node> newShortlist = new LinkedBlockingQueue<Node>();
            /* If buildNodeGraph() throws an exception then the state is not changed
               so that this ParallelDependencyResolver instance could be used as if
               this init() were not invoked. */
            remainingModuleCount = buildNodeGraph(rootModules, newShortlist);
            shortlist = newShortlist;
            modulesAcquired = new IdentityHashMap<Module, Node>();
        }
    }

    // returns a module that does not have dependencies
    // (blocks on shortlist.take() until one becomes free; returns null when
    // every module has already been handed out)
    public synchronized Module getFreeModule() {
        ensureInitialised();
        if (remainingModuleCount == 0) {
            return null;
        }
        try {
            final Node node = shortlist.take();
            final Module module = node.module;
            modulesAcquired.put(module, node);
            --remainingModuleCount;
            return module;
        } catch (InterruptedException ex) {
            // re-assert the interrupt flag before bailing out
            Thread.currentThread().interrupt();
            throw new IllegalStateException();
        }
    }

    /**
     * Marks an acquired module as processed and releases every dependant whose
     * dependency count thereby drops to zero onto the shortlist.
     *
     * @param module a module previously returned by getFreeModule()
     */
    public synchronized void moduleProcessed(final Module module) {
        ensureInitialised();
        if (module == null) {
            throw new NullPointerException("module");
        }
        try {
            final Node node = modulesAcquired.remove(module);
            if (node == null) {
                throw new IllegalArgumentException(MessageFormat.format(
                        "The module ''{0}'' is not being processed.", module.getPath()));
            }
            for (int j = 0, n = node.dependencyOf.size(); j < n; ++j) {
                final Node depOf = node.dependencyOf.get(j);
                if (--depOf.dependencyCount == 0) {
                    // all modules with no dependencies go to the shortlist
                    shortlist.put(depOf);
                }
            }
        } catch (InterruptedException ex) {
            // re-assert the interrupt flag before bailing out
            Thread.currentThread().interrupt();
            throw new IllegalStateException();
        }
    }

    // guards the public API against use before init() has completed
    private void ensureInitialised() {
        if (shortlist == null) {
            throw new IllegalStateException("Resolver is not initialised.");
        }
    }

    /** Graph vertex: a module plus its remaining-dependency count and dependants. */
    private static class Node {
        // TODO implement synchronised wrapper of module
        private Node(final Module module) {
            this.module = module;
            dependencyCount = module.getDependencies().size();
            dependencyOf = new ArrayList<Node>();
        }

        private final Module module;
        /* Knowing just dependency count is enough to detect the moment
           when this node has no dependencies remaining. */
        private int dependencyCount;
        private final ArrayList<Node> dependencyOf;
    }

    /*
     * Builds a DAG which nodes hold modules and arcs that represent inverted module dependencies.
     * The list of nodes returned via shortlist contains the starting vertices of the graph.
     * The modules that are bound to these vertices do not have dependencies on other modules
     * and are used as modules to start unwinding dependencies from.
     *
     * @returns the total number of modules.
     */
    private static int buildNodeGraph(final Collection<Module> rootModules,
            LinkedBlockingQueue<Node> shortlist) throws CyclicDependenciesDetectedException {
        final IdentityHashMap<Module, Node> registry = new IdentityHashMap<Module, Node>();
        final LinkedHashSet<Module> path = new LinkedHashSet<Module>();
        for (final Module module : rootModules) {
            addNodeDeep(module, shortlist, registry, path);
        }
        // the number of nodes in the graph
        return registry.size();
    }

    /**
     * Depth-first insertion of a module (and its dependencies) into the graph.
     * The insertion-ordered 'path' set tracks the current DFS path; revisiting
     * a module already on the path signals a dependency cycle.
     */
    private static Node addNodeDeep(final Module module, final LinkedBlockingQueue<Node> shortlist,
            final IdentityHashMap<Module, Node> registry, final LinkedHashSet<Module> path)
            throws CyclicDependenciesDetectedException {
        Node node = registry.get(module);
        if (node != null) {
            return node; // the module is already processed
        }
        if (path.add(module)) {
            node = new Node(module);
            // NOTE(review): direct field access (module.dependencies) rather than
            // getDependencies() -- presumably a same-package shortcut; confirm the
            // field exposes the same list the accessor would.
            final ArrayList<Module> deps = module.dependencies;
            if (deps.isEmpty()) {
                shortlist.add(node);
            } else {
                // inverted dependencies are assigned
                for (int i = 0, n = deps.size(); i < n; ++i) {
                    final Module dep = deps.get(i);
                    final Node depNode = addNodeDeep(dep, shortlist, registry, path);
                    assert depNode != null;
                    depNode.dependencyOf.add(node);
                }
            }
            registry.put(module, node);
            path.remove(module);
            return node;
        }
        /* A loop is detected. It does not necessarily end with the starting node,
           some leading nodes could be truncated. */
        int loopSize = path.size();
        final Iterator<Module> it = path.iterator();
        while (it.next() != module) {
            // skipping all leading nodes that are outside the loop
            --loopSize;
        }
        final ArrayList<Module> loop = new ArrayList<Module>(loopSize);
        loop.add(module);
        while (it.hasNext()) {
            loop.add(it.next());
        }
        assert loopSize == loop.size();
        throw new CyclicDependenciesDetectedException(loop);
    }
}
package com.opensymphony.workflow;

import com.opensymphony.module.propertyset.PropertySet;
import com.opensymphony.module.propertyset.PropertySetManager;
import com.opensymphony.provider.BeanProvider;
import com.opensymphony.provider.bean.DefaultBeanProvider;
import com.opensymphony.util.TextUtils;
import com.opensymphony.workflow.config.ConfigLoader;
import com.opensymphony.workflow.loader.*;
import com.opensymphony.workflow.query.WorkflowQuery;
import com.opensymphony.workflow.spi.*;
import com.opensymphony.workflow.util.beanshell.BeanShellCondition;
import com.opensymphony.workflow.util.beanshell.BeanShellFunctionProvider;
import com.opensymphony.workflow.util.beanshell.BeanShellRegister;
import com.opensymphony.workflow.util.beanshell.BeanShellValidator;
import com.opensymphony.workflow.util.bsf.BSFCondition;
import com.opensymphony.workflow.util.bsf.BSFFunctionProvider;
import com.opensymphony.workflow.util.bsf.BSFRegister;
import com.opensymphony.workflow.util.bsf.BSFValidator;
import com.opensymphony.workflow.util.ejb.local.LocalEJBCondition;
import com.opensymphony.workflow.util.ejb.local.LocalEJBFunctionProvider;
import com.opensymphony.workflow.util.ejb.local.LocalEJBRegister;
import com.opensymphony.workflow.util.ejb.local.LocalEJBValidator;
import com.opensymphony.workflow.util.ejb.remote.RemoteEJBCondition;
import com.opensymphony.workflow.util.ejb.remote.RemoteEJBFunctionProvider;
import com.opensymphony.workflow.util.ejb.remote.RemoteEJBRegister;
import com.opensymphony.workflow.util.ejb.remote.RemoteEJBValidator;
import com.opensymphony.workflow.util.jndi.JNDICondition;
import com.opensymphony.workflow.util.jndi.JNDIFunctionProvider;
import com.opensymphony.workflow.util.jndi.JNDIRegister;
import com.opensymphony.workflow.util.jndi.JNDIValidator;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

import java.io.InputStream;
import java.net.URL;
import java.util.*;

/**
 * Abstract workflow instance that serves as the base for specific implementations, such as EJB or SOAP.
 *
 * @author <a href="mailto:plightbo@hotmail.com">Pat Lightbody</a>
 */
public class AbstractWorkflow implements Workflow {
    //~ Static fields/initializers /////////////////////////////////////////////

    // statics
    // keys used in type-descriptor argument maps below (see passesCondition)
    public static final String CLASS_NAME = "class.name";
    public static final String EJB_LOCATION = "ejb.location";
    public static final String JNDI_LOCATION = "jndi.location";
    public static final String BSF_LANGUAGE = "language";
    public static final String BSF_SOURCE = "source";
    public static final String BSF_ROW = "row";
    public static final String BSF_COL = "col";
    public static final String BSF_SCRIPT = "script";
    public static final String BSH_SCRIPT = "script";
    private static final Log log = LogFactory.getLog(AbstractWorkflow.class);
    // guards one-time configuration loading in loadConfig(URL)
    protected static boolean configLoaded = false;
    private static BeanProvider beanProvider = new DefaultBeanProvider();

    //~ Instance fields ////////////////////////////////////////////////////////

    protected WorkflowContext context;

    //~ Constructors ///////////////////////////////////////////////////////////

    public AbstractWorkflow() {
        try {
            loadConfig(null);
        } catch (FactoryException e) {
            // surface the underlying cause where one is recorded
            throw new InternalWorkflowException("Error loading config", (e.getRootCause() != null) ? e.getRootCause() : e);
        }
    }

    //~ Methods ////////////////////////////////////////////////////////////////

    /**
     * Returns the ids of all actions (global and step-level) whose restriction
     * conditions currently pass for the given workflow instance.
     */
    public int[] getAvailableActions(long id) throws WorkflowException {
        WorkflowDescriptor wf = null;
        WorkflowStore store = getPersistence();
        WorkflowEntry entry = store.findEntry(id);

        if (entry == null) {
            throw new IllegalArgumentException("No such workflow id " + id);
        }

        wf = getWorkflow(entry.getWorkflowName());

        if (wf == null) {
            throw new IllegalArgumentException("No such workflow " + entry.getWorkflowName());
        }

        List l = new ArrayList();
        PropertySet ps = store.getPropertySet(id);
        Map transientVars = new HashMap();
        populateTransientMap(entry, transientVars, wf.getRegisters());

        // get global actions
        List globalActions = wf.getGlobalActions();

        for (Iterator iterator = globalActions.iterator(); iterator.hasNext();) {
            ActionDescriptor action = (ActionDescriptor) iterator.next();
            RestrictionDescriptor restriction = action.getRestriction();
            String conditionType = null;
            List conditions = null;

            if (restriction != null) {
                conditionType = restriction.getConditionType();
                conditions = restriction.getConditions();
            }

            if (passesConditions(conditionType, conditions, transientVars, ps)) {
                l.add(new Integer(action.getId()));
            }
        }

        // get normal actions
        Collection currentSteps = store.findCurrentSteps(id);

        for (Iterator iterator = currentSteps.iterator(); iterator.hasNext();) {
            Step step = (Step) iterator.next();
            l.addAll(getAvailableActionsForStep(wf, step, transientVars, ps));
        }

        int[] actions = new int[l.size()];

        for (int i = 0; i < actions.length; i++) {
            actions[i] = ((Integer) l.get(i)).intValue();
        }

        return actions;
    }

    /**
     * @ejb.interface-method
     */
    public List getCurrentSteps(long id) throws StoreException {
        WorkflowStore store = getPersistence();

        return store.findCurrentSteps(id);
    }

    /**
     * @ejb.interface-method
     */
    public List getHistorySteps(long id) throws StoreException {
        WorkflowStore store = getPersistence();

        return store.findHistorySteps(id);
    }

    /**
     * @ejb.interface-method
     */
    public Properties getPersistenceProperties() {
        Properties p = new Properties();
        Iterator iter = ConfigLoader.persistenceArgs.entrySet().iterator();

        while (iter.hasNext()) {
            Map.Entry entry = (Map.Entry) iter.next();
            p.setProperty((String) entry.getKey(), (String) entry.getValue());
        }

        return p;
    }

    /**
     * Get the PropertySet for the specified workflow ID
     * @ejb.interface-method
     * @param id The workflow ID
     */
    public PropertySet getPropertySet(long id) throws StoreException {
        PropertySet ps = getPersistence().getPropertySet(id);

        return ps;
    }

    /**
     * @ejb.interface-method
     */
    public List getSecurityPermissions(long id) throws WorkflowException {
        WorkflowStore store = getPersistence();
        WorkflowEntry entry = store.findEntry(id);
        WorkflowDescriptor wf = getWorkflow(entry.getWorkflowName());
        PropertySet ps = store.getPropertySet(id);
        Map transientVars = new HashMap();
        populateTransientMap(entry, transientVars, wf.getRegisters());

        List s = new ArrayList();
        Collection currentSteps = store.findCurrentSteps(id);

        for (Iterator interator = currentSteps.iterator(); interator.hasNext();) {
            Step step = (Step) interator.next();
            int stepId = step.getStepId();
            StepDescriptor xmlStep = wf.getStep(stepId);
            List securities = xmlStep.getPermissions();

            for (Iterator iterator2 = securities.iterator(); iterator2.hasNext();) {
                PermissionDescriptor security = (PermissionDescriptor) iterator2.next();

                // securities can't have restrictions based on inputs, so it's null
                if (passesConditions(security.getRestriction().getConditionType(), security.getRestriction().getConditions(), transientVars, ps)) {
                    s.add(security.getName());
                }
            }
        }

        return s;
    }

    /**
     * @ejb.interface-method
     */
    public WorkflowDescriptor getWorkflowDescriptor(String workflowName) throws FactoryException {
        return getWorkflow(workflowName);
    }

    /**
     * @ejb.interface-method
     */
    public String getWorkflowName(long id) throws StoreException {
        WorkflowStore store = getPersistence();
        WorkflowEntry entry = store.findEntry(id);

        if (entry != null) {
            return entry.getWorkflowName();
        } else {
            return null;
        }
    }

    /**
     * Get a list of workflow names available
     * @ejb.interface-method
     * @return String[] an array of workflow names.
     * @throws UnsupportedOperationException if the underlying workflow factory cannot obtain a list of workflow names.
     */
    public String[] getWorkflowNames() throws FactoryException {
        return ConfigLoader.getWorkflowNames();
    }

    /**
     * @ejb.interface-method
     */
    public boolean canInitialize(String workflowName, int initialAction) throws WorkflowException {
        final String mockWorkflowName = workflowName;
        // throwaway entry: only the name is needed to evaluate initial-action conditions
        WorkflowEntry mockEntry = new WorkflowEntry() {
            public long getId() {
                return 0;
            }

            public String getWorkflowName() {
                return mockWorkflowName;
            }

            public boolean isInitialized() {
                return false;
            }
        };

        // since no state change happens here, a memory instance is just fine
        PropertySet ps = PropertySetManager.getInstance("memory", null);

        Map transientVars = new HashMap();
        populateTransientMap(mockEntry, transientVars, Collections.EMPTY_LIST);

        return canInitialize(workflowName, initialAction, transientVars, ps);
    }

    /**
     * @ejb.interface-method
     * @param workflowName the name of the workflow to check
     * @param initialAction The initial action to check
     * @param inputs the inputs map
     * @return true if the workflow can be initialized
     * @throws WorkflowException if an unexpected error happens
     */
    public boolean canInitialize(String workflowName, int initialAction, Map inputs) throws WorkflowException {
        final String mockWorkflowName = workflowName;
        // throwaway entry: only the name is needed to evaluate initial-action conditions
        WorkflowEntry mockEntry = new WorkflowEntry() {
            public long getId() {
                return 0;
            }

            public String getWorkflowName() {
                return mockWorkflowName;
            }

            public boolean isInitialized() {
                return false;
            }
        };

        // since no state change happens here, a memory instance is just fine
        PropertySet ps = PropertySetManager.getInstance("memory", null);

        Map transientVars = new HashMap();

        if (inputs != null) {
            transientVars.putAll(inputs);
        }

        populateTransientMap(mockEntry, transientVars, Collections.EMPTY_LIST);

        return canInitialize(workflowName, initialAction, transientVars, ps);
    }

    /**
     * Performs the given action on a workflow instance after verifying that
     * the action is currently available; rolls the transaction back on any
     * WorkflowException raised during the transition.
     */
    public void doAction(long id, int actionId, Map inputs) throws WorkflowException {
        int[] availableActions = getAvailableActions(id);
        boolean validAction = false;

        for (int i = 0; i < availableActions.length; i++) {
            if (availableActions[i] == actionId) {
                validAction = true;

                break;
            }
        }

        if (!validAction) {
            throw new IllegalArgumentException("Action " + actionId + " is invalid");
        }

        WorkflowDescriptor wf = null;
        WorkflowEntry entry = null;
        WorkflowStore store = getPersistence();
        entry = store.findEntry(id);
        wf = getWorkflow(entry.getWorkflowName());

        List currentSteps = store.findCurrentSteps(id);
        ActionDescriptor action = wf.getAction(actionId);
        PropertySet ps = store.getPropertySet(id);
        Map transientVars = new HashMap();

        if (inputs != null) {
            transientVars.putAll(inputs);
        }

        populateTransientMap(entry, transientVars, wf.getRegisters());

        try {
            transitionWorkflow(entry, currentSteps, store, wf, action, transientVars, inputs, ps);
        } catch (WorkflowException e) {
            context.setRollbackOnly();
            throw e;
        }
    }

    /**
     * Executes the trigger function with the given id in the context of the
     * specified workflow instance.
     */
    public void executeTriggerFunction(long id, int triggerId) throws WorkflowException {
        WorkflowDescriptor wf = null;
        WorkflowEntry entry = null;
        WorkflowStore store = getPersistence();
        entry = store.findEntry(id);
        wf = getWorkflow(entry.getWorkflowName());

        PropertySet ps = store.getPropertySet(id);
        Map transientVars = new HashMap();
        populateTransientMap(entry, transientVars, wf.getRegisters());

        executeFunction(wf.getTriggerFunction(triggerId), transientVars, ps);
    }

    /**
     * Creates a new workflow instance and fires the given initial action,
     * rolling back if the caller is not permitted to initialize or the
     * transition fails.
     */
    public long initialize(String workflowName, int initialAction, Map inputs) throws InvalidRoleException, InvalidInputException, WorkflowException {
        WorkflowDescriptor wf = getWorkflow(workflowName);
        WorkflowStore store = getPersistence();
        WorkflowEntry entry = store.createEntry(workflowName);

        // start with a memory property set, but clone it after we have an ID
        PropertySet ps = store.getPropertySet(entry.getId());
        Map transientVars = new HashMap();

        if (inputs != null) {
            transientVars.putAll(inputs);
        }

        populateTransientMap(entry, transientVars, wf.getRegisters());

        if (!canInitialize(workflowName, initialAction, transientVars, ps)) {
            context.setRollbackOnly();
            throw new InvalidRoleException("You are restricted from initializing this workflow");
        }

        ActionDescriptor action = wf.getInitialAction(initialAction);

        try {
            transitionWorkflow(entry, Collections.EMPTY_LIST, store, wf, action, transientVars, inputs, ps);
        } catch (WorkflowException e) {
            context.setRollbackOnly();
            throw e;
        }

        long entryId = entry.getId();

        // now clone the memory PS to the real PS
        //PropertySetManager.clone(ps, store.getPropertySet(entryId));
        return entryId;
    }

    /**
     * @ejb.interface-method
     */
    public List query(WorkflowQuery query) throws StoreException {
        return getPersistence().query(query);
    }

    /**
     * @ejb.interface-method
     */
    public boolean saveWorkflowDescriptor(String workflowName, WorkflowDescriptor descriptor, boolean replace) throws FactoryException {
        boolean success = ConfigLoader.saveWorkflow(workflowName, descriptor, replace);

        return success;
    }

    /**
     * Returns the subset of a step's actions whose restriction conditions pass;
     * logs a warning and returns an empty list for an unknown step id.
     */
    protected List getAvailableActionsForStep(WorkflowDescriptor wf, Step step, Map transientVars, PropertySet ps) throws WorkflowException {
        List l = new ArrayList();
        StepDescriptor s = wf.getStep(step.getStepId());

        if (s == null) {
            log.warn("getAvailableActionsForStep called for non-existent step Id #" + step.getStepId());

            return l;
        }

        List actions = s.getActions();

        if ((actions == null) || (actions.size() == 0)) {
            return l;
        }

        for (Iterator iterator2 = actions.iterator(); iterator2.hasNext();) {
            ActionDescriptor action = (ActionDescriptor) iterator2.next();
            RestrictionDescriptor restriction = action.getRestriction();
            String conditionType = null;
            List conditions = null;

            if (restriction != null) {
                conditionType = restriction.getConditionType();
                conditions = restriction.getConditions();
            }

            if (passesConditions(conditionType, conditions, Collections.unmodifiableMap(transientVars), ps)) {
                l.add(new Integer(action.getId()));
            }
        }

        return l;
    }

    protected WorkflowStore getPersistence() throws StoreException {
        return StoreFactory.getPersistence(context);
    }

    /**
     * Returns a workflow definition object associated with the given name.
     *
     * @param name the name of the workflow
     * @return the object graph that represents a workflow definition
     */
    protected synchronized WorkflowDescriptor getWorkflow(String name) throws FactoryException {
        return ConfigLoader.getWorkflow(name);
    }

    /**
     * Load the default configuration from the current context classloader. The search order is:
     * <li>osworkflow.xml</li>
     * <li>/osworkflow.xml</li>
     * <li>META-INF/osworkflow.xml</li>
     * <li>/META-INF/osworkflow.xml</li>
     */
    protected void loadConfig() throws FactoryException {
        loadConfig(null);
    }

    /**
     * Loads the configuration file <b>osworkflow.xml</b> from the thread's class loader if no url is specified.
     * @param url the URL to first attempt to load the configuration file from. If this url is unavailable,
     * then the default search mechanism is used (as outlined in {@link #loadConfig}).
     */
    protected void loadConfig(URL url) throws FactoryException {
        // one-time load: subsequent calls are no-ops once configLoaded is set
        if (configLoaded) {
            return;
        }

        InputStream is = null;

        if (url != null) {
            try {
                is = url.openStream();
            } catch (Exception ex) {
                // deliberately ignored: fall through to the classloader search below
            }
        }

        ClassLoader classLoader = Thread.currentThread().getContextClassLoader();

        if (is == null) {
            try {
                is = classLoader.getResourceAsStream("osworkflow.xml");
            } catch (Exception e) {
            }
        }

        if (is == null) {
            try {
                is = classLoader.getResourceAsStream("/osworkflow.xml");
            } catch (Exception e) {
            }
        }

        if (is == null) {
            try {
                is = classLoader.getResourceAsStream("META-INF/osworkflow.xml");
            } catch (Exception e) {
            }
        }

        if (is == null) {
            try {
                is = classLoader.getResourceAsStream("/META-INF/osworkflow.xml");
            } catch (Exception e) {
            }
        }

        if (is != null) {
            ConfigLoader.load(is);
            configLoaded = true;
        }
    }

    /**
     * Instantiates the named class via the context classloader's no-arg
     * constructor; returns null (after logging) on any failure.
     */
    protected Object loadObject(String clazz) {
        try {
            return Thread.currentThread().getContextClassLoader().loadClass(clazz).newInstance();
        } catch (Exception e) {
            log.error("Could not load object '" + clazz + "'", e);

            return null;
        }
    }

    /**
     * Evaluates a single condition descriptor: resolves variable references in
     * its args, maps the descriptor type to a Condition implementation class
     * (or reads CLASS_NAME from the args), then instantiates and runs it.
     */
    protected boolean passesCondition(ConditionDescriptor conditionDesc, Map transientVars, PropertySet ps) throws WorkflowException {
        String type = conditionDesc.getType();
        HashMap args = new HashMap(conditionDesc.getArgs());

        for (Iterator iterator = args.entrySet().iterator(); iterator.hasNext();) {
            Map.Entry mapEntry = (Map.Entry) iterator.next();
            mapEntry.setValue(translateVariables((String) mapEntry.getValue(), transientVars, ps));
        }

        Condition condition = null;
        String clazz = null;

        if ("remote-ejb".equals(type)) {
            clazz = RemoteEJBCondition.class.getName();
        } else if ("local-ejb".equals(type)) {
            clazz = LocalEJBCondition.class.getName();
        } else if ("jndi".equals(type)) {
            clazz = JNDICondition.class.getName();
        } else if ("bsf".equals(type)) {
            clazz = BSFCondition.class.getName();
        } else if ("beanshell".equals(type)) {
            clazz = BeanShellCondition.class.getName();
        } else {
            clazz = (String) args.get(CLASS_NAME);
        }

        condition = (Condition) loadObject(clazz);

        // NOTE(review): the remainder of passesCondition (and the rest of this
        // class) continues beyond this chunk of the file.
        if (condition
== null) { String message = "Could not load Condition: " + clazz; throw new WorkflowException(message); } try { boolean passed = condition.passesCondition(transientVars, args, ps); if (conditionDesc.isNegate()) { passed = !passed; } return passed; } catch (Exception e) { String message = "Unknown exception encountered when trying condition: " + clazz; context.setRollbackOnly(); throw new WorkflowException(message, e); } } protected boolean passesConditions(String conditionType, List conditions, Map transientVars, PropertySet ps) throws WorkflowException { if ((conditions == null) || (conditions.size() == 0)) { return true; } boolean and = "AND".equals(conditionType); boolean or = !and; for (Iterator iterator = conditions.iterator(); iterator.hasNext();) { ConditionDescriptor conditionDescriptor = (ConditionDescriptor) iterator.next(); boolean result = passesCondition(conditionDescriptor, transientVars, ps); if (and && !result) { return false; } else if (or && result) { return true; } } if (and) { return true; } else if (or) { return false; } else { return false; } } protected void populateTransientMap(WorkflowEntry entry, Map transientVars, List registers) throws WorkflowException { transientVars.put("context", context); transientVars.put("entry", entry); transientVars.put("store", getPersistence()); transientVars.put("descriptor", getWorkflow(entry.getWorkflowName())); // now talk to the registers for any extra objects needed in scope for (Iterator iterator = registers.iterator(); iterator.hasNext();) { RegisterDescriptor register = (RegisterDescriptor) iterator.next(); Map args = register.getArgs(); String type = register.getType(); String clazz = null; if ("remote-ejb".equals(type)) { clazz = RemoteEJBRegister.class.getName(); } else if ("local-ejb".equals(type)) { clazz = LocalEJBRegister.class.getName(); } else if ("jndi".equals(type)) { clazz = JNDIRegister.class.getName(); } else if ("bsf".equals(type)) { clazz = BSFRegister.class.getName(); } else if 
("beanshell".equals(type)) { clazz = BeanShellRegister.class.getName(); } else { clazz = (String) args.get(CLASS_NAME); } Register r = null; r = (Register) loadObject(clazz); if (r == null) { String message = "Could not load register class: " + clazz; throw new WorkflowException(message); } try { transientVars.put(register.getVariableName(), r.registerVariable(context, entry, args)); } catch (Exception e) { String message = "An unknown exception occured while registering variable using class: " + clazz; context.setRollbackOnly(); throw new WorkflowException(message, e); } } } /** * Validates input against a list of ValidatorDescriptor objects. * * @param entry the workflow instance * @param validators the list of ValidatorDescriptors * @param transientVars the transientVars * @param ps the persistence variables * @throws InvalidInputException if the input is deemed invalid by any validator */ protected void verifyInputs(WorkflowEntry entry, List validators, Map transientVars, PropertySet ps) throws WorkflowException { for (Iterator iterator = validators.iterator(); iterator.hasNext();) { ValidatorDescriptor input = (ValidatorDescriptor) iterator.next(); if (input != null) { String type = input.getType(); HashMap args = new HashMap(input.getArgs()); for (Iterator iterator2 = args.entrySet().iterator(); iterator2.hasNext();) { Map.Entry mapEntry = (Map.Entry) iterator2.next(); mapEntry.setValue(translateVariables((String) mapEntry.getValue(), transientVars, ps)); } Validator validator = null; String clazz = null; if ("remote-ejb".equals(type)) { clazz = RemoteEJBValidator.class.getName(); } else if ("local-ejb".equals(type)) { clazz = LocalEJBValidator.class.getName(); } else if ("jndi".equals(type)) { clazz = JNDIValidator.class.getName(); } else if ("bsf".equals(type)) { clazz = BSFValidator.class.getName(); } else if ("beanshell".equals(type)) { clazz = BeanShellValidator.class.getName(); } else { clazz = (String) args.get(CLASS_NAME); } validator = (Validator) 
loadObject(clazz); if (validator == null) { String message = "Could not load validator class: " + clazz; throw new WorkflowException(message); } try { validator.validate(transientVars, args, ps); } catch (Exception e) { if (e instanceof InvalidInputException) { throw (InvalidInputException) e; } else { String message = "An unknown exception occured executing Validator: " + clazz; context.setRollbackOnly(); throw new WorkflowException(message, e); } } } } } Object getVariableFromMaps(String var, Map transientVars, PropertySet ps) { Object o = null; int firstDot = var.indexOf('.'); String actualVar = var; if (firstDot != -1) { actualVar = var.substring(0, firstDot); } o = transientVars.get(actualVar); if (o == null) { o = ps.getAsActualType(actualVar); } if (firstDot != -1) { o = beanProvider.getProperty(o, var.substring(firstDot + 1)); } return o; } /** * Parses a string for instances of "${foo}" and returns a string with all instances replaced * with the string value of the foo object (<b>foo.toString()</b>). If the string being passed * in only refers to a single variable and contains no other characters (for example: ${foo}), * then the actual object is returned instead of converting it to a string. */ Object translateVariables(String s, Map transientVars, PropertySet ps) { String temp = s.trim(); if (temp.startsWith("${") && temp.endsWith("}") && (temp.indexOf('$', 1) == -1)) { // the string is just a variable reference, don't convert it to a string String var = temp.substring(2, temp.length() - 1); return getVariableFromMaps(var, transientVars, ps); } else { // the string passed in contains multiple variables (or none!) 
and should be treated as a string while (true) { int x = s.indexOf("${"); int y = s.indexOf("}", x); if ((x != -1) && (y != -1)) { String var = s.substring(x + 2, y); String t = null; Object o = getVariableFromMaps(var, transientVars, ps); if (o != null) { t = o.toString(); } if (t != null) { s = s.substring(0, x) + t + s.substring(y + 1); } else { // the variable doesn't exist, so don't display anything s = s.substring(0, x) + s.substring(y + 1); } } else { break; } } return s; } } private Step getCurrentStep(WorkflowDescriptor wfDesc, int actionId, List currentSteps, Map transientVars, PropertySet ps) throws WorkflowException { if (currentSteps.size() == 1) { return (Step) currentSteps.get(0); } for (Iterator iterator = currentSteps.iterator(); iterator.hasNext();) { Step step = (Step) iterator.next(); ActionDescriptor action = wfDesc.getStep(step.getStepId()).getAction(actionId); if (action != null) { List availActions = getAvailableActionsForStep(wfDesc, step, transientVars, ps); if (availActions.contains(new Integer(action.getId()))) { return step; } } } return null; } private boolean canInitialize(String workflowName, int initialAction, Map transientVars, PropertySet ps) throws WorkflowException { WorkflowDescriptor wf = getWorkflow(workflowName); ActionDescriptor actionDescriptor = wf.getInitialAction(initialAction); if (actionDescriptor == null) { throw new WorkflowException("Invalid Initial Action"); } RestrictionDescriptor restriction = actionDescriptor.getRestriction(); String conditionType = null; List conditions = null; if (restriction != null) { conditionType = restriction.getConditionType(); conditions = restriction.getConditions(); } return passesConditions(conditionType, conditions, Collections.unmodifiableMap(transientVars), ps); } private void createNewCurrentStep(ResultDescriptor theResult, WorkflowEntry entry, WorkflowStore store, int actionId, Step currentStep, long[] previousIds, Map transientVars, PropertySet ps) throws StoreException { try 
{ if (log.isDebugEnabled()) { log.debug("Outcome: stepId=" + theResult.getStep() + ", status=" + theResult.getStatus() + ", owner=" + theResult.getOwner() + ", actionId=" + actionId + ", currentStep=" + ((currentStep != null) ? currentStep.getStepId() : 0)); } if (previousIds == null) { previousIds = new long[0]; } String owner = TextUtils.noNull(theResult.getOwner()); if (owner.equals("")) { owner = null; } else { Object o = translateVariables(owner, transientVars, ps); owner = (o != null) ? o.toString() : null; } String oldStatus = theResult.getOldStatus(); oldStatus = translateVariables(oldStatus, transientVars, ps).toString(); String status = theResult.getStatus(); status = translateVariables(status, transientVars, ps).toString(); if (currentStep != null) { store.markFinished(currentStep, actionId, new Date(), oldStatus, context.getCaller()); store.moveToHistory(currentStep); //store.moveToHistory(actionId, new Date(), currentStep, oldStatus, context.getCaller()); } // construct the start date and optional due date Date startDate = new Date(); Date dueDate = null; if ((theResult.getDueDate() != null) && (theResult.getDueDate().length() > 0)) { Object dueDateObject = translateVariables(theResult.getDueDate(), transientVars, ps); if (dueDateObject instanceof Date) { dueDate = (Date) dueDateObject; } else if (dueDateObject instanceof String) { long offset = TextUtils.parseLong((String) dueDateObject); if (offset > 0) { dueDate = new Date(startDate.getTime() + offset); } } else if (dueDateObject instanceof Number) { Number num = (Number) dueDateObject; long offset = num.longValue(); if (offset > 0) { dueDate = new Date(startDate.getTime() + offset); } } } store.createCurrentStep(entry.getId(), theResult.getStep(), owner, startDate, dueDate, status, previousIds); } catch (StoreException e) { context.setRollbackOnly(); throw e; } } /** * Executes a function. 
* * @param function the function to execute * @param transientVars the transientVars given by the end-user * @param ps the persistence variables */ private void executeFunction(FunctionDescriptor function, Map transientVars, PropertySet ps) throws WorkflowException { if (function != null) { String type = function.getType(); HashMap args = new HashMap(function.getArgs()); for (Iterator iterator = args.entrySet().iterator(); iterator.hasNext();) { Map.Entry mapEntry = (Map.Entry) iterator.next(); mapEntry.setValue(translateVariables((String) mapEntry.getValue(), transientVars, ps)); } FunctionProvider provider = null; String clazz = null; if ("remote-ejb".equals(type)) { clazz = RemoteEJBFunctionProvider.class.getName(); } else if ("local-ejb".equals(type)) { clazz = LocalEJBFunctionProvider.class.getName(); } else if ("jndi".equals(type)) { clazz = JNDIFunctionProvider.class.getName(); } else if ("bsf".equals(type)) { clazz = BSFFunctionProvider.class.getName(); } else if ("beanshell".equals(type)) { clazz = BeanShellFunctionProvider.class.getName(); } else { clazz = (String) args.get(CLASS_NAME); } provider = (FunctionProvider) loadObject(clazz); if (provider == null) { String message = "Could not load FunctionProvider class: " + clazz; context.setRollbackOnly(); throw new WorkflowException(message); } try { provider.execute(transientVars, args, ps); } catch (WorkflowException e) { context.setRollbackOnly(); throw e; } } } private void transitionWorkflow(WorkflowEntry entry, List currentSteps, WorkflowStore store, WorkflowDescriptor wf, ActionDescriptor action, Map transientVars, Map inputs, PropertySet ps) throws WorkflowException { Step step = getCurrentStep(wf, action.getId(), currentSteps, transientVars, ps); // validate transientVars (optional) verifyInputs(entry, action.getValidators(), Collections.unmodifiableMap(transientVars), ps); // preFunctions List preFunctions = action.getPreFunctions(); for (Iterator iterator = preFunctions.iterator(); 
iterator.hasNext();) { FunctionDescriptor function = (FunctionDescriptor) iterator.next(); executeFunction(function, transientVars, ps); } // check each conditional result List conditionalResults = action.getConditionalResults(); List extraPreFunctions = null; List extraPostFunctions = null; ResultDescriptor[] theResults = new ResultDescriptor[1]; for (Iterator iterator = conditionalResults.iterator(); iterator.hasNext();) { ConditionalResultDescriptor conditionalResult = (ConditionalResultDescriptor) iterator.next(); if (passesConditions(conditionalResult.getConditionType(), conditionalResult.getConditions(), Collections.unmodifiableMap(transientVars), ps)) { //if (evaluateExpression(conditionalResult.getCondition(), entry, wf.getRegisters(), null, transientVars)) { theResults[0] = conditionalResult; verifyInputs(entry, conditionalResult.getValidators(), Collections.unmodifiableMap(transientVars), ps); extraPreFunctions = conditionalResult.getPreFunctions(); extraPostFunctions = conditionalResult.getPostFunctions(); break; } } // use unconditional-result if a condition hasn't been met if (theResults[0] == null) { theResults[0] = action.getUnconditionalResult(); verifyInputs(entry, theResults[0].getValidators(), Collections.unmodifiableMap(transientVars), ps); extraPreFunctions = theResults[0].getPreFunctions(); extraPostFunctions = theResults[0].getPostFunctions(); } if (log.isDebugEnabled()) { log.debug("theResult=" + theResults[0].getStep() + " " + theResults[0].getStatus()); } // run any extra pre-functions that haven't been run already for (Iterator iterator = extraPreFunctions.iterator(); iterator.hasNext();) { FunctionDescriptor function = (FunctionDescriptor) iterator.next(); executeFunction(function, transientVars, ps); } // go to next step if (theResults[0].getSplit() != 0) { // the result is a split request, handle it correctly List splitPreFunctions = null; List splitPostFunctions = null; SplitDescriptor splitDesc = 
wf.getSplit(theResults[0].getSplit()); Collection results = splitDesc.getResults(); splitPreFunctions = new ArrayList(); splitPostFunctions = new ArrayList(); for (Iterator iterator = results.iterator(); iterator.hasNext();) { ResultDescriptor resultDescriptor = (ResultDescriptor) iterator.next(); verifyInputs(entry, resultDescriptor.getValidators(), Collections.unmodifiableMap(transientVars), ps); splitPreFunctions.addAll(resultDescriptor.getPreFunctions()); splitPostFunctions.addAll(resultDescriptor.getPostFunctions()); } // now execute the pre-functions for (Iterator iterator = splitPreFunctions.iterator(); iterator.hasNext();) { FunctionDescriptor function = (FunctionDescriptor) iterator.next(); executeFunction(function, transientVars, ps); } // now make these steps... boolean moveFirst = true; theResults = new ResultDescriptor[results.size()]; results.toArray(theResults); for (Iterator iterator = results.iterator(); iterator.hasNext();) { ResultDescriptor resultDescriptor = (ResultDescriptor) iterator.next(); Step moveToHistoryStep = null; if (moveFirst) { moveToHistoryStep = step; } long[] previousIds = null; if (step != null) { previousIds = new long[] {step.getId()}; } createNewCurrentStep(resultDescriptor, entry, store, action.getId(), moveToHistoryStep, previousIds, transientVars, ps); moveFirst = false; } // now execute the post-functions for (Iterator iterator = splitPostFunctions.iterator(); iterator.hasNext();) { FunctionDescriptor function = (FunctionDescriptor) iterator.next(); executeFunction(function, transientVars, ps); } } else if (theResults[0].getJoin() != 0) { // this is a join, finish this step... JoinDescriptor joinDesc = wf.getJoin(theResults[0].getJoin()); step = store.markFinished(step, action.getId(), new Date(), theResults[0].getOldStatus(), context.getCaller()); // ... 
now check to see if the expression evaluates // (get only current steps that have a result to this join) ArrayList joinSteps = new ArrayList(); joinSteps.add(step); //currentSteps = store.findCurrentSteps(id); // shouldn't need to refresh the list for (Iterator iterator = currentSteps.iterator(); iterator.hasNext();) { Step currentStep = (Step) iterator.next(); if (currentStep.getId() != step.getId()) { StepDescriptor stepDesc = wf.getStep(currentStep.getStepId()); if (stepDesc.resultsInJoin(theResults[0].getJoin())) { joinSteps.add(currentStep); } } } JoinNodes jn = new JoinNodes(joinSteps); transientVars.put("jn", jn); if (passesConditions(joinDesc.getConditionType(), joinDesc.getConditions(), Collections.unmodifiableMap(transientVars), ps)) { // move the rest without creating a new step ... ResultDescriptor joinresult = joinDesc.getResult(); verifyInputs(entry, joinresult.getValidators(), Collections.unmodifiableMap(transientVars), ps); // now execute the pre-functions for (Iterator iterator = joinresult.getPreFunctions().iterator(); iterator.hasNext();) { FunctionDescriptor function = (FunctionDescriptor) iterator.next(); executeFunction(function, transientVars, ps); } long[] previousIds = new long[joinSteps.size()]; int i = 1; for (Iterator iterator = joinSteps.iterator(); iterator.hasNext();) { Step currentStep = (Step) iterator.next(); if (currentStep.getId() != step.getId()) { //store.moveToHistory(currentStep.getActionId(), currentStep.getFinishDate(), currentStep, theResult.getOldStatus(), context.getCaller()); store.moveToHistory(currentStep); previousIds[i] = currentStep.getId(); i++; } } // ... 
now finish this step normally previousIds[0] = step.getId(); theResults[0] = joinDesc.getResult(); createNewCurrentStep(joinDesc.getResult(), entry, store, action.getId(), step, previousIds, transientVars, ps); // now execute the post-functions for (Iterator iterator = joinresult.getPostFunctions().iterator(); iterator.hasNext();) { FunctionDescriptor function = (FunctionDescriptor) iterator.next(); executeFunction(function, transientVars, ps); } } } else { // normal finish, no splits or joins long[] previousIds = null; if (step != null) { previousIds = new long[] {step.getId()}; } createNewCurrentStep(theResults[0], entry, store, action.getId(), step, previousIds, transientVars, ps); } // postFunctions (BOTH) for (Iterator iterator = extraPostFunctions.iterator(); iterator.hasNext();) { FunctionDescriptor function = (FunctionDescriptor) iterator.next(); executeFunction(function, transientVars, ps); } List postFunctions = action.getPostFunctions(); for (Iterator iterator = postFunctions.iterator(); iterator.hasNext();) { FunctionDescriptor function = (FunctionDescriptor) iterator.next(); executeFunction(function, transientVars, ps); } //we have our results, lets check if we need to autoexec any of them int[] availableActions = getAvailableActions(entry.getId()); if (availableActions.length != 0) { for (int i = 0; i < theResults.length; i++) { ResultDescriptor theResult = theResults[i]; StepDescriptor toCheck = wf.getStep(theResult.getStep()); if (toCheck != null) { Iterator iter = toCheck.getActions().iterator(); while (iter.hasNext()) { ActionDescriptor descriptor = (ActionDescriptor) iter.next(); if (descriptor.getAutoExecute()) { //check if it's an action we can actually perform for (int j = 0; j < availableActions.length; j++) { if (descriptor.getId() == availableActions[j]) { doAction(entry.getId(), descriptor.getId(), inputs); break; } } } } } } } } }
package com.puppetlabs.puppetserver.pool;

import java.util.Deque;
import java.util.LinkedList;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.CopyOnWriteArraySet;
import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.ReentrantLock;

/**
 * An implementation of LockablePool for managing a pool of JRubyPuppet
 * instances.
 *
 * @param <E> the type of element that can be added to the pool.
 */
public final class JRubyPool<E> implements LockablePool<E> {
    // The `LockingPool` contract requires some synchronization behaviors that
    // are not natively present in any of the JDK deque implementations -
    // specifically to allow one calling thread to call lock() to supercede
    // and hold off any pending pool borrowers until unlock() is called.
    // This class implementation fulfills the contract by managing the
    // synchronization constructs directly rather than deferring to an
    // underlying JDK data structure to manage concurrent access.

    // This implementation is modeled somewhat off of what the
    // `LinkedBlockingDeque` class in the OpenJDK does to manage
    // concurrency.  It uses a single `ReentrantLock` to provide mutual
    // exclusion around offer and take requests, with condition variables
    // used to park and later reawaken requests as needed, e.g., when pool
    // items are unavailable for borrowing or when the pool lock is
    // unavailable.
    //
    // See http://hg.openjdk.java.net/jdk8/jdk8/jdk/file/687fd7c7986d/src/share/classes/java/util/concurrent/LinkedBlockingDeque.java#l157

    // Because access to the underlying deque is synchronized within
    // this class, the pool is backed by a non-synchronized JDK `LinkedList`.

    // Underlying queue which holds the elements that clients can borrow.
    private final Deque<E> liveQueue;

    // Lock which guards all accesses to the underlying queue and registered
    // element set.  Constructed as "nonfair" for performance, like the
    // lock that a `LinkedBlockingDeque` does.  Not clear that we need this
    // to be a "fair" lock.
    private final ReentrantLock queueLock = new ReentrantLock(false);

    // Condition signaled when all elements that have been registered have been
    // returned to the queue.  Awaited when a lock has been requested but
    // one or more registered elements has been borrowed from the pool.
    private final Condition allRegisteredInQueue = queueLock.newCondition();

    // Condition signaled when an element has been added into the queue.
    // Awaited when a request has been made to borrow an item but no elements
    // currently exist in the queue.
    private final Condition queueNotEmpty = queueLock.newCondition();

    // Condition signaled when the pool has been unlocked.  Awaited when a
    // request has been made to borrow an item or lock the pool but the pool
    // is currently locked.
    private final Condition poolNotLocked = queueLock.newCondition();

    // Holds a reference to all of the elements that have been registered.
    // Newly registered elements are also added into the `liveQueue`.
    // Elements only exist in the `liveQueue` when not currently
    // borrowed whereas elements that have been registered (but not
    // yet unregistered) will be accessible via `registeredElements`
    // even while they are borrowed.
    private final Set<E> registeredElements = new CopyOnWriteArraySet<>();

    // Maximum size that the underlying queue can grow to.  Set once in the
    // constructor and never mutated, so declared final.
    private final int maxSize;

    // Thread which currently holds the pool lock.  null indicates that
    // there is no current pool lock holder.  Using the current Thread
    // object for tracking the pool lock owner is comparable to what the JDK's
    // `ReentrantLock` class does via the `AbstractOwnableSynchronizer` class:
    //
    // http://hg.openjdk.java.net/jdk8/jdk8/jdk/file/687fd7c7986d/src/share/classes/java/util/concurrent/locks/ReentrantLock.java#l164
    // http://hg.openjdk.java.net/jdk8/jdk8/jdk/file/687fd7c7986d/src/share/classes/java/util/concurrent/locks/AbstractOwnableSynchronizer.java#l64
    //
    // Unlike the `AbstractOwnableSynchronizer` class implementation, we marked
    // this variable as `volatile` because we couldn't convince ourselves
    // that it would be safe to update this variable from different threads and
    // not be susceptible to per-thread / per-CPU caching causing the wrong
    // value to be seen by a thread.  `volatile` seems safer and doesn't appear
    // to impose any noticeable performance degradation.
    private volatile Thread poolLockThread = null;

    /**
     * Create a JRubyPool
     *
     * @param size maximum capacity for the pool.
     */
    public JRubyPool(int size) {
        liveQueue = new LinkedList<>();
        maxSize = size;
    }

    /**
     * Registers an element with the pool and makes it available for
     * borrowing.
     *
     * @throws IllegalStateException if the pool already holds maxSize
     *         registered elements.
     */
    @Override
    public void register(E e) {
        final ReentrantLock lock = this.queueLock;
        lock.lock();

        try {
            if (registeredElements.size() == maxSize) {
                throw new IllegalStateException(
                        "Unable to register additional instance, pool full");
            }

            registeredElements.add(e);
            liveQueue.addLast(e);
            signalPoolNotEmpty();
        } finally {
            lock.unlock();
        }
    }

    /**
     * Removes an element from the registered set.  Does not remove it from
     * the live queue; signals a waiting lock() caller if registered and
     * queued counts now match.
     */
    @Override
    public void unregister(E e) {
        final ReentrantLock lock = this.queueLock;
        lock.lock();

        try {
            registeredElements.remove(e);
            signalIfAllRegisteredInQueue();
        } finally {
            lock.unlock();
        }
    }

    /**
     * Borrows an element from the pool, blocking until one is available and
     * the pool is not locked by another thread.
     */
    @Override
    public E borrowItem() throws InterruptedException {
        E item = null;
        final ReentrantLock lock = this.queueLock;
        lock.lock();

        try {
            final Thread currentThread = Thread.currentThread();

            do {
                if (isPoolLockHeldByAnotherThread(currentThread)) {
                    poolNotLocked.await();
                } else if (liveQueue.size() < 1) {
                    queueNotEmpty.await();
                } else {
                    item = liveQueue.removeFirst();
                }
            } while (item == null);
        } finally {
            lock.unlock();
        }

        return item;
    }

    /**
     * Borrows an element from the pool, waiting at most the given timeout.
     * Returns null if no element became available in time.
     */
    @Override
    public E borrowItemWithTimeout(long timeout, TimeUnit unit)
            throws InterruptedException {
        E item = null;
        final ReentrantLock lock = this.queueLock;
        long remainingMaxTimeToWait = unit.toNanos(timeout);

        // `queueLock.lockInterruptibly()` is called here as opposed to just
        // `queueLock.queueLock` to follow the pattern that the JDK's
        // `LinkedBlockingDeque` does for a timed poll from a deque.  See:
        // http://hg.openjdk.java.net/jdk8/jdk8/jdk/file/687fd7c7986d/src/share/classes/java/util/concurrent/LinkedBlockingDeque.java#l516
        lock.lockInterruptibly();

        try {
            final Thread currentThread = Thread.currentThread();

            // This pattern of using timed `awaitNanos` on a condition
            // variable to track the total time spent waiting for an item to
            // be available to be borrowed follows the logic that the JDK's
            // `LinkedBlockingDeque` in `pollFirst` uses.  See:
            // http://hg.openjdk.java.net/jdk8/jdk8/jdk/file/687fd7c7986d/src/share/classes/java/util/concurrent/LinkedBlockingDeque.java#l522
            do {
                if (isPoolLockHeldByAnotherThread(currentThread)) {
                    if (remainingMaxTimeToWait <= 0) {
                        break;
                    }
                    remainingMaxTimeToWait =
                            poolNotLocked.awaitNanos(remainingMaxTimeToWait);
                } else if (liveQueue.size() < 1) {
                    if (remainingMaxTimeToWait <= 0) {
                        break;
                    }
                    remainingMaxTimeToWait =
                            queueNotEmpty.awaitNanos(remainingMaxTimeToWait);
                } else {
                    item = liveQueue.removeFirst();
                }
            } while (item == null);
        } finally {
            lock.unlock();
        }

        return item;
    }

    /** Returns a borrowed element to the front of the pool. */
    @Override
    public void releaseItem(E e) {
        releaseItem(e, true);
    }

    /**
     * Returns a borrowed element; when returnToPool is false the element is
     * simply not re-queued.
     */
    @Override
    public void releaseItem(E e, boolean returnToPool) {
        final ReentrantLock lock = this.queueLock;
        lock.lock();

        try {
            if (returnToPool) {
                addFirst(e);
            }
        } finally {
            lock.unlock();
        }
    }

    /**
     * Insert a poison pill into the pool.  It should only ever be used to
     * insert a `PoisonPill` or `RetryPoisonPill` to the pool.
     */
    @Override
    public void insertPill(E e) {
        final ReentrantLock lock = this.queueLock;
        lock.lock();

        try {
            addFirst(e);
        } finally {
            lock.unlock();
        }
    }

    /**
     * Drains the live queue, unregistering only the elements that were
     * present in it (borrowed elements remain registered).
     */
    @Override
    public void clear() {
        final ReentrantLock lock = this.queueLock;
        lock.lock();

        try {
            // It would be simpler to just call .clear() on both the liveQueue
            // and registeredElements here.  It is possible, however, that this
            // method might be called while one or more elements are being
            // borrowed from the liveQueue.  If the associated element from
            // registeredElements were removed, it would then be possible for
            // the borrowed elements to be returned to the pool, making them
            // appear in liveQueue but not in registeredElements.  This would
            // be bad because any subsequent actions that need to be done to
            // all members of the pool - for example, marking environments in
            // the pool instance as expired - might inadvertently skip over
            // any of the elements that are no longer in registeredElements
            // but can appear in liveQueue.
            //
            // To avoid this problem, the implementation only removes elements
            // from registeredElements which have a corresponding entry which
            // is being removed from the liveQueue.
            int queueSize = liveQueue.size();
            for (int i = 0; i < queueSize; i++) {
                registeredElements.remove(liveQueue.removeFirst());
            }
        } finally {
            lock.unlock();
        }
    }

    /** Returns how many more elements the live queue could hold. */
    @Override
    public int remainingCapacity() {
        int remainingCapacity;
        final ReentrantLock lock = this.queueLock;
        lock.lock();

        try {
            remainingCapacity = maxSize - liveQueue.size();
        } finally {
            lock.unlock();
        }

        return remainingCapacity;
    }

    /** Returns the number of elements currently available for borrowing. */
    @Override
    public int size() {
        int size;
        final ReentrantLock lock = this.queueLock;
        lock.lock();

        try {
            size = liveQueue.size();
        } finally {
            lock.unlock();
        }

        return size;
    }

    /**
     * Acquires the pool lock for the calling thread, then waits until every
     * registered element has been returned to the queue.  If interrupted
     * while waiting for returns, the pool lock is released before the
     * exception propagates.
     */
    @Override
    public void lock() throws InterruptedException {
        final ReentrantLock lock = this.queueLock;
        lock.lock();

        try {
            final Thread currentThread = Thread.currentThread();

            while (!isPoolLockHeldByCurrentThread(currentThread)) {
                if (!isPoolLockHeld()) {
                    poolLockThread = currentThread;
                } else {
                    poolNotLocked.await();
                }
            }

            try {
                while (registeredElements.size() != liveQueue.size()) {
                    allRegisteredInQueue.await();
                }
            } catch (Exception e) {
                freePoolLock();
                throw e;
            }
        } finally {
            lock.unlock();
        }
    }

    /**
     * Releases the pool lock.
     *
     * @throws IllegalStateException if the calling thread does not hold the
     *         pool lock.
     */
    @Override
    public void unlock() {
        final ReentrantLock lock = this.queueLock;
        lock.lock();

        try {
            final Thread currentThread = Thread.currentThread();
            if (!isPoolLockHeldByCurrentThread(currentThread)) {
                String lockErrorMessage;

                if (isPoolLockHeldByAnotherThread(currentThread)) {
                    lockErrorMessage = "held by " + poolLockThread;
                } else {
                    lockErrorMessage = "not held by any thread";
                }

                throw new IllegalStateException(
                        "Unlock requested from thread not holding the lock. " +
                        "Requested from " + currentThread +
                        " but lock " + lockErrorMessage + ".");
            }

            freePoolLock();
        } finally {
            lock.unlock();
        }
    }

    /**
     * Returns the set of all registered (including borrowed) elements.
     * NOTE(review): this exposes the internal CopyOnWriteArraySet directly;
     * callers can mutate it outside the queue lock - confirm that is
     * intended before wrapping it unmodifiable.
     */
    public Set<E> getRegisteredElements() {
        return registeredElements;
    }

    // Adds an element to the front of the queue and wakes a waiting borrower.
    private void addFirst(E e) {
        liveQueue.addFirst(e);
        signalPoolNotEmpty();
    }

    private void freePoolLock() {
        poolLockThread = null;

        // Need to use 'signalAll' here because there might be multiple
        // waiters (e.g., multiple borrowers) queued up, waiting for the
        // pool to be unlocked.
        poolNotLocked.signalAll();

        // Borrowers that are woken up when an instance is returned to the
        // pool and the pool queueLock is held would then start waiting on a
        // 'poolNotLocked' signal instead.  Re-signalling 'queueNotEmpty' here
        // allows any borrowers still waiting on the 'queueNotEmpty' signal to be
        // reawoken when the pool lock is released, compensating for any
        // 'queueNotEmpty' signals that might have been essentially ignored from
        // when the pool lock was held.
        if (liveQueue.size() > 0) {
            queueNotEmpty.signalAll();
        }
    }

    private void signalPoolNotEmpty() {
        // Could use 'signalAll' here instead of 'signal' but 'signal' is
        // less expensive in that only one waiter will be woken up.  Can use
        // signal here because the thread being awoken will be able to borrow
        // a pool instance and any further waiters will be woken up by
        // subsequent posts of this signal when instances are added/returned to
        // the queue.
        queueNotEmpty.signal();
        signalIfAllRegisteredInQueue();
    }

    private void signalIfAllRegisteredInQueue() {
        // Could use 'signalAll' here instead of 'signal'.  Doesn't really
        // matter though in that there will only be one waiter at most which
        // is active at a time - a caller of lock() that has just acquired
        // the pool lock but is waiting for all registered elements to be
        // returned to the queue.
        if (registeredElements.size() == liveQueue.size()) {
            allRegisteredInQueue.signal();
        }
    }

    private boolean isPoolLockHeld() {
        return poolLockThread != null;
    }

    private boolean isPoolLockHeldByCurrentThread(Thread currentThread) {
        return poolLockThread == currentThread;
    }

    private boolean isPoolLockHeldByAnotherThread(Thread currentThread) {
        return (poolLockThread != null) && (poolLockThread != currentThread);
    }
}
// $Id: TableDirector.java,v 1.3 2001/10/23 20:26:30 mdb Exp $ package com.threerings.parlor.client; import java.util.ArrayList; import com.threerings.presents.dobj.ElementAddedEvent; import com.threerings.presents.dobj.ElementUpdatedEvent; import com.threerings.presents.dobj.ElementRemovedEvent; import com.threerings.presents.dobj.SetListener; import com.threerings.crowd.data.BodyObject; import com.threerings.crowd.data.PlaceObject; import com.threerings.parlor.Log; import com.threerings.parlor.data.Table; import com.threerings.parlor.data.TableLobbyObject; import com.threerings.parlor.game.GameConfig; import com.threerings.parlor.util.ParlorContext; /** * As tables are created and managed within the scope of a place (a * lobby), we want to fold the table management functionality into the * standard hierarchy of place controllers that deal with place-related * functionality on the client. Thus, instead of forcing places that * expect to have tables to extend a <code>TableLobbyController</code> or * something similar, we instead provide the table manager which can be * instantiated by the place controller (or specific table related views) * to handle the table matchmaking services. * * <p> Entites that do so, will need to implement the {@link * TableObserver} interface so that the table manager can notify them when * table related things happen. * * <p> The table services expect that the place object being used as a * lobby in which the table matchmaking takes place implements the {@link * TableLobbyObject} interface. */ public class TableManager implements SetListener { /** * Creates a new table manager to manage tables with the specified * observer which will receive callbacks when interesting table * related things happen. * * @param ctx the parlor context in use by the client. * @param tableField the field name of the distributed set that * contains the tables we will be managing. 
* @param observer the entity that will receive callbacks when things * happen to the tables. */ public TableManager ( ParlorContext ctx, String tableField, TableObserver observer) { // keep track of this stuff _ctx = ctx; _tableField = tableField; _observer = observer; } /** * This must be called by the entity that uses the table manager when * the using entity prepares to enter and display a place. It is * assumed that the client is already subscribed to the provided place * object. */ public void willEnterPlace (PlaceObject place) { // add ourselves as a listener to the place object place.addListener(this); // and remember this for later _lobby = place; } /** * This must be called by the entity that uses the table manager when * the using entity has left and is done displaying a place. */ public void didLeavePlace (PlaceObject place) { // remove our listenership place.removeListener(this); // clear out our lobby reference _lobby = null; } /** * Requests that the specified observer be added to the list of * observers that are notified when this client sits down at or stands * up from a table. */ public void addSeatednessObserver (SeatednessObserver observer) { _seatedObservers.add(observer); } /** * Requests that the specified observer be removed from to the list of * observers that are notified when this client sits down at or stands * up from a table. */ public void removeSeatednessObserver (SeatednessObserver observer) { _seatedObservers.remove(observer); } /** * Returns true if this client is currently seated at a table, false * if they are not. */ public boolean isSeated () { return (_ourTable != null); } /** * Sends a request to create a table with the specified game * configuration. This user will become the owner of this table and * will be added to the first position in the table. The response will * be communicated via the {@link TableObserver} interface. 
*/ public void createTable (GameConfig config) { // if we're already in a table, refuse the request if (_ourTable != null) { Log.warning("Ignoring request to create table as we're " + "already in a table [table=" + _ourTable + "]."); return; } // make sure we're currently in a place if (_lobby == null) { Log.warning("Requested to create a table but we're not " + "currently in a place [config=" + config + "]."); return; } // go ahead and issue the create request ParlorService.createTable( _ctx.getClient(), _lobby.getOid(), config, this); } /** * Sends a request to join the specified table at the specified * position. The response will be communicated via the {@link * TableObserver} interface. */ public void joinTable (int tableId, int position) { // if we're already in a table, refuse the request if (_ourTable != null) { Log.warning("Ignoring request to join table as we're " + "already in a table [table=" + _ourTable + "]."); return; } // make sure we're currently in a place if (_lobby == null) { Log.warning("Requested to join a table but we're not " + "currently in a place [tableId=" + tableId + "]."); return; } // go ahead and issue the create request ParlorService.joinTable( _ctx.getClient(), tableId, position, this); } /** * Sends a request to leave the specified table at which we are * presumably seated. The response will be communicated via the {@link * TableObserver} interface. 
*/ public void leaveTable (int tableId) { // make sure we're currently in a place if (_lobby == null) { Log.warning("Requested to leave a table but we're not " + "currently in a place [tableId=" + tableId + "]."); return; } // go ahead and issue the create request ParlorService.leaveTable(_ctx.getClient(), tableId, this); } // documentation inherited public void elementAdded (ElementAddedEvent event) { if (event.getName().equals(_tableField)) { Table table = (Table)event.getElement(); // check to see if we just joined a table checkSeatedness(table); // now let the observer know what's up _observer.tableAdded(table); } } // documentation inherited public void elementUpdated (ElementUpdatedEvent event) { if (event.getName().equals(_tableField)) { Table table = (Table)event.getElement(); // check to see if we just joined or left a table checkSeatedness(table); // now let the observer know what's up _observer.tableUpdated(table); } } // documentation inherited public void elementRemoved (ElementRemovedEvent event) { if (event.getName().equals(_tableField)) { Integer tableId = (Integer)event.getKey(); // check to see if our table just disappeared if (_ourTable != null && tableId.equals(_ourTable.tableId)) { _ourTable = null; notifySeatedness(false); } // now let the observer know what's up _observer.tableRemoved(tableId.intValue()); } } /** * Called by the invocation services when a table creation request was * received by the server and the table was successfully created. * * @param invid the invocation id of the invitation request. */ public void handleTableCreated (int invid, int tableId) { // nothing much to do here Log.info("Table creation succeeded [tableId=" + tableId + "]."); } /** * Called by the invocation services when a table creation request * failed or was rejected for some reason. * * @param invid the invocation id of the creation request. * @param reason a reason code explaining the failure. 
*/ public void handleCreateFailed (int invid, String reason) { Log.warning("Table creation failed [reason=" + reason + "]."); } /** * Called by the invocation services when a join table request failed * or was rejected for some reason. * * @param invid the invocation id of the join request. * @param reason a reason code explaining the failure. */ public void handleJoinFailed (int invid, String reason) { Log.warning("Join table failed [reason=" + reason + "]."); } /** * Called by the invocation services when a leave table request failed * or was rejected for some reason. * * @param invid the invocation id of the leave request. * @param reason a reason code explaining the failure. */ public void handleLeaveFailed (int invid, String reason) { Log.warning("Leave table failed [reason=" + reason + "]."); } /** * Checks to see if we're a member of this table and notes it as our * table, if so. */ protected void checkSeatedness (Table table) { Table oldTable = _ourTable; // if this is the same table as our table, clear out our table // reference and allow it to be added back if we are still in the // table if (table.equals(_ourTable)) { _ourTable = null; } // look for our username in the occupants array BodyObject self = (BodyObject)_ctx.getClient().getClientObject(); for (int i = 0; i < table.occupants.length; i++) { if (self.username.equals(table.occupants[i])) { _ourTable = table; break; } } // if nothing changed, bail now if (oldTable == _ourTable || (oldTable != null && oldTable.equals(_ourTable))) { return; } // otherwise notify the observers notifySeatedness(_ourTable != null); } /** * Notifies the seatedness observers of a seatedness change. 
*/ protected void notifySeatedness (boolean isSeated) { int slength = _seatedObservers.size(); for (int i = 0; i < slength; i++) { SeatednessObserver observer = (SeatednessObserver) _seatedObservers.get(i); try { observer.seatednessDidChange(isSeated); } catch (Exception e) { Log.warning("Observer choked in seatednessDidChange() " + "[observer=" + observer + "]."); Log.logStackTrace(e); } } } /** A context by which we can access necessary client services. */ protected ParlorContext _ctx; /** The place object in which we're currently managing tables. */ protected PlaceObject _lobby; /** The field name of the distributed set that contains our tables. */ protected String _tableField; /** The entity that we talk to when table stuff happens. */ protected TableObserver _observer; /** The table of which we are a member if any. */ protected Table _ourTable; /** An array of entities that want to hear about when we stand up or * sit down. */ protected ArrayList _seatedObservers = new ArrayList(); }
//This library is free software; you can redistribute it and/or //modify it under the terms of the GNU Lesser General Public //This library is distributed in the hope that it will be useful, //MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the //You should have received a copy of the GNU Lesser General Public //Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. package opennlp.tools.parser; import java.io.BufferedReader; import java.io.File; import java.io.IOException; import java.io.InputStreamReader; import java.util.ArrayList; import java.util.Arrays; import java.util.Iterator; import java.util.List; import java.util.StringTokenizer; import opennlp.tools.util.Sequence; import opennlp.tools.util.Span; import opennlp.maxent.io.SuffixSensitiveGISModelReader; import opennlp.tools.chunker.ChunkerME; import opennlp.tools.postag.DefaultPOSContextGenerator; import opennlp.tools.postag.POSDictionary; import opennlp.tools.postag.POSTaggerME; public class EnglishTreebankParser { public static ParserME getParser(String dataDir, boolean useTagDictionary, boolean useCaseSensitiveTagDictionary) throws IOException { if (useTagDictionary) { return new ParserME( new SuffixSensitiveGISModelReader(new File(dataDir + "/build.bin.gz")).getModel(), new SuffixSensitiveGISModelReader(new File(dataDir + "/check.bin.gz")).getModel(), new EnglishTreebankPOSTagger(dataDir + "/tag.bin.gz", dataDir + "/tagdict", useCaseSensitiveTagDictionary), new EnglishTreebankChunker(dataDir + "/chunk.bin.gz"), new HeadRules(dataDir + "/head_rules")); } else { return new ParserME( new SuffixSensitiveGISModelReader(new File(dataDir + "/build.bin.gz")).getModel(), new SuffixSensitiveGISModelReader(new File(dataDir + "/check.bin.gz")).getModel(), new EnglishTreebankPOSTagger(dataDir + "/tag.bin.gz"), new EnglishTreebankChunker(dataDir + "/chunk.bin.gz"), new HeadRules(dataDir + "/head_rules")); } } private static class EnglishTreebankPOSTagger extends POSTaggerME implements 
ParserTagger { private static final int K = 10; public EnglishTreebankPOSTagger(String modelFile) throws IOException { super(10, new SuffixSensitiveGISModelReader(new File(modelFile)).getModel(), new DefaultPOSContextGenerator(), null); } public EnglishTreebankPOSTagger(String modelFile, String tagDictionary, boolean useCase) throws IOException { super(10, new SuffixSensitiveGISModelReader(new File(modelFile)).getModel(), new DefaultPOSContextGenerator(), new POSDictionary(tagDictionary, useCase)); } public Sequence[] topKSequences(List sentence) { return beam.bestSequences(K, sentence, null); } public Sequence[] topKSequences(String[] sentence) { return beam.bestSequences(K, Arrays.asList(sentence), null); } } private static class EnglishTreebankChunker extends ChunkerME implements ParserChunker { private static final int K = 10; public EnglishTreebankChunker(String modelFile) throws IOException { super(new SuffixSensitiveGISModelReader(new File(modelFile)).getModel(), new ChunkContextGenerator(), 10); } public Sequence[] topKSequences(List sentence, List tags) { return beam.bestSequences(K, sentence, new Object[] { tags }); } public Sequence[] topKSequences(String[] sentence, String[] tags) { return beam.bestSequences(K, Arrays.asList(sentence), new Object[] { Arrays.asList(tags)}); } public Sequence[] topKSequences(List sentence) { return beam.bestSequences(K, sentence, null); } public Sequence[] topKSequences(String[] sentence) { return beam.bestSequences(K, Arrays.asList(sentence), null); } protected boolean validOutcome(String outcome, Sequence sequence) { if (outcome.startsWith(ParserME.CONT)) { List tagList = sequence.getOutcomes(); int lti = tagList.size() - 1; if (lti == -1) { return (false); } else { String lastTag = (String) tagList.get(lti); if (lastTag.equals(ParserME.OTHER)) { return (false); } String pred = outcome.substring(ParserME.CONT.length()); if (lastTag.startsWith(ParserME.START)) { return 
lastTag.substring(ParserME.START.length()).equals(pred); } else if (lastTag.startsWith(ParserME.CONT)) { return lastTag.substring(ParserME.CONT.length()).equals(pred); } } } return (true); } } private static String convertToken(String token) { if (token.equals("(")) { return "-LRB-"; } else if (token.equals(")")) { return "-RRB-"; } else if (token.equals("{")) { return "-LCB-"; } else if (token.equals("}")) { return "-RCB-"; } return token; } private static void usage() { System.err.println("Usage: EnglishTreebankParser [-i] dataDirectory < sentences"); System.err.println("dataDirectory: Directory containing parser models."); System.err.println("-d: Use tag dictionary."); System.err.println("-i: Case insensitive tag dictionary."); System.exit(1); } public static void main(String[] args) throws IOException { if (args.length == 0) { usage(); } boolean useTagDictionary=false; boolean caseInsensitiveTagDictionary=false; int ai = 0; while (args[ai].startsWith("-")) { if (args[ai].equals("-d")) { useTagDictionary = true; ai++; } if (args[ai].equals("-i")) { caseInsensitiveTagDictionary = true; ai++; } if (args[ai].equals(" ai++; break; } } ParserME parser; if (caseInsensitiveTagDictionary) { parser = EnglishTreebankParser.getParser(args[ai++],true,false); } else if (useTagDictionary) { parser = EnglishTreebankParser.getParser(args[ai++],true,true); } else { parser = EnglishTreebankParser.getParser(args[ai++],false,false); } BufferedReader in = new BufferedReader(new InputStreamReader(System.in)); String line; try { while (null != (line = in.readLine())) { StringTokenizer str = new StringTokenizer(line); int numToks = str.countTokens(); StringBuffer sb = new StringBuffer(); List tokens = new ArrayList(); while (str.hasMoreTokens()) { String tok = convertToken(str.nextToken()); tokens.add(tok); sb.append(tok).append(" "); } if (sb.length() != 0) { String text = sb.substring(0, sb.length() - 1).toString(); Parse p = new Parse(text, new Span(0, text.length()), "INC", 1, 
null); int start = 0; for (Iterator ti = tokens.iterator(); ti.hasNext();) { String tok = (String) ti.next(); p.insert(new Parse(text, new Span(start, start + tok.length()), ParserME.TOK_NODE, 0)); start += tok.length() + 1; } p = parser.parse(p); //System.out.print(p.getProb()+" "); p.show(); } System.out.println(); } } catch (IOException e) { System.err.println(e); } } }
package org.dellroad.stuff.graph; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; import java.util.Comparator; import java.util.HashMap; import java.util.List; import java.util.Set; /** * Topological sorting utility class. */ public class TopologicalSorter<E> { private final Collection<E> nodes; private final EdgeLister<E> edgeLister; private final Comparator<? super E> tieBreaker; private HashMap<E, Boolean> visited; private ArrayList<E> ordering; /** * Primary constructor. * * @param nodes partially ordered nodes to be sorted * @param edgeLister provides the edges defining the partial order * @param tieBreaker used to sort nodes that are not otherwise ordered, * or null to tie break based on the original ordering */ public TopologicalSorter(Collection<E> nodes, EdgeLister<E> edgeLister, Comparator<? super E> tieBreaker) { this.nodes = nodes; this.edgeLister = edgeLister; if (tieBreaker == null) tieBreaker = getDefaultTieBreaker(); this.tieBreaker = tieBreaker; } /** * Convenience constructor for when ties should be broken based on the original ordering. 
* * <p> * Equivalent to: * <blockquote> * {@code TopologicalSorter(nodes, edgeLister, null);} * </blockquote> * </p> */ public TopologicalSorter(Collection<E> nodes, EdgeLister<E> edgeLister) { this(nodes, edgeLister, null); } public List<E> sort() { // Order nodes according to reverse tie breaker ordering ArrayList<E> startList = Collections.list(Collections.enumeration(this.nodes)); Collections.sort(startList, getTieBreaker(true)); // Perform depth-first search through nodes this.visited = new HashMap<E, Boolean>(startList.size()); this.ordering = new ArrayList<E>(startList.size()); for (E node : startList) visit(node, true); // Reverse list Collections.reverse(this.ordering); return this.ordering; } public List<E> sortEdgesReversed() { // Order nodes according to normal tie breaker ordering ArrayList<E> startList = Collections.list(Collections.enumeration(this.nodes)); Collections.sort(startList, getTieBreaker(false)); // Perform depth-first search through nodes this.visited = new HashMap<E, Boolean>(startList.size()); this.ordering = new ArrayList<E>(startList.size()); for (E node : startList) visit(node, false); // Done return this.ordering; } private void visit(E node, boolean reverse) { // Have we been here before? Boolean state = this.visited.get(node); if (state != null) { if (!state.booleanValue()) throw new IllegalArgumentException("cycle in graph containing " + node); return; } this.visited.put(node, false); // Get all destination nodes of all out-edges ArrayList<E> targets = Collections.list(Collections.enumeration(this.edgeLister.getOutEdges(node))); // Sort them in reverse desired order and recurse Collections.sort(targets, getTieBreaker(reverse)); for (E target : targets) visit(target, reverse); // Add this node to list in post-order and mark complete this.ordering.add(node); this.visited.put(node, true); } private Comparator<? 
super E> getDefaultTieBreaker() { final HashMap<E, Integer> orderMap = new HashMap<E, Integer>(this.nodes.size()); int posn = 0; for (E node : this.nodes) orderMap.put(node, posn++); return new Comparator<E>() { public int compare(E node1, E node2) { return orderMap.get(node1) - orderMap.get(node2); } }; } private Comparator<? super E> getTieBreaker(boolean reverse) { if (reverse) return Collections.reverseOrder(this.tieBreaker); return this.tieBreaker; } /** * Implemented by classes that can enumerate the outgoing edges from a node in a graph. */ public interface EdgeLister<E> { /** * Get the set of all nodes X for which there is an edge from {@code node} to X. */ Set<E> getOutEdges(E node); } }
package ai.ilikeplaces.servlets;

import ai.ilikeplaces.doc.DOCUMENTATION;
import ai.ilikeplaces.doc.LOGIC;
import ai.ilikeplaces.doc.NOTE;
import ai.ilikeplaces.util.Parameter;
import org.apache.commons.httpclient.Header;
import org.apache.commons.httpclient.HttpClient;
import org.apache.commons.httpclient.HttpStatus;
import org.apache.commons.httpclient.MultiThreadedHttpConnectionManager;
import org.apache.commons.httpclient.methods.GetMethod;
import org.json.JSONException;
import org.json.JSONObject;

import javax.servlet.ServletException;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.util.Arrays;
import java.util.Map;

// NOTE(review): the string literals inside the annotations below arrived
// garbled (missing '+' concatenations / truncated URLs) — they are kept
// byte-for-byte; recover the intact text from version control.
@DOCUMENTATION(
        NOTE = @NOTE(
                "This class was specifically designed to be vendor independent." +
                        "Much effort was made to build it by looking at http://tools.ietf.org/html/draft-ietf-oauth-v2 . " +
                        "Please immediately report any bugs you find at https: "Thank you!"),
        LOGIC = @LOGIC(
                @NOTE("A servlet is a shared resource." +
                        "Hence this class can contain only shared details of a the application as a client. " +
                        "It shall under no circumstance, contain data related to a specific user." +
                        "Since it is abstract, this class however, can have data related to a specific OAuth facilitator such as Facebook, Twitter and Google " +
                        "as each new instance with be created for that specific vendor and shared within that pool (logical pool). ")))
public abstract class AbstractOAuth extends HttpServlet {

    static final RuntimeException RedirectToOAuthEndpointFailed = new RuntimeException("Redirect to OAuth Endpoint Failed!");

    // OAuth 2.0 protocol parameter names (draft-ietf-oauth-v2)
    static final String code = "code";
    static final String redirect_uri = "redirect_uri";
    static final String client_id = "client_id";
    static final String response_type = "response_type";
    static final String scope = "scope";
    static final String state = "state";
    static final String access_token = "access_token";
    static final String expires_in = "expires_in";
    static final String refresh_token = "refresh_token";
    static final String parameters = "parameters";
    static final String token_type = "token_type";

    // URL-building fragments
    private static final String QUESTION_MARK = "?";
    private static final String GOT_ERROR_CODE = "Got error code:";
    private static final String DOT_JASON = ".json";
    private static final String EQUALS = "=";
    private static final String API_KEY = "key" + EQUALS;
    private static final String AMPERSAND = "&";
    private static final String OPEN_SQR_BRCKT = "[";
    private static final String CLOSE_SQR_BRCKT = "]";
    private static final String EMPTY = "";

    // Per-vendor configuration captured from oAuthProvider() at construction
    private OAuthAuthorizationRequest oAuthAuthorizationRequest;
    private String oAuthEndpoint;
    private final HttpClient threadSafeHttpClient;
    // NOTE(review): api_key and jsonEndpoint are never assigned anywhere in
    // this view; jsonEndpoint is used as a URL prefix below — confirm a
    // subclass or container injects them, otherwise URLs start with "null".
    private String api_key;
    private String jsonEndpoint;

    /**
     * 4.1.1.  Authorization Request
     * <p/>
     * <p/>
     * The client constructs the request URI by adding the following
     * parameters to the query component of the authorization oAuthEndpoint URI
     * using the "application/x-www-form-urlencoded" format as defined by
     * [W3C.REC-html401-19991224]:
     * <p/>
     * <p/>
     * <b>response_type</b>
     * <p/>
     * REQUIRED.  Value MUST be set to "code".
     * <p/>
     * <b>client_id</b>
     * <p/>
     * REQUIRED.  The client identifier as described in Section 2.2.
     * <p/>
     * <b>redirect_uri</b>
     * <p/>
     * OPTIONAL, as described in Section 3.1.2.
     * <p/>
     * <b>scope</b>
     * <p/>
     * OPTIONAL.  The scope of the access request as described by
     * Section 3.3.
     * <p/>
     * <b>state</b>
     * <p/>
     * RECOMMENDED.  An opaque value used by the client to maintain
     * state between the request and callback.  The authorization
     * server includes this value when redirecting the user-agent back
     * to the client.  The parameter SHOULD be used for preventing
     * cross-site request forgery as described in Section 10.12.
     * <p/>
     * <p/>
     * The client directs the resource owner to the constructed URI using an
     * HTTP redirection response, or by other means available to it via the
     * user-agent.
     * <p/>
     * For example, the client directs the user-agent to make the following
     * HTTP request using transport-layer security (extra line breaks are
     * for display purposes only):
     * <p/>
     * <p/>
     * GET /authorize?response_type=code&client_id=s6BhdRkqt3&state=xyz
     * &redirect_uri=https%3A%2F%2Fclient%2Eexample%2Ecom%2Fcb HTTP/1.1
     * <p/>
     * Host: server.example.com
     */
    // NOTE(review): the @NOTE string below was truncated by extraction
    // (unterminated literal); kept as-is.
    @LOGIC(
            @NOTE("Here our main focus ist to initialize data related to " +
                    "<a href='http://tools.ietf.org/html/draft-ietf-oauth-v2-22
    // NOTE(review): calling the abstract oAuthProvider() from the
    // constructor invokes an overridable method before the subclass is
    // fully initialized — a known Java anti-pattern; confirm every
    // subclass implementation is safe to call at this point.
    public AbstractOAuth() {
        this.oAuthEndpoint = oAuthProvider().oAuthEndpoint;
        this.oAuthAuthorizationRequest = oAuthProvider().oAuthAuthorizationRequest;
        final MultiThreadedHttpConnectionManager connectionManager = new MultiThreadedHttpConnectionManager();
        threadSafeHttpClient = new HttpClient(connectionManager);
    }

    /** Supplies the vendor-specific endpoint and authorization request data. */
    abstract OAuthProvider oAuthProvider();

    /**
     * Fetches {@code jsonEndpoint + endpointEndValue} with the given query
     * parameters and parses the body as JSON.
     *
     * @param endpointEndValue path suffix appended to the JSON endpoint
     * @param parameters       query parameters, appended as &key=value pairs
     * @return the response body parsed as a JSONObject
     */
    public JSONObject getHttpContentAsJson(final String endpointEndValue, final Map<String, String> parameters) {
        final StringBuilder sb = new StringBuilder(EMPTY);
        // NOTE(review): parameter values are not URL-encoded here — confirm
        // callers pre-encode them.
        for (final String key : parameters.keySet()) {
            sb.append(AMPERSAND).append(key).append(EQUALS).append(parameters.get(key));
        }
        JSONObject jsonObject;
        try {
            jsonObject = new JSONObject(getHttpContent(endpointEndValue, sb.toString()));
        } catch (JSONException e) {
            throw new RuntimeException(e);
        }
        return jsonObject;
    }

    /**
     * Executes a GET against {@code jsonEndpoint + endpointEndValue} and
     * returns the response body as a string.
     *
     * @param endpointEndValue path suffix appended to the JSON endpoint
     * @param optionalAppend   All strings in array will be concatenated and appended
     * @return the full response body
     */
    private String getHttpContent(final String endpointEndValue, final String... optionalAppend) {
        // Arrays.toString() produces "[a, b]"; stripping the brackets leaves
        // "a, b" — note the ", " separators remain in the URL.
        final String toBeCalled = jsonEndpoint + endpointEndValue + QUESTION_MARK +
                ((optionalAppend != null && optionalAppend.length != 0)
                        ? Arrays.toString(optionalAppend).replace(OPEN_SQR_BRCKT, EMPTY).replace(CLOSE_SQR_BRCKT, EMPTY)
                        : EMPTY);

        final GetMethod getMethod = new GetMethod(toBeCalled);
        int statusCode = 0;
        try {
            statusCode = threadSafeHttpClient.executeMethod(getMethod);
        } catch (final IOException e) {
            throw new RuntimeException(e);
        }
        if (statusCode != HttpStatus.SC_OK) {
            throw new RuntimeException(GOT_ERROR_CODE + statusCode);
        }
        // NOTE(review): getMethod.releaseConnection() is never called on any
        // path in this class; with MultiThreadedHttpConnectionManager this
        // can exhaust the connection pool — confirm and fix.
        InputStream inputStream = null;
        try {
            inputStream = getMethod.getResponseBodyAsStream();
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
        final BufferedReader br = new BufferedReader(new InputStreamReader(inputStream));
        String line;
        // NOTE(review): O(n^2) string accumulation; a StringBuilder would be
        // linear. The reader is also not closed if readLine() throws.
        String accumulator = EMPTY;
        try {
            while ((line = br.readLine()) != null) {
                accumulator += line;
            }
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
        try {
            br.close();
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
        return accumulator;
    }

    /**
     * Executes a GET and returns a single named header.
     *
     * @param endpointEndValue path suffix appended to the JSON endpoint
     * @param headerName       name of the header to return
     * @param optionalAppend   All strings in array will be concatenated and appended
     * @return the named header
     */
    private Header getHttpHeader(final String endpointEndValue, final String headerName, final String... optionalAppend) {
        final String toBeCalled = jsonEndpoint + endpointEndValue + QUESTION_MARK +
                ((optionalAppend != null && optionalAppend.length != 0)
                        ? Arrays.toString(optionalAppend).replace(OPEN_SQR_BRCKT, EMPTY).replace(CLOSE_SQR_BRCKT, EMPTY)
                        : EMPTY);

        final GetMethod getMethod = new GetMethod(toBeCalled);
        int statusCode = 0;
        try {
            statusCode = threadSafeHttpClient.executeMethod(getMethod);
        } catch (final IOException e) {
            throw new RuntimeException(e);
        }
        if (statusCode != HttpStatus.SC_OK) {
            throw new RuntimeException(GOT_ERROR_CODE + statusCode);
        }
        // NOTE(review): this reads the REQUEST header after executing the
        // method; a server-supplied value would be in the response headers
        // (getResponseHeader) — looks like a bug, confirm intent.
        return getMethod.getRequestHeader(headerName);
    }

    /**
     * Exchanges an authorization code for an access token by calling the
     * OAuth endpoint and picking known fields out of the returned headers.
     */
    OAuthAccessTokenResponse getOAuthAccessTokenResponse(final OAuthAuthorizationResponse oAuthAuthorizationResponse) {
        final Header[] oAuthAccessTokenResponseHeaders = getHttpHeaders(
                oAuthEndpoint,
                new Parameter().append(code, oAuthAuthorizationResponse.code).get()
        );
        String name;
        String value;
        // default every field to "" so absent headers yield empty strings
        String access_token_value = "";
        String token_type_value = "";
        String expires_in_value = "";
        String refresh_token_value = "";
        String parameters_value = "";
        for (final Header header : oAuthAccessTokenResponseHeaders) {
            name = header.getName();
            value = header.getValue();
            if (name.equals(access_token)) {
                access_token_value = value;
                continue;
            }
            if (name.equals(token_type)) {
                token_type_value = value;
                continue;
            }
            if (name.equals(expires_in)) {
                expires_in_value = value;
                continue;
            }
            if (name.equals(refresh_token)) {
                refresh_token_value = value;
                continue;
            }
            if (name.equals(parameters)) {
                parameters_value = value;
                continue;
            }
        }
        return new OAuthAccessTokenResponse(access_token_value, token_type_value, expires_in_value, refresh_token_value, parameters_value);
    }

    /**
     * Executes a GET and returns all headers.
     *
     * @param endpointEndValue full endpoint URL (not prefixed with jsonEndpoint here)
     * @param optionalAppend   All strings in array will be concatenated and appended
     * @return the headers
     */
    private Header[] getHttpHeaders(final String endpointEndValue, final String... optionalAppend) {
        final String toBeCalled = endpointEndValue + QUESTION_MARK +
                ((optionalAppend != null && optionalAppend.length != 0)
                        ? Arrays.toString(optionalAppend).replace(OPEN_SQR_BRCKT, EMPTY).replace(CLOSE_SQR_BRCKT, EMPTY)
                        : EMPTY);

        final GetMethod getMethod = new GetMethod(toBeCalled);
        int statusCode = 0;
        try {
            statusCode = threadSafeHttpClient.executeMethod(getMethod);
        } catch (final IOException e) {
            throw new RuntimeException(e);
        }
        if (statusCode != HttpStatus.SC_OK) {
            throw new RuntimeException(GOT_ERROR_CODE + statusCode);
        }
        // NOTE(review): returns the REQUEST headers, not the response
        // headers — getOAuthAccessTokenResponse() above scans these for
        // server-issued token fields, which can only work if they are in
        // the response (getResponseHeaders) — confirm, likely a bug.
        return getMethod.getRequestHeaders();
    }

    /**
     * Handles the authorization-code leg: if the request carries no "code"
     * parameter the user-agent is redirected to the authorization endpoint
     * and null is returned; otherwise the code/state pair is returned.
     *
     * @param request  incoming servlet request
     * @param response servlet response used for the redirect
     * @return OAuthAuthorizationResponse or redirects user to endpoint and returns null
     */
    OAuthAuthorizationResponse getOAuthAuthorizationResponse(final HttpServletRequest request, final HttpServletResponse response) {
        final String code = request.getParameter(AbstractOAuth.code);
        final String state = request.getParameter(AbstractOAuth.state);
        if (code == null || code.isEmpty()) {
            try {
                response.sendRedirect(
                        new Parameter(this.oAuthEndpoint)
                                .append(client_id, this.oAuthAuthorizationRequest.client_id, true)
                                .append(redirect_uri, this.oAuthAuthorizationRequest.redirect_uri)
                                .append(response_type, this.oAuthAuthorizationRequest.response_type)
                                .append(scope, this.oAuthAuthorizationRequest.scope)
                                .append(AbstractOAuth.state, this.oAuthAuthorizationRequest.state)
                                .get()
                );
            } catch (final IOException e) {
                //hmmm!
                // NOTE(review): throws a shared, pre-built exception instance
                // and drops the IOException cause — stack trace will point at
                // class initialization, not at this failure.
                throw RedirectToOAuthEndpointFailed;
            }
            return null;
        } else {
            return new OAuthAuthorizationResponse(code, state);
        }
    }

    /** Immutable pairing of an OAuth endpoint URL with its request template. */
    public final class OAuthProvider {
        final public OAuthAuthorizationRequest oAuthAuthorizationRequest;
        final public String oAuthEndpoint;

        public OAuthProvider(
                final String oAuthEndpoint,
                final OAuthAuthorizationRequest oAuthAuthorizationRequest) {
            this.oAuthEndpoint = oAuthEndpoint;
            this.oAuthAuthorizationRequest = oAuthAuthorizationRequest;
        }

        @Override
        public String toString() {
            return "OAuthProvider{" +
                    "oAuthAuthorizationRequest=" + oAuthAuthorizationRequest +
                    ", oAuthEndpoint='" + oAuthEndpoint + '\'' +
                    '}';
        }
    }

    /**
     * 4.1.1.  Authorization Request
     * <p/>
     * <p/>
     * The client constructs the request URI by adding the following
     * parameters to the query component of the authorization endpoint URI
     * using the "application/x-www-form-urlencoded" format as defined by
     * [W3C.REC-html401-19991224]:
     * <p/>
     * <p/>
     * <b>response_type</b>
     * <p/>
     * REQUIRED.  Value MUST be set to "code".
     * <p/>
     * <b>client_id</b>
     * <p/>
     * REQUIRED.  The client identifier as described in Section 2.2.
     * <p/>
     * <b>redirect_uri</b>
     * <p/>
     * OPTIONAL, as described in Section 3.1.2.
     * <p/>
     * <b>scope</b>
     * <p/>
     * OPTIONAL.  The scope of the access request as described by
     * Section 3.3.
     * <p/>
     * <b>state</b>
     * <p/>
     * RECOMMENDED.  An opaque value used by the client to maintain
     * state between the request and callback.  The authorization
     * server includes this value when redirecting the user-agent back
     * to the client.  The parameter SHOULD be used for preventing
     * cross-site request forgery as described in Section 10.12.
     * <p/>
     * <p/>
     * The client directs the resource owner to the constructed URI using an
     * HTTP redirection response, or by other means available to it via the
     * user-agent.
     * <p/>
     * For example, the client directs the user-agent to make the following
     * HTTP request using transport-layer security (extra line breaks are
     * for display purposes only):
     * <p/>
     * <p/>
     * GET /authorize?response_type=code&client_id=s6BhdRkqt3&state=xyz
     * &redirect_uri=https%3A%2F%2Fclient%2Eexample%2Ecom%2Fcb HTTP/1.1
     * <p/>
     * Host: server.example.com
     */
    @LOGIC(
            @NOTE("By nature, we don't want this object to be modified after construction."))
    public final class OAuthAuthorizationRequest {
        /**
         * REQUIRED.  Value MUST be set to "code".
         */
        final private String response_type;
        /**
         * REQUIRED.  The client identifier as described in Section 2.2.
         */
        final private String client_id;
        /**
         * OPTIONAL, as described in Section 3.1.2.
         */
        final private String redirect_uri;
        /**
         * OPTIONAL.  The scope of the access request as described by
         * Section 3.3.
         */
        final private String scope;
        /**
         * RECOMMENDED.  An opaque value used by the client to maintain
         * state between the request and callback.  The authorization
         * server includes this value when redirecting the user-agent back
         * to the client.  The parameter SHOULD be used for preventing
         * cross-site request forgery as described in Section 10.12.
         */
        final private String state;

        @LOGIC(
                @NOTE("By nature, we don't want this object to be modified after construction."))
        public OAuthAuthorizationRequest(final String response_type,
                                         final String client_id,
                                         final String redirect_uri,
                                         final String scope,
                                         final String state) {
            // null arguments are normalized to "" so downstream URL building
            // never sees a literal "null"
            this.response_type = response_type != null ? response_type : "";
            this.client_id = client_id != null ? client_id : "";
            this.redirect_uri = redirect_uri != null ? redirect_uri : "";
            this.scope = scope != null ? scope : "";
            this.state = state != null ? state : "";
        }

        @Override
        public String toString() {
            return "OAuthAuthorizationRequest{" +
                    "response_type='" + response_type + '\'' +
                    ", client_id='" + client_id + '\'' +
                    ", redirect_uri='" + redirect_uri + '\'' +
                    ", scope='" + scope + '\'' +
                    ", state='" + state + '\'' +
                    '}';
        }
    }

    @LOGIC(
            @NOTE("By nature, we don't want this object to be modified after construction."))
    public final class OAuthAuthorizationResponse {
        /**
         * REQUIRED.  The authorization code generated by the
         * authorization server.  The authorization code MUST expire
         * shortly after it is issued to mitigate the risk of leaks.  A
         * maximum authorization code lifetime of 10 minutes is
         * RECOMMENDED.  The client MUST NOT use the authorization code
         * more than once.  If an authorization code is used more than
         * once, the authorization server MUST deny the request and SHOULD
         * attempt to revoke all tokens previously issued based on that
         * authorization code.  The authorization code is bound to the
         * client identifier and redirection URI.
         */
        final private String code;
        /**
         * REQUIRED if the "state" parameter was present in the client
         * authorization request.  The exact value received from the
         * client.
         */
        final private String state;

        public OAuthAuthorizationResponse(final String code, final String state) {
            // null arguments are normalized to "" (see note above)
            this.code = code != null ? code : "";
            this.state = state != null ? state : "";
        }

        @Override
        public String toString() {
            return "OAuthAuthorizationResponse{" +
                    "code='" + code + '\'' +
                    ", state='" + state + '\'' +
                    '}';
        }
    }

    /**
     * The client makes a request to the token endpoint by adding the
     * following parameters using the "application/x-www-form-urlencoded"
     * format in the HTTP request entity-body:
     * <p/>
     * grant_type
     * REQUIRED.  Value MUST be set to "authorization_code".
     * code
     * REQUIRED.  The authorization code received from the
     * authorization server.
     * redirect_uri
     * REQUIRED, if the "redirect_uri" parameter was included in the
     * authorization request as described in Section 4.1.1, and their
     * values MUST be identical.
     * <p/>
     * If the client type is confidential or the client was issued client
     * credentials (or assigned other authentication requirements), the
     * client MUST authenticate with the authorization server as described
     * in Section 3.2.1.
     * <p/>
     * For example, the client makes the following HTTP request using
     * transport-layer security (extra line breaks are for display purposes
     * only):
     * <p/>
     * <p/>
     * POST /token HTTP/1.1
     * Host: server.example.com
     * Authorization: Basic czZCaGRSa3F0MzpnWDFmQmF0M2JW
     * Content-Type: application/x-www-form-urlencoded;charset=UTF-8
     * <p/>
     * grant_type=authorization_code&code=SplxlOBeZQQYbYS6WxSbIA
     * &redirect_uri=https%3A%2F%2Fclient%2Eexample%2Ecom%2Fcb
     */
    // NOTE(review): this inner class continues beyond the visible chunk;
    // the remainder is not shown here.
    public final class OAuthAccessTokenRequest {
        /**
         * REQUIRED.  Value MUST be set to "authorization_code".
         */
        private final String grant_type;
        /**
         * REQUIRED.  The authorization code received from the
         * authorization server.
*/ private final String code; /** * REQUIRED, if the "redirect_uri" parameter was included in the * authorization request as described in Section 4.1.1, and their * values MUST be identical. */ private final String redirect_uri; @LOGIC( @NOTE("By nature, we don't want this object to be modified after construction.")) public OAuthAccessTokenRequest(final String grant_type, final String code, final String redirect_uri) { this.grant_type = grant_type != null ? grant_type : ""; this.code = code != null ? code : ""; this.redirect_uri = redirect_uri != null ? redirect_uri : ""; } @Override public String toString() { return "OAuthAccessTokenRequest{" + "grant_type='" + grant_type + '\'' + ", code='" + code + '\'' + ", redirect_uri='" + redirect_uri + '\'' + '}'; } } /** * If the access token request is valid and authorized, the * authorization server issues an access token and optional refresh * token as described in Section 5.1. If the request client * authentication failed or is invalid, the authorization server returns * an error response as described in Section 5.2. * <p/> * An example successful response: * <p/> * <p/> * HTTP/1.1 200 OK * Content-Type: application/json;charset=UTF-8 * Cache-Control: no-store * Pragma: no-cache * <p/> * { * "access_token":"2YotnFZFEjr1zCsicMWpAA", * "token_type":"example", * "expires_in":3600, * "refresh_token":"tGzv3JOkF0XG5Qx2TlKWIA", * "example_parameter":"example_value" * } */ public final class OAuthAccessTokenResponse { /** * REQUIRED. The access token issued by the authorization server. */ final private String access_token; /** * REQUIRED. The type of the token issued as described in Section 7.1. Value is case insensitive. */ final private String token_type; /** * OPTIONAL. The lifetime in seconds of the access token. For * example, the value "3600" denotes that the access token will * expire in one hour from the time the response was generated. */ final private String expires_in; /** * OPTIONAL. 
The refresh token which can be used to obtain new * access tokens using the same authorization grant as described * in Section 6. */ final private String refresh_token; /** * OPTIONAL. The scope of the access token as described by */ final private String parameters; @LOGIC( @NOTE("By nature, we don't want this object to be modified after construction.")) public OAuthAccessTokenResponse(final String access_token, final String token_type, final String expires_in, final String refresh_token, final String parameters) { this.access_token = access_token != null ? access_token : ""; this.token_type = token_type != null ? token_type : ""; this.expires_in = expires_in != null ? expires_in : ""; this.refresh_token = refresh_token != null ? refresh_token : ""; this.parameters = parameters != null ? parameters : ""; } } /** * Handles the HTTP <code>GET</code> method. * * @param request servlet request * @param response servlet response * @throws javax.servlet.ServletException if a servlet-specific error occurs * @throws java.io.IOException if an I/O error occurs */ @Override protected void doGet(final HttpServletRequest request, final HttpServletResponse response) throws ServletException, IOException { processRequest(request, response); } @LOGIC( @NOTE( "As of now, this method is where we get a redirect from the OAuth server with the related data below: (<a href='http://tools.ietf.org/html/draft-ietf-oauth-v2-22 " code\n" + " REQUIRED. The authorization code generated by the\n" + " authorization server. The authorization code MUST expire\n" + " shortly after it is issued to mitigate the risk of leaks. A\n" + " maximum authorization code lifetime of 10 minutes is\n" + " RECOMMENDED. The client MUST NOT use the authorization code\n" + " more than once. If an authorization code is used more than\n" + " once, the authorization server MUST deny the request and SHOULD\n" + " attempt to revoke all tokens previously issued based on that\n" + " authorization code. 
The authorization code is bound to the\n" + " client identifier and redirection URI.\n" + " state\n" + " REQUIRED if the \"state\" parameter was present in the client\n" + " authorization request. The exact value received from the")) abstract void processRequest(final HttpServletRequest request, final HttpServletResponse response); /** * Handles the HTTP <code>POST</code> method. * * @param request servlet request * @param response servlet response * @throws ServletException if a servlet-specific error occurs * @throws IOException if an I/O error occurs */ @Override protected void doPost(final HttpServletRequest request, final HttpServletResponse response) throws ServletException, IOException { processRequest(request, response); } }
package at.aau.dwaspgui.debugger; import java.io.File; import java.io.IOException; import java.nio.file.Files; import java.nio.file.Paths; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.function.Consumer; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import at.aau.GringoWrapper; import at.aau.Rule; import at.aau.dwaspgui.app.config.ApplicationPreferences; import at.aau.dwaspgui.debugger.protocol.Message; import at.aau.dwaspgui.debugger.protocol.MessageParsingException; import at.aau.dwaspgui.debugger.protocol.ReadableMessage; import at.aau.dwaspgui.debugger.protocol.assertion.AssertionMessage; import at.aau.dwaspgui.debugger.protocol.info.ProgramCoherentInfoMessage; import at.aau.dwaspgui.debugger.protocol.request.RequestMessage; import at.aau.dwaspgui.debugger.protocol.request.RequestMessage.RequestType; import at.aau.dwaspgui.debugger.protocol.response.CoreResponseMessage; import at.aau.dwaspgui.debugger.protocol.response.QueryResponseMessage; import at.aau.dwaspgui.domain.CoreItem; import at.aau.dwaspgui.domain.Encoding; import at.aau.dwaspgui.domain.QueryAnswer; import at.aau.dwaspgui.domain.TestCase; import at.aau.dwaspgui.util.Messages; import at.aau.grounder.GroundingException; import at.aau.postprocessing.PostprocessingException; import javafx.beans.property.BooleanProperty; import javafx.beans.property.SimpleBooleanProperty; /** * DWASP-{@link Debugger} implementation. 
* @author Philip Gasteiger */ public class DebuggerImpl implements Debugger { private static final Logger log = LoggerFactory.getLogger(DebuggerImpl.class); private List<Consumer<List<CoreItem>>> coreCallbacks = new ArrayList<Consumer<List<CoreItem>>>(); private List<Consumer<List<String>>> queryCallbacks = new ArrayList<Consumer<List<String>>>(); private List<Consumer<List<String>>> coherentCallbacks = new ArrayList<Consumer<List<String>>>(); private BooleanProperty isRunning = new SimpleBooleanProperty(false); private BooleanProperty isComputingCore = new SimpleBooleanProperty(false); private BooleanProperty isComputingQuery = new SimpleBooleanProperty(false); private Process debugger = null; private Collection<Encoding> currentProgram = null; private Map<String, Rule> debugRuleMap = null; private ExecutorService messageReaderExecutor = null; private ExecutorService debuggerExecutor = null; private static final String DEBUGGER_OPTION_GUI = "--debug-gui"; private static final String DEBUGGER_OPTION_INPUT_FILE = "--debug="; private static final String GRINGO_WRAPPER_OPTIONS = ""; private static final String GRINGO_WRAPPER_DEBUGCONSTANT = "_debug"; @Override public void startDebugger(Collection<Encoding> program, TestCase testCase) throws DebuggerException { // guard: do not start the debugger twice if (isRunning.get()) return; isRunning.set(true); currentProgram = program; debuggerExecutor = Executors.newSingleThreadExecutor(); messageReaderExecutor = Executors.newSingleThreadExecutor(); try { File debugFile = File.createTempFile(".dbg", "debug"); debugFile.deleteOnExit(); groundProgram(program, testCase, debugFile.getAbsolutePath()); startDebugger(debugFile.getAbsolutePath()); } catch (IOException e) { log.error("Could not create a temporary file for the debugger", e); throw new DebuggerException(Messages.ERROR_GROUNDING.format()); } } private void groundProgram(Collection<Encoding> program, TestCase testCase, String filename) throws DebuggerException { 
StringBuilder inputProgram = new StringBuilder(); for (Encoding encoding : program) { inputProgram.append(encoding.getContent()); } inputProgram.append(testCase.getAssertions()); GringoWrapper wrapper = new GringoWrapper(ApplicationPreferences.COMMAND_GROUNDER.get(), GRINGO_WRAPPER_OPTIONS, GRINGO_WRAPPER_DEBUGCONSTANT, false, false); try { debugRuleMap = new HashMap<String, Rule>(); String groundedProgram = wrapper.ground(inputProgram.toString(), true, debugRuleMap); Files.write(Paths.get(filename), groundedProgram.getBytes()); } catch (GroundingException | PostprocessingException | IOException e) { log.error("Could not ground the logic program.", e); throw new DebuggerException(Messages.ERROR_GROUNDING.format(), e); } } private void startDebugger(String filename) throws DebuggerException { ProcessBuilder builder = new ProcessBuilder(ApplicationPreferences.COMMAND_DEBUGGER.get(), DEBUGGER_OPTION_GUI, DEBUGGER_OPTION_INPUT_FILE + filename); try { debugger = builder.start(); messageReaderExecutor.execute(messageReader); notifyCores(); getQuery(); } catch (IOException e) { log.error("Could not start the debugger.", e); throw new DebuggerException(Messages.ERROR_START_DEBUGGER.format(), e); } } private final Runnable messageReader = () -> { while(isRunning.get()) { try{ ReadableMessage msg = Message.parseFromInputStream(debugger.getInputStream()); if (msg instanceof CoreResponseMessage) { CoreResponseMessage response = (CoreResponseMessage) msg; coreCallbacks.forEach(c -> c.accept(response.getCoreItems(debugRuleMap, currentProgram))); } else if (msg instanceof QueryResponseMessage) { QueryResponseMessage response = (QueryResponseMessage) msg; queryCallbacks.forEach(c -> c.accept(response.getAtoms().subList(0, response.getAtoms().size() > 9 ? 
9 : response.getAtoms().size()))); } else if (msg instanceof ProgramCoherentInfoMessage) { ProgramCoherentInfoMessage info = (ProgramCoherentInfoMessage) msg; coherentCallbacks.forEach(c -> c.accept(info.getAnswerSet())); stopDebugger(); } } catch (MessageParsingException e) { log.error("Could not parse the core response from the debugger.", e); } } }; private Runnable coreRequest = () -> { try { RequestMessage request = new RequestMessage(RequestType.GET_CORE); request.writeToOutputStream(debugger.getOutputStream()); } catch (IOException e) { log.error("Could not write the core request to the debugger.", e); } }; private Runnable queryRequest = () -> { try{ RequestMessage request = new RequestMessage(RequestType.GET_QUERY); request.writeToOutputStream(debugger.getOutputStream()); } catch (IOException e) { log.error("Could not write the query request to the debugger.", e); } }; private void notifyCores() { debuggerExecutor.execute(coreRequest); } private void getQuery() { debuggerExecutor.execute(queryRequest); } @Override public void assertAtoms(Map<String, QueryAnswer> assertions) { debuggerExecutor.execute(() -> { try { AssertionMessage msg = new AssertionMessage(assertions); msg.writeToOutputStream(debugger.getOutputStream()); notifyCores(); getQuery(); } catch (Exception e) { log.error("Could not write the assertion request to the debugger"); } }); } @Override public void stopDebugger() { // guard: debugger must be running if (!isRunning.get()) return; isRunning.set(false); currentProgram = null; debugRuleMap = null; if (debuggerExecutor != null) { debuggerExecutor.shutdownNow(); debuggerExecutor = null; } if (messageReaderExecutor != null) { messageReaderExecutor.shutdownNow(); messageReaderExecutor = null; } coreCallbacks.forEach(c -> c.accept(Collections.emptyList())); queryCallbacks.forEach(c -> c.accept(Collections.emptyList())); if (debugger != null && debugger.isAlive()) debugger.destroy(); } @Override public void 
registerCoreCallback(Consumer<List<CoreItem>> callback) { coreCallbacks.add(callback); } @Override public void registerQueryCallback(Consumer<List<String>> callback) { queryCallbacks.add(callback); } @Override public void registerCoherentCallback(Consumer<List<String>> callback) { coherentCallbacks.add(callback); } @Override public BooleanProperty isRunning() { return isRunning; } @Override public BooleanProperty isComputingCore() { return isComputingCore; } @Override public BooleanProperty isComputingQuery() { return isComputingQuery; } }
package cat.nyaa.nyaautils.elytra; import cat.nyaa.nyaacore.utils.InventoryUtils; import cat.nyaa.nyaautils.I18n; import cat.nyaa.nyaautils.NyaaUtils; import org.bukkit.ChatColor; import org.bukkit.Material; import org.bukkit.entity.Player; import org.bukkit.inventory.ItemStack; import org.bukkit.inventory.meta.ItemMeta; import java.util.ArrayList; import java.util.List; import java.util.Random; public class FuelManager { private final NyaaUtils plugin; public String lore_prefix = ChatColor.translateAlternateColorCodes('&', "&r&9&e&a&1&4&0&2&r"); public FuelManager(NyaaUtils pl) { plugin = pl; } public int getFuelAmount(Player player, boolean exact) { int fuel = 0; if (InventoryUtils.hasItem(player, plugin.cfg.fuelConfig.elytra_fuel, 1)) { fuel = InventoryUtils.getAmount(player, plugin.cfg.fuelConfig.elytra_fuel); } for (int i = 0; i <= player.getInventory().getSize(); i++) { if (!exact && fuel > plugin.cfg.elytra_fuel_notify) { return fuel; } ItemStack item = player.getInventory().getItem(i); int fuelID = getFuelID(item); if (fuelID != -1 && plugin.cfg.fuelConfig.fuel.containsKey(fuelID)) { fuel += getFuelDurability(item); } } return fuel; } public boolean useFuel(Player player) { if (plugin.cfg.fuelConfig.elytra_fuel != null && plugin.cfg.fuelConfig.elytra_fuel.getType() != Material.AIR) { if (InventoryUtils.removeItem(player, plugin.cfg.fuelConfig.elytra_fuel, 1)) { return true; } } for (int i = 0; i <= player.getInventory().getSize(); i++) { ItemStack item = player.getInventory().getItem(i); int fuelID = getFuelID(item); if (fuelID != -1 && getFuel(fuelID) != null) { int durability = getFuelDurability(item); FuelItem fuel = getFuel(fuelID); if (durability > fuel.getMaxDurability()) { durability = fuel.getMaxDurability(); } durability if (durability <= 0) { player.getInventory().setItem(i, new ItemStack(Material.AIR)); } else { updateItem(item, fuelID, durability); } return true; } } return false; } public void updateItem(ItemStack item, int fuelID, int 
durability) { FuelItem fuel = plugin.cfg.fuelConfig.fuel.get(fuelID); if (fuel == null) { return; } String hex = toHexString(fuelID) + toHexString(durability) + toHexString(new Random().nextInt(65535)); String str = ""; for (int i = 0; i < hex.length(); i++) { str += ChatColor.COLOR_CHAR + hex.substring(i, i + 1); } str += ChatColor.COLOR_CHAR + "r"; ItemMeta meta = fuel.getItem().getItemMeta(); List<String> lore; if (meta.hasLore()) { lore = meta.getLore(); lore.set(0, lore_prefix + str + lore.get(0)); lore.add(lore_prefix + I18n.format("user.elytra_enhance.fuel_durability", durability, fuel.getMaxDurability())); } else { lore = new ArrayList<>(); lore.add(lore_prefix + str + I18n.format("user.elytra_enhance.fuel_durability", durability, fuel.getMaxDurability())); } item.setType(fuel.getItem().getType()); item.setDurability(fuel.getItem().getDurability()); item.setData(fuel.getItem().getData()); meta.setLore(lore); item.setItemMeta(meta); } public int getFuelID(ItemStack item) { if (item != null && !item.getType().equals(Material.AIR) && item.hasItemMeta() && item.getItemMeta().hasLore()) { String lore = item.getItemMeta().getLore().get(0); if (lore != null && lore.length() >= (lore_prefix.length() + 24 + 2) && lore.startsWith(lore_prefix)) { try { return Integer.parseInt(lore.substring(lore_prefix.length(), lore_prefix.length() + 8).replaceAll(String.valueOf(ChatColor.COLOR_CHAR), ""), 16); } catch (NumberFormatException e) { return -1; } } } return -1; } public int getFuelDurability(ItemStack item) { if (item != null && !item.getType().equals(Material.AIR) && item.hasItemMeta() && item.getItemMeta().hasLore()) { String lore = item.getItemMeta().getLore().get(0); if (lore != null && lore.length() >= (lore_prefix.length() + 24 + 2) && lore.contains(lore_prefix)) { try { return Integer.parseInt(lore.substring(lore_prefix.length() + 8, lore_prefix.length() + 16).replaceAll(String.valueOf(ChatColor.COLOR_CHAR), ""), 16); } catch (NumberFormatException e) { return -1; 
} } } return -1; } public FuelItem getFuel(int fuelID) { if (plugin.cfg.fuelConfig.fuel.containsKey(fuelID)) { return plugin.cfg.fuelConfig.fuel.get(fuelID); } return null; } public String toHexString(int i) { String string = Integer.toHexString(i); if (string.length() < 4) { return "0000".substring(0, 4 - string.length()) + string; } return string; } }
package cn.com.lemon.common.connection; import java.sql.Connection; import java.sql.DriverManager; import java.sql.SQLException; /** * Static utility methods pertaining to {@code Connections} primitives. * <p> * The base utility contain basic operate by {@code newInstance} and * {@code close} * * @author shellpo shih * @version 1.0 */ public final class Mysqls { private Mysqls() { } public static final String url = "jdbc:mysql://192.168.2.88:3306/tas?useUnicode=true&amp;characterEncoding=UTF-8"; public static final String name = "com.mysql.jdbc.Driver"; public static final String user = "test"; public static final String password = "1234"; private static ThreadLocal<Connection> connectthreadLocal = new ThreadLocal<Connection>(); /** * Create the {@code Connection} * * @param url * @param name * @param user * @param password * @return * @return {@code Connection} the mysql connection */ public static Connection newInstance(String url, String name, String user, String password) { Connection connect = connectthreadLocal.get(); if (connect == null) { try { Class.forName(name); connect = DriverManager.getConnection(url, user, password); connectthreadLocal.set(connect); } catch (Exception e) { e.printStackTrace(); return null; } } return connect; } /** * Create the {@code Connection} * * @return {@code Connection} the mysql connection */ public static Connection newInstance() { Connection connect = connectthreadLocal.get(); if (connect == null) { try { Class.forName(name); connect = DriverManager.getConnection(url, user, password); connectthreadLocal.set(connect); } catch (Exception e) { e.printStackTrace(); return null; } } return connect; } /** * Close the database {@code Connection} * * @return {@code Boolean} */ public static boolean close(Connection conn) { try { if (!conn.isClosed()) { conn.close(); return true; } else { return true; } } catch (SQLException e) { e.printStackTrace(); return false; } } }
package com.beamfield.tc2.utils;

import cpw.mods.fml.common.Loader;
import cpw.mods.fml.common.ModAPIManager;

/**
 * Base class for optional mod-integration helpers. Subclasses override
 * {@link #register()} to hook into the mods detected by {@link #detectMods()}.
 */
public class ModHelperBase {
	// True when the "CoFHCore" mod is loaded; populated by detectMods().
	public static boolean useCofh;

	// No-op by default; subclasses register their integration here.
	public void register() {}

	// Queries FML's Loader for the presence of CoFH Core and caches the result.
	// NOTE(review): the ModAPIManager import is currently unused — possibly
	// intended for API-based detection; confirm before removing.
	public static void detectMods() {
		useCofh = Loader.isModLoaded("CoFHCore");
	}
}
package com.civilizer.web.view;

import java.io.File;
import java.io.Serializable;

import org.apache.commons.io.FileUtils;
import org.apache.commons.io.FilenameUtils;
import org.primefaces.model.UploadedFile;

/**
 * JSF backing bean that holds a single PrimeFaces upload and can persist its
 * bytes to disk.
 */
@SuppressWarnings("serial")
public final class FileUploadBean implements Serializable {

	/** The most recently uploaded file, as set by the view. */
	private UploadedFile file;

	public UploadedFile getFile() {
		return file;
	}

	public void setFile(UploadedFile file) {
		this.file = file;
	}

	/** Returns the bare file name of the upload, stripped of any path component. */
	public String getFileName() {
		return FilenameUtils.getName(file.getFileName());
	}

	/**
	 * Writes the uploaded bytes to {@code path}.
	 *
	 * @param path destination file path
	 * @return true on success; false if writing failed (best-effort — the
	 *         failure is printed, not propagated)
	 */
	public boolean saveFile(String path) {
		try {
			final File target = new File(path);
			FileUtils.writeByteArrayToFile(target, this.file.getContents());
			return true;
		} catch (Exception e) {
			e.printStackTrace();
			return false;
		}
	}
}
package com.codeborne.selenide.impl; import com.codeborne.selenide.Selenide; import com.codeborne.selenide.logevents.SelenideLog; import com.codeborne.selenide.logevents.SelenideLogger; import org.openqa.selenium.JavascriptExecutor; import org.openqa.selenium.WebDriver; import org.openqa.selenium.WebDriverException; import org.openqa.selenium.security.UserAndPassword; import java.net.URL; import java.util.logging.Logger; import static com.codeborne.selenide.Configuration.baseUrl; import static com.codeborne.selenide.WebDriverRunner.*; import static com.codeborne.selenide.logevents.LogEvent.EventStatus.PASS; public class Navigator { private static final Logger log = Logger.getLogger(Navigator.class.getName()); public void open(String relativeOrAbsoluteUrl) { open(relativeOrAbsoluteUrl, "", "" , ""); } public void open(URL url) { open(url, "", "" , ""); } public void open(String relativeOrAbsoluteUrl, String domain, String login, String password) { if (relativeOrAbsoluteUrl.startsWith("http:") || relativeOrAbsoluteUrl.startsWith("https:") || isLocalFile(relativeOrAbsoluteUrl)) { navigateToAbsoluteUrl(relativeOrAbsoluteUrl, domain, login, password); } else { navigateToAbsoluteUrl(absoluteUrl(relativeOrAbsoluteUrl), domain, login, password); } } public void open(URL url, String domain, String login, String password) { navigateToAbsoluteUrl(url.toExternalForm()); } protected String absoluteUrl(String relativeUrl) { return baseUrl + relativeUrl; } protected void navigateToAbsoluteUrl(String url) { navigateToAbsoluteUrl(url, "", "", ""); } protected void navigateToAbsoluteUrl(String url, String domain, String login, String password) { if (isIE() && !isLocalFile(url)) { url = makeUniqueUrlToAvoidIECaching(url, System.nanoTime()); if (!domain.isEmpty()) domain += "\\"; } else { if (!domain.isEmpty()) domain += "%5C"; if (!login.isEmpty()) login += ":"; if (!password.isEmpty()) password += "@"; int idx1 = url.indexOf(": url = (idx1 < 3 ? 
"" : (url.substring(0, idx1 - 3) + ": + domain + login + password + (idx1 < 3 ? url : url.substring(idx1)); } SelenideLog log = SelenideLogger.beginStep("open", url); try { WebDriver webdriver = getAndCheckWebDriver(); webdriver.navigate().to(url); if (isIE() && !"".equals(login)) Selenide.switchTo().alert().authenticateUsing(new UserAndPassword(domain + login, password)); collectJavascriptErrors((JavascriptExecutor) webdriver); SelenideLogger.commitStep(log, PASS); } catch (WebDriverException e) { SelenideLogger.commitStep(log, e); e.addInfo("selenide.url", url); e.addInfo("selenide.baseUrl", baseUrl); throw e; } catch (RuntimeException e) { SelenideLogger.commitStep(log, e); throw e; } catch (Error e) { SelenideLogger.commitStep(log, e); throw e; } } protected void collectJavascriptErrors(JavascriptExecutor webdriver) { try { webdriver.executeScript( "if (!window._selenide_jsErrors) {\n" + " window._selenide_jsErrors = [];\n" + "}\n" + "if (!window.onerror) {\n" + " window.onerror = function (errorMessage, url, lineNumber) {\n" + " var message = errorMessage + ' at ' + url + ':' + lineNumber;\n" + " window._selenide_jsErrors.push(message);\n" + " return false;\n" + " };\n" + "}\n" ); } catch (UnsupportedOperationException cannotExecuteJsAgainstPlainTextPage) { log.warning(cannotExecuteJsAgainstPlainTextPage.toString()); } catch (WebDriverException cannotExecuteJs) { log.severe(cannotExecuteJs.toString()); } } protected String makeUniqueUrlToAvoidIECaching(String url, long unique) { if (url.contains("timestamp=")) { return url.replaceFirst("(.*)(timestamp=)(.*)([&#].*)", "$1$2" + unique + "$4") .replaceFirst("(.*)(timestamp=)(.*)$", "$1$2" + unique); } else { return url.contains("?") ? url + "&timestamp=" + unique : url + "?timestamp=" + unique; } } protected boolean isLocalFile(String url) { return url.startsWith("file:"); } public void back() { getWebDriver().navigate().back(); } public void forward() { getWebDriver().navigate().forward(); } }
package com.gamingmesh.jobs; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; import org.bukkit.Bukkit; import org.bukkit.World; import org.bukkit.entity.Player; import org.bukkit.permissions.PermissionAttachmentInfo; import com.gamingmesh.jobs.container.Job; import com.gamingmesh.jobs.container.JobsPlayer; import net.Zrips.CMILib.Logs.CMIDebug; public class PermissionManager { private final Map<String, Integer> permDelay = new HashMap<>(); private enum prm { // jobs_join_JOBNAME(remade("jobs.join.%JOBNAME%"), 60 * 1000), jobs_use(remade("jobs.use"), 2), jobs_paycreative(remade("jobs.paycreative"), 2), // jobs_boost_JOBNAME_money(remade("jobs.boost.%JOBNAME%.money"), 60 * 1000), // jobs_boost_JOBNAME_exp(remade("jobs.boost.%JOBNAME%.exp"), 60 * 1000), // jobs_boost_JOBNAME_points(remade("jobs.boost.%JOBNAME%.points"), 60 * 1000), // jobs_boost_JOBNAME_all(remade("jobs.boost.%JOBNAME%.all"), 60 * 1000), // jobs_leave_JOBNAME(remade("jobs.leave.%JOBNAME%"), 60 * 1000), jobs_boost_JOBNAME_money_AMOUNT(remade("jobs.boost.%JOBNAME%.money.%AMOUNT%"), 60), jobs_boost_JOBNAME_exp_AMOUNT(remade("jobs.boost.%JOBNAME%.exp.%AMOUNT%"), 60), jobs_boost_JOBNAME_points_AMOUNT(remade("jobs.boost.%JOBNAME%.points.%AMOUNT%"), 60), jobs_boost_JOBNAME_all_AMOUNT(remade("jobs.boost.%JOBNAME%.all.%AMOUNT%"), 60), jobs_boost_all_money_AMOUNT(remade("jobs.boost.all.money.%AMOUNT%"), 60), jobs_boost_all_exp_AMOUNT(remade("jobs.boost.all.exp.%AMOUNT%"), 60), jobs_boost_all_points_AMOUNT(remade("jobs.boost.all.points.%AMOUNT%"), 60), jobs_boost_all_all_AMOUNT(remade("jobs.boost.all.all.%AMOUNT%"), 60), jobs_spawner_AMOUNT(remade("jobs.nearspawner.%AMOUNT%"), 60), jobs_petpay_AMOUNT(remade("jobs.petpay.%AMOUNT%"), 60), jobs_maxfurnaces_AMOUNT(remade("jobs.maxfurnaces.%AMOUNT%"), 2), jobs_maxblastfurnaces_AMOUNT(remade("jobs.maxblastfurnaces.%AMOUNT%"), 2), jobs_maxsmokers_AMOUNT(remade("jobs.maxsmokers.%AMOUNT%"), 2), 
jobs_maxbrewingstands_AMOUNT(remade("jobs.maxbrewingstands.%AMOUNT%"), 2), jobs_world_WORLDNAME(remade("jobs.world.%WORLDNAME%"), 2); private int reload; private List<String> perms; prm(List<String> perms, int reload) { this.perms = perms; this.reload = reload * 1000; } public int getDelay() { return reload; } private static List<String> remade(String perm) { List<String> perms = new ArrayList<>(); for (Job oneJ : Jobs.getJobs()) { String t = perm; if (t.contains("%JOBNAME%")) t = t.replace("%JOBNAME%", oneJ.getName().toLowerCase()); t = t.replace("%AMOUNT%", ""); perms.add(t); } if (perm.contains("%WORLDNAME%")) for (World oneJ : Bukkit.getWorlds()) { perms.add(perm.replace("%WORLDNAME%", oneJ.getName().toLowerCase())); } return perms; } public List<String> getPerms() { return perms; } } private int getDelay(String perm) { return permDelay.getOrDefault(perm, 1); } public PermissionManager() { for (prm one : prm.values()) { for (String oneP : one.getPerms()) { permDelay.put(oneP, one.getDelay()); } } } private static Map<String, Boolean> getAll(Player player) { Map<String, Boolean> mine = new HashMap<>(); for (PermissionAttachmentInfo permission : player.getEffectivePermissions()) { if (permission.getPermission().startsWith("jobs.")) mine.put(permission.getPermission(), permission.getValue()); } return mine; } public double getMaxPermission(JobsPlayer jPlayer, String perm) { return getMaxPermission(jPlayer, perm, false, false); } public double getMaxPermission(JobsPlayer jPlayer, String perm, boolean force) { return getMaxPermission(jPlayer, perm, force, false); } public double getMaxPermission(JobsPlayer jPlayer, String perm, boolean force, boolean cumulative) { if (jPlayer == null) return 0D; Player player = jPlayer.getPlayer(); if (player == null) return 0D; perm = perm.toLowerCase(); if (!perm.endsWith(".")) perm += "."; Map<String, Boolean> permissions = jPlayer.getPermissionsCache(); if (force || permissions == null || getDelay(perm) + 
jPlayer.getLastPermissionUpdate() < System.currentTimeMillis()) { if (permissions == null) { permissions = getAll(player); } else { permissions.clear(); permissions.putAll(getAll(player)); } jPlayer.setPermissionsCache(permissions); jPlayer.setLastPermissionUpdate(System.currentTimeMillis()); } double amount = Double.NEGATIVE_INFINITY; for (Map.Entry<String, Boolean> permission : permissions.entrySet()) { if (!permission.getKey().startsWith(perm) || !permission.getValue()) continue; try { double temp = Double.parseDouble(permission.getKey().replace(perm, "")); if (cumulative) amount += temp; else if (temp > amount) amount = temp; } catch (NumberFormatException ex) { Jobs.getPluginLogger().log(java.util.logging.Level.WARNING, ex.getLocalizedMessage()); } } return amount == Double.NEGATIVE_INFINITY ? 0D : amount; } public boolean hasPermission(JobsPlayer jPlayer, String perm) { if (jPlayer == null) return false; Player player = jPlayer.getPlayer(); if (player == null) return false; Map<String, Boolean> permissions = jPlayer.getPermissionsCache(); if (permissions == null || getDelay(perm) + jPlayer.getLastPermissionUpdate() < System.currentTimeMillis()) { if (permissions == null) { permissions = new HashMap<>(); jPlayer.setPermissionsCache(permissions); } permissions.put(perm, player.hasPermission(perm)); jPlayer.setLastPermissionUpdate(System.currentTimeMillis()); } return permissions.getOrDefault(perm, false); } }
package com.github.joschi.jadconfig;

import com.github.joschi.jadconfig.converters.NoConverter;
import com.github.joschi.jadconfig.converters.StringConverter;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.lang.reflect.Field;
import java.lang.reflect.Method;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.LinkedList;
import java.util.List;

/**
 * The main class for JadConfig. It's responsible for parsing the configuration bean(s) that contain(s) the annotated
 * fields, use a {@link Repository} to read the raw configuration and assign the fields with the correct values.
 * <p/>
 * The configuration bean(s) you pass in the constructor are expected to have one or more {@link Parameter} annotations
 * on them.
 * <p/>
 * You can pass either a single configuration bean or an array of objects. In the case of an array JadConfig will
 * collect the {@link Parameter} annotations from all the objects passed in.
 *
 * @author jschalanda
 */
public class JadConfig {

    private static final Logger LOG = LoggerFactory.getLogger(JadConfig.class);

    private final LinkedList<ConverterFactory> converterFactories;
    private final List<Object> configurationBeans;
    private Repository repository;

    /**
     * Creates a new (empty) instance of JadConfig.
     * <p/>
     * Configuration beans will have to be added with {@link #addConfigurationBean(Object)} and a {@link Repository}
     * will have to be set with {@link #setRepository(Repository)}.
     *
     * @see #addConfigurationBean(Object)
     */
    public JadConfig() {
        configurationBeans = new ArrayList<Object>();
        converterFactories = new LinkedList<ConverterFactory>();
        converterFactories.add(new DefaultConverterFactory());
    }

    /**
     * Creates a new instance of JadConfig backed by the provided {@link Repository} and filling the provided
     * {@literal configurationBeans}.
     *
     * @param repository         A {@link Repository} for interfacing with the configuration data
     * @param configurationBeans One or more objects annotated with JadConfig annotations
     */
    public JadConfig(Repository repository, Object... configurationBeans) {
        this();
        this.configurationBeans.addAll(Arrays.asList(configurationBeans));
        this.repository = repository;
    }

    /**
     * Processes the configuration provided by the configured {@link Repository} and filling the provided configuration
     * beans.
     *
     * @throws RepositoryException If an error occurred while reading from the configured {@link Repository}
     * @throws ValidationException If any parameter couldn't be successfully validated
     */
    public void process() throws RepositoryException, ValidationException {
        LOG.info("Opening repository {}", repository);
        repository.open();

        for (Object configurationBean : configurationBeans) {
            LOG.info("Processing configuration bean {}", configurationBean);
            // Include inherited fields/methods so annotations on superclasses are honored.
            processClassFields(configurationBean, getAllFields(configurationBean.getClass()));
            invokeValidatorMethods(configurationBean, getAllMethods(configurationBean.getClass()));
        }
    }

    /**
     * Collects the declared fields of {@code klass} and all of its superclasses.
     */
    private Field[] getAllFields(Class<?> klass) {
        List<Field> fields = new ArrayList<Field>();

        for (Class<?> c = klass; c != null; c = c.getSuperclass()) {
            fields.addAll(Arrays.asList(c.getDeclaredFields()));
        }

        return fields.toArray(new Field[fields.size()]);
    }

    /**
     * Collects the declared methods of {@code klass} and all of its superclasses.
     */
    private Method[] getAllMethods(Class<?> klass) {
        List<Method> methods = new ArrayList<Method>();

        for (Class<?> c = klass; c != null; c = c.getSuperclass()) {
            methods.addAll(Arrays.asList(c.getDeclaredMethods()));
        }

        return methods.toArray(new Method[methods.size()]);
    }

    /**
     * Reads, validates, converts and assigns every {@link Parameter}-annotated field in {@code fields}.
     *
     * @throws ValidationException If a required parameter is missing or a validator rejects a value
     */
    private void processClassFields(Object configurationBean, Field[] fields) throws ValidationException {
        for (Field field : fields) {
            Parameter parameter = field.getAnnotation(Parameter.class);

            if (parameter != null) {
                LOG.debug("Processing field {}", parameter);

                Object fieldValue = getFieldValue(field, configurationBean);
                String parameterName = parameter.value();
                String parameterValue = repository.read(parameterName);

                // A parameter is only an error when it is required AND neither the
                // repository nor the bean's default field value supplies it.
                if (parameterValue == null && fieldValue == null && parameter.required()) {
                    LOG.warn("Required parameter {} not found", parameterName);
                    throw new ParameterException("Required parameter \"" + parameterName + "\" not found.");
                }

                if (parameterValue != null) {
                    if (parameter.trim()) {
                        LOG.debug("Trimmed parameter value {}", parameterName);
                        parameterValue = Strings.trim(parameterValue);
                    }

                    LOG.debug("Validating parameter {}", parameterName);
                    validateParameter(parameter.validator(), parameterName, parameterValue);

                    // BUG FIX: the format string previously had one placeholder for two
                    // arguments, so the value was silently dropped from the log output.
                    LOG.debug("Converting parameter value {}: {}", parameterName, parameterValue);
                    fieldValue = convertStringValue(field.getType(), parameter.converter(), parameterValue);
                }

                // Log the value actually assigned (fieldValue), not the raw string.
                LOG.debug("Setting parameter {} to {}", parameterName, fieldValue);
                try {
                    field.set(configurationBean, fieldValue);
                } catch (Exception e) {
                    throw new ParameterException("Couldn't set field " + field.getName(), e);
                }
            }
        }
    }

    /**
     * Converts a raw string value into the field's type using the resolved {@link Converter}.
     */
    private Object convertStringValue(Class<?> fieldType, Class<? extends Converter<?>> converterClass, String stringValue) {
        Converter converter = getConverter(fieldType, converterClass);
        LOG.debug("Loaded converter class for type {}: {}", fieldType, converter);

        return converter.convertFrom(stringValue);
    }

    /**
     * Reads a field's current value, making it accessible first.
     *
     * @throws ParameterException if the field cannot be read
     */
    private Object getFieldValue(Field field, Object bean) {
        field.setAccessible(true);
        try {
            return field.get(bean);
        } catch (IllegalAccessException e) {
            throw new ParameterException("Couldn't obtain value of field " + field.getName(), e);
        }
    }

    /**
     * Resolves and instantiates the converter for a field: explicit annotation converter first,
     * then the registered {@link ConverterFactory} chain, finally {@link StringConverter}.
     *
     * @throws ParameterException if the converter cannot be instantiated
     */
    private Converter getConverter(Class<?> fieldType, Class<? extends Converter<?>> converterClass) {
        LOG.debug("Trying to find converter class {} for type {}", converterClass, fieldType);
        Class<? extends Converter<?>> clazz = converterClass;

        if (clazz == null || clazz == NoConverter.class) {
            clazz = findConverter(fieldType);
        }

        // Fallback to StringConverter
        if (clazz == null) {
            clazz = StringConverter.class;
            LOG.debug("Using fallback converter: {}", clazz);
        }

        try {
            // Class.newInstance() is deprecated; the no-arg constructor path below is equivalent.
            return clazz.getDeclaredConstructor().newInstance();
        } catch (Exception e) {
            throw new ParameterException("Couldn't initialize converter class " + clazz.getCanonicalName(), e);
        }
    }

    /**
     * Instantiates the validator and validates a single parameter value.
     *
     * @throws ValidationException if the value is rejected
     * @throws ParameterException  if the validator cannot be instantiated
     */
    private void validateParameter(Class<? extends Validator> validatorClass, String name, String value) throws ValidationException {
        Validator validator;
        LOG.debug("Validating parameter {} with value {}", name, value);

        try {
            validator = validatorClass.getDeclaredConstructor().newInstance();
        } catch (Exception e) {
            throw new ParameterException("Couldn't initialize validator " + validatorClass.getCanonicalName(), e);
        }

        validator.validate(name, value);
    }

    /**
     * Invokes every {@link ValidatorMethod}-annotated method on the bean.
     *
     * @throws ValidationException if a validator method throws
     */
    private void invokeValidatorMethods(Object configurationBean, Method[] methods) throws ValidationException {
        for (Method method : methods) {
            if (method.isAnnotationPresent(ValidatorMethod.class)) {
                LOG.debug("Invoking validator method {} in {}", method, configurationBean);
                try {
                    method.invoke(configurationBean);
                } catch (Exception e) {
                    throw new ValidationException("Couldn't run validator method " + method.getName(), e);
                }
            }
        }
    }

    /**
     * Asks each registered {@link ConverterFactory} (most recently added first) for a converter.
     *
     * @return the converter class, or {@code null} when no factory can handle {@code clazz}
     */
    private <T> Class<? extends Converter<T>> findConverter(Class<T> clazz) {
        for (ConverterFactory factory : converterFactories) {
            Class<? extends Converter<T>> result = factory.getConverter(clazz);

            if (result != null) {
                return result;
            }
        }

        return null;
    }

    /**
     * Adds a {@link ConverterFactory} for processing additional types
     *
     * @param converterFactory The {@link ConverterFactory} to be added
     */
    public void addConverterFactory(ConverterFactory converterFactory) {
        converterFactories.addFirst(converterFactory);
        LOG.info("Added converter factory {}", converterFactory);
    }

    /**
     * Adds a configuration bean annotated with JadConfig annotations.
     *
     * @param configurationBean An object annotated with JadConfig annotations
     */
    public void addConfigurationBean(Object configurationBean) {
        configurationBeans.add(configurationBean);
        LOG.info("Added configuration bean {}", configurationBean);
    }

    /**
     * Saves all configuration parameters to the configured {@link Repository}.
     *
     * @throws RepositoryException If an error occurred while writing to or saving the configured {@link Repository}
     */
    public void save() throws RepositoryException {
        LOG.info("Saving changed configuration parameters");
        for (Object configurationBean : configurationBeans) {
            LOG.debug("Checking declared fields of {}", configurationBean);

            // BUG FIX: previously only getDeclaredFields() was scanned here, so
            // @Parameter fields inherited from superclasses were read by process()
            // but silently skipped on save(). Use the same traversal as process().
            for (Field field : getAllFields(configurationBean.getClass())) {
                Parameter parameter = field.getAnnotation(Parameter.class);
                LOG.debug("Checking declared field {} of {}", parameter, configurationBean);

                if (parameter != null) {
                    Object fieldValue = getFieldValue(field, configurationBean);
                    LOG.debug("Retrieved field value {} = {}", field, fieldValue);

                    if (fieldValue != null) {
                        String stringValue = convertFieldValue(field.getType(), parameter.converter(), fieldValue);
                        LOG.debug("Writing {} = {} to repository", parameter.value(), stringValue);
                        repository.write(parameter.value(), stringValue);
                    }
                }
            }
        }

        LOG.debug("Saving changes to repository {}", repository);
        repository.save();
    }

    /**
     * Converts a field value back into its string representation for the repository.
     */
    private String convertFieldValue(Class<?> fieldType, Class<? extends Converter<?>> converterClass, Object fieldValue) {
        Converter converter = getConverter(fieldType, converterClass);
        LOG.debug("Converting {} to type {} using converter {}", new Object[]{fieldValue, fieldType, converter});

        return converter.convertTo(fieldValue);
    }

    /**
     * Set the {@link Repository} to load configuration data from.
     *
     * @param repository A {@link Repository} instance
     */
    public void setRepository(Repository repository) {
        this.repository = repository;
    }
}
package com.github.onsdigital.perkin.api;

import com.github.davidcarboni.restolino.framework.Api;
import com.github.onsdigital.perkin.helpers.Json;
import com.github.onsdigital.perkin.json.FtpInfo;
import com.github.onsdigital.perkin.storage.FtpPublisher;

import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import javax.ws.rs.GET;
import java.io.IOException;

/**
 * REST endpoint that lists the files currently on the FTP server.
 */
@Api
public class List {

    private final FtpPublisher publisher = new FtpPublisher();

    /**
     * Fetch and return the current FTP listing.
     *
     * @throws IOException if the FTP listing fails
     */
    @GET
    public FtpInfo get(HttpServletRequest request, HttpServletResponse response) throws IOException {
        final FtpInfo info = publisher.list();
        System.out.println("ftp >>>>>>>> list: " + Json.format(info));
        return info;
    }
}
package com.goodworkalan.mix.builder;

import java.io.File;
import java.util.ArrayList;

import com.goodworkalan.comfort.io.Find;

/**
 * A collection of {@link Find} instances, each paired with a single directory
 * to search using that <code>Find</code> instance. Criteria-mutating methods
 * always apply to the most recently added directory.
 *
 * @author Alan Gutierrez
 */
public class FindList extends ArrayList<FindList.Entry> {
    /** Serial version id. */
    private static final long serialVersionUID = 1L;

    /**
     * A directory paired with the file-matching criteria used to search it.
     */
    public static final class Entry {
        /** The directory to search. */
        private final File directory;

        /** The criteria selecting files within the directory. */
        private final Find criteria = new Find();

        /**
         * Create an entry for the given directory with empty criteria.
         *
         * @param directory The directory to search.
         */
        public Entry(File directory) {
            this.directory = directory;
        }

        /**
         * Get the directory to search.
         *
         * @return The directory.
         */
        public File getDirectory() {
            return directory;
        }

        /**
         * Get the criteria selecting files within the directory.
         *
         * @return The criteria for included files.
         */
        public Find getFind() {
            return criteria;
        }
    }

    /**
     * Append the given directory, paired with a fresh, empty {@link Find}.
     *
     * @param directory The directory to add.
     */
    public void addDirectory(File directory) {
        add(new Entry(directory));
    }

    /** The entry most recently appended to this list. */
    private Entry lastEntry() {
        return get(size() - 1);
    }

    /**
     * Add an include pattern to the criteria of the last directory added.
     *
     * @param include The include pattern.
     */
    public void addInclude(String include) {
        lastEntry().getFind().include(include);
    }

    /**
     * Add an exclude pattern to the criteria of the last directory added.
     *
     * @param exclude The exclude pattern.
     */
    public void addExclude(String exclude) {
        lastEntry().getFind().exclude(exclude);
    }

    /**
     * Restrict the criteria of the last directory added to <em>normal</em>
     * files only, i.e. files for which <code>File.isFile</code> is true: not a
     * directory and satisfying any additional system-specific criteria.
     */
    public void filesOnly() {
        lastEntry().getFind().filesOnly();
    }
}
package com.google.sps.data;

import java.util.List;
import java.util.LinkedList;
import java.util.Arrays;
import java.io.Serializable;
import java.net.URL;
import java.util.logging.Logger;
import com.google.maps.GeoApiContext;
import com.google.maps.PlacesApi;
import com.google.maps.NearbySearchRequest;
import com.google.maps.model.LatLng;
import com.google.maps.model.LocationType;
import com.google.maps.model.Photo;
import com.google.maps.model.PlaceType;
import com.google.maps.model.RankBy;
import com.google.maps.model.Geometry;
import com.google.maps.model.PlacesSearchResponse;
import com.google.maps.model.PlacesSearchResult;
import java.util.logging.Logger;
import java.util.Iterator;
import com.google.appengine.api.datastore.DatastoreService;
import com.google.appengine.api.datastore.DatastoreServiceFactory;
import com.google.appengine.api.datastore.Entity;
import com.google.appengine.api.datastore.PreparedQuery;
import com.google.appengine.api.datastore.Query;
import com.google.appengine.api.datastore.Query.SortDirection;
import com.google.maps.TextSearchRequest;
import com.google.api.services.customsearch.model.Search;
import com.google.api.services.customsearch.model.Result;
import com.google.api.services.customsearch.Customsearch;
import com.google.api.services.customsearch.CustomsearchRequestInitializer;
import com.google.api.client.googleapis.javanet.GoogleNetHttpTransport;
import com.google.api.client.json.jackson2.JacksonFactory;
import java.security.GeneralSecurityException;
import java.io.IOException;
import java.util.Map;
import java.lang.Integer;
import com.google.maps.errors.ApiException;
import com.google.maps.model.PlacesSearchResult;

/**
 * Fetches nearby businesses from the Places API and filters out "big"
 * businesses, using a Datastore table ("BigBusinesses"), LinkedIn follower
 * counts (via Custom Search), and the number of similar locations in the area.
 */
public class BusinessesService {
    // Working list of businesses; mutated in place by the filtering methods.
    private List<Listing> allBusinesses;
    // API key; redacted in source control.
    private final String KEY = "REDACTED";
    private final static Logger LOGGER = Logger.getLogger(BusinessesService.class.getName());
    // Places API allows the initial request plus two next-page-token requests.
    private final int ALLOWED_SEARCH_REQUESTS = 3;
    // State shared across the check* methods while iterating allBusinesses.
    private Listing currentBusiness;
    private PlacesSearchResult[] similarBusinessesInTheArea;
    private Iterator<Listing> businesses;
    private LatLng latLng;
    // LinkedIn follower threshold above which a business is considered "big".
    private final int minFollowers = 50000;
    // Delimiters used to extract the follower count from a LinkedIn
    // og:description of the form "... | 12,345 followers ...".
    private final String startIndex = "| ";
    private final String endIndex = "followers";

    /** Create a new Businesses instance
     * @param allBusinesses businesses from SmallCityService
     **/
    public BusinessesService(List<Listing> allBusinesses) {
        this.allBusinesses = allBusinesses;
    }

    /** Query all known big businesses from Datastore. */
    public PreparedQuery getBigBusinessFromDatabase(){
        DatastoreService datastore = DatastoreServiceFactory.getDatastoreService();
        Query query = new Query("BigBusinesses");
        PreparedQuery queryOfDatabase = datastore.prepare(query);
        return queryOfDatabase;
    }

    /**
     * Remove every business whose name matches a "BigBusinesses" entity.
     * NOTE(review): the Datastore query is re-iterated for every business, and
     * if two entities share the same "Business" name, Iterator.remove() is
     * called twice for one element, which throws IllegalStateException —
     * confirm entity names are unique or break after the first match.
     */
    public List<Listing> removeBigBusinessesFromResults(PreparedQuery queryOfDatabase){
        businesses = allBusinesses.iterator();
        Entity entity;
        String businessName;
        while (businesses.hasNext()) {
            Listing currentBusiness = businesses.next();
            Iterator<Entity> bigBusinessEntities = queryOfDatabase.asIterator();
            while(bigBusinessEntities.hasNext()) {
                businessName = (String) bigBusinessEntities.next().getProperty("Business");
                if(businessName.equals(currentBusiness.getName())) {
                    businesses.remove();
                }
            }
        }
        return allBusinesses;
    }

    /**
     * Populate allBusinesses with stores near mapLocation, ranked by distance.
     * Follows up to two next-page tokens (Places returns 20 results per page).
     */
    public List<Listing> getBusinessesFromPlacesApi(MapLocation mapLocation) {
        latLng = new LatLng(mapLocation.lat, mapLocation.lng);
        final GeoApiContext context = new GeoApiContext.Builder()
            .apiKey(KEY)
            .build();
        NearbySearchRequest request = PlacesApi.nearbySearchQuery(context, latLng);
        try {
            PlacesSearchResponse response = request.type(PlaceType.STORE)
                .rankby(RankBy.DISTANCE)
                .await();
            for (int i=0; i<ALLOWED_SEARCH_REQUESTS; i++) {
                for(PlacesSearchResult place : response.results) {
                    addListingToBusinesses(place);
                }
                //Maximum of 2 next token requests allowed
                if (i < 2) {
                    Thread.sleep(2000); // Required delay before next API request
                    response = PlacesApi
                        .nearbySearchNextPage(context, response.nextPageToken).await();
                }
            }
        } catch(Exception e) {
            // NOTE(review): broad catch also swallows InterruptedException
            // without re-interrupting the thread — confirm intended.
            LOGGER.warning(e.getMessage());
        }
        return allBusinesses;
    }

    /** Convert one Places result into a Listing and append it. */
    private void addListingToBusinesses(PlacesSearchResult place) {
        String name = place.name;
        String formattedAddress = place.vicinity;
        Geometry geometry = place.geometry;
        MapLocation placeLocation = new MapLocation(geometry.location.lat,
            geometry.location.lng);
        double rating = place.rating;
        Photo photos[] = place.photos;
        String types[] = place.types;
        allBusinesses.add(new Listing(name, formattedAddress, placeLocation,
            rating, photos, types));
    }

    /**
     * For every business, run a text search within a 50 km radius; when more
     * than one location shares the name, escalate to the LinkedIn check.
     * Side effect: may remove entries from allBusinesses via the shared
     * iterator (see addBigBusinessToDatabase).
     */
    public void checkNumberOfLocationsOfBusiness() {
        GeoApiContext context = new GeoApiContext.Builder()
            .apiKey(KEY)
            .build();
        businesses = allBusinesses.iterator();
        try {
            while (businesses.hasNext()) {
                currentBusiness = businesses.next();
                TextSearchRequest request = new TextSearchRequest(context)
                    .query(currentBusiness.getName())
                    .location(latLng)
                    .radius(50000);
                similarBusinessesInTheArea = request.await().results;
                if (similarBusinessesInTheArea.length > 1){
                    checkBusinessThroughLinkedin(currentBusiness.getName());
                }
            }
        } catch(GeneralSecurityException e) {
            LOGGER.warning(e.getMessage());
        } catch(IOException e) {
            LOGGER.warning(e.getMessage());
        } catch(ApiException e) {
            LOGGER.warning(e.getMessage());
        } catch(InterruptedException e) {
            LOGGER.warning(e.getMessage());
        }
    }

    /**
     * Look up the business on LinkedIn via Custom Search and parse the
     * follower count out of the og:description metatag. Above minFollowers
     * the business is recorded as big; otherwise fall back to counting
     * similar locations in the area.
     */
    private void checkBusinessThroughLinkedin(String currentBusinessName)
        throws GeneralSecurityException, IOException {
        // Custom Search engine id; redacted in source control.
        String cx = "REDACTED";
        Customsearch cs = new Customsearch.Builder(
                GoogleNetHttpTransport.newTrustedTransport(),
                JacksonFactory.getDefaultInstance(), null)
            .setApplicationName("linkedinSearch")
            .setGoogleClientRequestInitializer(
                new CustomsearchRequestInitializer(KEY))
            .build();
        Customsearch.Cse.List list = cs.cse().list(currentBusinessName).setCx(cx);
        List<Result> searchJsonResults = list.execute().getItems();
        String[] numberOfFollowers; // NOTE(review): unused local — candidate for removal.
        int companyFollowers = 0;
        if (searchJsonResults!=null && searchJsonResults.size() != 0) {
            Result linkedinBusiness = searchJsonResults.get(0);
            String businessDescription = (String) linkedinBusiness.getPagemap()
                .get("metatags")
                .get(0)
                .get("og:description");
            if(businessDescription.indexOf(startIndex) != -1
                && businessDescription.indexOf(endIndex) != -1){
                // Extract the digits between "| " and " followers", drop commas.
                String followers = businessDescription.substring(
                    businessDescription.indexOf(startIndex) + 2,
                    businessDescription.indexOf(endIndex) - 1);
                companyFollowers = Integer.parseInt(followers.replaceAll(",", ""));
            }
            if (companyFollowers > minFollowers) {
                addBigBusinessToDatabase();
            } else {
                checkNumberOfSimilarBusinessesInTheArea(currentBusiness.getName());
            }
        }
    }

    /**
     * Count same-named businesses at different addresses; at 10 or more
     * matches the business is recorded as big.
     */
    private void checkNumberOfSimilarBusinessesInTheArea(String businessName){
        int countNumberOfMatchingBusiness = 0;
        int i = 0;
        // NOTE(review): leftover debug output — candidate for removal.
        System.out.println(currentBusiness.getFormattedAddress());
        System.out.println(similarBusinessesInTheArea[i].name);
        System.out.println(similarBusinessesInTheArea[i]);
        while (i < similarBusinessesInTheArea.length && countNumberOfMatchingBusiness < 10) {
            if(similarBusinessesInTheArea[i].vicinity != null){
                if (similarBusinessesInTheArea[i].name.contains(businessName)
                    && !(similarBusinessesInTheArea[i].vicinity.equals(currentBusiness.getFormattedAddress()))) {
                    countNumberOfMatchingBusiness++;
                }
            }
            i++;
        }
        if (countNumberOfMatchingBusiness >= 10) {
            addBigBusinessToDatabase();
        }
    }

    /**
     * Remove the current business from the working list (via the shared
     * iterator) and persist it as a "BigBusinesses" entity.
     * NOTE(review): the "Photos" property name is declared but never set.
     */
    private void addBigBusinessToDatabase(){
        businesses.remove();
        String title = "Business";
        String businessTypes = "BusinessTypes";
        String address = "Address";
        String rating = "Rating";
        String photos = "Photos";
        Entity businessEntity = new Entity("BigBusinesses");
        DatastoreService datastore = DatastoreServiceFactory.getDatastoreService();
        businessEntity.setProperty(title, currentBusiness.getName());
        businessEntity.setProperty(address, currentBusiness.getFormattedAddress());
        businessEntity.setProperty(rating, currentBusiness.getRating());
        businessEntity.setProperty(businessTypes,
            Arrays.asList(currentBusiness.getBusinessTypes()));
        datastore.put(businessEntity);
    }
}
package com.jaeksoft.searchlib.cache;

import java.io.PrintWriter;
import java.text.NumberFormat;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.TreeMap;

import com.jaeksoft.searchlib.SearchLibException;
import com.jaeksoft.searchlib.util.ReadWriteLock;
import com.jaeksoft.searchlib.util.SimpleLock;
import com.jaeksoft.searchlib.util.StringUtils;

/**
 * An LRU cache built from a bounded LinkedHashMap (the eviction queue) plus a
 * TreeMap of keys, guarded by a read/write lock. A maxSize of 0 disables the
 * cache entirely (queue == null). The keyThread map lets callers serialize
 * concurrent loads of the same key (lockKeyThread / unlockKeyThred).
 */
public abstract class LRUCache<K extends Comparable<K>, V> {

    final private ReadWriteLock rwl = new ReadWriteLock();

    /**
     * Insertion-ordered map capped at maxSize. "Promotion" is emulated by
     * removing and re-inserting the entry, which moves it to the tail.
     */
    private class EvictionQueue extends LinkedHashMap<K, V> {

        private static final long serialVersionUID = -2384951296369306995L;

        // Serializes promote() calls, which may run under the outer read lock.
        protected final SimpleLock lock = new SimpleLock();

        protected EvictionQueue(int maxSize) {
            super(maxSize);
        }

        @Override
        protected boolean removeEldestEntry(Map.Entry<K, V> eldest) {
            if (size() <= maxSize)
                return false;
            // Keep the key index in sync with the evicted entry.
            tree.remove(eldest.getKey());
            evictions++;
            return true;
        }

        /** Move the entry for key to the most-recently-used position. */
        final private V promote(K key) {
            lock.rl.lock();
            try {
                V value = queue.remove(key);
                queue.put(key, value);
                return value;
            } finally {
                lock.rl.unlock();
            }
        }
    }

    // Per-key owner threads for the lockKeyThread protocol.
    private final TreeMap<K, Thread> keyThread;
    // Key index; presence here means presence in the queue.
    private final TreeMap<K, K> tree;
    // null when the cache is disabled (maxSize == 0).
    private EvictionQueue queue;
    private final String name;
    // NOTE(review): these counters are volatile but incremented non-atomically,
    // sometimes under the read lock (getAndPromote), so counts may drift under
    // concurrency — acceptable for statistics, but confirm.
    private volatile int size;
    private volatile int maxSize;
    private volatile long evictions;
    private volatile long lookups;
    private volatile long hits;
    private volatile long inserts;

    /**
     * @param name    display name used in reporting
     * @param maxSize maximum number of entries; 0 disables the cache
     */
    protected LRUCache(String name, int maxSize) {
        this.name = name;
        queue = (maxSize == 0) ? null : new EvictionQueue(maxSize);
        tree = new TreeMap<K, K>();
        keyThread = new TreeMap<K, Thread>();
        this.maxSize = maxSize;
        this.evictions = 0;
        this.lookups = 0;
        this.inserts = 0;
        this.hits = 0;
        this.size = 0;
    }

    private void setMaxSize_noLock(int newMaxSize) {
        if (newMaxSize == maxSize)
            return;
        if (newMaxSize == 0) {
            // Disabling the cache: drop all content.
            clear_nolock();
            queue = null;
        } else {
            if (queue == null || newMaxSize < maxSize)
                // NOTE(review): suspected bug — constructs the new queue with the
                // OLD maxSize instead of newMaxSize; since maxSize is only updated
                // below, the capacity hint is stale. Confirm newMaxSize was meant.
                queue = new EvictionQueue(maxSize);
        }
        maxSize = newMaxSize;
    }

    /** Resize (or disable, with 0) the cache under the write lock. */
    public void setMaxSize(int newMaxSize) {
        rwl.w.lock();
        try {
            setMaxSize_noLock(newMaxSize);
        } finally {
            rwl.w.unlock();
        }
    }

    /**
     * Claim exclusive load rights for key, or wait for the owning thread to
     * finish and retry, up to `attempt` retries.
     *
     * @throws SearchLibException when interrupted or when retries run out
     */
    final protected void lockKeyThread(K key, int attempt)
            throws SearchLibException {
        Thread thread = null;
        rwl.w.lock();
        try {
            thread = keyThread.get(key);
            if (thread == null) {
                keyThread.put(key, Thread.currentThread());
                return;
            }
        } finally {
            rwl.w.unlock();
        }
        try {
            synchronized (thread) {
                thread.wait();
            }
        } catch (InterruptedException e) {
            throw new SearchLibException(e);
        }
        if (attempt == 0)
            throw new SearchLibException("Cache lock failed");
        lockKeyThread(key, attempt - 1);
    }

    /**
     * Release the per-key claim and wake waiters.
     * NOTE(review): method name has a typo ("Thred") but is protected API —
     * renaming would break subclasses; left as is.
     */
    final protected void unlockKeyThred(K key) {
        rwl.w.lock();
        try {
            Thread thread = keyThread.remove(key);
            if (thread != null)
                synchronized (thread) {
                    thread.notify();
                }
            Thread ct = Thread.currentThread();
            if (ct != thread)
                synchronized (ct) {
                    ct.notify();
                }
        } finally {
            rwl.w.unlock();
        }
    }

    /**
     * Look up key; on a hit, promote it to most-recently-used.
     *
     * @return the cached value, or null on miss or when the cache is disabled
     */
    final protected V getAndPromote(K key) {
        if (queue == null)
            return null;
        rwl.r.lock();
        try {
            lookups++;
            K key2 = tree.get(key);
            if (key2 == null)
                return null;
            hits++;
            return queue.promote(key2);
        } finally {
            rwl.r.unlock();
        }
    }

    /**
     * Remove key from the cache.
     * NOTE(review): counts as an eviction even for explicit removals, and does
     * not update `size` — confirm both are intended.
     */
    final public void remove(K key) {
        if (queue == null)
            return;
        rwl.w.lock();
        try {
            K key2 = tree.remove(key);
            queue.remove(key2);
            evictions++;
        } finally {
            rwl.w.unlock();
        }
    }

    /** Insert or replace a value; may trigger eviction of the eldest entry. */
    final protected void put(K key, V value) {
        if (queue == null)
            return;
        rwl.w.lock();
        try {
            inserts++;
            queue.put(key, value);
            tree.put(key, key);
            size = queue.size();
        } finally {
            rwl.w.unlock();
        }
    }

    final private void clear_nolock() {
        if (queue == null)
            return;
        queue.clear();
        tree.clear();
        size = queue.size();
    }

    /** Empty the cache under the write lock. */
    final public void clear() {
        rwl.w.lock();
        try {
            if (queue == null)
                return;
            clear_nolock();
        } finally {
            rwl.w.unlock();
        }
    }

    @Override
    final public String toString() {
        rwl.r.lock();
        try {
            return StringUtils
                    .fastConcat(this.getClass().getName(), " - Size: ",
                            Integer.toString(size), " - MaxSize: ",
                            Integer.toString(maxSize), " - Lookup: ",
                            Long.toString(lookups), " HitRatio: ",
                            getHitRatioPercent());
        } finally {
            rwl.r.unlock();
        }
    }

    /** Write a one-element XML statistics report for this cache. */
    final public void xmlInfo(PrintWriter writer) {
        rwl.r.lock();
        try {
            float hitRatio = getHitRatio();
            writer.println("<cache class=\"" + this.getClass().getName()
                    + "\" maxSize=\"" + this.maxSize + "\" size=\"" + size
                    + "\" hitRatio=\"" + hitRatio + "\" lookups=\"" + lookups
                    + "\" hits=\"" + hits + "\" inserts=\"" + inserts
                    + "\" evictions=\"" + evictions + "\">");
            writer.println("</cache>");
        } finally {
            rwl.r.unlock();
        }
    }

    final public String getName() {
        rwl.r.lock();
        try {
            return name;
        } finally {
            rwl.r.unlock();
        }
    }

    final public int getSize() {
        rwl.r.lock();
        try {
            return size;
        } finally {
            rwl.r.unlock();
        }
    }

    final public int getMaxSize() {
        rwl.r.lock();
        try {
            return maxSize;
        } finally {
            rwl.r.unlock();
        }
    }

    final public long getEvictions() {
        rwl.r.lock();
        try {
            return evictions;
        } finally {
            rwl.r.unlock();
        }
    }

    final public long getLookups() {
        rwl.r.lock();
        try {
            return lookups;
        } finally {
            rwl.r.unlock();
        }
    }

    final public long getHits() {
        rwl.r.lock();
        try {
            return hits;
        } finally {
            rwl.r.unlock();
        }
    }

    final public long getInserts() {
        rwl.r.lock();
        try {
            return inserts;
        } finally {
            rwl.r.unlock();
        }
    }

    /** Hit ratio in [0, 1]; 0 when there were no lookups or hits yet. */
    final public float getHitRatio() {
        rwl.r.lock();
        try {
            if (hits > 0 && lookups > 0)
                return (float) (((float) hits) / ((float) lookups));
            else
                return 0;
        } finally {
            rwl.r.unlock();
        }
    }

    /** Hit ratio formatted as a locale-dependent percentage string. */
    final public String getHitRatioPercent() {
        return NumberFormat.getPercentInstance().format(getHitRatio());
    }
}
package com.jillesvangurp.iterables;

import java.util.Iterator;
import java.util.NoSuchElementException;

/**
 * Collection of static methods to make working with the various iterables a bit nicer.
 */
public class Iterables {

    /**
     * Wraps an iterator with an iterable so that you may use it in a for loop.
     * Note: the returned iterable is single-use — it always hands back the
     * same underlying iterator.
     * @param it any iterator
     * @return an iterable that may be used with a for loop
     */
    public static <T> Iterable<T> toIterable(final Iterator<T> it) {
        return new Iterable<T>() {
            @Override
            public Iterator<T> iterator() {
                return it;
            }
        };
    }

    /**
     * @param it source iterable
     * @param filter predicate applied to each element
     * @return A filtering iterable that applies the the provided filter.
     */
    public static <S> Iterable<S> filter(Iterable<S> it, Filter<S> filter) {
        return new FilteringIterable<S>(it, filter);
    }

    /**
     * @param it source iterable
     * @param from zero-based index of the first element kept (inclusive)
     * @param to zero-based index of the last element kept (inclusive)
     * @return elements between from and to in the wrapped iterator.
     */
    public static <S> Iterable<S> filterRange(Iterable<S> it, final long from, final long to) {
        return filter(it, new Filter<S>() {
            long count=0;
            @Override
            public boolean passes(S o) {
                long current = count++;
                return current >=from && current <=to;
            }
        });
    }

    /**
     * @param it source iterable
     * @param to zero-based index of the last element kept (inclusive)
     * @return the elements in the wrapped iterator until element number to
     */
    public static <S> Iterable<S> head(Iterable<S> it, final long to) {
        return filter(it, new Filter<S>() {
            long count=0;
            @Override
            public boolean passes(S o) {
                if (count++ <=to) {
                    return true;
                } else {
                    // Signals the filtering iterable that no later element can
                    // pass, allowing it to stop consuming the source early.
                    throw new PermanentlyFailToPassException();
                }
            }
        });
    }

    /**
     * @param it source iterable
     * @param from zero-based index of the first element kept (inclusive)
     * @return iterable that iterates the elementents from the 'from'th element in the wrapped iterator.
     */
    public static <S> Iterable<S> from(Iterable<S> it, final long from) {
        return filter(it, new Filter<S>() {
            long count=0;
            @Override
            public boolean passes(S o) {
                long current = count++;
                return current >=from;
            }
        });
    }

    /**
     * Implement a map operation that applies a processor to each element in the wrapped iterator and iterates over the resulting output.
     * @param it source iterable
     * @param processor transformation applied lazily to each element
     * @return iteratable over the output of the processor on the input iterator
     */
    public static <I,O> Iterable<O> map(Iterable<I> it, Processor<I,O> processor) {
        return new ProcessingIterable<I, O>(it.iterator(), processor);
    }

    /**
     * Compose two or more processors into one.
     * @param first transform into intermediate type
     * @param last transform intermediate type into output type
     * @param extraSteps optional, varargs with extra transformation steps on the output type
     * @return a processor that composes the argumentst
     */
    @SafeVarargs
    public static <I,S,O> Processor<I,O> compose(final Processor<I,S> first, final Processor<S,O> last, final Processor<O,O>...extraSteps) {
        return new Processor<I, O>() {
            @Override
            public O process(I input) {
                O result = last.process(first.process(input));
                // Extra steps are applied in declaration order.
                for (Processor<O, O> processor : extraSteps) {
                    result = processor.process(result);
                }
                return result;
            }
        };
    }

    /**
     * Process iterable concurrently using the processor. IMPORTANT, you must close the iterable (it implements Closeable) after use otherwise, the process
     * may never exit.
     * @param input source iterable
     * @param processor transformation applied to each block of elements
     * @param blockSize number of elements handed to a worker at a time
     * @param threadPoolSize number of worker threads
     * @param queueCapacity capacity of the internal work queue
     * @return a concurrent processing iterable that will process the input iterable concurrently and offer the output as another iterable.
     */
    public static <Input,Output> ConcurrentProcessingIterable<Input, Output> processConcurrently(Iterable<Input> input, Processor<Input,Output> processor, int blockSize, int threadPoolSize, int queueCapacity) {
        return new ConcurrentProcessingIterable<Input,Output>(input, processor, blockSize, threadPoolSize, queueCapacity);
    }

    /**
     * Given a number of iterables, construct a iterable that iterates all of the iterables.
     * Null iterables in the sequence are skipped. The returned iterable is
     * single-use (it wraps one stateful iterator).
     * @param iterables the iterables to concatenate, in order
     * @return an iterable that can provide a single iterator for all the elements of the iterables.
     */
    public static <V> Iterable<V> compose(final Iterable<Iterable<V>> iterables) {
        return toIterable(new Iterator<V>() {
            Iterator<Iterable<V>> it=iterables.iterator();
            // Iterator over the iterable currently being drained; null before first use.
            Iterator<V> current = null;
            // Lookahead element fetched by hasNext(), consumed by next().
            V next = null;

            @Override
            public boolean hasNext() {
                if(next != null) {
                    return true;
                } else {
                    // Advance past exhausted (or null) iterables until one with
                    // elements is found or the outer iterator runs out.
                    if((current == null || !current.hasNext()) && it.hasNext()) {
                        while(it.hasNext() && (current == null || !current.hasNext())) {
                            Iterable<V> nextIt = it.next();
                            if(nextIt != null) {
                                current = nextIt.iterator();
                            }
                        }
                    }
                    if(current !=null && current.hasNext()) {
                        next = current.next();
                        return true;
                    }
                }
                return false;
            }

            @Override
            public V next() {
                if(hasNext()) {
                    V result = next;
                    next = null;
                    return result;
                } else {
                    throw new NoSuchElementException();
                }
            }

            @Override
            public void remove() {
                throw new UnsupportedOperationException("Remove is not supported");
            }
        });
    }

    /**
     * Count the elements by fully draining the iterable.
     * @param iterable any iterable
     * @return the number of elements iterated
     */
    public static <V> long count(Iterable<V> iterable) {
        long count = 0;
        for(@SuppressWarnings("unused") V e:iterable) {
            count++;
        }
        return count;
    }
}
package com.khubla.musicbrainztagger;

import java.io.BufferedReader;
import java.io.File;
import java.io.IOException;
import java.io.InputStreamReader;
import java.util.ArrayList;
import java.util.List;
import java.util.Properties;

import org.apache.http.client.ClientProtocolException;

import com.google.gson.JsonArray;
import com.google.gson.JsonElement;
import com.google.gson.JsonObject;
import com.google.gson.JsonParser;

/**
 * Simple wrapper for AcoustID: fingerprints audio files via the external
 * <code>fpcalc</code> tool and resolves fingerprints to MusicBrainz recording
 * ids through the AcoustID web service.
 *
 * @author tom
 */
public class AcoustID {
   /**
    * A chromaprint fingerprint together with the track duration, as reported by fpcalc.
    */
   public static class ChromaPrint {
      private final String chromaprint;
      private final String duration;

      public String getChromaprint() {
         return chromaprint;
      }

      public String getDuration() {
         return duration;
      }

      public ChromaPrint(String chromaprint, String duration) {
         this.duration = duration;
         this.chromaprint = chromaprint;
      }
   }

   /**
    * A single MusicBrainz recording id returned by the AcoustID service.
    */
   private static class Recording {
      private final String id;

      public String getId() {
         return id;
      }

      public Recording(String id) {
         this.id = id;
      }
   }

   /**
    * One AcoustID match: its id, score, and associated recordings.
    */
   private static class Result {
      String id;
      List<Recording> recordings = new ArrayList<Recording>();
      String score;
   }

   /**
    * The full list of matches from one AcoustID lookup.
    */
   private static class Results {
      List<Result> results = new ArrayList<Result>();
   }

   /**
    * Chromaprint the file passed in by running the external fpcalc tool.
    *
    * @param file the audio file to fingerprint
    * @param fpcalc path to the fpcalc executable
    * @return the fingerprint and duration parsed from fpcalc's output; either field
    *         may be null if fpcalc did not emit the corresponding line
    * @throws IOException if fpcalc cannot be started or its output cannot be read
    */
   public static ChromaPrint chromaprint(File file, String fpcalc) throws IOException {
      // Pass the file path directly; the previous code inserted a null placeholder
      // argument and patched it afterwards via command().set(1, ...).
      final ProcessBuilder processBuilder = new ProcessBuilder(fpcalc, file.getAbsolutePath());
      processBuilder.redirectErrorStream(true);
      final Process fpcalcProc = processBuilder.start();
      String chromaprint = null;
      String duration = null;
      // try-with-resources closes the reader (and the process output stream) even on error
      try (BufferedReader br = new BufferedReader(new InputStreamReader(fpcalcProc.getInputStream()))) {
         String line;
         while ((line = br.readLine()) != null) {
            if (line.startsWith("FINGERPRINT=")) {
               chromaprint = line.substring("FINGERPRINT=".length());
            } else if (line.startsWith("DURATION=")) {
               duration = line.substring("DURATION=".length());
            }
         }
      }
      return new ChromaPrint(chromaprint, duration);
   }

   /**
    * Parse the AcoustID JSON response.
    *
    * @param json the raw JSON returned by the AcoustID service
    * @return the parsed results, or null when the response status is not "ok"
    */
   private static AcoustID.Results getResults(String json) {
      final JsonElement jelement = new JsonParser().parse(json);
      JsonObject jobject = jelement.getAsJsonObject();
      final JsonElement statusElement = jobject.get("status");
      if (statusElement.getAsString().compareTo("ok") == 0) {
         final AcoustID.Results results = new AcoustID.Results();
         final JsonArray jarray = jobject.getAsJsonArray("results");
         /*
          * walk the results
          */
         for (int i = 0; i < jarray.size(); i++) {
            final AcoustID.Result result = new AcoustID.Result();
            jobject = jarray.get(i).getAsJsonObject();
            result.id = jobject.get("id").getAsString();
            result.score = jobject.get("score").getAsString();
            final JsonArray recordingsArray = jobject.getAsJsonArray("recordings");
            /*
             * walk the recordings
             */
            for (int j = 0; j < recordingsArray.size(); j++) {
               final JsonObject recordingJsonObject = recordingsArray.get(j).getAsJsonObject();
               String id = recordingJsonObject.get("id").getAsString();
               final AcoustID.Recording recording = new AcoustID.Recording(id);
               result.recordings.add(recording);
            }
            results.results.add(result);
         }
         return results;
      } else {
         return null;
      }
   }

   /**
    * Look up the MusicBrainz recording id for a fingerprint via the AcoustID service.
    *
    * @param chromaprint the fingerprint and duration to look up
    * @return the MusicBrainz recording id of the first recording of the first result
    * @throws IOException if the service reports an error or returns no recordings
    *         (previously this surfaced as an unchecked NullPointerException /
    *         IndexOutOfBoundsException)
    */
   public static String lookup(ChromaPrint chromaprint) throws ClientProtocolException, IOException {
      final Properties properties = new Properties();
      properties.load(AcoustID.class.getResourceAsStream(PROPERTIES));
      final String url = properties.getProperty("url") + "?client=" + properties.getProperty("client") + "&meta=recordingids" + "&fingerprint=" + chromaprint.chromaprint + "&duration=" + chromaprint.duration;
      final String json = HTTPUtil.get(url);
      final AcoustID.Results results = getResults(json);
      // guard against a failed lookup or an empty result set instead of blowing up with an NPE
      if ((results == null) || results.results.isEmpty() || results.results.get(0).recordings.isEmpty()) {
         throw new IOException("AcoustID lookup returned no recordings");
      }
      return results.results.get(0).recordings.get(0).id;
   }

   // classpath location of the AcoustID service configuration (url, client key)
   private final static String PROPERTIES = "/acoustid.properties";
}
package com.kryptnostic.rhizome.pods;

import java.util.stream.Collectors;

import javax.inject.Inject;

import org.apache.commons.lang3.StringUtils;
import org.apache.spark.SparkConf;
import org.apache.spark.sql.SparkSession;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.annotation.Import;
import org.springframework.context.annotation.Profile;

import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.CodecRegistry;
import com.google.common.base.Optional;
import com.kryptnostic.rhizome.configuration.RhizomeConfiguration;
import com.kryptnostic.rhizome.configuration.cassandra.CassandraConfiguration;
import com.kryptnostic.rhizome.configuration.spark.SparkConfiguration;

/**
 * Spring pod that wires up a {@link SparkConf} / {@link SparkSession} from the
 * rhizome Spark and Cassandra configuration, active only under the "spark" profile.
 */
@Configuration
@Profile( SparkPod.SPARK_PROFILE )
@Import( CassandraPod.class )
public class SparkPod {
    public static final String  SPARK_PROFILE                          = "spark";
    private static final String CASSANDRA_CONNECTION_FACTORY_PROPERTY  = "spark.cassandra.connection.factory";
    // Side effect of sparkConf(): holds the Cassandra cluster bootstrapped from configuration.
    private static Cluster      CLUSTER                                = null;
    // May be assigned externally before session creation to override the connection factory.
    public static String        CASSANDRA_CONNECTION_FACTORY_CLASS     = null;

    @Inject
    private RhizomeConfiguration rhizomeConfiguration;

    /**
     * Builds the SparkConf from configuration.
     *
     * @return a configured SparkConf, or null when either the Spark or Cassandra
     *         configuration is absent.
     */
    @Bean
    public SparkConf sparkConf() {
        Optional<SparkConfiguration> maybeSparkConfiguration = rhizomeConfiguration.getSparkConfiguration();
        Optional<CassandraConfiguration> maybeCassandraConfiguration = rhizomeConfiguration.getCassandraConfiguration();
        if ( maybeSparkConfiguration.isPresent() && maybeCassandraConfiguration.isPresent() ) {
            SparkConfiguration sparkConfiguration = maybeSparkConfiguration.get();
            CassandraConfiguration cassandraConfiguration = maybeCassandraConfiguration.get();
            CLUSTER = CassandraPod.clusterBuilder( cassandraConfiguration )
                    .withCodecRegistry( CodecRegistry.DEFAULT_INSTANCE ).build();
            StringBuilder sparkMasterUrlBuilder;
            if ( sparkConfiguration.isLocal() ) {
                // local mode: the single configured master string is used verbatim (e.g. "local[*]")
                sparkMasterUrlBuilder = new StringBuilder( sparkConfiguration.getSparkMasters().iterator().next() );
            } else {
                // standalone mode: build "spark://host1:port,host2:port,..."
                sparkMasterUrlBuilder = new StringBuilder( "spark://" );
                String sparkMastersAsString = sparkConfiguration.getSparkMasters().stream()
                        .map( master -> master + ":" + Integer.toString( sparkConfiguration.getSparkPort() ) )
                        .collect( Collectors.joining( "," ) );
                sparkMasterUrlBuilder.append( sparkMastersAsString );
            }
            return new SparkConf()
                    .setMaster( sparkMasterUrlBuilder.toString() )
                    .setAppName( sparkConfiguration.getAppName() )
                    .set( "spark.sql.warehouse.dir", "file:///" + sparkConfiguration.getWorkingDirectory() )
                    .set( "spark.cassandra.connection.host",
                            cassandraConfiguration.getCassandraSeedNodes().stream()
                                    .map( host -> host.getHostAddress() ).collect( Collectors.joining( "," ) ) )
                    // NOTE(review): port is hard-coded to the Cassandra default 9042 rather than
                    // read from CassandraConfiguration — confirm this is intentional.
                    .set( "spark.cassandra.connection.port", Integer.toString( 9042 ) )
                    .set( "spark.cassandra.connection.ssl.enabled",
                            String.valueOf( cassandraConfiguration.isSslEnabled() ) )
                    .setJars( sparkConfiguration.getJarLocations() );
        }
        return null;
    }

    /**
     * Builds the SparkSession from {@link #sparkConf()}.
     *
     * @return a SparkSession, or null when no SparkConf could be built.
     */
    @Bean
    public SparkSession sparkSession() {
        SparkConf sc = sparkConf();
        // Check for null before touching sc; previously sc.set(...) could run first
        // and throw an NPE when configuration was absent.
        if ( sc == null ) {
            return null;
        }
        if ( StringUtils.isNotBlank( CASSANDRA_CONNECTION_FACTORY_CLASS ) ) {
            sc.set( CASSANDRA_CONNECTION_FACTORY_PROPERTY, CASSANDRA_CONNECTION_FACTORY_CLASS );
        }
        return SparkSession.builder().config( sc ).getOrCreate();
    }

    /**
     * This is hack to allow bootstrapping cluster from cassandra configuration.
     *
     * @return A cluster instance as described by the default CassandraConfiguration. Will return null if called before
     *         spring invokes {@code SparkPod#sparkConf()}.
     */
    public static Cluster getCluster() {
        return CLUSTER;
    }
}
package com.metamx.http.client;

import com.google.common.base.Charsets;
import com.google.common.base.Supplier;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Multimap;
import com.google.common.collect.Multimaps;
import com.google.common.util.concurrent.ListenableFuture;
import com.metamx.http.client.response.HttpResponseHandler;
import org.jboss.netty.buffer.ChannelBuffer;
import org.jboss.netty.buffer.ChannelBufferFactory;
import org.jboss.netty.buffer.HeapChannelBufferFactory;
import org.jboss.netty.handler.codec.base64.Base64;
import org.jboss.netty.handler.codec.http.HttpHeaders;
import org.jboss.netty.handler.codec.http.HttpMethod;

import java.net.URL;
import java.nio.ByteBuffer;
import java.util.Arrays;
import java.util.Collection;
import java.util.List;

/**
 * Fluent builder for an HTTP request: headers, body content, and basic
 * authentication, dispatched through an {@link HttpClient} via {@link #go}.
 */
public class RequestBuilder
{
  // Shared heap buffer factory used to wrap byte[] content into ChannelBuffers.
  private static final ChannelBufferFactory bufferFactory = HeapChannelBufferFactory.getInstance();

  private final HttpClient httpClient;
  private final HttpMethod httpMethod;
  private final URL targetUrl;

  // Header multimap backed by a HashMap of ArrayLists so each header keeps its value list.
  private final Multimap<String, Object> requestHeaders = Multimaps.newListMultimap(
      Maps.<String, Collection<Object>>newHashMap(),
      new Supplier<List<Object>>()
      {
        @Override
        public List<Object> get()
        {
          return Lists.newArrayList();
        }
      }
  );

  // Request body; volatile because the builder may be handed between threads before go().
  private volatile ChannelBuffer requestContent = null;

  public RequestBuilder(
      HttpClient client,
      HttpMethod method,
      URL url
  )
  {
    this.httpClient = client;
    this.httpMethod = method;
    this.targetUrl = url;
  }

  /** Replaces any existing values for the header with the single given value. */
  public RequestBuilder setHeader(String header, Object value)
  {
    requestHeaders.replaceValues(header, Arrays.asList(value));
    return this;
  }

  /** Replaces any existing values for the header with the given values. */
  public RequestBuilder setHeaderValues(String header, Iterable<Object> value)
  {
    requestHeaders.replaceValues(header, value);
    return this;
  }

  /** Appends a single value to the header, keeping existing values. */
  public RequestBuilder addHeader(String header, Object value)
  {
    requestHeaders.put(header, value);
    return this;
  }

  /** Appends all given values to the header, keeping existing values. */
  public RequestBuilder addHeaderValues(String header, Iterable<Object> value)
  {
    requestHeaders.putAll(header, value);
    return this;
  }

  /** Sets the request body from a byte array, with no content type. */
  public RequestBuilder setContent(byte[] bytes)
  {
    return setContent(null, bytes);
  }

  /** Sets the request body from a slice of a byte array, with no content type. */
  public RequestBuilder setContent(byte[] bytes, int offset, int length)
  {
    return setContent(null, bytes, offset, length);
  }

  /** Sets the request body from a buffer, with no content type. */
  public RequestBuilder setContent(ChannelBuffer content)
  {
    return setContent(null, content);
  }

  /** Sets the request body from a byte array with the given content type. */
  public RequestBuilder setContent(String contentType, byte[] bytes)
  {
    return setContent(contentType, bytes, 0, bytes.length);
  }

  /** Sets the request body from a slice of a byte array with the given content type. */
  public RequestBuilder setContent(String contentType, byte[] bytes, int offset, int length)
  {
    return setContent(contentType, bufferFactory.getBuffer(bytes, offset, length));
  }

  /**
   * Sets the request body. A non-null contentType is added as a Content-Type
   * header; Content-Length is always set from the buffer's writer index.
   */
  public RequestBuilder setContent(String contentType, ChannelBuffer content)
  {
    if (contentType != null) {
      addHeader(HttpHeaders.Names.CONTENT_TYPE, contentType);
    }
    this.requestContent = content;
    setHeader(HttpHeaders.Names.CONTENT_LENGTH, content.writerIndex());
    return this;
  }

  /** Sets the Authorization header to HTTP Basic credentials. */
  public RequestBuilder setBasicAuthentication(String username, String password)
  {
    final String base64Value = base64Encode(String.format("%s:%s", username, password));
    setHeader(HttpHeaders.Names.AUTHORIZATION, String.format("Basic %s", base64Value));
    return this;
  }

  // Base64-encodes the given string as UTF-8, without line breaks.
  private String base64Encode(final String value)
  {
    final ByteBuffer rawBytes = ByteBuffer.wrap(value.getBytes(Charsets.UTF_8));
    return Base64
        .encode(bufferFactory.getBuffer(rawBytes), false)
        .toString(Charsets.UTF_8);
  }

  /** Builds the Request and dispatches it through the client. */
  public <IntermediateType, Final> ListenableFuture<Final> go(
      HttpResponseHandler<IntermediateType, Final> responseHandler
  )
  {
    return httpClient.go(
        new Request<IntermediateType, Final>(httpMethod, targetUrl, requestHeaders, requestContent, responseHandler)
    );
  }
}
package com.meujornal.models.usuarios;

import java.io.Serializable;

import javax.persistence.Column;
import javax.persistence.Convert;
import javax.persistence.Entity;
import javax.persistence.GeneratedValue;
import javax.persistence.Id;
import javax.validation.constraints.NotNull;
import javax.validation.constraints.Pattern;
import javax.validation.constraints.Size;

import org.hibernate.validator.constraints.Email;
import org.hibernate.validator.constraints.NotBlank;

import com.google.common.base.Objects;

import com.meujornal.infrastructure.persistence.converters.RoleConverter;

/**
 * JPA entity for an application user ("usuário"), carrying credentials,
 * contact data, role, and a security question/answer pair.
 *
 * NOTE(review): no equals()/hashCode() override — instances compare by
 * identity; confirm callers never rely on value equality (e.g. in Sets).
 */
@Entity
public class Usuario implements Serializable {

	private static final long serialVersionUID = -8000608833275588277L;

	// Database-generated surrogate key; read-only (no setter).
	@Id
	@GeneratedValue
	private Long id;

	// Display name, max 60 characters.
	@NotBlank
	@Size(max = 60)
	private String nome;

	// Unique login name: lowercase letters, digits, '-' and '_',
	// must contain at least 3 consecutive lowercase letters, max 30 chars.
	@NotBlank
	@Pattern(regexp = "^(?=.*?[a-z]{3,})[a-z0-9\\-_]+$")
	@Size(max = 30)
	@Column(unique = true)
	private String nomeDeUsuario;

	// Password; stored in encoded form (see setSenha).
	@NotBlank
	private String senha;

	// Unique e-mail address.
	@NotBlank
	@Email
	@Column(unique = true)
	private String email;

	// Authorization role, persisted through RoleConverter; defaults to USER.
	@NotNull
	@Convert(converter = RoleConverter.class)
	private Role papel = Role.USER;

	// Security question for account recovery.
	@NotBlank
	private String perguntaDeSeguranca;

	// Answer to the security question; stored in encoded form (see setter).
	@NotBlank
	private String respostaDaPerguntaDeSeguranca;

	public Long getId() {
		return id;
	}

	public String getNome() {
		return nome;
	}

	public void setNome(String nome) {
		this.nome = nome;
	}

	public String getNomeDeUsuario() {
		return nomeDeUsuario;
	}

	public void setNomeDeUsuario(String nomeDeUsuario) {
		this.nomeDeUsuario = nomeDeUsuario;
	}

	public String getSenha() {
		return senha;
	}

	/**
	 * Stores the password in encoded form via SensitiveDataEncoder.
	 * NOTE(review): presumably a one-way hash — confirm; calling this twice
	 * with an already-encoded value would double-encode it.
	 */
	public void setSenha(String senha) {
		this.senha = SensitiveDataEncoder.encode(senha);
	}

	public String getEmail() {
		return email;
	}

	public void setEmail(String email) {
		this.email = email;
	}

	public Role getPapel() {
		return papel;
	}

	public void setPapel(Role papel) {
		this.papel = papel;
	}

	public String getPerguntaDeSeguranca() {
		return perguntaDeSeguranca;
	}

	public void setPerguntaDeSeguranca(String perguntaDeSeguranca) {
		this.perguntaDeSeguranca = perguntaDeSeguranca;
	}

	public String getRespostaDaPerguntaDeSeguranca() {
		return respostaDaPerguntaDeSeguranca;
	}

	/**
	 * Stores the security answer in encoded form via SensitiveDataEncoder,
	 * mirroring setSenha.
	 */
	public void setRespostaDaPerguntaDeSeguranca(
			String respostaDaPerguntaDeSeguranca) {
		this.respostaDaPerguntaDeSeguranca = SensitiveDataEncoder
				.encode(respostaDaPerguntaDeSeguranca);
	}

	// Omits senha and the security question/answer from the string form.
	// NOTE(review): Guava's Objects.toStringHelper was moved to MoreObjects
	// in Guava 18+; consider migrating when the dependency is upgraded.
	@Override
	public String toString() {
		return Objects.toStringHelper(this).addValue(nome)
				.addValue(nomeDeUsuario).addValue(email).addValue(papel)
				.toString();
	}
}
package com.microsoft.sqlserver.jdbc; import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; import java.io.FileInputStream; import java.io.FileNotFoundException; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; import java.io.Reader; import java.io.Serializable; import java.io.UnsupportedEncodingException; import java.math.BigDecimal; import java.math.BigInteger; import java.math.RoundingMode; import java.net.InetAddress; import java.net.InetSocketAddress; import java.net.Socket; import java.net.SocketAddress; import java.net.SocketException; import java.net.SocketOption; import java.net.SocketTimeoutException; import java.nio.Buffer; import java.nio.ByteBuffer; import java.nio.ByteOrder; import java.nio.channels.SelectionKey; import java.nio.channels.Selector; import java.nio.channels.SocketChannel; import java.nio.charset.Charset; import java.security.KeyStore; import java.security.Provider; import java.security.Security; import java.security.cert.CertificateException; import java.security.cert.X509Certificate; import java.sql.Timestamp; import java.text.MessageFormat; import java.time.LocalDate; import java.time.OffsetDateTime; import java.time.OffsetTime; import java.util.ArrayList; import java.util.Arrays; import java.util.Calendar; import java.util.Collection; import java.util.GregorianCalendar; import java.util.Iterator; import java.util.LinkedList; import java.util.List; import java.util.Locale; import java.util.Map; import java.util.Map.Entry; import java.util.Properties; import java.util.Set; import java.util.SimpleTimeZone; import java.util.TimeZone; import java.util.concurrent.ScheduledFuture; import java.util.concurrent.SynchronousQueue; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; import java.util.logging.Level; import java.util.logging.Logger; import javax.net.SocketFactory; import 
javax.net.ssl.KeyManager;
import javax.net.ssl.SSLContext;
import javax.net.ssl.SSLSocket;
import javax.net.ssl.TrustManager;
import javax.net.ssl.TrustManagerFactory;
import javax.net.ssl.X509TrustManager;

import com.microsoft.sqlserver.jdbc.SQLServerConnection.FedAuthTokenCommand;
import com.microsoft.sqlserver.jdbc.dataclassification.SensitivityClassification;

/**
 * ExtendedSocketOptions provides methods to keep track of keep alive and socket information.
 *
 */
final class ExtendedSocketOptions {
    // Minimal SocketOption implementation used to name TCP keep-alive options that
    // are not exposed by java.net.StandardSocketOptions on all JDK versions.
    private static class ExtSocketOption<T> implements SocketOption<T> {
        private final String name;
        private final Class<T> type;

        ExtSocketOption(String name, Class<T> type) {
            this.name = name;
            this.type = type;
        }

        @Override
        public String name() {
            return name;
        }

        @Override
        public Class<T> type() {
            return type;
        }

        @Override
        public String toString() {
            return name;
        }
    }

    // Pure constant holder; never instantiated.
    private ExtendedSocketOptions() {}

    /**
     * Keep-Alive idle time.
     *
     * <p>
     * The value of this socket option is an {@code Integer} that is the number of seconds of idle time before
     * keep-alive initiates a probe. The socket option is specific to stream-oriented sockets using the TCP/IP protocol.
     * The exact semantics of this socket option are system dependent.
     *
     * <p>
     * When the {@link java.net.StandardSocketOptions#SO_KEEPALIVE SO_KEEPALIVE} option is enabled, TCP probes a
     * connection that has been idle for some amount of time. The default value for this idle period is system
     * dependent, but is typically 2 hours. The {@code TCP_KEEPIDLE} option can be used to affect this value for a given
     * socket.
     *
     * @since 11
     */
    public static final SocketOption<Integer> TCP_KEEPIDLE = new ExtSocketOption<Integer>("TCP_KEEPIDLE",
            Integer.class);

    /**
     * Keep-Alive retransmission interval time.
     *
     * <p>
     * The value of this socket option is an {@code Integer} that is the number of seconds to wait before retransmitting
     * a keep-alive probe. The socket option is specific to stream-oriented sockets using the TCP/IP protocol. The exact
     * semantics of this socket option are system dependent.
     *
     * <p>
     * When the {@link java.net.StandardSocketOptions#SO_KEEPALIVE SO_KEEPALIVE} option is enabled, TCP probes a
     * connection that has been idle for some amount of time. If the remote system does not respond to a keep-alive
     * probe, TCP retransmits the probe after some amount of time. The default value for this retransmission interval is
     * system dependent, but is typically 75 seconds. The {@code TCP_KEEPINTERVAL} option can be used to affect this
     * value for a given socket.
     *
     * @since 11
     */
    public static final SocketOption<Integer> TCP_KEEPINTERVAL = new ExtSocketOption<Integer>("TCP_KEEPINTERVAL",
            Integer.class);
}


/**
 * Constants and small helpers for the TDS (Tabular Data Stream) wire protocol:
 * protocol versions, token types, packet types, login options, and
 * date/time encoding lengths. Pure namespace — never instantiated.
 */
final class TDS {
    // TDS protocol versions
    static final int VER_DENALI = 0x74000004; // TDS 7.4
    static final int VER_KATMAI = 0x730B0003; // TDS 7.3B(includes null bit compression)
    static final int VER_YUKON = 0x72090002; // TDS 7.2
    static final int VER_UNKNOWN = 0x00000000; // Unknown/uninitialized

    // Token stream token types (see getTokenName for readable forms)
    static final int TDS_RET_STAT = 0x79;
    static final int TDS_COLMETADATA = 0x81;
    static final int TDS_TABNAME = 0xA4;
    static final int TDS_COLINFO = 0xA5;
    static final int TDS_ORDER = 0xA9;
    static final int TDS_ERR = 0xAA;
    static final int TDS_MSG = 0xAB;
    static final int TDS_RETURN_VALUE = 0xAC;
    static final int TDS_LOGIN_ACK = 0xAD;
    static final int TDS_FEATURE_EXTENSION_ACK = 0xAE;
    static final int TDS_ROW = 0xD1;
    static final int TDS_NBCROW = 0xD2;
    static final int TDS_ENV_CHG = 0xE3;
    static final int TDS_SESSION_STATE = 0xE4;
    static final int TDS_SSPI = 0xED;
    static final int TDS_DONE = 0xFD;
    static final int TDS_DONEPROC = 0xFE;
    static final int TDS_DONEINPROC = 0xFF;
    static final int TDS_FEDAUTHINFO = 0xEE;
    static final int TDS_SQLRESCOLSRCS = 0xa2;
    static final int TDS_SQLDATACLASSIFICATION = 0xa3;

    // FedAuth
    static final byte TDS_FEATURE_EXT_FEDAUTH = 0x02;
    static final int TDS_FEDAUTH_LIBRARY_SECURITYTOKEN = 0x01;
    static final int TDS_FEDAUTH_LIBRARY_ADAL = 0x02;
    static final int TDS_FEDAUTH_LIBRARY_RESERVED = 0x7F;
    static final byte ADALWORKFLOW_ACTIVEDIRECTORYPASSWORD = 0x01;
    static final byte ADALWORKFLOW_ACTIVEDIRECTORYINTEGRATED = 0x02;
    static final byte ADALWORKFLOW_ACTIVEDIRECTORYMSI = 0x03;
    // NOTE(review): INTERACTIVE shares the value 0x03 with MSI above — confirm intentional.
    static final byte ADALWORKFLOW_ACTIVEDIRECTORYINTERACTIVE = 0x03;
    static final byte ADALWORKFLOW_ACTIVEDIRECTORYSERVICEPRINCIPAL = 0x01; // Using the Password byte as that is the
                                                                           // closest we have.
    static final byte FEDAUTH_INFO_ID_STSURL = 0x01; // FedAuthInfoData is token endpoint URL from which to acquire fed
                                                     // auth token
    static final byte FEDAUTH_INFO_ID_SPN = 0x02; // FedAuthInfoData is the SPN to use for acquiring fed auth token

    // AE constants
    // 0x03 is for x_eFeatureExtensionId_Rcs
    static final byte TDS_FEATURE_EXT_AE = 0x04;
    static final byte COLUMNENCRYPTION_NOT_SUPPORTED = 0x00; // column encryption not supported
    static final byte COLUMNENCRYPTION_VERSION1 = 0x01; // column encryption without enclave
    static final byte COLUMNENCRYPTION_VERSION2 = 0x02; // column encryption with enclave
    static final int CUSTOM_CIPHER_ALGORITHM_ID = 0; // max version
    // 0x06 is for x_eFeatureExtensionId_LoginToken
    // 0x07 is for x_eFeatureExtensionId_ClientSideTelemetry

    // Data Classification constants
    static final byte TDS_FEATURE_EXT_DATACLASSIFICATION = 0x09;
    static final byte DATA_CLASSIFICATION_NOT_ENABLED = 0x00;
    static final byte MAX_SUPPORTED_DATA_CLASSIFICATION_VERSION = 0x02;
    static final byte DATA_CLASSIFICATION_VERSION_ADDED_RANK_SUPPORT = 0x02;

    // Always Encrypted cipher algorithm identifiers
    static final int AES_256_CBC = 1;
    static final int AEAD_AES_256_CBC_HMAC_SHA256 = 2;
    static final int AE_METADATA = 0x08;

    // Additional feature-extension identifiers
    static final byte TDS_FEATURE_EXT_UTF8SUPPORT = 0x0A;
    static final byte TDS_FEATURE_EXT_AZURESQLDNSCACHING = 0x0B;
    static final byte TDS_FEATURE_EXT_SESSIONRECOVERY = 0x01;

    // Table-valued parameter (TVP) tokens and flags
    static final int TDS_TVP = 0xF3;
    static final int TVP_ROW = 0x01;
    static final int TVP_NULL_TOKEN = 0xFFFF;
    static final int TVP_STATUS_DEFAULT = 0x02;
    static final int TVP_ORDER_UNIQUE_TOKEN = 0x10;
    // TVP_ORDER_UNIQUE_TOKEN flags
    static final byte TVP_ORDERASC_FLAG = 0x1;
    static final byte TVP_ORDERDESC_FLAG = 0x2;
    static final byte TVP_UNIQUE_FLAG = 0x4;

    // TVP flags, may be used in other places
    static final int FLAG_NULLABLE = 0x01;
    static final int FLAG_TVP_DEFAULT_COLUMN = 0x200;

    static final int FEATURE_EXT_TERMINATOR = -1;

    // Sql_variant length
    static final int SQL_VARIANT_LENGTH = 8009;

    /**
     * Returns a human-readable name (with hex value) for a TDS token type,
     * for logging/diagnostics.
     */
    static final String getTokenName(int tdsTokenType) {
        switch (tdsTokenType) {
            case TDS_RET_STAT:
                return "TDS_RET_STAT (0x79)";
            case TDS_COLMETADATA:
                return "TDS_COLMETADATA (0x81)";
            case TDS_TABNAME:
                return "TDS_TABNAME (0xA4)";
            case TDS_COLINFO:
                return "TDS_COLINFO (0xA5)";
            case TDS_ORDER:
                return "TDS_ORDER (0xA9)";
            case TDS_ERR:
                return "TDS_ERR (0xAA)";
            case TDS_MSG:
                return "TDS_MSG (0xAB)";
            case TDS_RETURN_VALUE:
                return "TDS_RETURN_VALUE (0xAC)";
            case TDS_LOGIN_ACK:
                return "TDS_LOGIN_ACK (0xAD)";
            case TDS_FEATURE_EXTENSION_ACK:
                return "TDS_FEATURE_EXTENSION_ACK (0xAE)";
            case TDS_ROW:
                return "TDS_ROW (0xD1)";
            case TDS_NBCROW:
                return "TDS_NBCROW (0xD2)";
            case TDS_ENV_CHG:
                return "TDS_ENV_CHG (0xE3)";
            case TDS_SESSION_STATE:
                return "TDS_SESSION_STATE (0xE4)";
            case TDS_SSPI:
                return "TDS_SSPI (0xED)";
            case TDS_DONE:
                return "TDS_DONE (0xFD)";
            case TDS_DONEPROC:
                return "TDS_DONEPROC (0xFE)";
            case TDS_DONEINPROC:
                return "TDS_DONEINPROC (0xFF)";
            case TDS_FEDAUTHINFO:
                return "TDS_FEDAUTHINFO (0xEE)";
            case TDS_FEATURE_EXT_DATACLASSIFICATION:
                return "TDS_FEATURE_EXT_DATACLASSIFICATION (0x09)";
            case TDS_FEATURE_EXT_UTF8SUPPORT:
                return "TDS_FEATURE_EXT_UTF8SUPPORT (0x0A)";
            case TDS_FEATURE_EXT_AZURESQLDNSCACHING:
                return "TDS_FEATURE_EXT_AZURESQLDNSCACHING (0x0B)";
            case TDS_FEATURE_EXT_SESSIONRECOVERY:
                return "TDS_FEATURE_EXT_SESSIONRECOVERY (0x01)";
            default:
                return "unknown token (0x" + Integer.toHexString(tdsTokenType).toUpperCase() + ")";
        }
    }

    // RPC ProcIDs for use with RPCRequest (PKT_RPC) calls
    static final short PROCID_SP_CURSOR = 1;
    static final short PROCID_SP_CURSOROPEN = 2;
    static final short PROCID_SP_CURSORPREPARE = 3;
    static final short PROCID_SP_CURSOREXECUTE = 4;
    static final short PROCID_SP_CURSORPREPEXEC = 5;
    static final short PROCID_SP_CURSORUNPREPARE = 6;
    static final short PROCID_SP_CURSORFETCH = 7;
    static final short PROCID_SP_CURSOROPTION = 8;
    static final short PROCID_SP_CURSORCLOSE = 9;
    static final short PROCID_SP_EXECUTESQL = 10;
    static final short PROCID_SP_PREPARE = 11;
    static final short PROCID_SP_EXECUTE = 12;
    static final short PROCID_SP_PREPEXEC = 13;
    static final short PROCID_SP_PREPEXECRPC = 14;
    static final short PROCID_SP_UNPREPARE = 15;

    // Constants for use with cursor RPCs
    static final short SP_CURSOR_OP_UPDATE = 1;
    static final short SP_CURSOR_OP_DELETE = 2;
    static final short SP_CURSOR_OP_INSERT = 4;
    static final short SP_CURSOR_OP_REFRESH = 8;
    static final short SP_CURSOR_OP_LOCK = 16;
    static final short SP_CURSOR_OP_SETPOSITION = 32;
    static final short SP_CURSOR_OP_ABSOLUTE = 64;

    // Constants for server-cursored result sets.
    // See the Engine Cursors Functional Specification for details.
    static final int FETCH_FIRST = 1;
    static final int FETCH_NEXT = 2;
    static final int FETCH_PREV = 4;
    static final int FETCH_LAST = 8;
    static final int FETCH_ABSOLUTE = 16;
    static final int FETCH_RELATIVE = 32;
    static final int FETCH_REFRESH = 128;
    static final int FETCH_INFO = 256;
    static final int FETCH_PREV_NOADJUST = 512;
    static final byte RPC_OPTION_NO_METADATA = (byte) 0x02;

    // Transaction manager request types
    static final short TM_GET_DTC_ADDRESS = 0;
    static final short TM_PROPAGATE_XACT = 1;
    static final short TM_BEGIN_XACT = 5;
    static final short TM_PROMOTE_PROMOTABLE_XACT = 6;
    static final short TM_COMMIT_XACT = 7;
    static final short TM_ROLLBACK_XACT = 8;
    static final short TM_SAVE_XACT = 9;

    // TDS packet (message) types
    static final byte PKT_QUERY = 1;
    static final byte PKT_RPC = 3;
    static final byte PKT_REPLY = 4;
    static final byte PKT_CANCEL_REQ = 6;
    static final byte PKT_BULK = 7;
    static final byte PKT_DTC = 14;
    static final byte PKT_LOGON70 = 16; // 0x10
    static final byte PKT_SSPI = 17;
    static final byte PKT_PRELOGIN = 18; // 0x12
    static final byte PKT_FEDAUTH_TOKEN_MESSAGE = 8; // Authentication token for federated authentication

    // Packet header status byte flags
    static final byte STATUS_NORMAL = 0x00;
    static final byte STATUS_BIT_EOM = 0x01;
    static final byte STATUS_BIT_ATTENTION = 0x02;// this is called ignore bit in TDS spec
    static final byte STATUS_BIT_RESET_CONN = 0x08;

    // Various TDS packet size constants
    static final int INVALID_PACKET_SIZE = -1;
    static final int INITIAL_PACKET_SIZE = 4096;
    static final int MIN_PACKET_SIZE = 512;
    static final int MAX_PACKET_SIZE = 32767;
    static final int DEFAULT_PACKET_SIZE = 8000;
    static final int SERVER_PACKET_SIZE = 0; // Accept server's configured packet size

    // TDS packet header size and offsets
    static final int PACKET_HEADER_SIZE = 8;
    static final int PACKET_HEADER_MESSAGE_TYPE = 0;
    static final int PACKET_HEADER_MESSAGE_STATUS = 1;
    static final int PACKET_HEADER_MESSAGE_LENGTH = 2;
    static final int PACKET_HEADER_SPID = 4;
    static final int PACKET_HEADER_SEQUENCE_NUM = 6;
    static final int PACKET_HEADER_WINDOW = 7; // Reserved/Not used

    // MARS header length:
    // 2 byte header type
    // 8 byte transaction descriptor
    // 4 byte outstanding request count
    static final int MARS_HEADER_LENGTH = 18; // 2 byte header type, 8 byte transaction descriptor,
    static final int TRACE_HEADER_LENGTH = 26; // header length (4) + header type (2) + guid (16) + Sequence number size

    static final short HEADERTYPE_TRACE = 3; // trace header type

    // Message header length
    static final int MESSAGE_HEADER_LENGTH = MARS_HEADER_LENGTH + 4; // length includes message header itself

    // Prelogin option identifiers
    static final byte B_PRELOGIN_OPTION_VERSION = 0x00;
    static final byte B_PRELOGIN_OPTION_ENCRYPTION = 0x01;
    static final byte B_PRELOGIN_OPTION_INSTOPT = 0x02;
    static final byte B_PRELOGIN_OPTION_THREADID = 0x03;
    static final byte B_PRELOGIN_OPTION_MARS = 0x04;
    static final byte B_PRELOGIN_OPTION_TRACEID = 0x05;
    static final byte B_PRELOGIN_OPTION_FEDAUTHREQUIRED = 0x06;
    static final byte B_PRELOGIN_OPTION_TERMINATOR = (byte) 0xFF;

    // Login option byte 1
    static final byte LOGIN_OPTION1_ORDER_X86 = 0x00;
    static final byte LOGIN_OPTION1_ORDER_6800 = 0x01;
    static final byte LOGIN_OPTION1_CHARSET_ASCII = 0x00;
    static final byte LOGIN_OPTION1_CHARSET_EBCDIC = 0x02;
    static final byte LOGIN_OPTION1_FLOAT_IEEE_754 = 0x00;
    static final byte LOGIN_OPTION1_FLOAT_VAX = 0x04;
    static final byte LOGIN_OPTION1_FLOAT_ND5000 = 0x08;
    static final byte LOGIN_OPTION1_DUMPLOAD_ON = 0x00;
    static final byte LOGIN_OPTION1_DUMPLOAD_OFF = 0x10;
    static final byte LOGIN_OPTION1_USE_DB_ON = 0x00;
    static final byte LOGIN_OPTION1_USE_DB_OFF = 0x20;
    static final byte LOGIN_OPTION1_INIT_DB_WARN = 0x00;
    static final byte LOGIN_OPTION1_INIT_DB_FATAL = 0x40;
    static final byte LOGIN_OPTION1_SET_LANG_OFF = 0x00;
    static final byte LOGIN_OPTION1_SET_LANG_ON = (byte) 0x80;

    // Login option byte 2
    static final byte LOGIN_OPTION2_INIT_LANG_WARN = 0x00;
    static final byte LOGIN_OPTION2_INIT_LANG_FATAL = 0x01;
    static final byte LOGIN_OPTION2_ODBC_OFF = 0x00;
    static final byte LOGIN_OPTION2_ODBC_ON = 0x02;
    static final byte LOGIN_OPTION2_TRAN_BOUNDARY_OFF = 0x00;
    static final byte LOGIN_OPTION2_TRAN_BOUNDARY_ON = 0x04;
    static final byte LOGIN_OPTION2_CACHE_CONNECTION_OFF = 0x00;
    static final byte LOGIN_OPTION2_CACHE_CONNECTION_ON = 0x08;
    static final byte LOGIN_OPTION2_USER_NORMAL = 0x00;
    static final byte LOGIN_OPTION2_USER_SERVER = 0x10;
    static final byte LOGIN_OPTION2_USER_REMUSER = 0x20;
    static final byte LOGIN_OPTION2_USER_SQLREPL_OFF = 0x00;
    static final byte LOGIN_OPTION2_USER_SQLREPL_ON = 0x30;
    static final byte LOGIN_OPTION2_INTEGRATED_SECURITY_OFF = 0x00;
    static final byte LOGIN_OPTION2_INTEGRATED_SECURITY_ON = (byte) 0x80;

    // Login option byte 3
    static final byte LOGIN_OPTION3_DEFAULT = 0x00;
    static final byte LOGIN_OPTION3_CHANGE_PASSWORD = 0x01;
    static final byte LOGIN_OPTION3_SEND_YUKON_BINARY_XML = 0x02;
    static final byte LOGIN_OPTION3_USER_INSTANCE = 0x04;
    static final byte LOGIN_OPTION3_UNKNOWN_COLLATION_HANDLING = 0x08;
    static final byte LOGIN_OPTION3_FEATURE_EXTENSION = 0x10;

    // Login type flag (bits 5 - 7 reserved for future use)
    static final byte LOGIN_SQLTYPE_DEFAULT = 0x00;
    static final byte LOGIN_SQLTYPE_TSQL = 0x01;
    static final byte LOGIN_SQLTYPE_ANSI_V1 = 0x02;
    static final byte LOGIN_SQLTYPE_ANSI89_L1 = 0x03;
    static final byte LOGIN_SQLTYPE_ANSI89_L2 = 0x04;
    static final byte LOGIN_SQLTYPE_ANSI89_IEF = 0x05;
    static final byte LOGIN_SQLTYPE_ANSI89_ENTRY = 0x06;
    static final byte LOGIN_SQLTYPE_ANSI89_TRANS = 0x07;
    static final byte LOGIN_SQLTYPE_ANSI89_INTER = 0x08;
    static final byte LOGIN_SQLTYPE_ANSI89_FULL = 0x09;

    static final byte LOGIN_OLEDB_OFF = 0x00;
    static final byte LOGIN_OLEDB_ON = 0x10;

    static final byte LOGIN_READ_ONLY_INTENT = 0x20;
    static final byte LOGIN_READ_WRITE_INTENT = 0x00;

    // Prelogin encryption negotiation levels
    static final byte ENCRYPT_OFF = 0x00;
    static final byte ENCRYPT_ON = 0x01;
    static final byte ENCRYPT_NOT_SUP = 0x02;
    static final byte ENCRYPT_REQ = 0x03;
    static final byte ENCRYPT_CLIENT_CERT = (byte) 0x80;
    static final byte ENCRYPT_INVALID = (byte) 0xFF;

    /**
     * Returns a human-readable name for a prelogin encryption level,
     * for logging/diagnostics.
     */
    static final String getEncryptionLevel(int level) {
        switch (level) {
            case ENCRYPT_OFF:
                return "OFF";
            case ENCRYPT_ON:
                return "ON";
            case ENCRYPT_NOT_SUP:
                return "NOT SUPPORTED";
            case ENCRYPT_REQ:
                return "REQUIRED";
            default:
                return "unknown encryption level (0x" + Integer.toHexString(level).toUpperCase() + ")";
        }
    }

    // Prelogin packet length, including the tds header,
    // version, encrpytion, and traceid data sessions.
    // For detailed info, please check the definition of
    // preloginRequest in Prelogin function.
    static final byte B_PRELOGIN_MESSAGE_LENGTH = 67;
    static final byte B_PRELOGIN_MESSAGE_LENGTH_WITH_FEDAUTH = 73;

    // Scroll options and concurrency options lifted out
    // of the the Yukon cursors spec for sp_cursoropen.
    final static int SCROLLOPT_KEYSET = 1;
    final static int SCROLLOPT_DYNAMIC = 2;
    final static int SCROLLOPT_FORWARD_ONLY = 4;
    final static int SCROLLOPT_STATIC = 8;
    final static int SCROLLOPT_FAST_FORWARD = 16;

    final static int SCROLLOPT_PARAMETERIZED_STMT = 4096;
    final static int SCROLLOPT_AUTO_FETCH = 8192;
    final static int SCROLLOPT_AUTO_CLOSE = 16384;

    final static int CCOPT_READ_ONLY = 1;
    final static int CCOPT_SCROLL_LOCKS = 2;
    final static int CCOPT_OPTIMISTIC_CC = 4;
    final static int CCOPT_OPTIMISTIC_CCVAL = 8;
    final static int CCOPT_ALLOW_DIRECT = 8192;
    final static int CCOPT_UPDT_IN_PLACE = 16384;

    // Result set rows include an extra, "hidden" ROWSTAT column which indicates
    // the overall success or failure of the row fetch operation. With a keyset
    // cursor, the value in the ROWSTAT column indicates whether the row has been
    // deleted from the database.
    static final int ROWSTAT_FETCH_SUCCEEDED = 1;
    static final int ROWSTAT_FETCH_MISSING = 2;

    // ColumnInfo status
    final static int COLINFO_STATUS_EXPRESSION = 0x04;
    final static int COLINFO_STATUS_KEY = 0x08;
    final static int COLINFO_STATUS_HIDDEN = 0x10;
    final static int COLINFO_STATUS_DIFFERENT_NAME = 0x20;

    final static int MAX_FRACTIONAL_SECONDS_SCALE = 7;

    final static Timestamp MAX_TIMESTAMP = Timestamp.valueOf("2079-06-06 23:59:59");
    final static Timestamp MIN_TIMESTAMP = Timestamp.valueOf("1900-01-01 00:00:00");

    // Number of bytes used to encode nanoseconds-since-midnight for a given
    // fractional-seconds scale (0..7).
    static int nanosSinceMidnightLength(int scale) {
        final int[] scaledLengths = {3, 3, 3, 4, 4, 5, 5, 5};
        assert scale >= 0;
        assert scale <= MAX_FRACTIONAL_SECONDS_SCALE;
        return scaledLengths[scale];
    }

    final static int DAYS_INTO_CE_LENGTH = 3;
    final static int MINUTES_OFFSET_LENGTH = 2;

    // Number of days in a "normal" (non-leap) year according to SQL Server.
    final static int DAYS_PER_YEAR = 365;

    final static int BASE_YEAR_1900 = 1900;
    final static int BASE_YEAR_1970 = 1970;
    final static String BASE_DATE_1970 = "1970-01-01";
    final static LocalDate BASE_LOCAL_DATE = LocalDate.of(1, 1, 1);
    final static LocalDate BASE_LOCAL_DATE_1900 = LocalDate.of(1900, 1, 1);

    // On-the-wire value lengths for time / datetime2 / datetimeoffset at a given scale.
    static int timeValueLength(int scale) {
        return nanosSinceMidnightLength(scale);
    }

    static int datetime2ValueLength(int scale) {
        return DAYS_INTO_CE_LENGTH + nanosSinceMidnightLength(scale);
    }

    static int datetimeoffsetValueLength(int scale) {
        return DAYS_INTO_CE_LENGTH + MINUTES_OFFSET_LENGTH + nanosSinceMidnightLength(scale);
    }

    // TDS is just a namespace - it can't be instantiated.
    private TDS() {}
}


/**
 * Nanosecond conversion constants used by the date/time codecs.
 * Pure namespace — never instantiated.
 */
class Nanos {
    static final int PER_SECOND = 1000000000;
    static final int PER_MAX_SCALE_INTERVAL = PER_SECOND / (int) Math.pow(10, TDS.MAX_FRACTIONAL_SECONDS_SCALE);
    static final int PER_MILLISECOND = PER_SECOND / 1000;
    static final long PER_DAY = 24 * 60 * 60 * (long) PER_SECOND;

    private Nanos() {}
}


// Constants relating to the historically accepted Julian-Gregorian calendar cutover date (October 15, 1582).
// Used in processing SQL Server temporal data types whose date component may precede that date. // Scoping these constants to a class defers their initialization to first use. class GregorianChange { // Cutover date for a pure Gregorian calendar - that is, a proleptic Gregorian calendar with // Gregorian leap year behavior throughout its entire range. This is the cutover date is used // with temporal server values, which are represented in terms of number of days relative to a // base date. static final java.util.Date PURE_CHANGE_DATE = new java.util.Date(Long.MIN_VALUE); // The standard Julian to Gregorian cutover date (October 15, 1582) that the JDBC temporal // classes (Time, Date, Timestamp) assume when converting to and from their UTC milliseconds // representations. static final java.util.Date STANDARD_CHANGE_DATE = (new GregorianCalendar(Locale.US)).getGregorianChange(); // A hint as to the number of days since 1/1/0001, past which we do not need to // not rationalize the difference between SQL Server behavior (pure Gregorian) // and Java behavior (standard Gregorian). // Not having to rationalize the difference has a substantial (measured) performance benefit // for temporal getters. // The hint does not need to be exact, as long as it's later than the actual change date. static final int DAYS_SINCE_BASE_DATE_HINT = DDC.daysSinceBaseDate(1583, 1, 1); // Extra days that need to added to a pure gregorian date, post the gergorian // cut over date, to match the default julian-gregorain calendar date of java. static final int EXTRA_DAYS_TO_BE_ADDED; static { // This issue refers to the following bugs in java(same issue). // The issue is fixed in JRE 1.7 // and exists in all the older versions. // Due to the above bug, in older JVM versions(1.6 and before), // the date calculation is incorrect at the Gregorian cut over date. // i.e. the next date after Oct 4th 1582 is Oct 17th 1582, where as // it should have been Oct 15th 1582. 
// We intentionally do not make a check based on JRE version. // If we do so, our code would break if the bug is fixed in a later update // to an older JRE. So, we check for the existence of the bug instead. GregorianCalendar cal = new GregorianCalendar(Locale.US); cal.clear(); cal.set(1, Calendar.FEBRUARY, 577738, 0, 0, 0);// 577738 = 1+577737(no of days since epoch that brings us to oct // 15th 1582) if (cal.get(Calendar.DAY_OF_MONTH) == 15) { // If the date calculation is correct(the above bug is fixed), // post the default gregorian cut over date, the pure gregorian date // falls short by two days for all dates compared to julian-gregorian date. // so, we add two extra days for functional correctness. // Note: other ways, in which this issue can be fixed instead of // trying to detect the JVM bug is // a) use unoptimized code path in the function convertTemporalToObject // b) use cal.add api instead of cal.set api in the current optimized code path // In both the above approaches, the code is about 6-8 times slower, // resulting in an overall perf regression of about (10-30)% for perf test cases EXTRA_DAYS_TO_BE_ADDED = 2; } else EXTRA_DAYS_TO_BE_ADDED = 0; } private GregorianChange() {} } final class UTC { // UTC/GMT time zone singleton. static final TimeZone timeZone = new SimpleTimeZone(0, "UTC"); private UTC() {} } /** * TDS Channel */ final class TDSChannel implements Serializable { /** * Always update serialVersionUID when prompted. 
*/
    private static final long serialVersionUID = -866497813437384090L;

    private static final Logger logger = Logger.getLogger("com.microsoft.sqlserver.jdbc.internals.TDS.Channel");

    final Logger getLogger() {
        return logger;
    }

    // Per-channel identifier embedded in every log message (see toString()).
    private final String traceID;

    final public String toString() {
        return traceID;
    }

    private final SQLServerConnection con;

    private final TDSWriter tdsWriter;

    final TDSWriter getWriter() {
        return tdsWriter;
    }

    // Creates a fresh reader for the given command over this channel.
    final TDSReader getReader(TDSCommand command) {
        return new TDSReader(this, con, command);
    }

    // Socket for raw TCP/IP communications with SQL Server
    private Socket tcpSocket;

    // Socket for SSL-encrypted communications with SQL Server
    private SSLSocket sslSocket;

    /*
     * Socket providing the communications interface to the driver. For SSL-encrypted connections, this is the
     * SSLSocket wrapped around the TCP socket. For unencrypted connections, it is just the TCP socket itself.
     */
    @SuppressWarnings("unused")
    private Socket channelSocket;

    // Implementation of a Socket proxy that can switch from TDS-wrapped I/O
    // (using the TDSChannel itself) during SSL handshake to raw I/O over
    // the TCP/IP socket.
    ProxySocket proxySocket = null;

    // I/O streams for raw TCP/IP communications with SQL Server
    private ProxyInputStream tcpInputStream;
    private OutputStream tcpOutputStream;

    // I/O streams providing the communications interface to the driver.
    // For SSL-encrypted connections, these are streams obtained from
    // the SSL socket above. They wrap the underlying TCP streams.
    // For unencrypted connections, they are just the TCP streams themselves.
    private ProxyInputStream inputStream;
    private OutputStream outputStream;

    /** TDS packet payload logger */
    private static Logger packetLogger = Logger.getLogger("com.microsoft.sqlserver.jdbc.internals.TDS.DATA");

    // Snapshot of the packet logger's FINEST-enablement taken at construction time.
    private final boolean isLoggingPackets = packetLogger.isLoggable(Level.FINEST);

    final boolean isLoggingPackets() {
        return isLoggingPackets;
    }

    // Number of TDS messages sent to and received from the server
    int numMsgsSent = 0;
    int numMsgsRcvd = 0;

    // Last SPID received from the server. Used for logging and to tag subsequent outgoing
    // packets to facilitate diagnosing problems from the server side.
    private int spid = 0;

    void setSPID(int spid) {
        this.spid = spid;
    }

    int getSPID() {
        return spid;
    }

    void resetPooledConnection() {
        tdsWriter.resetPooledConnection();
    }

    // Constructs an unconnected channel; open() establishes the TCP connection and streams.
    TDSChannel(SQLServerConnection con) {
        this.con = con;
        traceID = "TDSChannel (" + con.toString() + ")";
        this.tcpSocket = null;
        this.sslSocket = null;
        this.channelSocket = null;
        this.tcpInputStream = null;
        this.tcpOutputStream = null;
        this.inputStream = null;
        this.outputStream = null;
        this.tdsWriter = new TDSWriter(this, con);
    }

    /**
     * Opens the physical communications channel (TCP/IP socket and I/O streams) to the SQL Server.
     *
     * @return InetSocketAddress of the connection socket.
*/
    final InetSocketAddress open(String host, int port, int timeoutMillis, boolean useParallel, boolean useTnir,
            boolean isTnirFirstAttempt, int timeoutMillisForFullTimeout) throws SQLServerException {
        if (logger.isLoggable(Level.FINER))
            logger.finer(this.toString() + ": Opening TCP socket...");

        // SocketFinder encapsulates the (possibly parallel / TNIR) connection attempt strategy.
        SocketFinder socketFinder = new SocketFinder(traceID, con);
        channelSocket = tcpSocket = socketFinder.findSocket(host, port, timeoutMillis, useParallel, useTnir,
                isTnirFirstAttempt, timeoutMillisForFullTimeout);

        try {
            // Set socket options
            tcpSocket.setTcpNoDelay(true);
            tcpSocket.setKeepAlive(true);
            DriverJDBCVersion.setSocketOptions(tcpSocket, this);

            // set SO_TIMEOUT
            int socketTimeout = con.getSocketTimeoutMilliseconds();
            tcpSocket.setSoTimeout(socketTimeout);

            inputStream = tcpInputStream = new ProxyInputStream(tcpSocket.getInputStream());
            outputStream = tcpOutputStream = tcpSocket.getOutputStream();
        } catch (IOException ex) {
            // Converts and throws as SQLServerException; if it does throw, the return below is not reached.
            SQLServerException.ConvertConnectExceptionToSQLServerException(host, port, con, ex);
        }
        return (InetSocketAddress) channelSocket.getRemoteSocketAddress();
    }

    /**
     * Disables SSL on this TDS channel.
     */
    synchronized void disableSSL() {
        if (logger.isLoggable(Level.FINER))
            logger.finer(toString() + " Disabling SSL...");

        // Guard in case of disableSSL being called before enableSSL
        if (proxySocket == null) {
            // NOTE(review): level mismatch - guards on INFO but logs at FINER; confirm intent.
            if (logger.isLoggable(Level.INFO))
                logger.finer(toString() + " proxySocket is null, exit early");
            return;
        }

        /*
         * The mission: To close the SSLSocket and release everything that it is holding onto other than the TCP/IP
         * socket and streams. The challenge: Simply closing the SSLSocket tries to do additional, unnecessary shutdown
         * I/O over the TCP/IP streams that are bound to the socket proxy, resulting in a not responding and confusing
         * SQL Server. Solution: Rewire the ProxySocket's input and output streams (one more time) to closed streams.
         * SSLSocket sees that the streams are already closed and does not attempt to do any further I/O on them before
         * closing itself.
         */

        // Create a couple of cheap closed streams
        InputStream is = new ByteArrayInputStream(new byte[0]);
        try {
            is.close();
        } catch (IOException e) {
            // No reason to expect a brand new ByteArrayInputStream not to close,
            // but just in case...
            logger.fine("Ignored error closing InputStream: " + e.getMessage());
        }

        OutputStream os = new ByteArrayOutputStream();
        try {
            os.close();
        } catch (IOException e) {
            // No reason to expect a brand new ByteArrayOutputStream not to close,
            // but just in case...
            logger.fine("Ignored error closing OutputStream: " + e.getMessage());
        }

        // Rewire the proxy socket to the closed streams
        if (logger.isLoggable(Level.FINEST))
            logger.finest(toString() + " Rewiring proxy streams for SSL socket close");
        proxySocket.setStreams(is, os);

        // Now close the SSL socket. It will see that the proxy socket's streams
        // are closed and not try to do any further I/O over them.
        try {
            if (logger.isLoggable(Level.FINER))
                logger.finer(toString() + " Closing SSL socket");

            sslSocket.close();
        } catch (IOException e) {
            // Don't care if we can't close the SSL socket. We're done with it anyway.
            logger.fine("Ignored error closing SSLSocket: " + e.getMessage());
        }

        // Do not close the proxy socket. Doing so would close our TCP socket
        // to which the proxy socket is bound. Instead, just null out the reference
        // to free up the few resources it holds onto.
        proxySocket = null;

        // Finally, with all of the SSL support out of the way, put the TDSChannel
        // back to using the TCP/IP socket and streams directly.
        inputStream = tcpInputStream;
        outputStream = tcpOutputStream;
        channelSocket = tcpSocket;
        sslSocket = null;

        if (logger.isLoggable(Level.FINER))
            logger.finer(toString() + " SSL disabled");
    }

    /**
     * Used during SSL handshake, this class implements an InputStream that reads SSL handshake response data (framed
     * in TDS messages) from the TDS channel.
*/
    private class SSLHandshakeInputStream extends InputStream {
        private final TDSReader tdsReader;
        private final SSLHandshakeOutputStream sslHandshakeOutputStream;

        private final Logger logger;
        private final String logContext;

        SSLHandshakeInputStream(TDSChannel tdsChannel, SSLHandshakeOutputStream sslHandshakeOutputStream) {
            // Reader is created with no command context (null) - it only serves the handshake.
            this.tdsReader = tdsChannel.getReader(null);
            this.sslHandshakeOutputStream = sslHandshakeOutputStream;
            this.logger = tdsChannel.getLogger();
            this.logContext = tdsChannel.toString() + " (SSLHandshakeInputStream):";
        }

        /**
         * If there is no handshake response data available to be read from existing packets then this method ensures
         * that the SSL handshake output stream has been flushed to the server, and reads another packet (starting the
         * next TDS response message).
         *
         * Note that simply using TDSReader.ensurePayload isn't sufficient as it does not automatically start the new
         * response message.
         */
        private void ensureSSLPayload() throws IOException {
            if (0 == tdsReader.available()) {
                if (logger.isLoggable(Level.FINEST))
                    logger.finest(logContext
                            + " No handshake response bytes available. Flushing SSL handshake output stream.");

                try {
                    sslHandshakeOutputStream.endMessage();
                } catch (SQLServerException e) {
                    // Rewrap as IOException: the SSL engine driving this stream only understands IOException.
                    logger.finer(logContext + " Ending TDS message threw exception:" + e.getMessage());
                    throw new IOException(e.getMessage());
                }

                if (logger.isLoggable(Level.FINEST))
                    logger.finest(logContext + " Reading first packet of SSL handshake response");

                try {
                    tdsReader.readPacket();
                } catch (SQLServerException e) {
                    logger.finer(logContext + " Reading response packet threw exception:" + e.getMessage());
                    throw new IOException(e.getMessage());
                }
            }
        }

        public long skip(long n) throws IOException {
            if (logger.isLoggable(Level.FINEST))
                logger.finest(logContext + " Skipping " + n + " bytes...");

            if (n <= 0)
                return 0;

            // Clamp to int range since TDSReader.skip takes an int.
            if (n > Integer.MAX_VALUE)
                n = Integer.MAX_VALUE;

            ensureSSLPayload();

            try {
                tdsReader.skip((int) n);
            } catch (SQLServerException e) {
                logger.finer(logContext + " Skipping bytes threw exception:" + e.getMessage());
                throw new IOException(e.getMessage());
            }

            return n;
        }

        // Scratch buffer so single-byte read() can reuse the array-based path.
        private final byte oneByte[] = new byte[1];

        public int read() throws IOException {
            int bytesRead;

            // Spin until readInternal makes progress (returns 1) or signals EOF (-1).
            while (0 == (bytesRead = readInternal(oneByte, 0, oneByte.length)));

            assert 1 == bytesRead || -1 == bytesRead;
            return 1 == bytesRead ? oneByte[0] : -1;
        }

        public int read(byte[] b) throws IOException {
            return readInternal(b, 0, b.length);
        }

        public int read(byte b[], int offset, int maxBytes) throws IOException {
            return readInternal(b, offset, maxBytes);
        }

        private int readInternal(byte b[], int offset, int maxBytes) throws IOException {
            if (logger.isLoggable(Level.FINEST))
                logger.finest(logContext + " Reading " + maxBytes + " bytes...");

            ensureSSLPayload();

            try {
                // NOTE(review): assumes tdsReader.readBytes fills exactly maxBytes or throws - confirm.
                tdsReader.readBytes(b, offset, maxBytes);
            } catch (SQLServerException e) {
                logger.finer(logContext + " Reading bytes threw exception:" + e.getMessage());
                throw new IOException(e.getMessage());
            }

            return maxBytes;
        }
    }

    /**
     * Used during SSL handshake, this class implements an OutputStream that writes SSL handshake request data (framed
     * in TDS messages) to the TDS channel.
     */
    private class SSLHandshakeOutputStream extends OutputStream {
        private final TDSWriter tdsWriter;

        /** Flag indicating when it is necessary to start a new prelogin TDS message */
        private boolean messageStarted;

        private final Logger logger;
        private final String logContext;

        SSLHandshakeOutputStream(TDSChannel tdsChannel) {
            this.tdsWriter = tdsChannel.getWriter();
            this.messageStarted = false;
            this.logger = tdsChannel.getLogger();
            this.logContext = tdsChannel.toString() + " (SSLHandshakeOutputStream):";
        }

        public void flush() throws IOException {
            // It seems that the security provider implementation in some JVMs
            // (notably SunJSSE in the 6.0 JVM) likes to add spurious calls to
            // flush the SSL handshake output stream during SSL handshaking.
            // We need to ignore these calls because the SSL handshake payload
            // needs to be completely encapsulated in TDS. The SSL handshake
            // input stream always ensures that this output stream has been flushed
            // before trying to read the response.
            if (logger.isLoggable(Level.FINEST))
                logger.finest(logContext + " Ignored a request to flush the stream");
        }

        void endMessage() throws SQLServerException {
            // We should only be asked to end the message if we have started one
            assert messageStarted;

            if (logger.isLoggable(Level.FINEST))
                logger.finest(logContext + " Finishing TDS message");

            // Flush any remaining bytes through the writer. Since there may be fewer bytes
            // ready to send than a full TDS packet, we end the message here and start a new
            // one later if additional handshake data needs to be sent.
            tdsWriter.endMessage();
            messageStarted = false;
        }

        // Scratch buffer so single-byte write() can reuse the array-based path.
        private final byte singleByte[] = new byte[1];

        public void write(int b) throws IOException {
            singleByte[0] = (byte) (b & 0xFF);
            writeInternal(singleByte, 0, singleByte.length);
        }

        public void write(byte[] b) throws IOException {
            writeInternal(b, 0, b.length);
        }

        public void write(byte[] b, int off, int len) throws IOException {
            writeInternal(b, off, len);
        }

        private void writeInternal(byte[] b, int off, int len) throws IOException {
            try {
                // Start out the handshake request in a new prelogin message. Subsequent
                // writes just add handshake data to the request until flushed.
                if (!messageStarted) {
                    if (logger.isLoggable(Level.FINEST))
                        logger.finest(logContext + " Starting new TDS packet...");

                    tdsWriter.startMessage(null, TDS.PKT_PRELOGIN);
                    messageStarted = true;
                }

                if (logger.isLoggable(Level.FINEST))
                    logger.finest(logContext + " Writing " + len + " bytes...");

                tdsWriter.writeBytes(b, off, len);
            } catch (SQLServerException e) {
                logger.finer(logContext + " Writing bytes threw exception:" + e.getMessage());
                throw new IOException(e.getMessage());
            }
        }
    }

    /**
     * This class implements an InputStream that just forwards all of its methods to an underlying InputStream.
     *
     * It is more predictable than FilteredInputStream which forwards some of its read methods directly to the
     * underlying stream, but not others.
*/ private final class ProxyInputStream extends InputStream { private InputStream filteredStream; /** * Bytes that have been read by a poll(s). */ private int[] cachedBytes = new int[10];; /** * How many bytes have been cached. */ private int cachedLength = 0; ProxyInputStream(InputStream is) { filteredStream = is; } final void setFilteredStream(InputStream is) { filteredStream = is; } /** * Poll the stream to verify connectivity. * * @return true if the stream is readable. * @throws IOException * If an I/O exception occurs. */ public synchronized boolean poll() { synchronized (this) { int b; try { b = filteredStream.read(); } catch (SocketTimeoutException e) { // Not a disconnected socket, so we're good to go return true; } catch (IOException e) { return false; } if (logger.isLoggable(Level.FINEST)) { logger.finest(toString() + "poll() - read() returned " + b); } if (b == -1) // end-of-stream return false; // if we got here, a byte was read and we need to save it // Increase the size of the cache, if needed (should be very rare). 
if (cachedBytes.length <= cachedLength) { int[] temp = new int[cachedBytes.length + 10]; for (int i = 0; i < cachedBytes.length; i++) { temp[i] = cachedBytes[i]; } cachedBytes = temp; } cachedBytes[cachedLength] = b; cachedLength++; return true; } } private int getOneFromCache() { int result = cachedBytes[0]; for (int i = 0; i < cachedLength; i++) { cachedBytes[i] = cachedBytes[i + 1]; } cachedLength return result; } public long skip(long n) throws IOException { synchronized (this) { long bytesSkipped = 0; if (logger.isLoggable(Level.FINEST)) logger.finest(toString() + " Skipping " + n + " bytes"); while (cachedLength > 0 && bytesSkipped < n) { bytesSkipped++; getOneFromCache(); } if (bytesSkipped < n) { bytesSkipped += filteredStream.skip(n - bytesSkipped); } if (logger.isLoggable(Level.FINEST)) logger.finest(toString() + " Skipped " + n + " bytes"); return bytesSkipped; } } public int available() throws IOException { int bytesAvailable = filteredStream.available() + cachedLength; if (logger.isLoggable(Level.FINEST)) logger.finest(toString() + " " + bytesAvailable + " bytes available"); return bytesAvailable; } private final byte oneByte[] = new byte[1]; public int read() throws IOException { int bytesRead; while (0 == (bytesRead = readInternal(oneByte, 0, oneByte.length))); assert 1 == bytesRead || -1 == bytesRead; return 1 == bytesRead ? 
oneByte[0] : -1; } public int read(byte[] b) throws IOException { return readInternal(b, 0, b.length); } public int read(byte[] b, int offset, int maxBytes) throws IOException { return readInternal(b, offset, maxBytes); } private int readInternal(byte[] b, int offset, int maxBytes) throws IOException { synchronized (this) { int bytesRead; if (logger.isLoggable(Level.FINEST)) logger.finest(toString() + " Reading " + maxBytes + " bytes"); // Optimize for nothing cached if (cachedLength == 0) { try { bytesRead = filteredStream.read(b, offset, maxBytes); } catch (IOException e) { if (logger.isLoggable(Level.FINER)) logger.finer(toString() + " Reading bytes threw exception:" + e.getMessage()); throw e; } } else { int offsetBytesToSkipInCache = Math.min(offset, cachedLength); for (int i = 0; i < offsetBytesToSkipInCache; i++) { getOneFromCache(); } byte[] bytesFromCache = new byte[Math.min(maxBytes, cachedLength)]; for (int i = 0; i < bytesFromCache.length; i++) { bytesFromCache[i] = (byte) getOneFromCache(); } try { byte[] bytesFromStream = new byte[maxBytes - bytesFromCache.length]; int bytesReadFromStream = filteredStream.read(bytesFromStream, offset - offsetBytesToSkipInCache, maxBytes - bytesFromCache.length); bytesRead = bytesFromCache.length + bytesReadFromStream; System.arraycopy(bytesFromCache, 0, b, 0, bytesFromCache.length); if (bytesReadFromStream >= 0) System.arraycopy(bytesFromStream, 0, b, bytesFromCache.length, bytesReadFromStream); } catch (IOException e) { if (logger.isLoggable(Level.FINER)) logger.finer(toString() + " " + e.getMessage()); logger.finer(toString() + " Reading bytes threw exception:" + e.getMessage()); throw e; } } if (logger.isLoggable(Level.FINEST)) logger.finest(toString() + " Read " + bytesRead + " bytes"); return bytesRead; } } public boolean markSupported() { boolean markSupported = filteredStream.markSupported(); if (logger.isLoggable(Level.FINEST)) logger.finest(toString() + " Returning markSupported: " + markSupported); return 
markSupported; } public void mark(int readLimit) { synchronized (this) { if (logger.isLoggable(Level.FINEST)) logger.finest(toString() + " Marking next " + readLimit + " bytes"); filteredStream.mark(readLimit); } } public void reset() throws IOException { synchronized (this) { if (logger.isLoggable(Level.FINEST)) logger.finest(toString() + " Resetting to previous mark"); filteredStream.reset(); } } public void close() throws IOException { if (logger.isLoggable(Level.FINEST)) logger.finest(toString() + " Closing"); filteredStream.close(); } } /** * This class implements an OutputStream that just forwards all of its methods to an underlying OutputStream. * * This class essentially does what FilteredOutputStream does, but is more efficient for our usage. * FilteredOutputStream transforms block writes to sequences of single-byte writes. */ final class ProxyOutputStream extends OutputStream { private OutputStream filteredStream; ProxyOutputStream(OutputStream os) { filteredStream = os; } final void setFilteredStream(OutputStream os) { filteredStream = os; } public void close() throws IOException { if (logger.isLoggable(Level.FINEST)) logger.finest(toString() + " Closing"); filteredStream.close(); } public void flush() throws IOException { if (logger.isLoggable(Level.FINEST)) logger.finest(toString() + " Flushing"); filteredStream.flush(); } private final byte singleByte[] = new byte[1]; public void write(int b) throws IOException { singleByte[0] = (byte) (b & 0xFF); writeInternal(singleByte, 0, singleByte.length); } public void write(byte[] b) throws IOException { writeInternal(b, 0, b.length); } public void write(byte[] b, int off, int len) throws IOException { writeInternal(b, off, len); } private void writeInternal(byte[] b, int off, int len) throws IOException { if (logger.isLoggable(Level.FINEST)) logger.finest(toString() + " Writing " + len + " bytes"); filteredStream.write(b, off, len); } } /** * This class implements a Socket whose I/O streams can be switched 
from using a TDSChannel for I/O to using its
     * underlying TCP/IP socket.
     *
     * The SSL socket binds to a ProxySocket. The initial SSL handshake is done over TDSChannel I/O streams so that
     * the handshake payload is framed in TDS packets. The I/O streams are then switched to TCP/IP I/O streams using
     * setStreams, and SSL communications continue directly over the TCP/IP I/O streams.
     *
     * Most methods other than those for getting the I/O streams are simply forwarded to the TDSChannel's underlying
     * TCP/IP socket. Methods that change the socket binding or provide direct channel access are disallowed.
     */
    private class ProxySocket extends Socket {
        private final TDSChannel tdsChannel;
        private final Logger logger;
        private final String logContext;
        private final ProxyInputStream proxyInputStream;
        private final ProxyOutputStream proxyOutputStream;

        ProxySocket(TDSChannel tdsChannel) {
            this.tdsChannel = tdsChannel;
            this.logger = tdsChannel.getLogger();
            this.logContext = tdsChannel.toString() + " (ProxySocket):";

            // Create the I/O streams
            SSLHandshakeOutputStream sslHandshakeOutputStream = new SSLHandshakeOutputStream(tdsChannel);
            SSLHandshakeInputStream sslHandshakeInputStream = new SSLHandshakeInputStream(tdsChannel,
                    sslHandshakeOutputStream);
            this.proxyOutputStream = new ProxyOutputStream(sslHandshakeOutputStream);
            this.proxyInputStream = new ProxyInputStream(sslHandshakeInputStream);
        }

        // Swaps the underlying streams (used when switching from TDS-framed handshake I/O to raw TCP I/O).
        void setStreams(InputStream is, OutputStream os) {
            proxyInputStream.setFilteredStream(is);
            proxyOutputStream.setFilteredStream(os);
        }

        public InputStream getInputStream() throws IOException {
            if (logger.isLoggable(Level.FINEST))
                logger.finest(logContext + " Getting input stream");

            return proxyInputStream;
        }

        public OutputStream getOutputStream() throws IOException {
            if (logger.isLoggable(Level.FINEST))
                logger.finest(logContext + " Getting output stream");

            return proxyOutputStream;
        }

        // Allow methods that should just forward to the underlying TCP socket or return fixed values
        public InetAddress getInetAddress() {
            return tdsChannel.tcpSocket.getInetAddress();
        }

        public boolean getKeepAlive() throws SocketException {
            return tdsChannel.tcpSocket.getKeepAlive();
        }

        public InetAddress getLocalAddress() {
            return tdsChannel.tcpSocket.getLocalAddress();
        }

        public int getLocalPort() {
            return tdsChannel.tcpSocket.getLocalPort();
        }

        public SocketAddress getLocalSocketAddress() {
            return tdsChannel.tcpSocket.getLocalSocketAddress();
        }

        public boolean getOOBInline() throws SocketException {
            return tdsChannel.tcpSocket.getOOBInline();
        }

        public int getPort() {
            return tdsChannel.tcpSocket.getPort();
        }

        public int getReceiveBufferSize() throws SocketException {
            return tdsChannel.tcpSocket.getReceiveBufferSize();
        }

        public SocketAddress getRemoteSocketAddress() {
            return tdsChannel.tcpSocket.getRemoteSocketAddress();
        }

        public boolean getReuseAddress() throws SocketException {
            return tdsChannel.tcpSocket.getReuseAddress();
        }

        public int getSendBufferSize() throws SocketException {
            return tdsChannel.tcpSocket.getSendBufferSize();
        }

        public int getSoLinger() throws SocketException {
            return tdsChannel.tcpSocket.getSoLinger();
        }

        public int getSoTimeout() throws SocketException {
            return tdsChannel.tcpSocket.getSoTimeout();
        }

        public boolean getTcpNoDelay() throws SocketException {
            return tdsChannel.tcpSocket.getTcpNoDelay();
        }

        public int getTrafficClass() throws SocketException {
            return tdsChannel.tcpSocket.getTrafficClass();
        }

        // Fixed state answers: the proxy always presents itself as a bound, open, connected socket.
        public boolean isBound() {
            return true;
        }

        public boolean isClosed() {
            return false;
        }

        public boolean isConnected() {
            return true;
        }

        public boolean isInputShutdown() {
            return false;
        }

        public boolean isOutputShutdown() {
            return false;
        }

        public String toString() {
            return tdsChannel.tcpSocket.toString();
        }

        public SocketChannel getChannel() {
            // Direct channel access is not provided; it would bypass the TDS framing.
            return null;
        }

        // Disallow calls to methods that would change the underlying TCP socket
        public void bind(SocketAddress bindPoint) throws IOException {
            logger.finer(logContext + " Disallowed call to bind. Throwing IOException.");
            throw new IOException();
        }

        public void connect(SocketAddress endpoint) throws IOException {
            logger.finer(logContext + " Disallowed call to connect (without timeout). Throwing IOException.");
            throw new IOException();
        }

        public void connect(SocketAddress endpoint, int timeout) throws IOException {
            logger.finer(logContext + " Disallowed call to connect (with timeout). Throwing IOException.");
            throw new IOException();
        }

        // Ignore calls to methods that would otherwise allow the SSL socket
        // to directly manipulate the underlying TCP socket
        public void close() throws IOException {
            if (logger.isLoggable(Level.FINER))
                logger.finer(logContext + " Ignoring close");
        }

        public void setReceiveBufferSize(int size) throws SocketException {
            if (logger.isLoggable(Level.FINER))
                logger.finer(toString() + " Ignoring setReceiveBufferSize size:" + size);
        }

        public void setSendBufferSize(int size) throws SocketException {
            if (logger.isLoggable(Level.FINER))
                logger.finer(toString() + " Ignoring setSendBufferSize size:" + size);
        }

        public void setReuseAddress(boolean on) throws SocketException {
            if (logger.isLoggable(Level.FINER))
                logger.finer(toString() + " Ignoring setReuseAddress");
        }

        public void setSoLinger(boolean on, int linger) throws SocketException {
            if (logger.isLoggable(Level.FINER))
                logger.finer(toString() + " Ignoring setSoLinger");
        }

        // Timeout and no-delay ARE forwarded: the SSL layer's settings must reach the real socket.
        public void setSoTimeout(int timeout) throws SocketException {
            tdsChannel.tcpSocket.setSoTimeout(timeout);
        }

        public void setTcpNoDelay(boolean on) throws SocketException {
            tdsChannel.tcpSocket.setTcpNoDelay(on);
        }

        public void setTrafficClass(int tc) throws SocketException {
            if (logger.isLoggable(Level.FINER))
                logger.finer(toString() + " Ignoring setTrafficClass");
        }

        public void shutdownInput() throws IOException {
            if (logger.isLoggable(Level.FINER))
                logger.finer(toString() + " Ignoring shutdownInput");
        }

        public void shutdownOutput() throws IOException {
            if (logger.isLoggable(Level.FINER))
                logger.finer(toString() + " Ignoring shutdownOutput");
        }

        public void sendUrgentData(int data) throws IOException {
            if (logger.isLoggable(Level.FINER))
                logger.finer(toString() + " Ignoring sendUrgentData");
        }

        public void setKeepAlive(boolean on) throws SocketException {
            tdsChannel.tcpSocket.setKeepAlive(on);
        }

        public void setOOBInline(boolean on) throws SocketException {
            if (logger.isLoggable(Level.FINER))
                logger.finer(toString() + " Ignoring setOOBInline");
        }
    }

    /**
     * This class implements an X509TrustManager that always accepts the X509Certificate chain offered to it.
     *
     * A PermissiveX509TrustManager is used to "verify" the authenticity of the server when the trustServerCertificate
     * connection property is set to true.
     */
    private final class PermissiveX509TrustManager implements X509TrustManager {
        private final TDSChannel tdsChannel;
        private final Logger logger;
        private final String logContext;

        PermissiveX509TrustManager(TDSChannel tdsChannel) {
            this.tdsChannel = tdsChannel;
            this.logger = tdsChannel.getLogger();
            this.logContext = tdsChannel.toString() + " (PermissiveX509TrustManager):";
        }

        public void checkClientTrusted(X509Certificate[] chain, String authType) throws CertificateException {
            // Intentionally trusts everything - see class javadoc.
            if (logger.isLoggable(Level.FINER))
                logger.finer(logContext + " Trusting client certificate (!)");
        }

        public void checkServerTrusted(X509Certificate[] chain, String authType) throws CertificateException {
            // Intentionally trusts everything - see class javadoc.
            if (logger.isLoggable(Level.FINER))
                logger.finer(logContext + " Trusting server certificate");
        }

        public X509Certificate[] getAcceptedIssuers() {
            return new X509Certificate[0];
        }
    }

    /**
     * This class implements an X509TrustManager that overrides the hostname used for validation.
*
 * This validates the subject name in the certificate with the host name
 */
private final class HostNameOverrideX509TrustManager implements X509TrustManager {
    private final Logger logger;
    private final String logContext;
    // Delegate that performs standard X.509 chain validation before the hostname checks.
    private final X509TrustManager defaultTrustManager;
    // Expected server name, lower-cased for comparison against the canonical DN.
    private String hostName;

    HostNameOverrideX509TrustManager(TDSChannel tdsChannel, X509TrustManager tm, String hostName) {
        this.logger = tdsChannel.getLogger();
        this.logContext = tdsChannel.toString() + " (HostNameOverrideX509TrustManager):";
        defaultTrustManager = tm;
        // canonical name is in lower case so convert this to lowercase too.
        this.hostName = hostName.toLowerCase(Locale.ENGLISH);
    }

    // Parse name in RFC 2253 format
    // Returns the common name if successful, null if failed to find the common name.
    // The parser is tuned to be safe rather than sorry, so if it sees something it
    // cannot parse correctly it returns null.
    private String parseCommonName(String distinguishedName) {
        int index;
        // canonical name converts entire name to lowercase
        index = distinguishedName.indexOf("cn=");
        if (index == -1) {
            return null;
        }
        distinguishedName = distinguishedName.substring(index + 3);
        // Parse until a comma or end is reached
        // Note the parser will handle gracefully (essentially will return empty string) , inside the quotes (e.g
        // cn="Foo, bar") however
        // RFC 952 says that the hostName cant have commas however the parser should not (and will not) crash if it
        // sees a , within quotes.
        for (index = 0; index < distinguishedName.length(); index++) {
            if (distinguishedName.charAt(index) == ',') {
                break;
            }
        }
        String commonName = distinguishedName.substring(0, index);
        // strip any quotes
        if (commonName.length() > 1 && ('\"' == commonName.charAt(0))) {
            if ('\"' == commonName.charAt(commonName.length() - 1))
                commonName = commonName.substring(1, commonName.length() - 1);
            else {
                // Be safe: the name did not end in a closing quote, so return null
                // so the common name won't match anything.
                commonName = null;
            }
        }
        return commonName;
    }

    // Compares nameInCert against the expected hostName. Supports a '*' wildcard in the
    // certificate name (greedy backtracking match) except for IDN (xn--) names; otherwise
    // requires an exact (already lower-cased) match.
    private boolean validateServerName(String nameInCert) {
        // Failed to get the common name from DN or empty CN
        if (null == nameInCert) {
            if (logger.isLoggable(Level.FINER)) {
                logger.finer(logContext + " Failed to parse the name from the certificate or name is empty.");
            }
            return false;
        }
        // We do not allow wildcards in IDNs (xn--).
        if (!nameInCert.startsWith("xn--") && nameInCert.contains("*")) {
            int hostIndex = 0, certIndex = 0, match = 0, startIndex = -1, periodCount = 0;
            while (hostIndex < hostName.length()) {
                if ('.' == hostName.charAt(hostIndex)) {
                    periodCount++;
                }
                if (certIndex < nameInCert.length() && hostName.charAt(hostIndex) == nameInCert.charAt(certIndex)) {
                    hostIndex++;
                    certIndex++;
                } else if (certIndex < nameInCert.length() && '*' == nameInCert.charAt(certIndex)) {
                    // Remember the wildcard position so we can backtrack to it.
                    startIndex = certIndex;
                    match = hostIndex;
                    certIndex++;
                } else if (startIndex != -1 && 0 == periodCount) {
                    // Backtrack: let the wildcard absorb one more host character,
                    // but only within the first (leftmost) label.
                    certIndex = startIndex + 1;
                    match++;
                    hostIndex = match;
                } else {
                    logFailMessage(nameInCert);
                    return false;
                }
            }
            // Matched only if the whole cert name was consumed and the host name
            // has at least two dots (guards against e.g. "*" matching a bare name).
            if (nameInCert.length() == certIndex && periodCount > 1) {
                logSuccessMessage(nameInCert);
                return true;
            } else {
                logFailMessage(nameInCert);
                return false;
            }
        }
        // Verify that the name in certificate matches exactly with the host name
        if (!nameInCert.equals(hostName)) {
            logFailMessage(nameInCert);
            return false;
        }
        logSuccessMessage(nameInCert);
        return true;
    }

    private void logFailMessage(String nameInCert) {
        if (logger.isLoggable(Level.FINER)) {
            logger.finer(logContext + " The name in certificate " + nameInCert + " does not match with the server name " + hostName + ".");
        }
    }

    private void logSuccessMessage(String nameInCert) {
        if (logger.isLoggable(Level.FINER)) {
            logger.finer(logContext + " The name in certificate:" + nameInCert + " validated against server name " + hostName + ".");
        }
    }

    // Delegates chain validation, then explicitly re-checks validity dates.
    public void checkClientTrusted(X509Certificate[] chain, String authType) throws CertificateException {
        if (logger.isLoggable(Level.FINEST))
            logger.finest(logContext + " Forwarding ClientTrusted.");
        defaultTrustManager.checkClientTrusted(chain, authType);
        // Explicitly validate the expiry dates
        for (X509Certificate cert : chain) {
            cert.checkValidity();
        }
    }

    // Delegates chain validation, explicitly re-checks validity dates, then
    // performs hostname validation against the leaf certificate.
    public void checkServerTrusted(X509Certificate[] chain, String authType) throws CertificateException {
        if (logger.isLoggable(Level.FINEST))
            logger.finest(logContext + " Forwarding Trusting server certificate");
        defaultTrustManager.checkServerTrusted(chain, authType);
        // Explicitly validate the expiry dates
        for (X509Certificate cert : chain) {
            cert.checkValidity();
        }
if (logger.isLoggable(Level.FINEST))
            logger.finest(logContext + " default serverTrusted succeeded proceeding with server name validation");
        // chain[0] is the server's own (leaf) certificate.
        validateServerNameInCertificate(chain[0]);
    }

    // Validates the expected hostName against the certificate: first the CN from the
    // subject DN, then (if that fails) each DNS-type subjectAlternativeName entry.
    // Throws CertificateException when no name matches.
    private void validateServerNameInCertificate(X509Certificate cert) throws CertificateException {
        String nameInCertDN = cert.getSubjectX500Principal().getName("canonical");
        if (logger.isLoggable(Level.FINER)) {
            logger.finer(logContext + " Validating the server name:" + hostName);
            logger.finer(logContext + " The DN name in certificate:" + nameInCertDN);
        }
        boolean isServerNameValidated;
        String dnsNameInSANCert = "";
        // the name in cert is in RFC2253 format parse it to get the actual subject name
        String subjectCN = parseCommonName(nameInCertDN);
        isServerNameValidated = validateServerName(subjectCN);
        if (!isServerNameValidated) {
            Collection<List<?>> sanCollection = cert.getSubjectAlternativeNames();
            if (sanCollection != null) {
                // find a subjectAlternateName entry corresponding to DNS Name
                for (List<?> sanEntry : sanCollection) {
                    if (sanEntry != null && sanEntry.size() >= 2) {
                        Object key = sanEntry.get(0);
                        Object value = sanEntry.get(1);
                        if (logger.isLoggable(Level.FINER)) {
                            logger.finer(logContext + "Key: " + key + "; KeyClass:" + (key != null ? key.getClass() : null) + ";value: " + value + "; valueClass:" + (value != null ? value.getClass() : null));
                        }
                        // From
                        // "Note that the Collection returned may contain
                        // more than one name of the same type."
                        // So, more than one entry of dnsNameType can be present.
                        // Java docs guarantee that the first entry in the list will be an integer.
                        // 2 is the sequence no of a dnsName
                        if ((key != null) && (key instanceof Integer) && ((Integer) key == 2)) {
                            // As per RFC2459, the DNSName will be in the
                            // "preferred name syntax" as specified by RFC
                            // 1034 and the name can be in upper or lower case.
                            // And no significance is attached to case.
                            // Java docs guarantee that the second entry in the list
                            // will be a string for dnsName
                            if (value != null && value instanceof String) {
                                dnsNameInSANCert = (String) value;
                                // Use English locale to avoid Turkish i issues.
                                // Note that, this conversion was not necessary for
                                // cert.getSubjectX500Principal().getName("canonical");
                                // as the above API already does this by default as per documentation.
                                dnsNameInSANCert = dnsNameInSANCert.toLowerCase(Locale.ENGLISH);
                                isServerNameValidated = validateServerName(dnsNameInSANCert);
                                if (isServerNameValidated) {
                                    if (logger.isLoggable(Level.FINER)) {
                                        logger.finer(logContext + " found a valid name in certificate: " + dnsNameInSANCert);
                                    }
                                    break;
                                }
                            }
                            if (logger.isLoggable(Level.FINER)) {
                                logger.finer(logContext + " the following name in certificate does not match the serverName: " + value);
                            }
                        }
                    } else {
                        if (logger.isLoggable(Level.FINER)) {
                            logger.finer(logContext + " found an invalid san entry: " + sanEntry);
                        }
                    }
                }
            }
        }
        if (!isServerNameValidated) {
            // No CN or SAN entry matched; fail the handshake.
            MessageFormat form = new MessageFormat(SQLServerException.getErrString("R_certNameFailed"));
            Object[] msgArgs = {hostName, dnsNameInSANCert};
            throw new CertificateException(form.format(msgArgs));
        }
    }

    public X509Certificate[] getAcceptedIssuers() {
        return defaultTrustManager.getAcceptedIssuers();
    }
}

// Tracks how far SSL handshaking progressed; used to classify intermittent
// TLS 1.2 handshake failures in enableSSL's error handling.
enum SSLHandhsakeState {
    SSL_HANDHSAKE_NOT_STARTED,
    SSL_HANDHSAKE_STARTED,
    SSL_HANDHSAKE_COMPLETE
}

/**
 * Enables SSL Handshake.
*
 * @param host
 *        Server Host Name for SSL Handshake
 * @param port
 *        Server Port for SSL Handshake
 * @param clientCertificate
 *        Client certificate path
 * @param clientKey
 *        Private key file path
 * @param clientKeyPassword
 *        Private key file's password
 * @throws SQLServerException
 *         if enabling SSL fails for any reason (the connection is terminated first)
 */
void enableSSL(String host, int port, String clientCertificate, String clientKey, String clientKeyPassword) throws SQLServerException {
    // If enabling SSL fails, which it can for a number of reasons, the following items
    // are used in logging information to the TDS channel logger to help diagnose the problem.
    Provider tmfProvider = null; // TrustManagerFactory provider
    Provider sslContextProvider = null; // SSLContext provider
    Provider ksProvider = null; // KeyStore provider
    String tmfDefaultAlgorithm = null; // Default algorithm (typically X.509) used by the TrustManagerFactory
    SSLHandhsakeState handshakeState = SSLHandhsakeState.SSL_HANDHSAKE_NOT_STARTED;
    boolean isFips = false;
    String trustStoreType = null;
    String sslProtocol = null;

    // If anything in here fails, terminate the connection and throw an exception
    try {
        if (logger.isLoggable(Level.FINER))
            logger.finer(toString() + " Enabling SSL...");

        // Pull the SSL-related settings from the active connection properties.
        String trustStoreFileName = con.activeConnectionProperties.getProperty(SQLServerDriverStringProperty.TRUST_STORE.toString());
        String trustStorePassword = con.activeConnectionProperties.getProperty(SQLServerDriverStringProperty.TRUST_STORE_PASSWORD.toString());
        String hostNameInCertificate = con.activeConnectionProperties.getProperty(SQLServerDriverStringProperty.HOSTNAME_IN_CERTIFICATE.toString());
        trustStoreType = con.activeConnectionProperties.getProperty(SQLServerDriverStringProperty.TRUST_STORE_TYPE.toString());
        if (StringUtils.isEmpty(trustStoreType)) {
            trustStoreType = SQLServerDriverStringProperty.TRUST_STORE_TYPE.getDefaultValue();
        }
        isFips = Boolean.valueOf(con.activeConnectionProperties.getProperty(SQLServerDriverBooleanProperty.FIPS.toString()));
        sslProtocol = con.activeConnectionProperties.getProperty(SQLServerDriverStringProperty.SSL_PROTOCOL.toString());

        if (isFips) {
            validateFips(trustStoreType, trustStoreFileName);
        }

        assert TDS.ENCRYPT_OFF == con.getRequestedEncryptionLevel() || // Login only SSL
                TDS.ENCRYPT_ON == con.getRequestedEncryptionLevel(); // Full SSL

        assert TDS.ENCRYPT_OFF == con.getNegotiatedEncryptionLevel() || // Login only SSL
                TDS.ENCRYPT_ON == con.getNegotiatedEncryptionLevel() || // Full SSL
                TDS.ENCRYPT_REQ == con.getNegotiatedEncryptionLevel(); // Full SSL

        // If encryption wasn't negotiated or trust server certificate is specified,
        // then we'll "validate" the server certificate using a naive TrustManager that trusts
        // everything it sees.
        TrustManager[] tm = null;
        if (TDS.ENCRYPT_OFF == con.getNegotiatedEncryptionLevel() || con.trustServerCertificate()) {
            if (logger.isLoggable(Level.FINER))
                logger.finer(toString() + " SSL handshake will trust any certificate");
            tm = new TrustManager[] {new PermissiveX509TrustManager(this)};
        }
        // Otherwise, we'll check if a specific TrustManager implementation has been requested and
        // if so instantiate it, optionally specifying a constructor argument to customize it.
        else if (con.getTrustManagerClass() != null) {
            Object[] msgArgs = {"trustManagerClass", "javax.net.ssl.TrustManager"};
            tm = new TrustManager[] {Util.newInstance(TrustManager.class, con.getTrustManagerClass(), con.getTrustManagerConstructorArg(), msgArgs)};
        }
        // Otherwise, we'll validate the certificate using a real TrustManager obtained
        // from the a security provider that is capable of validating X.509 certificates.
        else {
            if (logger.isLoggable(Level.FINER))
                logger.finer(toString() + " SSL handshake will validate server certificate");
            KeyStore ks = null;
            // If we are using the system default trustStore and trustStorePassword
            // then we can skip all of the KeyStore loading logic below.
            // The security provider's implementation takes care of everything for us.
if (null == trustStoreFileName && null == trustStorePassword) {
                if (logger.isLoggable(Level.FINER))
                    logger.finer(toString() + " Using system default trust store and password");
            }
            // Otherwise either the trustStore, trustStorePassword, or both was specified.
            // In that case, we need to load up a KeyStore ourselves.
            else {
                // First, obtain an interface to a KeyStore that can load trust material
                // stored in Java Key Store (JKS) format.
                if (logger.isLoggable(Level.FINEST))
                    logger.finest(toString() + " Finding key store interface");
                ks = KeyStore.getInstance(trustStoreType);
                ksProvider = ks.getProvider();

                // Next, load up the trust store file from the specified location.
                // Note: This function returns a null InputStream if the trust store cannot
                // be loaded. This is by design. See the method comment and documentation
                // for KeyStore.load for details.
                InputStream is = loadTrustStore(trustStoreFileName);

                // Finally, load the KeyStore with the trust material (if any) from the
                // InputStream and close the stream.
                if (logger.isLoggable(Level.FINEST))
                    logger.finest(toString() + " Loading key store");
                try {
                    ks.load(is, (null == trustStorePassword) ? null : trustStorePassword.toCharArray());
                } finally {
                    // We are also done with the trust store input stream.
                    if (null != is) {
                        try {
                            is.close();
                        } catch (IOException e) {
                            // Best-effort close; a failure here does not affect the loaded KeyStore.
                            if (logger.isLoggable(Level.FINE))
                                logger.fine(toString() + " Ignoring error closing trust material InputStream...");
                        }
                    }
                }
            }

            // Either we now have a KeyStore populated with trust material or we are using the
            // default source of trust material (cacerts). Either way, we are now ready to
            // use a TrustManagerFactory to create a TrustManager that uses the trust material
            // to validate the server certificate.

            // Next step is to get a TrustManagerFactory that can produce TrustManagers
            // that understands X.509 certificates.
            TrustManagerFactory tmf = null;
            if (logger.isLoggable(Level.FINEST))
                logger.finest(toString() + " Locating X.509 trust manager factory");
            tmfDefaultAlgorithm = TrustManagerFactory.getDefaultAlgorithm();
            tmf = TrustManagerFactory.getInstance(tmfDefaultAlgorithm);
            tmfProvider = tmf.getProvider();

            // Tell the TrustManagerFactory to give us TrustManagers that we can use to
            // validate the server certificate using the trust material in the KeyStore.
            if (logger.isLoggable(Level.FINEST))
                logger.finest(toString() + " Getting trust manager");
            tmf.init(ks);
            tm = tmf.getTrustManagers();

            // if the host name in cert provided use it or use the host name Only if it is not FIPS
            if (!isFips) {
                if (null != hostNameInCertificate) {
                    tm = new TrustManager[] {new HostNameOverrideX509TrustManager(this, (X509TrustManager) tm[0], hostNameInCertificate)};
                } else {
                    tm = new TrustManager[] {new HostNameOverrideX509TrustManager(this, (X509TrustManager) tm[0], host)};
                }
            }
        } // end if (!con.trustServerCertificate())

        // Now, with a real or fake TrustManager in hand, get a context for creating a
        // SSL sockets through a SSL socket factory. We require at least TLS support.
        SSLContext sslContext = null;
        if (logger.isLoggable(Level.FINEST))
            logger.finest(toString() + " Getting TLS or better SSL context");

        // Client certificate (mutual TLS) support: only build a KeyManager when a
        // client certificate path was supplied.
        KeyManager[] km = (null != clientCertificate && clientCertificate.length() > 0) ? SQLServerCertificateUtils.getKeyManagerFromFile(clientCertificate, clientKey, clientKeyPassword) : null;
        sslContext = SSLContext.getInstance(sslProtocol);
        sslContextProvider = sslContext.getProvider();

        if (logger.isLoggable(Level.FINEST))
            logger.finest(toString() + " Initializing SSL context");
        sslContext.init(km, tm, null);

        // Got the SSL context. Now create an SSL socket over our own proxy socket
        // which we can toggle between TDS-encapsulated and raw communications.
        // Initially, the proxy is set to encapsulate the SSL handshake in TDS packets.
proxySocket = new ProxySocket(this);

        if (logger.isLoggable(Level.FINEST))
            logger.finest(toString() + " Creating SSL socket");

        // don't close proxy when SSL socket is closed
        sslSocket = (SSLSocket) sslContext.getSocketFactory().createSocket(proxySocket, host, port, false);

        // At long last, start the SSL handshake ...
        if (logger.isLoggable(Level.FINER))
            logger.finer(toString() + " Starting SSL handshake");

        // TLS 1.2 intermittent exception happens here.
        handshakeState = SSLHandhsakeState.SSL_HANDHSAKE_STARTED;
        sslSocket.startHandshake();
        handshakeState = SSLHandhsakeState.SSL_HANDHSAKE_COMPLETE;

        // After SSL handshake is complete, rewire proxy socket to use raw TCP/IP streams ...
        if (logger.isLoggable(Level.FINEST))
            logger.finest(toString() + " Rewiring proxy streams after handshake");
        proxySocket.setStreams(inputStream, outputStream);

        // ... and rewire TDSChannel to use SSL streams.
        if (logger.isLoggable(Level.FINEST))
            logger.finest(toString() + " Getting SSL InputStream");
        inputStream = new ProxyInputStream(sslSocket.getInputStream());

        if (logger.isLoggable(Level.FINEST))
            logger.finest(toString() + " Getting SSL OutputStream");
        outputStream = sslSocket.getOutputStream();

        // SSL is now enabled; switch over the channel socket
        channelSocket = sslSocket;

        // Check the TLS version and warn about deprecated protocol versions.
        String tlsProtocol = sslSocket.getSession().getProtocol();
        if (SSLProtocol.TLS_V10.toString().equalsIgnoreCase(tlsProtocol) || SSLProtocol.TLS_V11.toString().equalsIgnoreCase(tlsProtocol)) {
            String warningMsg = tlsProtocol + " was negotiated. Please update server and client to use TLSv1.2 at minimum.";
            logger.warning(warningMsg);
            con.addWarning(warningMsg);
        }

        if (logger.isLoggable(Level.FINER))
            logger.finer(toString() + " SSL enabled");
    } catch (Exception e) {
        // Log the original exception and its source at FINER level
        if (logger.isLoggable(Level.FINER))
            logger.log(Level.FINER, e.getMessage(), e);

        // If enabling SSL fails, the following information may help diagnose the problem.
        // Do not use Level INFO or above which is sent to standard output/error streams.
        // This is because due to an intermittent TLS 1.2 connection issue, we will be retrying the connection and
        // do not want to print this message in console.
        if (logger.isLoggable(Level.FINER))
            logger.log(Level.FINER, "java.security path: " + JAVA_SECURITY + "\n" + "Security providers: " + Arrays.asList(Security.getProviders()) + "\n" + ((null != sslContextProvider) ? ("SSLContext provider info: " + sslContextProvider.getInfo() + "\n" + "SSLContext provider services:\n" + sslContextProvider.getServices() + "\n") : "") + ((null != tmfProvider) ? ("TrustManagerFactory provider info: " + tmfProvider.getInfo() + "\n") : "") + ((null != tmfDefaultAlgorithm) ? ("TrustManagerFactory default algorithm: " + tmfDefaultAlgorithm + "\n") : "") + ((null != ksProvider) ? ("KeyStore provider info: " + ksProvider.getInfo() + "\n") : "") + "java.ext.dirs: " + System.getProperty("java.ext.dirs"));

        // Retrieve the localized error message if possible.
        String localizedMessage = e.getLocalizedMessage();
        String errMsg = (localizedMessage != null) ? localizedMessage : e.getMessage();
        /*
         * Retrieve the error message of the cause too because actual error message can be wrapped into a different
         * message when re-thrown from underlying InputStream.
         */
        String causeErrMsg = null;
        Throwable cause = e.getCause();
        if (cause != null) {
            String causeLocalizedMessage = cause.getLocalizedMessage();
            causeErrMsg = (causeLocalizedMessage != null) ? causeLocalizedMessage : cause.getMessage();
        }

        MessageFormat form = new MessageFormat(SQLServerException.getErrString("R_sslFailed"));
        Object[] msgArgs = {errMsg};

        /*
         * The error message may have a connection id appended to it. Extract the message only for comparison. This
         * client connection id is appended in method checkAndAppendClientConnId().
*/
        if (errMsg != null && errMsg.contains(SQLServerException.LOG_CLIENT_CONNECTION_ID_PREFIX)) {
            errMsg = errMsg.substring(0, errMsg.indexOf(SQLServerException.LOG_CLIENT_CONNECTION_ID_PREFIX));
        }
        if (causeErrMsg != null && causeErrMsg.contains(SQLServerException.LOG_CLIENT_CONNECTION_ID_PREFIX)) {
            causeErrMsg = causeErrMsg.substring(0, causeErrMsg.indexOf(SQLServerException.LOG_CLIENT_CONNECTION_ID_PREFIX));
        }

        // Isolate the TLS1.2 intermittent connection error.
        // Only treated as "intermittent" when the handshake had started (not merely
        // been set up) and the failure is the known truncated-response IOException.
        if (e instanceof IOException && (SSLHandhsakeState.SSL_HANDHSAKE_STARTED == handshakeState) && (SQLServerException.getErrString("R_truncatedServerResponse").equals(errMsg) || SQLServerException.getErrString("R_truncatedServerResponse").equals(causeErrMsg))) {
            con.terminate(SQLServerException.DRIVER_ERROR_INTERMITTENT_TLS_FAILED, form.format(msgArgs), e);
        } else {
            con.terminate(SQLServerException.DRIVER_ERROR_SSL_FAILED, form.format(msgArgs), e);
        }
    }
}

/**
 * Validate FIPS if fips set as true
 *
 * Valid FIPS settings:
 * <LI>Encrypt should be true
 * <LI>trustServerCertificate should be false
 * <LI>if certificate is not installed TrustStoreType should be present.
 *
 * @param trustStoreType
 *        trust store type in use (must be non-empty when a trust store file is specified)
 * @param trustStoreFileName
 *        path of the trust store file, if any
 * @throws SQLServerException
 *         if the FIPS configuration is invalid
 * @since 6.1.4
 */
private void validateFips(final String trustStoreType, final String trustStoreFileName) throws SQLServerException {
    boolean isValid = false;
    boolean isEncryptOn;
    boolean isValidTrustStoreType;
    boolean isValidTrustStore;
    boolean isTrustServerCertificate;
    String strError = SQLServerException.getErrString("R_invalidFipsConfig");

    isEncryptOn = (TDS.ENCRYPT_ON == con.getRequestedEncryptionLevel());
    isValidTrustStoreType = !StringUtils.isEmpty(trustStoreType);
    isValidTrustStore = !StringUtils.isEmpty(trustStoreFileName);
    isTrustServerCertificate = con.trustServerCertificate();

    // FIPS requires encryption and full certificate validation.
    if (isEncryptOn && !isTrustServerCertificate) {
        isValid = true;
        if (isValidTrustStore && !isValidTrustStoreType) {
            // In case of valid trust store we need to check TrustStoreType.
            isValid = false;
            if (logger.isLoggable(Level.FINER))
                logger.finer(toString() + "TrustStoreType is required alongside with TrustStore.");
        }
    }
    if (!isValid) {
        throw new SQLServerException(strError, null, 0, null);
    }
}

// Well-known filesystem locations of the JRE's default trust stores.
private final static String SEPARATOR = System.getProperty("file.separator");
private final static String JAVA_HOME = System.getProperty("java.home");
private final static String JAVA_SECURITY = JAVA_HOME + SEPARATOR + "lib" + SEPARATOR + "security";
private final static String JSSECACERTS = JAVA_SECURITY + SEPARATOR + "jssecacerts";
private final static String CACERTS = JAVA_SECURITY + SEPARATOR + "cacerts";

/**
 * Loads the contents of a trust store into an InputStream.
 *
 * When a location to a trust store is specified, this method attempts to load that store. Otherwise, it looks for
 * and attempts to load the default trust store using essentially the same logic (outlined in the JSSE Reference
 * Guide) as the default X.509 TrustManagerFactory.
 *
 * @return an InputStream containing the contents of the loaded trust store
 * @return null if the trust store cannot be loaded.
 *
 *         Note: It is by design that this function returns null when the trust store cannot be loaded rather than
 *         throwing an exception. The reason is that KeyStore.load, which uses the returned InputStream, interprets
 *         a null InputStream to mean that there are no trusted certificates, which mirrors the behavior of the
 *         default (no trust store, no password specified) path.
*/
final InputStream loadTrustStore(String trustStoreFileName) {
    FileInputStream is = null;

    // First case: Trust store filename was specified
    if (null != trustStoreFileName) {
        try {
            if (logger.isLoggable(Level.FINEST))
                logger.finest(toString() + " Opening specified trust store: " + trustStoreFileName);
            is = new FileInputStream(trustStoreFileName);
        } catch (FileNotFoundException e) {
            if (logger.isLoggable(Level.FINE))
                logger.fine(toString() + " Trust store not found: " + e.getMessage());
            // If the trustStoreFileName connection property is set, but the file is not found,
            // then treat it as if the file was empty so that the TrustManager reports
            // that no certificate is found.
        }
    }
    // Second case: Trust store filename derived from javax.net.ssl.trustStore system property
    else if (null != (trustStoreFileName = System.getProperty("javax.net.ssl.trustStore"))) {
        try {
            if (logger.isLoggable(Level.FINEST))
                logger.finest(toString() + " Opening default trust store (from javax.net.ssl.trustStore): " + trustStoreFileName);
            is = new FileInputStream(trustStoreFileName);
        } catch (FileNotFoundException e) {
            if (logger.isLoggable(Level.FINE))
                logger.fine(toString() + " Trust store not found: " + e.getMessage());
            // If the javax.net.ssl.trustStore property is set, but the file is not found,
            // then treat it as if the file was empty so that the TrustManager reports
            // that no certificate is found.
        }
    }
    // Third case: No trust store specified and no system property set. Use jssecerts/cacerts.
    else {
        try {
            if (logger.isLoggable(Level.FINEST))
                logger.finest(toString() + " Opening default trust store: " + JSSECACERTS);
            is = new FileInputStream(JSSECACERTS);
        } catch (FileNotFoundException e) {
            if (logger.isLoggable(Level.FINE))
                logger.fine(toString() + " Trust store not found: " + e.getMessage());
        }

        // No jssecerts. Try again with cacerts...
        if (null == is) {
            try {
                if (logger.isLoggable(Level.FINEST))
                    logger.finest(toString() + " Opening default trust store: " + CACERTS);
                is = new FileInputStream(CACERTS);
            } catch (FileNotFoundException e) {
                if (logger.isLoggable(Level.FINE))
                    logger.fine(toString() + " Trust store not found: " + e.getMessage());
                // No jssecerts or cacerts. Treat it as if the trust store is empty so that
                // the TrustManager reports that no certificate is found.
            }
        }
    }
    return is;
}

/**
 * Attempts to poll the input stream to see if the network socket is still connected.
 *
 * @return true if a 1ms poll of the input stream succeeds; false on any socket error
 */
final Boolean networkSocketStillConnected() {
    int origSoTimeout;
    // Lock both streams so the probe does not interleave with regular channel I/O.
    synchronized (inputStream) {
        synchronized (outputStream) {
            if (logger.isLoggable(Level.FINEST)) {
                logger.finest(toString() + "(networkSocketStillConnected) Checking for socket disconnect.");
            }
            try {
                // Remember the current timeout so it can be restored after the probe.
                origSoTimeout = channelSocket.getSoTimeout();
            } catch (SocketException e) {
                if (logger.isLoggable(Level.FINE)) {
                    logger.fine(toString() + "(networkSocketStillConnected) channelSocket.getSoTimeout() failed. Unable to poll connection:" + e.getMessage());
                }
                return false;
            }
            try {
                // Poll with a minimal (1ms) timeout, then restore the original timeout.
                channelSocket.setSoTimeout(1);
                boolean pollResult = inputStream.poll();
                channelSocket.setSoTimeout(origSoTimeout);
                if (logger.isLoggable(Level.FINEST)) {
                    if (pollResult) {
                        logger.finest(toString() + "(networkSocketStillConnected) Network still connected.");
                    } else {
                        logger.finest(toString() + "(networkSocketStillConnected) Network disconnected:");
                    }
                }
                return pollResult;
            } catch (SocketException se) {
                // Should never get here since the first one would have failed.
if (logger.isLoggable(Level.FINE)) {
                    logger.fine(toString() + "(networkSocketStillConnected) getSoTimeout failed:" + se.getMessage());
                }
                return false;
            }
        }
    }
}

// Reads up to length bytes from the channel's (possibly SSL) input stream into data
// at offset. Marks network activity for idle tracking. On I/O failure the connection
// is terminated via con.terminate (which throws SQLServerException).
final int read(byte[] data, int offset, int length) throws SQLServerException {
    try {
        synchronized (inputStream) {
            con.idleNetworkTracker.markNetworkActivity();
            return inputStream.read(data, offset, length);
        }
    } catch (IOException e) {
        if (logger.isLoggable(Level.FINE))
            logger.fine(toString() + " read failed:" + e.getMessage());

        // Distinguish read timeouts from other I/O failures when terminating.
        if (e instanceof SocketTimeoutException) {
            con.terminate(SQLServerException.ERROR_SOCKET_TIMEOUT, e.getMessage(), e);
        } else {
            con.terminate(SQLServerException.DRIVER_ERROR_IO_FAILED, e.getMessage(), e);
        }

        return 0; // Keep the compiler happy.
    }
}

// Writes length bytes from data at offset to the channel's output stream,
// terminating the connection on I/O failure.
final void write(byte[] data, int offset, int length) throws SQLServerException {
    try {
        synchronized (outputStream) {
            con.idleNetworkTracker.markNetworkActivity();
            outputStream.write(data, offset, length);
        }
    } catch (IOException e) {
        if (logger.isLoggable(Level.FINER))
            logger.finer(toString() + " write failed:" + e.getMessage());

        con.terminate(SQLServerException.DRIVER_ERROR_IO_FAILED, e.getMessage(), e);
    }
}

// Flushes the channel's output stream, terminating the connection on I/O failure.
final void flush() throws SQLServerException {
    try {
        con.idleNetworkTracker.markNetworkActivity();
        outputStream.flush();
    } catch (IOException e) {
        if (logger.isLoggable(Level.FINER))
            logger.finer(toString() + " flush failed:" + e.getMessage());

        con.terminate(SQLServerException.DRIVER_ERROR_IO_FAILED, e.getMessage(), e);
    }
}

// Closes the channel: disables SSL first (if active), then closes the streams and
// finally the TCP socket. Errors during close are logged and ignored.
final void close() {
    if (null != sslSocket)
        disableSSL();

    if (null != inputStream) {
        if (logger.isLoggable(Level.FINEST))
            logger.finest(this.toString() + ": Closing inputStream...");
        try {
            inputStream.close();
        } catch (IOException e) {
            if (logger.isLoggable(Level.FINE))
                logger.log(Level.FINE, this.toString() + ": Ignored error closing inputStream", e);
        }
    }

    if (null != outputStream) {
        if (logger.isLoggable(Level.FINEST))
            logger.finest(this.toString() + ": Closing outputStream...");
        try {
            outputStream.close();
        } catch
(IOException e) { if (logger.isLoggable(Level.FINE)) logger.log(Level.FINE, this.toString() + ": Ignored error closing outputStream", e); } } if (null != tcpSocket) { if (logger.isLoggable(Level.FINER)) logger.finer(this.toString() + ": Closing TCP socket..."); try { tcpSocket.close(); } catch (IOException e) { if (logger.isLoggable(Level.FINE)) logger.log(Level.FINE, this.toString() + ": Ignored error closing socket", e); } } } /** * Logs TDS packet data to the com.microsoft.sqlserver.jdbc.TDS.DATA logger * * @param data * the buffer containing the TDS packet payload data to log * @param nStartOffset * offset into the above buffer from where to start logging * @param nLength * length (in bytes) of payload * @param messageDetail * other loggable details about the payload */ void logPacket(byte data[], int nStartOffset, int nLength, String messageDetail) { assert 0 <= nLength && nLength <= data.length; assert 0 <= nStartOffset && nStartOffset <= data.length; final char hexChars[] = {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F'}; final char printableChars[] = {'.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', ' ', '!', '\"', ' '$', '%', '&', '\'', '(', ')', '*', '+', ',', '-', '.', '/', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', ':', ';', '<', '=', '>', '?', '@', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', '[', '\\', ']', '^', '_', '`', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', '{', '|', '}', '~', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', 
'.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.'}; // Log message body lines have this form: // "XX XX XX XX XX XX XX XX XX XX XX XX XX XX XX XX ................" // 012345678911111111112222222222333333333344444444445555555555666666 // 01234567890123456789012345678901234567890123456789012345 final char lineTemplate[] = {' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.'}; char logLine[] = new char[lineTemplate.length]; System.arraycopy(lineTemplate, 0, logLine, 0, lineTemplate.length); // Logging builds up a string buffer for the entire log trace // before writing it out. So use an initial size large enough // that the buffer doesn't have to resize itself. StringBuilder logMsg = new StringBuilder(messageDetail.length() + // Message detail 4 * nLength + // 2-digit hex + space + ASCII, per byte 4 * (1 + nLength / 16) + // 2 extra spaces + CR/LF, per line (16 bytes per line) 80); // Extra fluff: IP:Port, Connection #, SPID, ... // Format the headline like so: // /157.55.121.182:2983 Connection 1, SPID 53, Message info here ... // Note: the log formatter itself timestamps what we write so we don't have // to do it again here. 
logMsg.append(tcpSocket.getLocalAddress().toString()).append(":").append(tcpSocket.getLocalPort()) .append(" SPID:").append(spid).append(" ").append(messageDetail).append("\r\n"); // Fill in the body of the log message, line by line, 16 bytes per line. int nBytesLogged = 0; int nBytesThisLine; while (true) { // Fill up the line with as many bytes as we can (up to 16 bytes) for (nBytesThisLine = 0; nBytesThisLine < 16 && nBytesLogged < nLength; nBytesThisLine++, nBytesLogged++) { int nUnsignedByteVal = (data[nStartOffset + nBytesLogged] + 256) % 256; logLine[3 * nBytesThisLine] = hexChars[nUnsignedByteVal / 16]; logLine[3 * nBytesThisLine + 1] = hexChars[nUnsignedByteVal % 16]; logLine[50 + nBytesThisLine] = printableChars[nUnsignedByteVal]; } // Pad out the remainder with whitespace for (int nBytesJustified = nBytesThisLine; nBytesJustified < 16; nBytesJustified++) { logLine[3 * nBytesJustified] = ' '; logLine[3 * nBytesJustified + 1] = ' '; } logMsg.append(logLine, 0, 50 + nBytesThisLine); if (nBytesLogged == nLength) break; logMsg.append("\r\n"); } if (packetLogger.isLoggable(Level.FINEST)) { packetLogger.finest(logMsg.toString()); } } /** * Get the current socket SO_TIMEOUT value. * * @return the current socket timeout value * @throws IOException * thrown if the socket timeout cannot be read */ final int getNetworkTimeout() throws IOException { return tcpSocket.getSoTimeout(); } /** * Set the socket SO_TIMEOUT value. * * @param timeout * the socket timeout in milliseconds * @throws IOException * thrown if the socket timeout cannot be set */ final void setNetworkTimeout(int timeout) throws IOException { tcpSocket.setSoTimeout(timeout); } } /** * SocketFinder is used to find a server socket to which a connection can be made. This class abstracts the logic of * finding a socket from TDSChannel class. * * In the case when useParallel is set to true, this is achieved by trying to make parallel connections to multiple IP * addresses. 
This class is responsible for spawning multiple threads and keeping track of the search result and the * connected socket or exception to be thrown. * * In the case where multiSubnetFailover is false, we try our old logic of trying to connect to the first ip address * * Typical usage of this class is SocketFinder sf = new SocketFinder(traceId, conn); Socket = sf.getSocket(hostName, * port, timeout); */ final class SocketFinder { /** * Indicates the result of a search */ enum Result { UNKNOWN, // search is still in progress SUCCESS, // found a socket FAILURE// failed in finding a socket } // Thread pool - the values in the constructor are chosen based on the // explanation given in design_connection_director_multisubnet.doc private static final ThreadPoolExecutor threadPoolExecutor = new ThreadPoolExecutor(0, Integer.MAX_VALUE, 5, TimeUnit.SECONDS, new SynchronousQueue<Runnable>()); // When parallel connections are to be used, use minimum timeout slice of 1500 milliseconds. private static final int minTimeoutForParallelConnections = 1500; // lock used for synchronization while updating // data within a socketFinder object private final Object socketFinderlock = new Object(); // lock on which the parent thread would wait // after spawning threads. 
private final Object parentThreadLock = new Object(); // indicates whether the socketFinder has succeeded or failed // in finding a socket or is still trying to find a socket private volatile Result result = Result.UNKNOWN; // total no of socket connector threads // spawned by a socketFinder object private int noOfSpawnedThreads = 0; // no of threads that finished their socket connection // attempts and notified socketFinder about their result private int noOfThreadsThatNotified = 0; // If a valid connected socket is found, this value would be non-null, // else this would be null private volatile Socket selectedSocket = null; // This would be one of the exceptions returned by the // socketConnector threads private volatile IOException selectedException = null; // Logging variables private static final Logger logger = Logger.getLogger("com.microsoft.sqlserver.jdbc.internals.SocketFinder"); private final String traceID; // maximum number of IP Addresses supported private static final int ipAddressLimit = 64; // necessary for raising exceptions so that the connection pool can be notified private final SQLServerConnection conn; /** * Constructs a new SocketFinder object with appropriate traceId * * @param callerTraceID * traceID of the caller * @param sqlServerConnection * the SQLServer connection */ SocketFinder(String callerTraceID, SQLServerConnection sqlServerConnection) { traceID = "SocketFinder(" + callerTraceID + ")"; conn = sqlServerConnection; } /** * Used to find a socket to which a connection can be made * * @param hostName * @param portNumber * @param timeoutInMilliSeconds * @return connected socket * @throws IOException */ Socket findSocket(String hostName, int portNumber, int timeoutInMilliSeconds, boolean useParallel, boolean useTnir, boolean isTnirFirstAttempt, int timeoutInMilliSecondsForFullTimeout) throws SQLServerException { assert timeoutInMilliSeconds != 0 : "The driver does not allow a time out of 0"; try { InetAddress[] inetAddrs = null; if 
(!useParallel) { // MSF is false. TNIR could be true or false. DBMirroring could be true or false. // For TNIR first attempt, we should do existing behavior including how host name is resolved. if (useTnir && isTnirFirstAttempt) { return getDefaultSocket(hostName, portNumber, SQLServerConnection.TnirFirstAttemptTimeoutMs); } else if (!useTnir) { return getDefaultSocket(hostName, portNumber, timeoutInMilliSeconds); } } // inetAddrs is only used if useParallel is true or TNIR is true. Skip resolving address if that's not the // case. if (useParallel || useTnir) { // Ignore TNIR if host resolves to more than 64 IPs. Make sure we are using original timeout for this. inetAddrs = InetAddress.getAllByName(hostName); if ((useTnir) && (inetAddrs.length > ipAddressLimit)) { useTnir = false; timeoutInMilliSeconds = timeoutInMilliSecondsForFullTimeout; } } // Code reaches here only if MSF = true or (TNIR = true and not TNIR first attempt) if (logger.isLoggable(Level.FINER)) { StringBuilder loggingString = new StringBuilder(this.toString()); loggingString.append(" Total no of InetAddresses: "); loggingString.append(inetAddrs.length); loggingString.append(". They are: "); for (InetAddress inetAddr : inetAddrs) { loggingString.append(inetAddr.toString()).append(";"); } logger.finer(loggingString.toString()); } if (inetAddrs.length > ipAddressLimit) { MessageFormat form = new MessageFormat( SQLServerException.getErrString("R_ipAddressLimitWithMultiSubnetFailover")); Object[] msgArgs = {Integer.toString(ipAddressLimit)}; String errorStr = form.format(msgArgs); // we do not want any retry to happen here. So, terminate the connection // as the config is unsupported. 
conn.terminate(SQLServerException.DRIVER_ERROR_UNSUPPORTED_CONFIG, errorStr); } if (inetAddrs.length == 1) { // Single address so do not start any threads return getConnectedSocket(inetAddrs[0], portNumber, timeoutInMilliSeconds); } timeoutInMilliSeconds = Math.max(timeoutInMilliSeconds, minTimeoutForParallelConnections); if (Util.isIBM()) { if (logger.isLoggable(Level.FINER)) { logger.finer(this.toString() + "Using Java NIO with timeout:" + timeoutInMilliSeconds); } findSocketUsingJavaNIO(inetAddrs, portNumber, timeoutInMilliSeconds); } else { if (logger.isLoggable(Level.FINER)) { logger.finer(this.toString() + "Using Threading with timeout:" + timeoutInMilliSeconds); } findSocketUsingThreading(inetAddrs, portNumber, timeoutInMilliSeconds); } // If the thread continued execution due to timeout, the result may not be known. // In that case, update the result to failure. Note that this case is possible // for both IPv4 and IPv6. // Using double-checked locking for performance reasons. if (result.equals(Result.UNKNOWN)) { synchronized (socketFinderlock) { if (result.equals(Result.UNKNOWN)) { result = Result.FAILURE; if (logger.isLoggable(Level.FINER)) { logger.finer(this.toString() + " The parent thread updated the result to failure"); } } } } // After we reach this point, there is no need for synchronization any more. // Because, the result would be known(success/failure). // And no threads would update SocketFinder // as their function calls would now be no-ops. if (result.equals(Result.FAILURE)) { if (selectedException == null) { if (logger.isLoggable(Level.FINER)) { logger.finer(this.toString() + " There is no selectedException. 
The wait calls timed out before any connect call returned or timed out."); } String message = SQLServerException.getErrString("R_connectionTimedOut"); selectedException = new IOException(message); } throw selectedException; } } catch (InterruptedException ex) { // re-interrupt the current thread, in order to restore the thread's interrupt status. Thread.currentThread().interrupt(); close(selectedSocket); SQLServerException.ConvertConnectExceptionToSQLServerException(hostName, portNumber, conn, ex); } catch (IOException ex) { close(selectedSocket); // The code below has been moved from connectHelper. // If we do not move it, the functions open(caller of findSocket) // and findSocket will have to // declare both IOException and SQLServerException in the throws clause // as we throw custom SQLServerExceptions(eg:IPAddressLimit, wrapping other exceptions // like interruptedException) in findSocket. // That would be a bit awkward, because connecthelper(the caller of open) // just wraps IOException into SQLServerException and throws SQLServerException. // Instead, it would be good to wrap all exceptions at one place - Right here, their origin. SQLServerException.ConvertConnectExceptionToSQLServerException(hostName, portNumber, conn, ex); } assert result.equals(Result.SUCCESS); assert selectedSocket != null : "Bug in code. Selected Socket cannot be null here."; return selectedSocket; } /** * This function uses java NIO to connect to all the addresses in inetAddrs with in a specified timeout. If it * succeeds in connecting, it closes all the other open sockets and updates the result to success. * * @param inetAddrs * the array of inetAddress to which connection should be made * @param portNumber * the port number at which connection should be made * @param timeoutInMilliSeconds * @throws IOException */ private void findSocketUsingJavaNIO(InetAddress[] inetAddrs, int portNumber, int timeoutInMilliSeconds) throws IOException { // The driver does not allow a time out of zero. 
// Also, the unit of time the user can specify in the driver is seconds. // So, even if the user specifies 1 second(least value), the least possible // value that can come here as timeoutInMilliSeconds is 500 milliseconds. assert timeoutInMilliSeconds != 0 : "The timeout cannot be zero"; assert inetAddrs.length != 0 : "Number of inetAddresses should not be zero in this function"; Selector selector = null; LinkedList<SocketChannel> socketChannels = new LinkedList<>(); SocketChannel selectedChannel = null; try { selector = Selector.open(); for (InetAddress inetAddr : inetAddrs) { SocketChannel sChannel = SocketChannel.open(); socketChannels.add(sChannel); // make the channel non-blocking sChannel.configureBlocking(false); // register the channel for connect event @SuppressWarnings("unused") int ops = SelectionKey.OP_CONNECT; SelectionKey key = sChannel.register(selector, ops); sChannel.connect(new InetSocketAddress(inetAddr, portNumber)); if (logger.isLoggable(Level.FINER)) logger.finer(this.toString() + " initiated connection to address: " + inetAddr + ", portNumber: " + portNumber); } long timerNow = System.currentTimeMillis(); long timerExpire = timerNow + timeoutInMilliSeconds; // Denotes the no of channels that still need to processed int noOfOutstandingChannels = inetAddrs.length; while (true) { long timeRemaining = timerExpire - timerNow; // if the timeout expired or a channel is selected or there are no more channels left to processes if ((timeRemaining <= 0) || (selectedChannel != null) || (noOfOutstandingChannels <= 0)) break; // denotes the no of channels that are ready to be processed. i.e. they are either connected // or encountered an exception while trying to connect int readyChannels = selector.select(timeRemaining); if (logger.isLoggable(Level.FINER)) logger.finer(this.toString() + " no of channels ready: " + readyChannels); // There are no real time guarantees on the time out of the select API used above. 
// This check is necessary // a) to guard against cases where the select returns faster than expected. // b) for cases where no channels could connect with in the time out if (readyChannels != 0) { Set<SelectionKey> selectedKeys = selector.selectedKeys(); Iterator<SelectionKey> keyIterator = selectedKeys.iterator(); while (keyIterator.hasNext()) { SelectionKey key = keyIterator.next(); SocketChannel ch = (SocketChannel) key.channel(); if (logger.isLoggable(Level.FINER)) logger.finer(this.toString() + " processing the channel :" + ch);// this traces the IP by // default boolean connected = false; try { connected = ch.finishConnect(); // ch.finishConnect should either return true or throw an exception // as we have subscribed for OP_CONNECT. assert connected : "finishConnect on channel:" + ch + " cannot be false"; selectedChannel = ch; if (logger.isLoggable(Level.FINER)) logger.finer(this.toString() + " selected the channel :" + selectedChannel); break; } catch (IOException ex) { if (logger.isLoggable(Level.FINER)) logger.finer(this.toString() + " the exception: " + ex.getClass() + " with message: " + ex.getMessage() + " occurred while processing the channel: " + ch); updateSelectedException(ex, this.toString()); // close the channel pro-actively so that we do not // rely to network resources ch.close(); } // unregister the key and remove from the selector's selectedKeys key.cancel(); keyIterator.remove(); noOfOutstandingChannels } } timerNow = System.currentTimeMillis(); } } catch (IOException ex) { // in case of an exception, close the selected channel. // All other channels will be closed in the finally block, // as they need to be closed irrespective of a success/failure close(selectedChannel); throw ex; } finally { // close the selector // As per java docs, on selector.close(), any uncancelled keys still // associated with this // selector are invalidated, their channels are deregistered, and any other // resources associated with this selector are released. 
// So, its not necessary to cancel each key again close(selector); // Close all channels except the selected one. // As we close channels pro-actively in the try block, // its possible that we close a channel twice. // Closing a channel second time is a no-op. // This code is should be in the finally block to guard against cases where // we pre-maturely exit try block due to an exception in selector or other places. for (SocketChannel s : socketChannels) { if (s != selectedChannel) { close(s); } } } // if a channel was selected, make the necessary updates if (selectedChannel != null) { // Note that this must be done after selector is closed. Otherwise, selectedChannel.configureBlocking(true); selectedSocket = selectedChannel.socket(); result = Result.SUCCESS; } } private SocketFactory socketFactory = null; private SocketFactory getSocketFactory() throws IOException { if (socketFactory == null) { String socketFactoryClass = conn.getSocketFactoryClass(); if (socketFactoryClass == null) { socketFactory = SocketFactory.getDefault(); } else { String socketFactoryConstructorArg = conn.getSocketFactoryConstructorArg(); try { Object[] msgArgs = {"socketFactoryClass", "javax.net.SocketFactory"}; socketFactory = Util.newInstance(SocketFactory.class, socketFactoryClass, socketFactoryConstructorArg, msgArgs); } catch (RuntimeException e) { throw e; } catch (Exception e) { throw new IOException(e); } } } return socketFactory; } // This method contains the old logic of connecting to // a socket of one of the IPs corresponding to a given host name. // In the old code below, the logic around 0 timeout has been removed as // 0 timeout is not allowed. The code has been re-factored so that the logic // is common for hostName or InetAddress. 
    private Socket getDefaultSocket(String hostName, int portNumber, int timeoutInMilliSeconds) throws IOException {
        // Open the socket, with or without a timeout, throwing an UnknownHostException
        // if there is a failure to resolve the host name to an InetSocketAddress.
        //
        // Note that Socket(host, port) throws an UnknownHostException if the host name
        // cannot be resolved, but that InetSocketAddress(host, port) does not - it sets
        // the returned InetSocketAddress as unresolved.
        InetSocketAddress addr = new InetSocketAddress(hostName, portNumber);
        if (addr.isUnresolved()) {
            if (logger.isLoggable(Level.FINER)) {
                logger.finer(this.toString() + "Failed to resolve host name: " + hostName
                        + ". Using IP address from DNS cache.");
            }
            // Fall back to a previously cached DNS entry for this host, if one exists.
            InetSocketAddress cacheEntry = SQLServerConnection.getDNSEntry(hostName);
            addr = (null != cacheEntry) ? cacheEntry : addr;
        }
        return getConnectedSocket(addr, timeoutInMilliSeconds);
    }

    // Convenience overload: wraps the already-resolved InetAddress and delegates.
    private Socket getConnectedSocket(InetAddress inetAddr, int portNumber,
            int timeoutInMilliSeconds) throws IOException {
        InetSocketAddress addr = new InetSocketAddress(inetAddr, portNumber);
        return getConnectedSocket(addr, timeoutInMilliSeconds);
    }

    private Socket getConnectedSocket(InetSocketAddress addr, int timeoutInMilliSeconds) throws IOException {
        assert timeoutInMilliSeconds != 0 : "timeout cannot be zero";
        if (addr.isUnresolved())
            throw new java.net.UnknownHostException();
        // selectedSocket is assigned before connect() so that findSocket's catch
        // blocks can close it if the connect attempt fails or is interrupted.
        selectedSocket = getSocketFactory().createSocket();
        if (!selectedSocket.isConnected()) {
            selectedSocket.connect(addr, timeoutInMilliSeconds);
        }
        return selectedSocket;
    }

    /**
     * Spawns one SocketConnector per address on the shared thread pool, then waits on
     * parentThreadLock until a connector reports a result or the timeout elapses.
     * On success, the winning connector has stored its socket in selectedSocket.
     */
    private void findSocketUsingThreading(InetAddress[] inetAddrs, int portNumber,
            int timeoutInMilliSeconds) throws IOException, InterruptedException {
        assert timeoutInMilliSeconds != 0 : "The timeout cannot be zero";
        assert inetAddrs.length != 0 : "Number of inetAddresses should not be zero in this function";

        LinkedList<Socket> sockets = new LinkedList<>();
        LinkedList<SocketConnector> socketConnectors = new LinkedList<>();

        try {
            // create a socket, inetSocketAddress and a corresponding socketConnector per inetAddress
            noOfSpawnedThreads = inetAddrs.length;
            for (InetAddress inetAddress : inetAddrs) {
                Socket s = getSocketFactory().createSocket();
                sockets.add(s);

                InetSocketAddress inetSocketAddress = new InetSocketAddress(inetAddress, portNumber);

                SocketConnector socketConnector = new SocketConnector(s, inetSocketAddress, timeoutInMilliSeconds,
                        this);
                socketConnectors.add(socketConnector);
            }

            // acquire parent lock and spawn all threads
            synchronized (parentThreadLock) {
                for (SocketConnector sc : socketConnectors) {
                    threadPoolExecutor.execute(sc);
                }

                long timerNow = System.currentTimeMillis();
                long timerExpire = timerNow + timeoutInMilliSeconds;

                // The below loop is to guard against the spurious wake up problem
                while (true) {
                    long timeRemaining = timerExpire - timerNow;

                    if (logger.isLoggable(Level.FINER)) {
                        logger.finer(this.toString() + " TimeRemaining:" + timeRemaining + "; Result:" + result
                                + "; Max. open thread count: " + threadPoolExecutor.getLargestPoolSize()
                                + "; Current open thread count:" + threadPoolExecutor.getActiveCount());
                    }

                    // if there is no time left or if the result is determined, break.
                    // Note that a dirty read of result is totally fine here.
                    // Since this thread holds the parentThreadLock, even if we do a dirty
                    // read here, the child thread, after updating the result, would not be
                    // able to call notify on the parentThreadLock
                    // (and thus finish execution) as it would be waiting on parentThreadLock
                    // held by this thread(the parent thread).
                    // So, this thread will wait again and then be notified by the childThread.
                    // On the other hand, if we try to take socketFinderLock here to avoid
                    // dirty read, we would introduce a dead lock due to the
                    // reverse order of locking in updateResult method.
                    if (timeRemaining <= 0 || (!result.equals(Result.UNKNOWN)))
                        break;

                    parentThreadLock.wait(timeRemaining);

                    if (logger.isLoggable(Level.FINER)) {
                        logger.finer(this.toString() + " The parent thread wokeup.");
                    }

                    timerNow = System.currentTimeMillis();
                }
            }
        } finally {
            // Close all sockets except the selected one.
            // As we close sockets pro-actively in the child threads,
            // its possible that we close a socket twice.
            // Closing a socket second time is a no-op.
            // If a child thread is waiting on the connect call on a socket s,
            // closing the socket s here ensures that an exception is thrown
            // in the child thread immediately. This mitigates the problem
            // of thread explosion by ensuring that unnecessary threads die
            // quickly without waiting for "min(timeOut, 21)" seconds
            for (Socket s : sockets) {
                if (s != selectedSocket) {
                    close(s);
                }
            }
        }

        if (selectedSocket != null) {
            result = Result.SUCCESS;
        }
    }

    /**
     * search result.
     * Note: reads the volatile result field; callers deliberately tolerate a
     * dirty read here (see the comments in findSocketUsingThreading and
     * SocketConnector.run).
     */
    Result getResult() {
        return result;
    }

    // Best-effort close of a Selector: any IOException is logged and swallowed.
    void close(Selector selector) {
        if (null != selector) {
            if (logger.isLoggable(Level.FINER))
                logger.finer(this.toString() + ": Closing Selector");

            try {
                selector.close();
            } catch (IOException e) {
                if (logger.isLoggable(Level.FINE))
                    logger.log(Level.FINE, this.toString() + ": Ignored the following error while closing Selector",
                            e);
            }
        }
    }

    // Best-effort close of a Socket: any IOException is logged and swallowed.
    void close(Socket socket) {
        if (null != socket) {
            if (logger.isLoggable(Level.FINER))
                logger.finer(this.toString() + ": Closing TCP socket:" + socket);

            try {
                socket.close();
            } catch (IOException e) {
                if (logger.isLoggable(Level.FINE))
                    logger.log(Level.FINE, this.toString() + ": Ignored the following error while closing socket", e);
            }
        }
    }

    // Best-effort close of a SocketChannel: any IOException is logged and swallowed.
    void close(SocketChannel socketChannel) {
        if (null != socketChannel) {
            if (logger.isLoggable(Level.FINER))
                logger.finer(this.toString() + ": Closing TCP socket channel:" + socketChannel);

            try {
                socketChannel.close();
            } catch (IOException e) {
                if (logger.isLoggable(Level.FINE))
                    logger.log(Level.FINE, this.toString() + "Ignored the following error while closing socketChannel",
                            e);
            }
        }
    }

    /**
     * Used by socketConnector threads to notify the socketFinder of their connection attempt result(a connected socket
     * or exception). It updates the result, socket and exception variables of socketFinder object. This method notifies
     * the parent thread if a socket is found or if all the spawned threads have notified. It also closes a socket if it
     * is not selected for use by socketFinder.
     *
     * @param socket
     *        the SocketConnector's socket
     * @param exception
     *        Exception that occurred in socket connector thread
     * @param threadId
     *        Id of the calling Thread for diagnosis
     */
    void updateResult(Socket socket, IOException exception, String threadId) {
        if (result.equals(Result.UNKNOWN)) {
            if (logger.isLoggable(Level.FINER)) {
                logger.finer("The following child thread is waiting for socketFinderLock:" + threadId);
            }

            synchronized (socketFinderlock) {
                if (logger.isLoggable(Level.FINER)) {
                    logger.finer("The following child thread acquired socketFinderLock:" + threadId);
                }

                if (result.equals(Result.UNKNOWN)) {
                    // if the connection was successful and no socket has been
                    // selected yet
                    if (exception == null && selectedSocket == null) {
                        selectedSocket = socket;
                        result = Result.SUCCESS;
                        if (logger.isLoggable(Level.FINER)) {
                            logger.finer("The socket of the following thread has been chosen:" + threadId);
                        }
                    }

                    // if an exception occurred
                    if (exception != null) {
                        updateSelectedException(exception, threadId);
                    }
                }

                noOfThreadsThatNotified++;

                // if all threads notified, but the result is still unknown,
                // update the result to failure
                if ((noOfThreadsThatNotified >= noOfSpawnedThreads) && result.equals(Result.UNKNOWN)) {
                    result = Result.FAILURE;
                }

                if (!result.equals(Result.UNKNOWN)) {
                    // 1) Note that at any point of time, there is only one
                    // thread(parent/child thread) competing for parentThreadLock.
 * If all exceptions are related to socketTimeout, the first exception is picked. Note: This method is not
 * thread safe. The caller should ensure thread safety.
 *
 * @param ex
 *        the IOException
 * @param traceId
 *        the traceId of the thread
 */
public void updateSelectedException(IOException ex, String traceId) {
    boolean updatedException = false;
    if (selectedException == null
            || (!(ex instanceof SocketTimeoutException)) && (selectedException instanceof SocketTimeoutException)) {
        selectedException = ex;
        updatedException = true;
    }

    if (updatedException) {
        if (logger.isLoggable(Level.FINER)) {
            logger.finer("The selected exception is updated to the following: ExceptionType:" + ex.getClass()
                    + "; ExceptionMessage:" + ex.getMessage() + "; by the following thread:" + traceId);
        }
    }
}

/**
 * Used for tracing
 *
 * @return traceID string
 */
public String toString() {
    return traceID;
}
}


/**
 * This is used to connect a socket in a separate thread
 */
final class SocketConnector implements Runnable {
    // socket on which connection attempt would be made
    private final Socket socket;

    // the socketFinder associated with this connector
    private final SocketFinder socketFinder;

    // inetSocketAddress to connect to
    private final InetSocketAddress inetSocketAddress;

    // timeout in milliseconds
    private final int timeoutInMilliseconds;

    // Logging variables
    private static final Logger logger = Logger.getLogger("com.microsoft.sqlserver.jdbc.internals.SocketConnector");
    private final String traceID;

    // Id of the thread. used for diagnosis
    private final String threadID;

    // a counter used to give unique IDs to each connector thread.
    // this will have the id of the thread that was last created.
    private static long lastThreadID = 0;

    /**
     * Constructs a new SocketConnector object with the associated socket and socketFinder
     */
    SocketConnector(Socket socket, InetSocketAddress inetSocketAddress, int timeOutInMilliSeconds,
            SocketFinder socketFinder) {
        this.socket = socket;
        this.inetSocketAddress = inetSocketAddress;
        this.timeoutInMilliseconds = timeOutInMilliSeconds;
        this.socketFinder = socketFinder;
        this.threadID = Long.toString(nextThreadID());
        this.traceID = "SocketConnector:" + this.threadID + "(" + socketFinder.toString() + ")";
    }

    /**
     * If search for socket has not finished, this function tries to connect a socket(with a timeout) synchronously. It
     * further notifies the socketFinder the result of the connection attempt
     */
    public void run() {
        IOException exception = null;

        // Note that we do not need socketFinder lock here
        // as we update nothing in socketFinder based on the condition.
        // So, its perfectly fine to make a dirty read.
        SocketFinder.Result result = socketFinder.getResult();

        // connect only if the search is not yet finished
        if (result.equals(SocketFinder.Result.UNKNOWN)) {
            try {
                if (logger.isLoggable(Level.FINER)) {
                    logger.finer(this.toString() + " connecting to InetSocketAddress:" + inetSocketAddress
                            + " with timeout:" + timeoutInMilliseconds);
                }

                socket.connect(inetSocketAddress, timeoutInMilliseconds);
            } catch (IOException ex) {
                if (logger.isLoggable(Level.FINER)) {
                    logger.finer(this.toString() + " exception:" + ex.getClass() + " with message:" + ex.getMessage()
                            + " occurred while connecting to InetSocketAddress:" + inetSocketAddress);
                }
                exception = ex;
            }

            // report either the connected socket or the failure back to the finder
            socketFinder.updateResult(socket, exception, this.toString());
        }
    }

    /**
     * Used for tracing
     *
     * @return traceID string
     */
    public String toString() {
        return traceID;
    }

    /**
     * Generates the next unique thread id.
*/ private static synchronized long nextThreadID() { if (lastThreadID == Long.MAX_VALUE) { if (logger.isLoggable(Level.FINER)) logger.finer("Resetting the Id count"); lastThreadID = 1; } else { lastThreadID++; } return lastThreadID; } } /** * TDSWriter implements the client to server TDS data pipe. */ final class TDSWriter { private static Logger logger = Logger.getLogger("com.microsoft.sqlserver.jdbc.internals.TDS.Writer"); private final String traceID; final public String toString() { return traceID; } private final TDSChannel tdsChannel; private final SQLServerConnection con; // Flag to indicate whether data written via writeXXX() calls // is loggable. Data is normally loggable. But sensitive // data, such as user credentials, should never be logged for // security reasons. private boolean dataIsLoggable = true; void setDataLoggable(boolean value) { dataIsLoggable = value; } SharedTimer getSharedTimer() throws SQLServerException { return con.getSharedTimer(); } private TDSCommand command = null; // TDS message type (Query, RPC, DTC, etc.) sent at the beginning // of every TDS message header. Value is set when starting a new // TDS message of the specified type. private byte tdsMessageType; private volatile int sendResetConnection = 0; // Size (in bytes) of the TDS packets to/from the server. // This size is normally fixed for the life of the connection, // but it can change once after the logon packet because packet // size negotiation happens at logon time. private int currentPacketSize = 0; // Size of the TDS packet header, which is: // byte type // byte status // short length // short SPID // byte packet // byte window private final static int TDS_PACKET_HEADER_SIZE = 8; private final static byte[] placeholderHeader = new byte[TDS_PACKET_HEADER_SIZE]; // Intermediate array used to convert typically "small" values such as fixed-length types // (byte, int, long, etc.) and Strings from their native form to bytes for sending to // the channel buffers. 
private byte valueBytes[] = new byte[256]; // Monotonically increasing packet number associated with the current message private int packetNum = 0; // Bytes for sending decimal/numeric data private final static int BYTES4 = 4; private final static int BYTES8 = 8; private final static int BYTES12 = 12; private final static int BYTES16 = 16; public final static int BIGDECIMAL_MAX_LENGTH = 0x11; // is set to true when EOM is sent for the current message. // Note that this variable will never be accessed from multiple threads // simultaneously and so it need not be volatile private boolean isEOMSent = false; boolean isEOMSent() { return isEOMSent; } // Packet data buffers private ByteBuffer stagingBuffer; private ByteBuffer socketBuffer; private ByteBuffer logBuffer; // Intermediate arrays // It is assumed, startMessage is called before use, to alloc arrays private char[] streamCharBuffer; private byte[] streamByteBuffer; private CryptoMetadata cryptoMeta = null; TDSWriter(TDSChannel tdsChannel, SQLServerConnection con) { this.tdsChannel = tdsChannel; this.con = con; traceID = "TDSWriter@" + Integer.toHexString(hashCode()) + " (" + con.toString() + ")"; } /** * Checks If tdsMessageType is RPC or QUERY * * @return boolean */ boolean checkIfTdsMessageTypeIsBatchOrRPC() { return tdsMessageType == TDS.PKT_QUERY || tdsMessageType == TDS.PKT_RPC; } // TDS message start/end operations void preparePacket() throws SQLServerException { if (tdsChannel.isLoggingPackets()) { Arrays.fill(logBuffer.array(), (byte) 0xFE); ((Buffer) logBuffer).clear(); } // Write a placeholder packet header. This will be replaced // with the real packet header when the packet is flushed. writeBytes(placeholderHeader); } /** * Start a new TDS message. 
*/ void writeMessageHeader() throws SQLServerException { // TDS 7.2 & later: // Include ALL_Headers/MARS header in message's first packet // Note: The PKT_BULK message does not nees this ALL_HEADERS if ((TDS.PKT_QUERY == tdsMessageType || TDS.PKT_DTC == tdsMessageType || TDS.PKT_RPC == tdsMessageType)) { boolean includeTraceHeader = false; int totalHeaderLength = TDS.MESSAGE_HEADER_LENGTH; if (TDS.PKT_QUERY == tdsMessageType || TDS.PKT_RPC == tdsMessageType) { if (con.isDenaliOrLater() && Util.isActivityTraceOn() && !ActivityCorrelator.getCurrent().isSentToServer()) { includeTraceHeader = true; totalHeaderLength += TDS.TRACE_HEADER_LENGTH; } } writeInt(totalHeaderLength); // allHeaders.TotalLength (DWORD) writeInt(TDS.MARS_HEADER_LENGTH); // MARS header length (DWORD) writeShort((short) 2); // allHeaders.HeaderType(MARS header) (USHORT) writeBytes(con.getTransactionDescriptor()); writeInt(1); // marsHeader.OutstandingRequestCount if (includeTraceHeader) { writeInt(TDS.TRACE_HEADER_LENGTH); // trace header length (DWORD) writeTraceHeaderData(); ActivityCorrelator.setCurrentActivityIdSentFlag(); // set the flag to indicate this ActivityId is sent } } } void writeTraceHeaderData() throws SQLServerException { ActivityId activityId = ActivityCorrelator.getCurrent(); final byte[] actIdByteArray = Util.asGuidByteArray(activityId.getId()); long seqNum = activityId.getSequence(); writeShort(TDS.HEADERTYPE_TRACE); // trace header type writeBytes(actIdByteArray, 0, actIdByteArray.length); // guid part of ActivityId writeInt((int) seqNum); // sequence number of ActivityId if (logger.isLoggable(Level.FINER)) logger.finer("Send Trace Header - ActivityID: " + activityId.toString()); } /** * Convenience method to prepare the TDS channel for writing and start a new TDS message. * * @param command * The TDS command * @param tdsMessageType * The TDS message type (PKT_QUERY, PKT_RPC, etc.) 
*/ void startMessage(TDSCommand command, byte tdsMessageType) throws SQLServerException { this.command = command; this.tdsMessageType = tdsMessageType; this.packetNum = 0; this.isEOMSent = false; this.dataIsLoggable = true; // If the TDS packet size has changed since the last request // (which should really only happen after the login packet) // then allocate new buffers that are the correct size. int negotiatedPacketSize = con.getTDSPacketSize(); if (currentPacketSize != negotiatedPacketSize) { socketBuffer = ByteBuffer.allocate(negotiatedPacketSize).order(ByteOrder.LITTLE_ENDIAN); stagingBuffer = ByteBuffer.allocate(negotiatedPacketSize).order(ByteOrder.LITTLE_ENDIAN); logBuffer = ByteBuffer.allocate(negotiatedPacketSize).order(ByteOrder.LITTLE_ENDIAN); currentPacketSize = negotiatedPacketSize; streamCharBuffer = new char[2 * currentPacketSize]; streamByteBuffer = new byte[4 * currentPacketSize]; } ((Buffer) socketBuffer).position(((Buffer) socketBuffer).limit()); ((Buffer) stagingBuffer).clear(); preparePacket(); writeMessageHeader(); } final void endMessage() throws SQLServerException { if (logger.isLoggable(Level.FINEST)) logger.finest(toString() + " Finishing TDS message"); writePacket(TDS.STATUS_BIT_EOM); } // If a complete request has not been sent to the server, // the client MUST send the next packet with both ignore bit (0x02) and EOM bit (0x01) // set in the status to cancel the request. 
final boolean ignoreMessage() throws SQLServerException { if (packetNum > 0 || TDS.PKT_BULK == this.tdsMessageType) { assert !isEOMSent; if (logger.isLoggable(Level.FINER)) logger.finest(toString() + " Finishing TDS message by sending ignore bit and end of message"); writePacket(TDS.STATUS_BIT_EOM | TDS.STATUS_BIT_ATTENTION); return true; } return false; } final void resetPooledConnection() { if (logger.isLoggable(Level.FINEST)) logger.finest(toString() + " resetPooledConnection"); sendResetConnection = TDS.STATUS_BIT_RESET_CONN; } // Primitive write operations void writeByte(byte value) throws SQLServerException { if (stagingBuffer.remaining() >= 1) { stagingBuffer.put(value); if (tdsChannel.isLoggingPackets()) { if (dataIsLoggable) logBuffer.put(value); else ((Buffer) logBuffer).position(((Buffer) logBuffer).position() + 1); } } else { valueBytes[0] = value; writeWrappedBytes(valueBytes, 1); } } /** * writing sqlCollation information for sqlVariant type when sending character types. * * @param variantType * @throws SQLServerException */ void writeCollationForSqlVariant(SqlVariant variantType) throws SQLServerException { writeInt(variantType.getCollation().getCollationInfo()); writeByte((byte) (variantType.getCollation().getCollationSortID() & 0xFF)); } void writeChar(char value) throws SQLServerException { if (stagingBuffer.remaining() >= 2) { stagingBuffer.putChar(value); if (tdsChannel.isLoggingPackets()) { if (dataIsLoggable) logBuffer.putChar(value); else ((Buffer) logBuffer).position(((Buffer) logBuffer).position() + 2); } } else { Util.writeShort((short) value, valueBytes, 0); writeWrappedBytes(valueBytes, 2); } } void writeShort(short value) throws SQLServerException { if (stagingBuffer.remaining() >= 2) { stagingBuffer.putShort(value); if (tdsChannel.isLoggingPackets()) { if (dataIsLoggable) logBuffer.putShort(value); else ((Buffer) logBuffer).position(((Buffer) logBuffer).position() + 2); } } else { Util.writeShort(value, valueBytes, 0); 
writeWrappedBytes(valueBytes, 2); } } void writeInt(int value) throws SQLServerException { if (stagingBuffer.remaining() >= 4) { stagingBuffer.putInt(value); if (tdsChannel.isLoggingPackets()) { if (dataIsLoggable) logBuffer.putInt(value); else ((Buffer) logBuffer).position(((Buffer) logBuffer).position() + 4); } } else { Util.writeInt(value, valueBytes, 0); writeWrappedBytes(valueBytes, 4); } } /** * Append a real value in the TDS stream. * * @param value * the data value */ void writeReal(float value) throws SQLServerException { writeInt(Float.floatToRawIntBits(value)); } /** * Append a double value in the TDS stream. * * @param value * the data value */ void writeDouble(double value) throws SQLServerException { if (stagingBuffer.remaining() >= 8) { stagingBuffer.putDouble(value); if (tdsChannel.isLoggingPackets()) { if (dataIsLoggable) logBuffer.putDouble(value); else ((Buffer) logBuffer).position(((Buffer) logBuffer).position() + 8); } } else { long bits = Double.doubleToLongBits(value); long mask = 0xFF; int nShift = 0; for (int i = 0; i < 8; i++) { writeByte((byte) ((bits & mask) >> nShift)); nShift += 8; mask = mask << 8; } } } /** * Append a big decimal in the TDS stream. * * @param bigDecimalVal * the big decimal data value * @param srcJdbcType * the source JDBCType * @param precision * the precision of the data value * @param scale * the scale of the column * @throws SQLServerException */ void writeBigDecimal(BigDecimal bigDecimalVal, int srcJdbcType, int precision, int scale) throws SQLServerException { /* * Length including sign byte One 1-byte unsigned integer that represents the sign of the decimal value (0 => * Negative, 1 => positive) One 4-, 8-, 12-, or 16-byte signed integer that represents the decimal value * multiplied by 10^scale. */ /* * setScale of all BigDecimal value based on metadata as scale is not sent separately for individual value. Use * the rounding used in Server. 
Say, for BigDecimal("0.1"), if scale in metdadata is 0, then ArithmeticException * would be thrown if RoundingMode is not set */ bigDecimalVal = bigDecimalVal.setScale(scale, RoundingMode.HALF_UP); // data length + 1 byte for sign int bLength = BYTES16 + 1; writeByte((byte) (bLength)); // Byte array to hold all the data and padding bytes. byte[] bytes = new byte[bLength]; byte[] valueBytes = DDC.convertBigDecimalToBytes(bigDecimalVal, scale); // removing the precision and scale information from the valueBytes array System.arraycopy(valueBytes, 2, bytes, 0, valueBytes.length - 2); writeBytes(bytes); } /** * Append a money/smallmoney value in the TDS stream. * * @param moneyVal * the money data value. * @param srcJdbcType * the source JDBCType * @throws SQLServerException */ void writeMoney(BigDecimal moneyVal, int srcJdbcType) throws SQLServerException { moneyVal = moneyVal.setScale(4, RoundingMode.HALF_UP); int bLength; // Money types are 8 bytes, smallmoney are 4 bytes bLength = (srcJdbcType == microsoft.sql.Types.MONEY ? 8 : 4); writeByte((byte) (bLength)); byte[] valueBytes = DDC.convertMoneyToBytes(moneyVal, bLength); writeBytes(valueBytes); } /** * Append a big decimal inside sql_variant in the TDS stream. * * @param bigDecimalVal * the big decimal data value * @param srcJdbcType * the source JDBCType */ void writeSqlVariantInternalBigDecimal(BigDecimal bigDecimalVal, int srcJdbcType) throws SQLServerException { /* * Length including sign byte One 1-byte unsigned integer that represents the sign of the decimal value (0 => * Negative, 1 => positive) One 16-byte signed integer that represents the decimal value multiplied by 10^scale. * In sql_variant, we send the bigdecimal with precision 38, therefore we use 16 bytes for the maximum size of * this integer. */ boolean isNegative = (bigDecimalVal.signum() < 0); BigInteger bi = bigDecimalVal.unscaledValue(); if (isNegative) { bi = bi.negate(); } int bLength; bLength = BYTES16; writeByte((byte) (isNegative ? 
0 : 1)); // Get the bytes of the BigInteger value. It is in reverse order, with // most significant byte in 0-th element. We need to reverse it first before sending over TDS. byte[] unscaledBytes = bi.toByteArray(); if (unscaledBytes.length > bLength) { // If precession of input is greater than maximum allowed (p><= 38) throw Exception MessageFormat form = new MessageFormat(SQLServerException.getErrString("R_valueOutOfRange")); Object[] msgArgs = {JDBCType.of(srcJdbcType)}; throw new SQLServerException(form.format(msgArgs), SQLState.DATA_EXCEPTION_LENGTH_MISMATCH, DriverError.NOT_SET, null); } // Byte array to hold all the reversed and padding bytes. byte[] bytes = new byte[bLength]; // We need to fill up the rest of the array with zeros, as unscaledBytes may have less bytes // than the required size for TDS. int remaining = bLength - unscaledBytes.length; // Reverse the bytes. int i, j; for (i = 0, j = unscaledBytes.length - 1; i < unscaledBytes.length;) bytes[i++] = unscaledBytes[j // Fill the rest of the array with zeros. for (; i < remaining; i++) { bytes[i] = (byte) 0x00; } writeBytes(bytes); } void writeSmalldatetime(String value) throws SQLServerException { GregorianCalendar calendar = initializeCalender(TimeZone.getDefault()); long utcMillis; // Value to which the calendar is to be set (in milliseconds 1/1/1970 00:00:00 GMT) java.sql.Timestamp timestampValue = java.sql.Timestamp.valueOf(value); utcMillis = timestampValue.getTime(); // Load the calendar with the desired value calendar.setTimeInMillis(utcMillis); // Number of days since the SQL Server Base Date (January 1, 1900) int daysSinceSQLBaseDate = DDC.daysSinceBaseDate(calendar.get(Calendar.YEAR), calendar.get(Calendar.DAY_OF_YEAR), TDS.BASE_YEAR_1900); // Next, figure out the number of milliseconds since midnight of the current day. 
int millisSinceMidnight = 1000 * calendar.get(Calendar.SECOND) + // Seconds into the current minute 60 * 1000 * calendar.get(Calendar.MINUTE) + // Minutes into the current hour 60 * 60 * 1000 * calendar.get(Calendar.HOUR_OF_DAY); // Hours into the current day // The last millisecond of the current day is always rounded to the first millisecond // of the next day because DATETIME is only accurate to 1/300th of a second. if (1000 * 60 * 60 * 24 - 1 <= millisSinceMidnight) { ++daysSinceSQLBaseDate; millisSinceMidnight = 0; } // Number of days since the SQL Server Base Date (January 1, 1900) writeShort((short) daysSinceSQLBaseDate); int secondsSinceMidnight = (millisSinceMidnight / 1000); int minutesSinceMidnight = (secondsSinceMidnight / 60); // Values that are 29.998 seconds or less are rounded down to the nearest minute minutesSinceMidnight = ((secondsSinceMidnight % 60) > 29.998) ? minutesSinceMidnight + 1 : minutesSinceMidnight; // Minutes since midnight writeShort((short) minutesSinceMidnight); } void writeDatetime(String value) throws SQLServerException { GregorianCalendar calendar = initializeCalender(TimeZone.getDefault()); long utcMillis; // Value to which the calendar is to be set (in milliseconds 1/1/1970 00:00:00 GMT) int subSecondNanos; java.sql.Timestamp timestampValue = java.sql.Timestamp.valueOf(value); utcMillis = timestampValue.getTime(); subSecondNanos = timestampValue.getNanos(); // Load the calendar with the desired value calendar.setTimeInMillis(utcMillis); // Number of days there have been since the SQL Base Date. // These are based on SQL Server algorithms int daysSinceSQLBaseDate = DDC.daysSinceBaseDate(calendar.get(Calendar.YEAR), calendar.get(Calendar.DAY_OF_YEAR), TDS.BASE_YEAR_1900); // Number of milliseconds since midnight of the current day. 
int millisSinceMidnight = (subSecondNanos + Nanos.PER_MILLISECOND / 2) / Nanos.PER_MILLISECOND + // Millis into // the current // second 1000 * calendar.get(Calendar.SECOND) + // Seconds into the current minute 60 * 1000 * calendar.get(Calendar.MINUTE) + // Minutes into the current hour 60 * 60 * 1000 * calendar.get(Calendar.HOUR_OF_DAY); // Hours into the current day // The last millisecond of the current day is always rounded to the first millisecond // of the next day because DATETIME is only accurate to 1/300th of a second. if (1000 * 60 * 60 * 24 - 1 <= millisSinceMidnight) { ++daysSinceSQLBaseDate; millisSinceMidnight = 0; } // Last-ditch verification that the value is in the valid range for the // DATETIMEN TDS data type (1/1/1753 to 12/31/9999). If it's not, then // throw an exception now so that statement execution is safely canceled. // Attempting to put an invalid value on the wire would result in a TDS // exception, which would close the connection. // These are based on SQL Server algorithms if (daysSinceSQLBaseDate < DDC.daysSinceBaseDate(1753, 1, TDS.BASE_YEAR_1900) || daysSinceSQLBaseDate >= DDC.daysSinceBaseDate(10000, 1, TDS.BASE_YEAR_1900)) { MessageFormat form = new MessageFormat(SQLServerException.getErrString("R_valueOutOfRange")); Object[] msgArgs = {SSType.DATETIME}; throw new SQLServerException(form.format(msgArgs), SQLState.DATA_EXCEPTION_DATETIME_FIELD_OVERFLOW, DriverError.NOT_SET, null); } // Number of days since the SQL Server Base Date (January 1, 1900) writeInt(daysSinceSQLBaseDate); // Milliseconds since midnight (at a resolution of three hundredths of a second) writeInt((3 * millisSinceMidnight + 5) / 10); } void writeDate(String value) throws SQLServerException { GregorianCalendar calendar = initializeCalender(TimeZone.getDefault()); long utcMillis; java.sql.Date dateValue = java.sql.Date.valueOf(value); utcMillis = dateValue.getTime(); // Load the calendar with the desired value calendar.setTimeInMillis(utcMillis); 
writeScaledTemporal(calendar, 0, // subsecond nanos (none for a date value) 0, // scale (dates are not scaled) SSType.DATE); } void writeTime(java.sql.Timestamp value, int scale) throws SQLServerException { GregorianCalendar calendar = initializeCalender(TimeZone.getDefault()); long utcMillis; // Value to which the calendar is to be set (in milliseconds 1/1/1970 00:00:00 GMT) int subSecondNanos; utcMillis = value.getTime(); subSecondNanos = value.getNanos(); // Load the calendar with the desired value calendar.setTimeInMillis(utcMillis); writeScaledTemporal(calendar, subSecondNanos, scale, SSType.TIME); } void writeDateTimeOffset(Object value, int scale, SSType destSSType) throws SQLServerException { GregorianCalendar calendar; TimeZone timeZone; // Time zone to associate with the value in the Gregorian calendar int subSecondNanos; int minutesOffset; /* * Out of all the supported temporal datatypes, DateTimeOffset is the only datatype that doesn't allow direct * casting from java.sql.timestamp (which was created from a String). DateTimeOffset was never required to be * constructed from a String, but with the introduction of extended bulk copy support for Azure DW, we now need * to support this scenario. Parse the DTO as string if it's coming from a CSV. */ if (value instanceof String) { // expected format: YYYY-MM-DD hh:mm:ss[.nnnnnnn] [{+|-}hh:mm] try { String stringValue = (String) value; int lastColon = stringValue.lastIndexOf(':'); String offsetString = stringValue.substring(lastColon - 3); /* * At this point, offsetString should look like +hh:mm or -hh:mm. Otherwise, the optional offset value * has not been provided. Parse accordingly. 
*/ String timestampString; if (!offsetString.startsWith("+") && !offsetString.startsWith("-")) { minutesOffset = 0; timestampString = stringValue; } else { minutesOffset = 60 * Integer.valueOf(offsetString.substring(1, 3)) + Integer.valueOf(offsetString.substring(4, 6)); timestampString = stringValue.substring(0, lastColon - 4); if (offsetString.startsWith("-")) minutesOffset = -minutesOffset; } /* * If the target data type is DATETIMEOFFSET, then use UTC for the calendar that will hold the value, * since writeRPCDateTimeOffset expects a UTC calendar. Otherwise, when converting from DATETIMEOFFSET * to other temporal data types, use a local time zone determined by the minutes offset of the value, * since the writers for those types expect local calendars. */ timeZone = (SSType.DATETIMEOFFSET == destSSType) ? UTC.timeZone : new SimpleTimeZone(minutesOffset * 60 * 1000, ""); calendar = new GregorianCalendar(timeZone); int year = Integer.valueOf(timestampString.substring(0, 4)); int month = Integer.valueOf(timestampString.substring(5, 7)); int day = Integer.valueOf(timestampString.substring(8, 10)); int hour = Integer.valueOf(timestampString.substring(11, 13)); int minute = Integer.valueOf(timestampString.substring(14, 16)); int second = Integer.valueOf(timestampString.substring(17, 19)); subSecondNanos = (19 == timestampString.indexOf('.')) ? 
(new BigDecimal(timestampString.substring(19))) .scaleByPowerOfTen(9).intValue() : 0; calendar.setLenient(true); calendar.set(Calendar.YEAR, year); calendar.set(Calendar.MONTH, month - 1); calendar.set(Calendar.DAY_OF_MONTH, day); calendar.set(Calendar.HOUR_OF_DAY, hour); calendar.set(Calendar.MINUTE, minute); calendar.set(Calendar.SECOND, second); calendar.add(Calendar.MINUTE, -minutesOffset); } catch (NumberFormatException | IndexOutOfBoundsException e) { MessageFormat form = new MessageFormat(SQLServerException.getErrString("R_ParsingDataError")); Object[] msgArgs = {value, JDBCType.DATETIMEOFFSET}; throw new SQLServerException(this, form.format(msgArgs), null, 0, false); } } else { long utcMillis; // Value to which the calendar is to be set (in milliseconds 1/1/1970 00:00:00 GMT) microsoft.sql.DateTimeOffset dtoValue = (microsoft.sql.DateTimeOffset) value; utcMillis = dtoValue.getTimestamp().getTime(); subSecondNanos = dtoValue.getTimestamp().getNanos(); minutesOffset = dtoValue.getMinutesOffset(); /* * If the target data type is DATETIMEOFFSET, then use UTC for the calendar that will hold the value, since * writeRPCDateTimeOffset expects a UTC calendar. Otherwise, when converting from DATETIMEOFFSET to other * temporal data types, use a local time zone determined by the minutes offset of the value, since the * writers for those types expect local calendars. */ timeZone = (SSType.DATETIMEOFFSET == destSSType) ? 
UTC.timeZone : new SimpleTimeZone(minutesOffset * 60 * 1000, ""); calendar = new GregorianCalendar(timeZone, Locale.US); calendar.setLenient(true); calendar.clear(); calendar.setTimeInMillis(utcMillis); } writeScaledTemporal(calendar, subSecondNanos, scale, SSType.DATETIMEOFFSET); writeShort((short) minutesOffset); } void writeOffsetDateTimeWithTimezone(OffsetDateTime offsetDateTimeValue, int scale) throws SQLServerException { GregorianCalendar calendar; TimeZone timeZone; long utcMillis; int subSecondNanos; int minutesOffset = 0; try { // offsetTimeValue.getOffset() returns a ZoneOffset object which has only hours and minutes // components. So the result of the division will be an integer always. SQL Server also supports // offsets in minutes precision. minutesOffset = offsetDateTimeValue.getOffset().getTotalSeconds() / 60; } catch (Exception e) { throw new SQLServerException(SQLServerException.getErrString("R_zoneOffsetError"), null, // SQLState is null // as this error is // generated in // the driver 0, // Use 0 instead of DriverError.NOT_SET to use the correct constructor e); } subSecondNanos = offsetDateTimeValue.getNano(); // writeScaledTemporal() expects subSecondNanos in 9 digits precssion // but getNano() used in OffsetDateTime returns precession based on nanoseconds read from csv // padding zeros to match the expectation of writeScaledTemporal() int padding = 9 - String.valueOf(subSecondNanos).length(); while (padding > 0) { subSecondNanos = subSecondNanos * 10; padding } // For TIME_WITH_TIMEZONE, use UTC for the calendar that will hold the value timeZone = UTC.timeZone; // The behavior is similar to microsoft.sql.DateTimeOffset // In Timestamp format, only YEAR needs to have 4 digits. The leading zeros for the rest of the fields can be // omitted. 
String offDateTimeStr = String.format("%04d", offsetDateTimeValue.getYear()) + '-' + offsetDateTimeValue.getMonthValue() + '-' + offsetDateTimeValue.getDayOfMonth() + ' ' + offsetDateTimeValue.getHour() + ':' + offsetDateTimeValue.getMinute() + ':' + offsetDateTimeValue.getSecond(); utcMillis = Timestamp.valueOf(offDateTimeStr).getTime(); calendar = initializeCalender(timeZone); calendar.setTimeInMillis(utcMillis); // Local timezone value in minutes int minuteAdjustment = ((TimeZone.getDefault().getRawOffset()) / (60 * 1000)); // check if date is in day light savings and add daylight saving minutes if (TimeZone.getDefault().inDaylightTime(calendar.getTime())) minuteAdjustment += (TimeZone.getDefault().getDSTSavings()) / (60 * 1000); // If the local time is negative then positive minutesOffset must be subtracted from calender minuteAdjustment += (minuteAdjustment < 0) ? (minutesOffset * (-1)) : minutesOffset; calendar.add(Calendar.MINUTE, minuteAdjustment); writeScaledTemporal(calendar, subSecondNanos, scale, SSType.DATETIMEOFFSET); writeShort((short) minutesOffset); } void writeOffsetTimeWithTimezone(OffsetTime offsetTimeValue, int scale) throws SQLServerException { GregorianCalendar calendar; TimeZone timeZone; long utcMillis; int subSecondNanos; int minutesOffset = 0; try { // offsetTimeValue.getOffset() returns a ZoneOffset object which has only hours and minutes // components. So the result of the division will be an integer always. SQL Server also supports // offsets in minutes precision. 
minutesOffset = offsetTimeValue.getOffset().getTotalSeconds() / 60; } catch (Exception e) { throw new SQLServerException(SQLServerException.getErrString("R_zoneOffsetError"), null, // SQLState is null // as this error is // generated in // the driver 0, // Use 0 instead of DriverError.NOT_SET to use the correct constructor e); } subSecondNanos = offsetTimeValue.getNano(); // writeScaledTemporal() expects subSecondNanos in 9 digits precssion // but getNano() used in OffsetDateTime returns precession based on nanoseconds read from csv // padding zeros to match the expectation of writeScaledTemporal() int padding = 9 - String.valueOf(subSecondNanos).length(); while (padding > 0) { subSecondNanos = subSecondNanos * 10; padding } // For TIME_WITH_TIMEZONE, use UTC for the calendar that will hold the value timeZone = UTC.timeZone; // Using TDS.BASE_YEAR_1900, based on SQL server behavious // If date only contains a time part, the return value is 1900, the base year. // In Timestamp format, leading zeros for the fields can be omitted. String offsetTimeStr = TDS.BASE_YEAR_1900 + "-01-01" + ' ' + offsetTimeValue.getHour() + ':' + offsetTimeValue.getMinute() + ':' + offsetTimeValue.getSecond(); utcMillis = Timestamp.valueOf(offsetTimeStr).getTime(); calendar = initializeCalender(timeZone); calendar.setTimeInMillis(utcMillis); int minuteAdjustment = (TimeZone.getDefault().getRawOffset()) / (60 * 1000); // check if date is in day light savings and add daylight saving minutes to Local timezone(in minutes) if (TimeZone.getDefault().inDaylightTime(calendar.getTime())) minuteAdjustment += ((TimeZone.getDefault().getDSTSavings()) / (60 * 1000)); // If the local time is negative then positive minutesOffset must be subtracted from calender minuteAdjustment += (minuteAdjustment < 0) ? 
(minutesOffset * (-1)) : minutesOffset; calendar.add(Calendar.MINUTE, minuteAdjustment); writeScaledTemporal(calendar, subSecondNanos, scale, SSType.DATETIMEOFFSET); writeShort((short) minutesOffset); } void writeLong(long value) throws SQLServerException { if (stagingBuffer.remaining() >= 8) { stagingBuffer.putLong(value); if (tdsChannel.isLoggingPackets()) { if (dataIsLoggable) logBuffer.putLong(value); else ((Buffer) logBuffer).position(((Buffer) logBuffer).position() + 8); } } else { Util.writeLong(value, valueBytes, 0); writeWrappedBytes(valueBytes, 8); } } void writeBytes(byte[] value) throws SQLServerException { writeBytes(value, 0, value.length); } void writeBytes(byte[] value, int offset, int length) throws SQLServerException { assert length <= value.length; int bytesWritten = 0; int bytesToWrite; if (logger.isLoggable(Level.FINEST)) { logger.finest(toString() + " Writing " + length + " bytes"); } while ((bytesToWrite = length - bytesWritten) > 0) { if (0 == stagingBuffer.remaining()) writePacket(TDS.STATUS_NORMAL); if (bytesToWrite > stagingBuffer.remaining()) bytesToWrite = stagingBuffer.remaining(); stagingBuffer.put(value, offset + bytesWritten, bytesToWrite); if (tdsChannel.isLoggingPackets()) { if (dataIsLoggable) logBuffer.put(value, offset + bytesWritten, bytesToWrite); else ((Buffer) logBuffer).position(((Buffer) logBuffer).position() + bytesToWrite); } bytesWritten += bytesToWrite; } } void writeWrappedBytes(byte value[], int valueLength) throws SQLServerException { // This function should only be used to write a value that is longer than // what remains in the current staging buffer. However, the value must // be short enough to fit in an empty buffer. 
assert valueLength <= value.length; int remaining = stagingBuffer.remaining(); assert remaining < valueLength; assert valueLength <= stagingBuffer.capacity(); // Fill any remaining space in the staging buffer remaining = stagingBuffer.remaining(); if (remaining > 0) { stagingBuffer.put(value, 0, remaining); if (tdsChannel.isLoggingPackets()) { if (dataIsLoggable) logBuffer.put(value, 0, remaining); else ((Buffer) logBuffer).position(((Buffer) logBuffer).position() + remaining); } } writePacket(TDS.STATUS_NORMAL); // After swapping, the staging buffer should once again be empty, so the // remainder of the value can be written to it. stagingBuffer.put(value, remaining, valueLength - remaining); if (tdsChannel.isLoggingPackets()) { if (dataIsLoggable) logBuffer.put(value, remaining, valueLength - remaining); else ((Buffer) logBuffer).position(((Buffer) logBuffer).position() + remaining); } } void writeString(String value) throws SQLServerException { int charsCopied = 0; int length = value.length(); while (charsCopied < length) { long bytesToCopy = 2 * (length - charsCopied); if (bytesToCopy > valueBytes.length) bytesToCopy = valueBytes.length; int bytesCopied = 0; try { while (bytesCopied < bytesToCopy) { char ch = value.charAt(charsCopied++); valueBytes[bytesCopied++] = (byte) ((ch >> 0) & 0xFF); valueBytes[bytesCopied++] = (byte) ((ch >> 8) & 0xFF); } writeBytes(valueBytes, 0, bytesCopied); } catch (ArrayIndexOutOfBoundsException e) { MessageFormat form = new MessageFormat(SQLServerException.getErrString("R_indexOutOfRange")); Object[] msgArgs = {bytesCopied}; error(form.format(msgArgs), SQLState.DATA_EXCEPTION_NOT_SPECIFIC, DriverError.NOT_SET); } } } void writeStream(InputStream inputStream, long advertisedLength, boolean writeChunkSizes) throws SQLServerException { assert DataTypes.UNKNOWN_STREAM_LENGTH == advertisedLength || advertisedLength >= 0; long actualLength = 0; final byte[] streamByteBuffer = new byte[4 * currentPacketSize]; int bytesRead = 0; int 
bytesToWrite; do { // Read in next chunk for (bytesToWrite = 0; -1 != bytesRead && bytesToWrite < streamByteBuffer.length; bytesToWrite += bytesRead) { try { bytesRead = inputStream.read(streamByteBuffer, bytesToWrite, streamByteBuffer.length - bytesToWrite); } catch (IOException e) { MessageFormat form = new MessageFormat(SQLServerException.getErrString("R_errorReadingStream")); Object[] msgArgs = {e.toString()}; error(form.format(msgArgs), SQLState.DATA_EXCEPTION_NOT_SPECIFIC, DriverError.NOT_SET); } if (-1 == bytesRead) break; // Check for invalid bytesRead returned from InputStream.read if (bytesRead < 0 || bytesRead > streamByteBuffer.length - bytesToWrite) { MessageFormat form = new MessageFormat(SQLServerException.getErrString("R_errorReadingStream")); Object[] msgArgs = {SQLServerException.getErrString("R_streamReadReturnedInvalidValue")}; error(form.format(msgArgs), SQLState.DATA_EXCEPTION_NOT_SPECIFIC, DriverError.NOT_SET); } } // Write it out if (writeChunkSizes) writeInt(bytesToWrite); writeBytes(streamByteBuffer, 0, bytesToWrite); actualLength += bytesToWrite; } while (-1 != bytesRead || bytesToWrite > 0); // If we were given an input stream length that we had to match and // the actual stream length did not match then cancel the request. if (DataTypes.UNKNOWN_STREAM_LENGTH != advertisedLength && actualLength != advertisedLength) { MessageFormat form = new MessageFormat(SQLServerException.getErrString("R_mismatchedStreamLength")); Object[] msgArgs = {advertisedLength, actualLength}; error(form.format(msgArgs), SQLState.DATA_EXCEPTION_LENGTH_MISMATCH, DriverError.NOT_SET); } } /* * Adding another function for writing non-unicode reader instead of re-factoring the writeReader() for performance * efficiency. As this method will only be used in bulk copy, it needs to be efficient. Note: Any changes in * algorithm/logic should propagate to both writeReader() and writeNonUnicodeReader(). 
*/
    /**
     * Streams a Reader's contents to the server as non-Unicode (single-byte or
     * database-collation-encoded) PLP data, or as hex-decoded binary when
     * {@code isDestBinary} is true. Used by bulk copy only.
     *
     * @param reader source of character data; read in packet-sized chunks
     * @param advertisedLength expected total char count, or UNKNOWN_STREAM_LENGTH
     * @param isDestBinary true when the destination column is binary and the
     *        reader supplies hex digits to be decoded
     * @throws SQLServerException on read failure or advertised/actual length mismatch
     */
    void writeNonUnicodeReader(Reader reader, long advertisedLength, boolean isDestBinary) throws SQLServerException {
        assert DataTypes.UNKNOWN_STREAM_LENGTH == advertisedLength || advertisedLength >= 0;

        long actualLength = 0;
        int charsRead = 0;
        int charsToWrite;
        int bytesToWrite;
        String streamString;

        do {
            // Read in next chunk
            for (charsToWrite = 0; -1 != charsRead && charsToWrite < currentPacketSize; charsToWrite += charsRead) {
                try {
                    charsRead = reader.read(streamCharBuffer, charsToWrite, currentPacketSize - charsToWrite);
                } catch (IOException e) {
                    MessageFormat form = new MessageFormat(SQLServerException.getErrString("R_errorReadingStream"));
                    Object[] msgArgs = {e.toString()};
                    error(form.format(msgArgs), SQLState.DATA_EXCEPTION_NOT_SPECIFIC, DriverError.NOT_SET);
                }

                if (-1 == charsRead)
                    break;

                // Check for invalid bytesRead returned from Reader.read
                if (charsRead < 0 || charsRead > currentPacketSize - charsToWrite) {
                    MessageFormat form = new MessageFormat(SQLServerException.getErrString("R_errorReadingStream"));
                    Object[] msgArgs = {SQLServerException.getErrString("R_streamReadReturnedInvalidValue")};
                    error(form.format(msgArgs), SQLState.DATA_EXCEPTION_NOT_SPECIFIC, DriverError.NOT_SET);
                }
            }

            if (!isDestBinary) {
                // Write it out
                // This also writes the PLP_TERMINATOR token after all the data in the the stream are sent.
                // The Do-While loop goes on one more time as charsToWrite is greater than 0 for the last chunk, and
                // in this last round the only thing that is written is an int value of 0, which is the PLP Terminator
                // token(0x00000000).

                // collation from database is the collation used
                Charset charSet = con.getDatabaseCollation().getCharset();
                if (null == charSet) {
                    // No charset: truncate each char to its low byte (1 byte per char).
                    writeInt(charsToWrite);
                    for (int charsCopied = 0; charsCopied < charsToWrite; ++charsCopied) {
                        streamByteBuffer[charsCopied] = (byte) (streamCharBuffer[charsCopied] & 0xFF);
                    }
                    writeBytes(streamByteBuffer, 0, charsToWrite);
                } else {
                    // Encode each char via the database collation's charset; chars may
                    // expand to multiple bytes, so bytesToWrite is accumulated.
                    bytesToWrite = 0;
                    byte[] charBytes;
                    for (int charsCopied = 0; charsCopied < charsToWrite; ++charsCopied) {
                        charBytes = new String(streamCharBuffer[charsCopied] + "").getBytes(charSet);
                        System.arraycopy(charBytes, 0, streamByteBuffer, bytesToWrite, charBytes.length);
                        bytesToWrite += charBytes.length;
                    }
                    writeInt(bytesToWrite);
                    writeBytes(streamByteBuffer, 0, bytesToWrite);
                }
            } else {
                // Binary destination: reader supplies hex digits; 2 hex chars -> 1 byte.
                bytesToWrite = charsToWrite;
                if (0 != charsToWrite)
                    bytesToWrite = charsToWrite / 2;

                // NOTE(review): this constructs the String over currentPacketSize chars
                // rather than charsToWrite, so a short final chunk can pick up stale
                // buffer contents; trim() below appears to compensate — confirm before
                // changing.
                streamString = new String(streamCharBuffer, 0, currentPacketSize);
                byte[] bytes = ParameterUtils.HexToBin(streamString.trim());
                writeInt(bytesToWrite);
                writeBytes(bytes, 0, bytesToWrite);
            }
            actualLength += charsToWrite;
        } while (-1 != charsRead || charsToWrite > 0);

        // If we were given an input stream length that we had to match and
        // the actual stream length did not match then cancel the request.
        if (DataTypes.UNKNOWN_STREAM_LENGTH != advertisedLength && actualLength != advertisedLength) {
            MessageFormat form = new MessageFormat(SQLServerException.getErrString("R_mismatchedStreamLength"));
            Object[] msgArgs = {advertisedLength, actualLength};
            error(form.format(msgArgs), SQLState.DATA_EXCEPTION_LENGTH_MISMATCH, DriverError.NOT_SET);
        }
    }

    /*
     * Note: There is another method with same code logic for non unicode reader, writeNonUnicodeReader(), implemented
     * for performance efficiency. Any changes in algorithm/logic should propagate to both writeReader() and
     * writeNonUnicodeReader().
*/
    /**
     * Streams a Reader's contents to the server as UTF-16LE bytes, optionally
     * prefixing each chunk with its byte length (PLP chunk framing).
     *
     * @param reader source of character data
     * @param advertisedLength expected total char count, or UNKNOWN_STREAM_LENGTH
     * @param writeChunkSizes true to emit a 4-byte length before each chunk
     * @throws SQLServerException on read failure or advertised/actual length mismatch
     */
    void writeReader(Reader reader, long advertisedLength, boolean writeChunkSizes) throws SQLServerException {
        assert DataTypes.UNKNOWN_STREAM_LENGTH == advertisedLength || advertisedLength >= 0;

        long actualLength = 0;
        int charsRead = 0;
        int charsToWrite;

        do {
            // Read in next chunk
            for (charsToWrite = 0; -1 != charsRead && charsToWrite < streamCharBuffer.length; charsToWrite += charsRead) {
                try {
                    charsRead = reader.read(streamCharBuffer, charsToWrite, streamCharBuffer.length - charsToWrite);
                } catch (IOException e) {
                    MessageFormat form = new MessageFormat(SQLServerException.getErrString("R_errorReadingStream"));
                    Object[] msgArgs = {e.toString()};
                    error(form.format(msgArgs), SQLState.DATA_EXCEPTION_NOT_SPECIFIC, DriverError.NOT_SET);
                }

                if (-1 == charsRead)
                    break;

                // Check for invalid bytesRead returned from Reader.read
                if (charsRead < 0 || charsRead > streamCharBuffer.length - charsToWrite) {
                    MessageFormat form = new MessageFormat(SQLServerException.getErrString("R_errorReadingStream"));
                    Object[] msgArgs = {SQLServerException.getErrString("R_streamReadReturnedInvalidValue")};
                    error(form.format(msgArgs), SQLState.DATA_EXCEPTION_NOT_SPECIFIC, DriverError.NOT_SET);
                }
            }

            // Write it out
            if (writeChunkSizes)
                writeInt(2 * charsToWrite); // 2 bytes per char in UTF-16LE

            // Convert from Unicode characters to bytes
            //
            // Note: The following inlined code is much faster than the equivalent
            // call to (new String(streamCharBuffer)).getBytes("UTF-16LE") because it
            // saves a conversion to String and use of Charset in that conversion.
            for (int charsCopied = 0; charsCopied < charsToWrite; ++charsCopied) {
                streamByteBuffer[2 * charsCopied] = (byte) ((streamCharBuffer[charsCopied] >> 0) & 0xFF);
                streamByteBuffer[2 * charsCopied + 1] = (byte) ((streamCharBuffer[charsCopied] >> 8) & 0xFF);
            }

            writeBytes(streamByteBuffer, 0, 2 * charsToWrite);
            actualLength += charsToWrite;
        } while (-1 != charsRead || charsToWrite > 0);

        // If we were given an input stream length that we had to match and
        // the actual stream length did not match then cancel the request.
        if (DataTypes.UNKNOWN_STREAM_LENGTH != advertisedLength && actualLength != advertisedLength) {
            MessageFormat form = new MessageFormat(SQLServerException.getErrString("R_mismatchedStreamLength"));
            Object[] msgArgs = {advertisedLength, actualLength};
            error(form.format(msgArgs), SQLState.DATA_EXCEPTION_LENGTH_MISMATCH, DriverError.NOT_SET);
        }
    }

    /**
     * Returns a cleared, lenient GregorianCalendar in the given time zone,
     * ready to have its fields set explicitly by the caller.
     */
    GregorianCalendar initializeCalender(TimeZone timeZone) {
        GregorianCalendar calendar;

        // Create the calendar that will hold the value. For DateTimeOffset values, the calendar's
        // time zone is UTC. For other values, the calendar's time zone is a local time zone.
        calendar = new GregorianCalendar(timeZone, Locale.US);

        // Set the calendar lenient to allow setting the DAY_OF_YEAR and MILLISECOND fields
        // to roll other fields to their correct values.
        calendar.setLenient(true);

        // Clear the calendar of any existing state. The state of a new Calendar object always
        // reflects the current date, time, DST offset, etc.
        calendar.clear();

        return calendar;
    }

    /**
     * Interrupts the current command with the given reason and throws a
     * SQLServerException. Never returns normally.
     */
    final void error(String reason, SQLState sqlState, DriverError driverError) throws SQLServerException {
        assert null != command;
        command.interrupt(reason);
        throw new SQLServerException(reason, sqlState, driverError, null);
    }

    /**
     * Sends an attention signal to the server, if necessary, to tell it to stop processing the current command on this
     * connection.
     *
     * If no packets of the command's request have yet been sent to the server, then no attention signal needs to be
     * sent.
     * The interrupt will be handled entirely by the driver.
     *
     * This method does not need synchronization as it does not manipulate interrupt state and writing is guaranteed to
     * occur only from one thread at a time.
     */
    final boolean sendAttention() throws SQLServerException {
        // If any request packets were already written to the server then send an
        // attention signal to the server to tell it to ignore the request or
        // cancel its execution.
        if (packetNum > 0) {
            // Ideally, we would want to add the following assert here.
            // But to add that the variable isEOMSent would have to be made
            // volatile as this piece of code would be reached from multiple
            // threads. So, not doing it to avoid perf hit. Note that
            // isEOMSent would be updated in writePacket everytime an EOM is sent
            // assert isEOMSent;

            if (logger.isLoggable(Level.FINE))
                logger.fine(this + ": sending attention...");

            ++tdsChannel.numMsgsSent;

            startMessage(command, TDS.PKT_CANCEL_REQ);
            endMessage();

            return true;
        }

        return false;
    }

    /**
     * Writes out the current packet: checks for interrupts, stamps the packet
     * header onto the staged payload, flushes it, and on end-of-message performs
     * the final flush, login-only SSL teardown, and request-complete notification.
     */
    private void writePacket(int tdsMessageStatus) throws SQLServerException {
        final boolean atEOM = (TDS.STATUS_BIT_EOM == (TDS.STATUS_BIT_EOM & tdsMessageStatus));
        final boolean isCancelled = ((TDS.PKT_CANCEL_REQ == tdsMessageType)
                || ((tdsMessageStatus & TDS.STATUS_BIT_ATTENTION) == TDS.STATUS_BIT_ATTENTION));

        // Before writing each packet to the channel, check if an interrupt has occurred.
        if (null != command && (!isCancelled))
            command.checkForInterrupt();

        writePacketHeader(tdsMessageStatus | sendResetConnection);
        sendResetConnection = 0; // reset-connection bit is sent on the first packet only

        flush(atEOM);

        // If this is the last packet then flush the remainder of the request
        // through the socket. The first flush() call ensured that data currently
        // waiting in the socket buffer was sent, flipped the buffers, and started
        // sending data from the staging buffer (flipped to be the new socket buffer).
        // This flush() call ensures that all remaining data in the socket buffer is sent.
        if (atEOM) {
            flush(atEOM);
            isEOMSent = true;
            ++tdsChannel.numMsgsSent;
        }

        // If we just sent the first login request packet and SSL encryption was enabled
        // for login only, then disable SSL now.
        if (TDS.PKT_LOGON70 == tdsMessageType && 1 == packetNum && TDS.ENCRYPT_OFF == con.getNegotiatedEncryptionLevel()) {
            tdsChannel.disableSSL();
        }

        // Notify the currently associated command (if any) that we have written the last
        // of the response packets to the channel.
        if (null != command && (!isCancelled) && atEOM)
            command.onRequestComplete();
    }

    /**
     * Stamps the 8-byte TDS packet header at the start of the staging buffer,
     * and mirrors it into the log buffer when packet logging is enabled.
     */
    private void writePacketHeader(int tdsMessageStatus) {
        int tdsMessageLength = ((Buffer) stagingBuffer).position();
        ++packetNum;

        // Write the TDS packet header back at the start of the staging buffer
        stagingBuffer.put(TDS.PACKET_HEADER_MESSAGE_TYPE, tdsMessageType);
        stagingBuffer.put(TDS.PACKET_HEADER_MESSAGE_STATUS, (byte) tdsMessageStatus);
        stagingBuffer.put(TDS.PACKET_HEADER_MESSAGE_LENGTH, (byte) ((tdsMessageLength >> 8) & 0xFF)); // Note: message
                                                                                                      // length is 16
                                                                                                      // bits,
        stagingBuffer.put(TDS.PACKET_HEADER_MESSAGE_LENGTH + 1, (byte) ((tdsMessageLength >> 0) & 0xFF)); // written BIG
                                                                                                          // ENDIAN
        stagingBuffer.put(TDS.PACKET_HEADER_SPID, (byte) ((tdsChannel.getSPID() >> 8) & 0xFF)); // Note: SPID is 16
                                                                                                // bits,
        stagingBuffer.put(TDS.PACKET_HEADER_SPID + 1, (byte) ((tdsChannel.getSPID() >> 0) & 0xFF)); // written BIG
                                                                                                    // ENDIAN
        stagingBuffer.put(TDS.PACKET_HEADER_SEQUENCE_NUM, (byte) (packetNum % 256));
        stagingBuffer.put(TDS.PACKET_HEADER_WINDOW, (byte) 0); // Window (Reserved/Not used)

        // Write the header to the log buffer too if logging.
        if (tdsChannel.isLoggingPackets()) {
            logBuffer.put(TDS.PACKET_HEADER_MESSAGE_TYPE, tdsMessageType);
            logBuffer.put(TDS.PACKET_HEADER_MESSAGE_STATUS, (byte) tdsMessageStatus);
            logBuffer.put(TDS.PACKET_HEADER_MESSAGE_LENGTH, (byte) ((tdsMessageLength >> 8) & 0xFF)); // Note: message
                                                                                                      // length is 16
                                                                                                      // bits,
            logBuffer.put(TDS.PACKET_HEADER_MESSAGE_LENGTH + 1, (byte) ((tdsMessageLength >> 0) & 0xFF)); // written BIG
                                                                                                          // ENDIAN
            logBuffer.put(TDS.PACKET_HEADER_SPID, (byte) ((tdsChannel.getSPID() >> 8) & 0xFF)); // Note: SPID is 16
                                                                                                // bits,
            logBuffer.put(TDS.PACKET_HEADER_SPID + 1, (byte) ((tdsChannel.getSPID() >> 0) & 0xFF)); // written BIG
                                                                                                    // ENDIAN
            logBuffer.put(TDS.PACKET_HEADER_SEQUENCE_NUM, (byte) (packetNum % 256));
            logBuffer.put(TDS.PACKET_HEADER_WINDOW, (byte) 0); // Window (Reserved/Not used);
        }
    }

    /**
     * Drains the socket buffer to the channel, then swaps the staging and socket
     * buffers so staged data starts going out; prepares the next packet unless
     * this is the end of the message.
     */
    void flush(boolean atEOM) throws SQLServerException {
        // First, flush any data left in the socket buffer.
        tdsChannel.write(socketBuffer.array(), ((Buffer) socketBuffer).position(), socketBuffer.remaining());
        ((Buffer) socketBuffer).position(((Buffer) socketBuffer).limit());

        // If there is data in the staging buffer that needs to be written
        // to the socket, the socket buffer is now empty, so swap buffers
        // and start writing data from the staging buffer.
        if (((Buffer) stagingBuffer).position() >= TDS_PACKET_HEADER_SIZE) {
            // Swap the packet buffers ...
            ByteBuffer swapBuffer = stagingBuffer;
            stagingBuffer = socketBuffer;
            socketBuffer = swapBuffer;

            // ... and prepare to send data from the from the new socket
            // buffer (the old staging buffer).
            //
            // We need to use flip() rather than rewind() here so that
            // the socket buffer's limit is properly set for the last
            // packet, which may be shorter than the other packets.
            ((Buffer) socketBuffer).flip();
            ((Buffer) stagingBuffer).clear();

            // If we are logging TDS packets then log the packet we're about
            // to send over the wire now.
            if (tdsChannel.isLoggingPackets()) {
                tdsChannel.logPacket(logBuffer.array(), 0, ((Buffer) socketBuffer).limit(),
                        this.toString() + " sending packet (" + ((Buffer) socketBuffer).limit() + " bytes)");
            }

            // Prepare for the next packet
            if (!atEOM)
                preparePacket();

            // Finally, start sending data from the new socket buffer.
            tdsChannel.write(socketBuffer.array(), ((Buffer) socketBuffer).position(), socketBuffer.remaining());
            ((Buffer) socketBuffer).position(((Buffer) socketBuffer).limit());
        }
    }

    // Composite write operations

    /**
     * Write out elements common to all RPC values.
     *
     * @param sName
     *        the optional parameter name
     * @param bOut
     *        boolean true if the value that follows is being registered as an output parameter
     * @param tdsType
     *        TDS type of the value that follows
     */
    void writeRPCNameValType(String sName, boolean bOut, TDSType tdsType) throws SQLServerException {
        int nNameLen = 0;

        if (null != sName)
            nNameLen = sName.length() + 1; // The @ prefix is required for the param

        writeByte((byte) nNameLen); // param name len
        if (nNameLen > 0) {
            writeChar('@');
            writeString(sName);
        }

        if (null != cryptoMeta)
            writeByte((byte) (bOut ? 1 | TDS.AE_METADATA : 0 | TDS.AE_METADATA)); // status
        else
            writeByte((byte) (bOut ? 1 : 0)); // status

        writeByte(tdsType.byteValue()); // type
    }

    /**
     * Append a boolean value in RPC transmission format.
     *
     * @param sName
     *        the optional parameter name
     * @param booleanValue
     *        the data value
     * @param bOut
     *        boolean true if the data value is being registered as an output parameter
     */
    void writeRPCBit(String sName, Boolean booleanValue, boolean bOut) throws SQLServerException {
        writeRPCNameValType(sName, bOut, TDSType.BITN);
        writeByte((byte) 1); // max length of datatype
        if (null == booleanValue) {
            writeByte((byte) 0); // len of data bytes
        } else {
            writeByte((byte) 1); // length of datatype
            writeByte((byte) (booleanValue ? 1 : 0));
        }
    }

    /**
     * Append a byte value in RPC transmission format.
     *
     * @param sName
     *        the optional parameter name
     * @param byteValue
     *        the data value
     * @param bOut
     *        boolean true if the data value is being registered as an output parameter
     */
    void writeRPCByte(String sName, Byte byteValue, boolean bOut) throws SQLServerException {
        writeRPCNameValType(sName, bOut, TDSType.INTN);
        writeByte((byte) 1); // max length of datatype
        if (null == byteValue) {
            writeByte((byte) 0); // len of data bytes
        } else {
            writeByte((byte) 1); // length of datatype
            writeByte(byteValue);
        }
    }

    /**
     * Append a short value in RPC transmission format.
     *
     * @param sName
     *        the optional parameter name
     * @param shortValue
     *        the data value
     * @param bOut
     *        boolean true if the data value is being registered as an output parameter
     */
    void writeRPCShort(String sName, Short shortValue, boolean bOut) throws SQLServerException {
        writeRPCNameValType(sName, bOut, TDSType.INTN);
        writeByte((byte) 2); // max length of datatype
        if (null == shortValue) {
            writeByte((byte) 0); // len of data bytes
        } else {
            writeByte((byte) 2); // length of datatype
            writeShort(shortValue);
        }
    }

    /**
     * Append an int value in RPC transmission format.
     *
     * @param sName
     *        the optional parameter name
     * @param intValue
     *        the data value
     * @param bOut
     *        boolean true if the data value is being registered as an output parameter
     */
    void writeRPCInt(String sName, Integer intValue, boolean bOut) throws SQLServerException {
        writeRPCNameValType(sName, bOut, TDSType.INTN);
        writeByte((byte) 4); // max length of datatype
        if (null == intValue) {
            writeByte((byte) 0); // len of data bytes
        } else {
            writeByte((byte) 4); // length of datatype
            writeInt(intValue);
        }
    }

    /**
     * Append a long value in RPC transmission format.
     *
     * @param sName
     *        the optional parameter name
     * @param longValue
     *        the data value
     * @param bOut
     *        boolean true if the data value is being registered as an output parameter
     */
    void writeRPCLong(String sName, Long longValue, boolean bOut) throws SQLServerException {
        writeRPCNameValType(sName, bOut, TDSType.INTN);
        writeByte((byte) 8); // max length of datatype
        if (null == longValue) {
            writeByte((byte) 0); // len of data bytes
        } else {
            writeByte((byte) 8); // length of datatype
            writeLong(longValue);
        }
    }

    /**
     * Append a real value in RPC transmission format.
     *
     * @param sName
     *        the optional parameter name
     * @param floatValue
     *        the data value
     * @param bOut
     *        boolean true if the data value is being registered as an output parameter
     */
    void writeRPCReal(String sName, Float floatValue, boolean bOut) throws SQLServerException {
        writeRPCNameValType(sName, bOut, TDSType.FLOATN);

        // Data and length
        if (null == floatValue) {
            writeByte((byte) 4); // max length
            writeByte((byte) 0); // actual length (0 == null)
        } else {
            writeByte((byte) 4); // max length
            writeByte((byte) 4); // actual length
            writeInt(Float.floatToRawIntBits(floatValue));
        }
    }

    /**
     * Append a sql_variant header in RPC transmission format.
     * NOTE(review): only the null case writes data here; non-null sql_variant
     * values appear to be serialized elsewhere — confirm against callers.
     */
    void writeRPCSqlVariant(String sName, SqlVariant sqlVariantValue, boolean bOut) throws SQLServerException {
        writeRPCNameValType(sName, bOut, TDSType.SQL_VARIANT);

        // Data and length
        if (null == sqlVariantValue) {
            writeInt(0); // max length
            writeInt(0); // actual length
        }
    }

    /**
     * Append a double value in RPC transmission format.
     *
     * @param sName
     *        the optional parameter name
     * @param doubleValue
     *        the data value
     * @param bOut
     *        boolean true if the data value is being registered as an output parameter
     */
    void writeRPCDouble(String sName, Double doubleValue, boolean bOut) throws SQLServerException {
        writeRPCNameValType(sName, bOut, TDSType.FLOATN);

        int l = 8;
        writeByte((byte) l); // max length of datatype

        // Data and length
        if (null == doubleValue) {
            writeByte((byte) 0); // len of data bytes
        } else {
            writeByte((byte) l); // len of data bytes
            // Emit the IEEE-754 bits little-endian, one byte at a time.
            long bits = Double.doubleToLongBits(doubleValue);
            long mask = 0xFF;
            int nShift = 0;
            for (int i = 0; i < 8; i++) {
                writeByte((byte) ((bits & mask) >> nShift));
                nShift += 8;
                mask = mask << 8;
            }
        }
    }

    /**
     * Append a big decimal in RPC transmission format.
     *
     * @param sName
     *        the optional parameter name
     * @param bdValue
     *        the data value
     * @param nScale
     *        the desired scale
     * @param bOut
     *        boolean true if the data value is being registered as an output parameter
     */
    void writeRPCBigDecimal(String sName, BigDecimal bdValue, int nScale, boolean bOut) throws SQLServerException {
        writeRPCNameValType(sName, bOut, TDSType.DECIMALN);
        writeByte((byte) 0x11); // maximum length
        writeByte((byte) SQLServerConnection.maxDecimalPrecision); // precision

        // DDC.convertBigDecimalToBytes produces the sign+magnitude wire encoding.
        byte[] valueBytes = DDC.convertBigDecimalToBytes(bdValue, nScale);
        writeBytes(valueBytes, 0, valueBytes.length);
    }

    /**
     * Appends a standard v*max header for RPC parameter transmission.
     *
     * @param headerLength
     *        the total length of the PLP data block.
     * @param isNull
     *        true if the value is NULL.
     * @param collation
     *        The SQL collation associated with the value that follows the v*max header. Null for non-textual types.
     */
    void writeVMaxHeader(long headerLength, boolean isNull, SQLCollation collation) throws SQLServerException {
        // Send v*max length indicator 0xFFFF.
        writeShort((short) 0xFFFF);

        // Send collation if requested.
        if (null != collation)
            collation.writeCollation(this);

        // Handle null here and return, we're done here if it's null.
        if (isNull) {
            // Null header for v*max types is 0xFFFFFFFFFFFFFFFF.
            writeLong(0xFFFFFFFFFFFFFFFFL);
        } else if (DataTypes.UNKNOWN_STREAM_LENGTH == headerLength) {
            // Append v*max length.
            // UNKNOWN_PLP_LEN is 0xFFFFFFFFFFFFFFFE
            writeLong(0xFFFFFFFFFFFFFFFEL);

            // NOTE: Don't send the first chunk length, this will be calculated by caller.
        } else {
            // For v*max types with known length, length is <totallength8><chunklength4>
            // We're sending same total length as chunk length (as we're sending 1 chunk).
            writeLong(headerLength);
        }
    }

    /**
     * Utility for internal writeRPCString calls
     */
    void writeRPCStringUnicode(String sValue) throws SQLServerException {
        writeRPCStringUnicode(null, sValue, false, null);
    }

    /**
     * Writes a string value as Unicode for RPC
     *
     * @param sName
     *        the optional parameter name
     * @param sValue
     *        the data value
     * @param bOut
     *        boolean true if the data value is being registered as an output parameter
     * @param collation
     *        the collation of the data value
     */
    void writeRPCStringUnicode(String sName, String sValue, boolean bOut,
            SQLCollation collation) throws SQLServerException {
        boolean bValueNull = (sValue == null);
        int nValueLen = bValueNull ? 0 : (2 * sValue.length());

        // Textual RPC requires a collation. If none is provided, as is the case when
        // the SSType is non-textual, then use the database collation by default.
        if (null == collation)
            collation = con.getDatabaseCollation();

        /*
         * Use PLP encoding if either OUT params were specified or if the user query exceeds
         * DataTypes.SHORT_VARTYPE_MAX_BYTES
         */
        if (nValueLen > DataTypes.SHORT_VARTYPE_MAX_BYTES || bOut) {
            writeRPCNameValType(sName, bOut, TDSType.NVARCHAR);

            // Handle Yukon v*max type header here.
            writeVMaxHeader(nValueLen, // Length
                    bValueNull, // Is null?
                    collation);

            // Send the data.
            if (!bValueNull) {
                if (nValueLen > 0) {
                    writeInt(nValueLen);
                    writeString(sValue);
                }

                // Send the terminator PLP chunk.
                writeInt(0);
            }
        } else { // non-PLP type
            // Write maximum length of data
            writeRPCNameValType(sName, bOut, TDSType.NVARCHAR);
            writeShort((short) DataTypes.SHORT_VARTYPE_MAX_BYTES);

            collation.writeCollation(this);

            // Data and length
            if (bValueNull) {
                writeShort((short) -1); // actual len
            } else {
                // Write actual length of data
                writeShort((short) nValueLen);

                // If length is zero, we're done.
                if (0 != nValueLen)
                    writeString(sValue); // data
            }
        }
    }

    /**
     * Writes the TVP parameter header: status, type token, three-part type name,
     * column metadata (or the null token), optional order-unique metadata, the
     * end token, and finally the rows.
     */
    void writeTVP(TVP value) throws SQLServerException {
        if (!value.isNull()) {
            writeByte((byte) 0); // status
        } else {
            // Default TVP
            writeByte((byte) TDS.TVP_STATUS_DEFAULT); // default TVP
        }

        writeByte((byte) TDS.TDS_TVP);

        /*
         * TVP_TYPENAME = DbName OwningSchema TypeName
         */
        // Database where TVP type resides
        if (null != value.getDbNameTVP()) {
            writeByte((byte) value.getDbNameTVP().length());
            writeString(value.getDbNameTVP());
        } else
            writeByte((byte) 0x00); // empty DB name

        // Schema where TVP type resides
        if (null != value.getOwningSchemaNameTVP()) {
            writeByte((byte) value.getOwningSchemaNameTVP().length());
            writeString(value.getOwningSchemaNameTVP());
        } else
            writeByte((byte) 0x00); // empty Schema name

        // TVP type name
        if (null != value.getTVPName()) {
            writeByte((byte) value.getTVPName().length());
            writeString(value.getTVPName());
        } else
            writeByte((byte) 0x00); // empty TVP name

        if (!value.isNull()) {
            writeTVPColumnMetaData(value);

            // optional OrderUnique metadata
            writeTvpOrderUnique(value);
        } else {
            writeShort((short) TDS.TVP_NULL_TOKEN);
        }

        // TVP_END_TOKEN
        writeByte((byte) 0x00);

        try {
            writeTVPRows(value);
        } catch (NumberFormatException e) {
            throw new SQLServerException(SQLServerException.getErrString("R_TVPInvalidColumnValue"), e);
        } catch (ClassCastException e) {
            throw new SQLServerException(SQLServerException.getErrString("R_TVPInvalidColumnValue"), e);
        }
    }

    // Writes all TVP rows. When the TVP is backed by a server-cursor ResultSet on
    // this same connection, rows are sent one packet at a time with the TVP headers
    // and command state cached and restored around each fetch.
    void writeTVPRows(TVP value) throws
SQLServerException {
        boolean tdsWritterCached = false;
        ByteBuffer cachedTVPHeaders = null;
        TDSCommand cachedCommand = null;

        boolean cachedRequestComplete = false;
        boolean cachedInterruptsEnabled = false;
        boolean cachedProcessedResponse = false;

        if (!value.isNull()) {
            // ResultSet and Server Cursor
            // is used, the tdsWriter of the calling preparedStatement is overwritten by the SQLServerResultSet#next()
            // method when fetching new rows.
            // Therefore, we need to send TVP data row by row before fetching new row.
            if (TVPType.ResultSet == value.tvpType) {
                if ((null != value.sourceResultSet) && (value.sourceResultSet instanceof SQLServerResultSet)) {
                    SQLServerResultSet sourceResultSet = (SQLServerResultSet) value.sourceResultSet;
                    SQLServerStatement src_stmt = (SQLServerStatement) sourceResultSet.getStatement();
                    int resultSetServerCursorId = sourceResultSet.getServerCursorId();

                    // Only cache when the source ResultSet lives on this same connection
                    // and uses a server cursor (otherwise next() cannot clobber this writer).
                    if (con.equals(src_stmt.getConnection()) && 0 != resultSetServerCursorId) {
                        // Snapshot the already-staged TVP headers and the command state so
                        // they can be replayed before each row.
                        cachedTVPHeaders = ByteBuffer.allocate(stagingBuffer.capacity()).order(stagingBuffer.order());
                        cachedTVPHeaders.put(stagingBuffer.array(), 0, ((Buffer) stagingBuffer).position());

                        cachedCommand = this.command;

                        cachedRequestComplete = command.getRequestComplete();
                        cachedInterruptsEnabled = command.getInterruptsEnabled();
                        cachedProcessedResponse = command.getProcessedResponse();

                        tdsWritterCached = true;

                        if (sourceResultSet.isForwardOnly()) {
                            sourceResultSet.setFetchSize(1);
                        }
                    }
                }
            }

            Map<Integer, SQLServerMetaData> columnMetadata = value.getColumnMetadata();
            Iterator<Entry<Integer, SQLServerMetaData>> columnsIterator;

            while (value.next()) {
                // restore command and TDS header, which have been overwritten by value.next()
                if (tdsWritterCached) {
                    command = cachedCommand;

                    ((Buffer) stagingBuffer).clear();
                    ((Buffer) logBuffer).clear();
                    writeBytes(cachedTVPHeaders.array(), 0, ((Buffer) cachedTVPHeaders).position());
                }

                Object[] rowData = value.getRowData();

                // ROW
                writeByte((byte) TDS.TVP_ROW);
                columnsIterator = columnMetadata.entrySet().iterator();
                int currentColumn = 0;
                while (columnsIterator.hasNext()) {
                    Map.Entry<Integer, SQLServerMetaData> columnPair = columnsIterator.next();

                    // If useServerDefault is set, client MUST NOT emit TvpColumnData for the associated column
                    if (columnPair.getValue().useServerDefault) {
                        currentColumn++;
                        continue;
                    }

                    JDBCType jdbcType = JDBCType.of(columnPair.getValue().javaSqlType);
                    String currentColumnStringValue = null;

                    Object currentObject = null;
                    if (null != rowData) {
                        // if rowData has value for the current column, retrieve it. If not, current column will stay
                        // null.
                        if (rowData.length > currentColumn) {
                            currentObject = rowData[currentColumn];
                            if (null != currentObject) {
                                currentColumnStringValue = String.valueOf(currentObject);
                            }
                        }
                    }
                    writeInternalTVPRowValues(jdbcType, currentColumnStringValue, currentObject, columnPair, false);
                    currentColumn++;
                }

                // send this row, read its response (throw exception in case of errors) and reset command status
                if (tdsWritterCached) {
                    // TVP_END_TOKEN
                    writeByte((byte) 0x00);

                    writePacket(TDS.STATUS_BIT_EOM);

                    TDSReader tdsReader = tdsChannel.getReader(command);
                    int tokenType = tdsReader.peekTokenType();

                    if (TDS.TDS_ERR == tokenType) {
                        SQLServerError databaseError = new SQLServerError();
                        databaseError.setFromTDS(tdsReader);

                        SQLServerException.makeFromDatabaseError(con, null, databaseError.getErrorMessage(),
                                databaseError, false);
                    }

                    command.setInterruptsEnabled(true);
                    command.setRequestComplete(false);
                }
            }
        }

        // reset command status which have been overwritten
        if (tdsWritterCached) {
            command.setRequestComplete(cachedRequestComplete);
            command.setInterruptsEnabled(cachedInterruptsEnabled);
            command.setProcessedResponse(cachedProcessedResponse);
        } else {
            // TVP_END_TOKEN
            writeByte((byte) 0x00);
        }
    }

    // Writes a single TVP column value in its wire format for the given JDBC type;
    // re-entered with isSqlVariant=true for SQL_VARIANT columns.
    private void writeInternalTVPRowValues(JDBCType jdbcType, String currentColumnStringValue, Object currentObject,
            Map.Entry<Integer, SQLServerMetaData> columnPair, boolean isSqlVariant) throws SQLServerException {
boolean isShortValue, isNull; int dataLength; switch (jdbcType) { case BIGINT: if (null == currentColumnStringValue) writeByte((byte) 0); else { if (isSqlVariant) { writeTVPSqlVariantHeader(10, TDSType.INT8.byteValue(), (byte) 0); } else { writeByte((byte) 8); } writeLong(Long.valueOf(currentColumnStringValue).longValue()); } break; case BIT: if (null == currentColumnStringValue) writeByte((byte) 0); else { if (isSqlVariant) writeTVPSqlVariantHeader(3, TDSType.BIT1.byteValue(), (byte) 0); else writeByte((byte) 1); writeByte((byte) (Boolean.valueOf(currentColumnStringValue).booleanValue() ? 1 : 0)); } break; case INTEGER: if (null == currentColumnStringValue) writeByte((byte) 0); else { if (!isSqlVariant) writeByte((byte) 4); else writeTVPSqlVariantHeader(6, TDSType.INT4.byteValue(), (byte) 0); writeInt(Integer.valueOf(currentColumnStringValue).intValue()); } break; case SMALLINT: case TINYINT: if (null == currentColumnStringValue) writeByte((byte) 0); else { if (isSqlVariant) { writeTVPSqlVariantHeader(6, TDSType.INT4.byteValue(), (byte) 0); writeInt(Integer.valueOf(currentColumnStringValue)); } else { writeByte((byte) 2); // length of datatype writeShort(Short.valueOf(currentColumnStringValue).shortValue()); } } break; case DECIMAL: case NUMERIC: if (null == currentColumnStringValue) writeByte((byte) 0); else { if (isSqlVariant) { writeTVPSqlVariantHeader(21, TDSType.DECIMALN.byteValue(), (byte) 2); writeByte((byte) 38); // scale (byte)variantType.getScale() writeByte((byte) 4); // scale (byte)variantType.getScale() } else { writeByte((byte) TDSWriter.BIGDECIMAL_MAX_LENGTH); // maximum length } BigDecimal bdValue = new BigDecimal(currentColumnStringValue); /* * setScale of all BigDecimal value based on metadata as scale is not sent separately for individual * value. Use the rounding used in Server. 
Say, for BigDecimal("0.1"), if scale in metdadata is 0, * then ArithmeticException would be thrown if RoundingMode is not set */ bdValue = bdValue.setScale(columnPair.getValue().scale, RoundingMode.HALF_UP); byte[] valueBytes = DDC.convertBigDecimalToBytes(bdValue, bdValue.scale()); // 1-byte for sign and 16-byte for integer byte[] byteValue = new byte[17]; // removing the precision and scale information from the valueBytes array System.arraycopy(valueBytes, 2, byteValue, 0, valueBytes.length - 2); writeBytes(byteValue); } break; case DOUBLE: if (null == currentColumnStringValue) writeByte((byte) 0); // len of data bytes else { if (isSqlVariant) { writeTVPSqlVariantHeader(10, TDSType.FLOAT8.byteValue(), (byte) 0); writeDouble(Double.valueOf(currentColumnStringValue)); break; } writeByte((byte) 8); // len of data bytes long bits = Double.doubleToLongBits(Double.valueOf(currentColumnStringValue).doubleValue()); long mask = 0xFF; int nShift = 0; for (int i = 0; i < 8; i++) { writeByte((byte) ((bits & mask) >> nShift)); nShift += 8; mask = mask << 8; } } break; case FLOAT: case REAL: if (null == currentColumnStringValue) writeByte((byte) 0); else { if (isSqlVariant) { writeTVPSqlVariantHeader(6, TDSType.FLOAT4.byteValue(), (byte) 0); writeInt(Float.floatToRawIntBits(Float.valueOf(currentColumnStringValue).floatValue())); } else { writeByte((byte) 4); writeInt(Float.floatToRawIntBits(Float.valueOf(currentColumnStringValue).floatValue())); } } break; case DATE: case TIME: case TIMESTAMP: case DATETIMEOFFSET: case DATETIME: case SMALLDATETIME: case TIMESTAMP_WITH_TIMEZONE: case TIME_WITH_TIMEZONE: case CHAR: case VARCHAR: case NCHAR: case NVARCHAR: case LONGVARCHAR: case LONGNVARCHAR: case SQLXML: isShortValue = (2L * columnPair.getValue().precision) <= DataTypes.SHORT_VARTYPE_MAX_BYTES; isNull = (null == currentColumnStringValue); dataLength = isNull ? 
0 : currentColumnStringValue.length() * 2; if (!isShortValue) { // check null if (isNull) { // Null header for v*max types is 0xFFFFFFFFFFFFFFFF. writeLong(0xFFFFFFFFFFFFFFFFL); } else if (isSqlVariant) { // for now we send as bigger type, but is sendStringParameterAsUnicode is set to false we can't // send nvarchar // since we are writing as nvarchar we need to write as tdstype.bigvarchar value because if we // want to supprot varchar(8000) it becomes as nvarchar, 8000*2 therefore we should send as // longvarchar, // but we cannot send more than 8000 cause sql_variant datatype in sql server does not support // then throw exception if user is sending more than that if (dataLength > 2 * DataTypes.SHORT_VARTYPE_MAX_BYTES) { MessageFormat form = new MessageFormat( SQLServerException.getErrString("R_invalidStringValue")); throw new SQLServerException(null, form.format(new Object[] {}), null, 0, false); } int length = currentColumnStringValue.length(); writeTVPSqlVariantHeader(9 + length, TDSType.BIGVARCHAR.byteValue(), (byte) 0x07); SQLCollation col = con.getDatabaseCollation(); // write collation for sql variant writeInt(col.getCollationInfo()); writeByte((byte) col.getCollationSortID()); writeShort((short) (length)); writeBytes(currentColumnStringValue.getBytes()); break; } else if (DataTypes.UNKNOWN_STREAM_LENGTH == dataLength) // Append v*max length. // UNKNOWN_PLP_LEN is 0xFFFFFFFFFFFFFFFE writeLong(0xFFFFFFFFFFFFFFFEL); else // For v*max types with known length, length is <totallength8><chunklength4> writeLong(dataLength); if (!isNull) { if (dataLength > 0) { writeInt(dataLength); writeString(currentColumnStringValue); } // Send the terminator PLP chunk. 
writeInt(0); } } else { if (isNull) writeShort((short) -1); // actual len else { if (isSqlVariant) { // for now we send as bigger type, but is sendStringParameterAsUnicoe is set to false we // can't send nvarchar // check for this int length = currentColumnStringValue.length() * 2; writeTVPSqlVariantHeader(9 + length, TDSType.NVARCHAR.byteValue(), (byte) 7); SQLCollation col = con.getDatabaseCollation(); // write collation for sql variant writeInt(col.getCollationInfo()); writeByte((byte) col.getCollationSortID()); int stringLength = currentColumnStringValue.length(); byte[] typevarlen = new byte[2]; typevarlen[0] = (byte) (2 * stringLength & 0xFF); typevarlen[1] = (byte) ((2 * stringLength >> 8) & 0xFF); writeBytes(typevarlen); writeString(currentColumnStringValue); break; } else { writeShort((short) dataLength); writeString(currentColumnStringValue); } } } break; case BINARY: case VARBINARY: case LONGVARBINARY: // Handle conversions as done in other types. isShortValue = columnPair.getValue().precision <= DataTypes.SHORT_VARTYPE_MAX_BYTES; isNull = (null == currentObject); if (currentObject instanceof String) dataLength = ParameterUtils.HexToBin(currentObject.toString()).length; else dataLength = isNull ? 0 : ((byte[]) currentObject).length; if (!isShortValue) { // check null if (isNull) // Null header for v*max types is 0xFFFFFFFFFFFFFFFF. writeLong(0xFFFFFFFFFFFFFFFFL); else if (DataTypes.UNKNOWN_STREAM_LENGTH == dataLength) // Append v*max length. // UNKNOWN_PLP_LEN is 0xFFFFFFFFFFFFFFFE writeLong(0xFFFFFFFFFFFFFFFEL); else // For v*max types with known length, length is <totallength8><chunklength4> writeLong(dataLength); if (!isNull) { if (dataLength > 0) { writeInt(dataLength); if (currentObject instanceof String) writeBytes(ParameterUtils.HexToBin(currentObject.toString())); else writeBytes((byte[]) currentObject); } // Send the terminator PLP chunk. 
writeInt(0);
        }
    }
    else { // short (non-PLP) binary value
        if (isNull)
            writeShort((short) -1); // actual len
        else {
            writeShort((short) dataLength);
            // A String here carries hex-encoded binary; convert before sending.
            if (currentObject instanceof String)
                writeBytes(ParameterUtils.HexToBin(currentObject.toString()));
            else
                writeBytes((byte[]) currentObject);
        }
    }
    break;

case SQL_VARIANT:
    // sql_variant is not supported on SQL Server 2000 (Shiloh, major version 8) or earlier.
    boolean isShiloh = (8 >= con.getServerMajorVersion());
    if (isShiloh) {
        MessageFormat form = new MessageFormat(SQLServerException.getErrString("R_SQLVariantSupport"));
        throw new SQLServerException(null, form.format(new Object[] {}), null, 0, false);
    }
    // Re-dispatch on the JDBC type derived from the value's runtime Java type,
    // flagging the recursive call as a sql_variant write.
    JDBCType internalJDBCType;
    JavaType javaType = JavaType.of(currentObject);
    internalJDBCType = javaType.getJDBCType(SSType.UNKNOWN, jdbcType);
    writeInternalTVPRowValues(internalJDBCType, currentColumnStringValue, currentObject, columnPair, true);
    break;

default:
    assert false : "Unexpected JDBC type " + jdbcType.toString();
}
}

/**
 * Writes the header that precedes a sql_variant value inside a TVP row.
 *
 * @param length total length, in bytes, of the sql_variant value including its property bytes
 * @param tdsType base TDS type token of the underlying value
 * @param probBytes number of property bytes (PROP_BYTES) that follow the base type token
 * @throws SQLServerException if an I/O error occurs while writing to the channel
 */
private void writeTVPSqlVariantHeader(int length, byte tdsType, byte probBytes) throws SQLServerException {
    writeInt(length);
    writeByte(tdsType);
    writeByte(probBytes);
}

/**
 * Writes the TVP_COLMETADATA section of a table-valued parameter: the column count
 * followed by per-column UserType, Flags, TYPE_INFO and the (always empty) column name.
 */
void writeTVPColumnMetaData(TVP value) throws SQLServerException {
    boolean isShortValue;

    // TVP_COLMETADATA
    writeShort((short) value.getTVPColumnCount());

    Map<Integer, SQLServerMetaData> columnMetadata = value.getColumnMetadata();
    /*
     * TypeColumnMetaData = UserType Flags TYPE_INFO ColName ;
     */
    for (Entry<Integer, SQLServerMetaData> pair : columnMetadata.entrySet()) {
        JDBCType jdbcType = JDBCType.of(pair.getValue().javaSqlType);
        boolean useServerDefault = pair.getValue().useServerDefault;
        // ULONG ; UserType of column
        // The value will be 0x0000 with the exceptions of TIMESTAMP (0x0050) and alias types (greater than 0x00FF).
writeInt(0);
/*
 * Flags = fNullable ; Column is nullable - %x01 fCaseSen -- Ignored ; usUpdateable -- Ignored ; fIdentity ;
 * Column is identity column - %x10 fComputed ; Column is computed - %x20 usReservedODBC -- Ignored ;
 * fFixedLenCLRType-- Ignored ; fDefault ; Column is default value - %x200 usReserved -- Ignored ;
 */
short flags = TDS.FLAG_NULLABLE;
if (useServerDefault) {
    flags |= TDS.FLAG_TVP_DEFAULT_COLUMN;
}
writeShort(flags);

// Type info: TDS type token followed by its type-specific metadata for this column.
switch (jdbcType) {
    case BIGINT:
        writeByte(TDSType.INTN.byteValue());
        writeByte((byte) 8); // max length of datatype
        break;

    case BIT:
        writeByte(TDSType.BITN.byteValue());
        writeByte((byte) 1); // max length of datatype
        break;

    case INTEGER:
        writeByte(TDSType.INTN.byteValue());
        writeByte((byte) 4); // max length of datatype
        break;

    case SMALLINT:
    case TINYINT:
        writeByte(TDSType.INTN.byteValue());
        writeByte((byte) 2); // max length of datatype
        break;

    case DECIMAL:
    case NUMERIC:
        writeByte(TDSType.NUMERICN.byteValue());
        writeByte((byte) 0x11); // maximum length
        writeByte((byte) pair.getValue().precision);
        writeByte((byte) pair.getValue().scale);
        break;

    case DOUBLE:
        writeByte(TDSType.FLOATN.byteValue());
        writeByte((byte) 8); // max length of datatype
        break;

    case FLOAT:
    case REAL:
        writeByte(TDSType.FLOATN.byteValue());
        writeByte((byte) 4); // max length of datatype
        break;

    case DATE:
    case TIME:
    case TIMESTAMP:
    case DATETIMEOFFSET:
    case DATETIME:
    case SMALLDATETIME:
    case TIMESTAMP_WITH_TIMEZONE:
    case TIME_WITH_TIMEZONE:
    case CHAR:
    case VARCHAR:
    case NCHAR:
    case NVARCHAR:
    case LONGVARCHAR:
    case LONGNVARCHAR:
    case SQLXML:
        // Temporal and character columns are all declared as NVARCHAR (2 bytes per character).
        writeByte(TDSType.NVARCHAR.byteValue());
        isShortValue = (2L * pair.getValue().precision) <= DataTypes.SHORT_VARTYPE_MAX_BYTES;
        // Use PLP encoding on Yukon and later with long values
        if (!isShortValue) // PLP
        {
            // Handle Yukon v*max type header here.
            writeShort((short) 0xFFFF);
            con.getDatabaseCollation().writeCollation(this);
        }
        else // non PLP
        {
            writeShort((short) DataTypes.SHORT_VARTYPE_MAX_BYTES);
            con.getDatabaseCollation().writeCollation(this);
        }
        break;

    case BINARY:
    case VARBINARY:
    case LONGVARBINARY:
        writeByte(TDSType.BIGVARBINARY.byteValue());
        isShortValue = pair.getValue().precision <= DataTypes.SHORT_VARTYPE_MAX_BYTES;
        // Use PLP encoding on Yukon and later with long values
        if (!isShortValue) // PLP
            // Handle Yukon v*max type header here.
            writeShort((short) 0xFFFF);
        else // non PLP
            writeShort((short) DataTypes.SHORT_VARTYPE_MAX_BYTES);
        break;

    case SQL_VARIANT:
        writeByte(TDSType.SQL_VARIANT.byteValue());
        writeInt(TDS.SQL_VARIANT_LENGTH); // write length of sql variant 8009
        break;

    default:
        assert false : "Unexpected JDBC type " + jdbcType.toString();
}
// Column name - must be null (from TDS - TVP_COLMETADATA)
writeByte((byte) 0x00);

// [TVP_ORDER_UNIQUE]
// [TVP_COLUMN_ORDERING]
}
}

/**
 * Writes the optional TVP_ORDER_UNIQUE token describing sort order and uniqueness
 * for any TVP columns that carry ordering or unique-key metadata; writes nothing
 * when no column has such metadata.
 */
void writeTvpOrderUnique(TVP value) throws SQLServerException {
    /*
     * TVP_ORDER_UNIQUE = TVP_ORDER_UNIQUE_TOKEN (Count <Count>(ColNum OrderUniqueFlags))
     */
    Map<Integer, SQLServerMetaData> columnMetadata = value.getColumnMetadata();
    Iterator<Entry<Integer, SQLServerMetaData>> columnsIterator = columnMetadata.entrySet().iterator();
    LinkedList<TdsOrderUnique> columnList = new LinkedList<>();
    while (columnsIterator.hasNext()) {
        byte flags = 0;
        Map.Entry<Integer, SQLServerMetaData> pair = columnsIterator.next();
        SQLServerMetaData metaData = pair.getValue();
        if (SQLServerSortOrder.Ascending == metaData.sortOrder)
            flags = TDS.TVP_ORDERASC_FLAG;
        else if (SQLServerSortOrder.Descending == metaData.sortOrder)
            flags = TDS.TVP_ORDERDESC_FLAG;
        if (metaData.isUniqueKey)
            flags |= TDS.TVP_UNIQUE_FLAG;

        // Remember this column if any flags were set
        if (0 != flags)
            columnList.add(new TdsOrderUnique(pair.getKey(), flags));
    }

    // Write flagged columns
    if (!columnList.isEmpty()) {
        writeByte((byte) TDS.TVP_ORDER_UNIQUE_TOKEN);
        writeShort((short)
columnList.size());
for (TdsOrderUnique column : columnList) {
    writeShort((short) (column.columnOrdinal + 1)); // column ordinals on the wire are 1-based
    writeByte(column.flags);
}
}
}

// Pairing of a column ordinal with its TVP order/uniqueness flags.
// NOTE(review): non-static inner class holds a hidden reference to the enclosing
// writer; consider making it static if the enclosing class allows — confirm.
private class TdsOrderUnique {
    int columnOrdinal;
    byte flags;

    TdsOrderUnique(int ordinal, byte flags) {
        this.columnOrdinal = ordinal;
        this.flags = flags;
    }
}

void setCryptoMetaData(CryptoMetadata cryptoMetaForBulk) {
    this.cryptoMeta = cryptoMetaForBulk;
}

CryptoMetadata getCryptoMetaData() {
    return cryptoMeta;
}

/**
 * Writes an Always Encrypted ciphertext value as an RPC parameter, selecting
 * short, PLP or IMAGE length encoding from the ciphertext length.
 *
 * @param bValue the encrypted bytes, or null to send a SQL NULL
 */
void writeEncryptedRPCByteArray(byte bValue[]) throws SQLServerException {
    boolean bValueNull = (bValue == null);
    long nValueLen = bValueNull ? 0 : bValue.length;
    boolean isShortValue = (nValueLen <= DataTypes.SHORT_VARTYPE_MAX_BYTES);

    // PLP applies only between the short-vartype and varmax limits; beyond that, IMAGE-style length.
    boolean isPLP = (!isShortValue) && (nValueLen <= DataTypes.MAX_VARTYPE_MAX_BYTES);

    // Handle Shiloh types here.
    if (isShortValue) {
        writeShort((short) DataTypes.SHORT_VARTYPE_MAX_BYTES);
    }
    else if (isPLP) {
        writeShort((short) DataTypes.SQL_USHORTVARMAXLEN);
    }
    else {
        writeInt(DataTypes.IMAGE_TEXT_MAX_BYTES);
    }

    // Data and length
    if (bValueNull) {
        writeShort((short) -1); // actual len
    }
    else {
        if (isShortValue) {
            writeShort((short) nValueLen); // actual len
        }
        else if (isPLP) {
            writeLong(nValueLen); // actual length
        }
        else {
            writeInt((int) nValueLen); // actual len
        }

        // If length is zero, we're done.
if (0 != nValueLen) {
    if (isPLP) {
        // PLP values carry a chunk length ahead of the chunk bytes.
        writeInt((int) nValueLen);
    }
    writeBytes(bValue);
}

if (isPLP) {
    writeInt(0); // PLP_TERMINATOR, 0x00000000
}
}
}

/**
 * Writes an empty (zero-length, non-null) PLP value for an encrypted parameter.
 */
void writeEncryptedRPCPLP() throws SQLServerException {
    writeShort((short) DataTypes.SQL_USHORTVARMAXLEN);
    writeLong((long) 0); // actual length
    writeInt(0); // PLP_TERMINATOR, 0x00000000
}

/**
 * Writes the encryption metadata (cipher algorithm, encryption type, CEK
 * identifiers and normalization rule version) that follows an encrypted value.
 * NOTE(review): only the first CEK value (index 0) is written — presumably the
 * active key value; confirm against the Always Encrypted protocol.
 */
void writeCryptoMetaData() throws SQLServerException {
    writeByte(cryptoMeta.cipherAlgorithmId);
    writeByte(cryptoMeta.encryptionType.getValue());
    writeInt(cryptoMeta.cekTableEntry.getColumnEncryptionKeyValues().get(0).databaseId);
    writeInt(cryptoMeta.cekTableEntry.getColumnEncryptionKeyValues().get(0).cekId);
    writeInt(cryptoMeta.cekTableEntry.getColumnEncryptionKeyValues().get(0).cekVersion);
    writeBytes(cryptoMeta.cekTableEntry.getColumnEncryptionKeyValues().get(0).cekMdVersion);
    writeByte(cryptoMeta.normalizationRuleVersion);
}

/**
 * Writes a byte array as an RPC parameter, choosing the TDS type from the JDBC
 * type (character, national character or binary family) and using PLP encoding
 * for long values and OUT parameters. Encrypted data always goes as BIGVARBINARY.
 */
void writeRPCByteArray(String sName, byte bValue[], boolean bOut, JDBCType jdbcType, SQLCollation collation) throws SQLServerException {
    boolean bValueNull = (bValue == null);
    int nValueLen = bValueNull ? 0 : bValue.length;
    boolean isShortValue = (nValueLen <= DataTypes.SHORT_VARTYPE_MAX_BYTES);

    // Use PLP encoding on Yukon and later with long values and OUT parameters
    boolean usePLP = (!isShortValue || bOut);

    TDSType tdsType;

    if (null != cryptoMeta) {
        // send encrypted data as BIGVARBINARY
        tdsType = (isShortValue || usePLP) ? TDSType.BIGVARBINARY : TDSType.IMAGE;
        collation = null;
    }
    else
        switch (jdbcType) {
            case CHAR:
            case VARCHAR:
            case LONGVARCHAR:
            case CLOB:
                tdsType = (isShortValue || usePLP) ? TDSType.BIGVARCHAR : TDSType.TEXT;
                if (null == collation)
                    collation = con.getDatabaseCollation();
                break;

            case NCHAR:
            case NVARCHAR:
            case LONGNVARCHAR:
            case NCLOB:
                tdsType = (isShortValue || usePLP) ? TDSType.NVARCHAR : TDSType.NTEXT;
                if (null == collation)
                    collation = con.getDatabaseCollation();
                break;

            case BINARY:
            case VARBINARY:
            case LONGVARBINARY:
            case BLOB:
            default:
                tdsType = (isShortValue || usePLP) ?
TDSType.BIGVARBINARY : TDSType.IMAGE;
                collation = null;
                break;
        }

    writeRPCNameValType(sName, bOut, tdsType);

    if (usePLP) {
        // Handle Yukon v*max type header here.
        writeVMaxHeader(nValueLen, bValueNull, collation);

        // Send the data.
        if (!bValueNull) {
            if (nValueLen > 0) {
                writeInt(nValueLen);
                writeBytes(bValue);
            }

            // Send the terminator PLP chunk.
            writeInt(0);
        }
    }
    else // non-PLP type
    {
        // Handle Shiloh types here.
        if (isShortValue) {
            writeShort((short) DataTypes.SHORT_VARTYPE_MAX_BYTES);
        }
        else {
            writeInt(DataTypes.IMAGE_TEXT_MAX_BYTES);
        }

        if (null != collation)
            collation.writeCollation(this);

        // Data and length
        if (bValueNull) {
            writeShort((short) -1); // actual len
        }
        else {
            if (isShortValue)
                writeShort((short) nValueLen); // actual len
            else
                writeInt(nValueLen); // actual len

            // If length is zero, we're done.
            if (0 != nValueLen)
                writeBytes(bValue);
        }
    }
}

/**
 * Append a timestamp in RPC transmission format as a SQL Server DATETIME data type
 *
 * @param sName
 *            the optional parameter name
 * @param cal
 *            Pure Gregorian calendar containing the timestamp, including its associated time zone
 * @param subSecondNanos
 *            the sub-second nanoseconds (0 - 999,999,999)
 * @param bOut
 *            boolean true if the data value is being registered as an output parameter
 *
 */
void writeRPCDateTime(String sName, GregorianCalendar cal, int subSecondNanos, boolean bOut) throws SQLServerException {
    assert (subSecondNanos >= 0) && (subSecondNanos < Nanos.PER_SECOND) : "Invalid subNanoSeconds value: " + subSecondNanos;
    assert (cal != null) || (subSecondNanos == 0) : "Invalid subNanoSeconds value when calendar is null: " + subSecondNanos;

    writeRPCNameValType(sName, bOut, TDSType.DATETIMEN);
    writeByte((byte) 8); // max length of datatype

    if (null == cal) {
        writeByte((byte) 0); // len of data bytes
        return;
    }

    writeByte((byte) 8); // len of data bytes

    // We need to extract the Calendar's current date & time in terms
    // of the number of days since the SQL Base Date (1/1/1900) plus
    // the number of
milliseconds since midnight in the current day.
    // We cannot rely on any pre-calculated value for the number of
    // milliseconds in a day or the number of milliseconds since the
    // base date to do this because days with DST changes are shorter
    // or longer than "normal" days.
    //
    // ASSUMPTION: We assume we are dealing with a GregorianCalendar here.
    // If not, we have no basis in which to compare dates. E.g. if we
    // are dealing with a Chinese Calendar implementation which does not
    // use the same value for Calendar.YEAR as the GregorianCalendar,
    // we cannot meaningfully compute a value relative to 1/1/1900.

    // First, figure out how many days there have been since the SQL Base Date.
    // These are based on SQL Server algorithms
    int daysSinceSQLBaseDate = DDC.daysSinceBaseDate(cal.get(Calendar.YEAR), cal.get(Calendar.DAY_OF_YEAR), TDS.BASE_YEAR_1900);

    // Next, figure out the number of milliseconds since midnight of the current day.
    int millisSinceMidnight = (subSecondNanos + Nanos.PER_MILLISECOND / 2) / Nanos.PER_MILLISECOND + // Millis into the current second
            1000 * cal.get(Calendar.SECOND) + // Seconds into the current minute
            60 * 1000 * cal.get(Calendar.MINUTE) + // Minutes into the current hour
            60 * 60 * 1000 * cal.get(Calendar.HOUR_OF_DAY); // Hours into the current day

    // The last millisecond of the current day is always rounded to the first millisecond
    // of the next day because DATETIME is only accurate to 1/300th of a second.
    if (millisSinceMidnight >= 1000 * 60 * 60 * 24 - 1) {
        ++daysSinceSQLBaseDate;
        millisSinceMidnight = 0;
    }

    // Last-ditch verification that the value is in the valid range for the
    // DATETIMEN TDS data type (1/1/1753 to 12/31/9999). If it's not, then
    // throw an exception now so that statement execution is safely canceled.
    // Attempting to put an invalid value on the wire would result in a TDS
    // exception, which would close the connection.
    // These are based on SQL Server algorithms
    if (daysSinceSQLBaseDate < DDC.daysSinceBaseDate(1753, 1, TDS.BASE_YEAR_1900)
            || daysSinceSQLBaseDate >= DDC.daysSinceBaseDate(10000, 1, TDS.BASE_YEAR_1900)) {
        MessageFormat form = new MessageFormat(SQLServerException.getErrString("R_valueOutOfRange"));
        Object[] msgArgs = {SSType.DATETIME};
        throw new SQLServerException(form.format(msgArgs), SQLState.DATA_EXCEPTION_DATETIME_FIELD_OVERFLOW, DriverError.NOT_SET, null);
    }

    // And put it all on the wire...

    // Number of days since the SQL Server Base Date (January 1, 1900)
    writeInt(daysSinceSQLBaseDate);

    // Milliseconds since midnight (at a resolution of three hundredths of a second)
    writeInt((3 * millisSinceMidnight + 5) / 10);
}

/**
 * Appends a TIME value in RPC transmission format (TDSType.TIMEN) with the given scale.
 * A null calendar is sent as a zero-length (NULL) value.
 */
void writeRPCTime(String sName, GregorianCalendar localCalendar, int subSecondNanos, int scale, boolean bOut) throws SQLServerException {
    writeRPCNameValType(sName, bOut, TDSType.TIMEN);
    writeByte((byte) scale);

    if (null == localCalendar) {
        writeByte((byte) 0);
        return;
    }

    writeByte((byte) TDS.timeValueLength(scale));
    writeScaledTemporal(localCalendar, subSecondNanos, scale, SSType.TIME);
}

/**
 * Appends a DATE value in RPC transmission format (TDSType.DATEN).
 * A null calendar is sent as a zero-length (NULL) value.
 */
void writeRPCDate(String sName, GregorianCalendar localCalendar, boolean bOut) throws SQLServerException {
    writeRPCNameValType(sName, bOut, TDSType.DATEN);

    if (null == localCalendar) {
        writeByte((byte) 0);
        return;
    }

    writeByte((byte) TDS.DAYS_INTO_CE_LENGTH);
    writeScaledTemporal(localCalendar, 0, // subsecond nanos (none for a date value)
            0, // scale (dates are not scaled)
            SSType.DATE);
}

/**
 * Appends an Always Encrypted TIME value: the encrypted bytes as BIGVARBINARY
 * followed by the plaintext type info (TIMEN + scale) and crypto metadata.
 * Rejects sendTimeAsDatetime mode, which is incompatible with encryption.
 */
void writeEncryptedRPCTime(String sName, GregorianCalendar localCalendar, int subSecondNanos, int scale, boolean bOut, SQLServerStatement statement) throws SQLServerException {
    if (con.getSendTimeAsDatetime()) {
        throw new SQLServerException(SQLServerException.getErrString("R_sendTimeAsDateTimeForAE"), null);
    }
    writeRPCNameValType(sName, bOut, TDSType.BIGVARBINARY);

    if (null == localCalendar)
        writeEncryptedRPCByteArray(null);
    else
writeEncryptedRPCByteArray(writeEncryptedScaledTemporal(localCalendar, subSecondNanos, scale, SSType.TIME, (short) 0, statement));

    writeByte(TDSType.TIMEN.byteValue());
    writeByte((byte) scale);
    writeCryptoMetaData();
}

/**
 * Appends an Always Encrypted DATE value: the encrypted bytes as BIGVARBINARY
 * followed by the plaintext type info (DATEN) and crypto metadata.
 */
void writeEncryptedRPCDate(String sName, GregorianCalendar localCalendar, boolean bOut, SQLServerStatement statement) throws SQLServerException {
    writeRPCNameValType(sName, bOut, TDSType.BIGVARBINARY);

    if (null == localCalendar)
        writeEncryptedRPCByteArray(null);
    else
        writeEncryptedRPCByteArray(writeEncryptedScaledTemporal(localCalendar, 0, // subsecond nanos (none for a date value)
                0, // scale (dates are not scaled)
                SSType.DATE, (short) 0, statement));

    writeByte(TDSType.DATEN.byteValue());
    writeCryptoMetaData();
}

/**
 * Appends an Always Encrypted DATETIME or SMALLDATETIME value: the encrypted
 * bytes as BIGVARBINARY followed by DATETIMEN type info (length 4 for
 * SMALLDATETIME, 8 for DATETIME) and crypto metadata.
 */
void writeEncryptedRPCDateTime(String sName, GregorianCalendar cal, int subSecondNanos, boolean bOut, JDBCType jdbcType, SQLServerStatement statement) throws SQLServerException {
    assert (subSecondNanos >= 0) && (subSecondNanos < Nanos.PER_SECOND) : "Invalid subNanoSeconds value: " + subSecondNanos;
    assert (cal != null) || (subSecondNanos == 0) : "Invalid subNanoSeconds value when calendar is null: " + subSecondNanos;

    writeRPCNameValType(sName, bOut, TDSType.BIGVARBINARY);

    if (null == cal)
        writeEncryptedRPCByteArray(null);
    else
        writeEncryptedRPCByteArray(getEncryptedDateTimeAsBytes(cal, subSecondNanos, jdbcType, statement));

    if (JDBCType.SMALLDATETIME == jdbcType) {
        writeByte(TDSType.DATETIMEN.byteValue());
        writeByte((byte) 4);
    }
    else {
        writeByte(TDSType.DATETIMEN.byteValue());
        writeByte((byte) 8);
    }

    writeCryptoMetaData();
}

// getEncryptedDateTimeAsBytes is called if jdbcType/ssType is SMALLDATETIME or DATETIME.
// It normalizes the calendar value to the type's wire layout (little-endian days +
// minutes/300ths-of-a-second) and returns the ciphertext of those bytes.
byte[] getEncryptedDateTimeAsBytes(GregorianCalendar cal, int subSecondNanos, JDBCType jdbcType, SQLServerStatement statement) throws SQLServerException {
    int daysSinceSQLBaseDate = DDC.daysSinceBaseDate(cal.get(Calendar.YEAR), cal.get(Calendar.DAY_OF_YEAR), TDS.BASE_YEAR_1900);

    // Next, figure out the number of milliseconds since
midnight of the current day.
    int millisSinceMidnight = (subSecondNanos + Nanos.PER_MILLISECOND / 2) / Nanos.PER_MILLISECOND + // Millis into the current second
            1000 * cal.get(Calendar.SECOND) + // Seconds into the current minute
            60 * 1000 * cal.get(Calendar.MINUTE) + // Minutes into the current hour
            60 * 60 * 1000 * cal.get(Calendar.HOUR_OF_DAY); // Hours into the current day

    // The last millisecond of the current day is always rounded to the first millisecond
    // of the next day because DATETIME is only accurate to 1/300th of a second.
    if (millisSinceMidnight >= 1000 * 60 * 60 * 24 - 1) {
        ++daysSinceSQLBaseDate;
        millisSinceMidnight = 0;
    }

    if (JDBCType.SMALLDATETIME == jdbcType) {
        int secondsSinceMidnight = (millisSinceMidnight / 1000);
        int minutesSinceMidnight = (secondsSinceMidnight / 60);

        // Values that are 29.998 seconds or less are rounded down to the nearest minute
        minutesSinceMidnight = ((secondsSinceMidnight % 60) > 29.998) ? minutesSinceMidnight + 1 : minutesSinceMidnight;

        // minutesSinceMidnight for (23:59:30)
        int maxMinutesSinceMidnight_SmallDateTime = 1440;

        // Verification for smalldatetime to be within valid range of (1900.01.01) to (2079.06.06)
        // smalldatetime for unencrypted does not allow insertion of 2079.06.06 23:59:59 and it is rounded up
        // to 2079.06.07 00:00:00, therefore, we are checking minutesSinceMidnight for that condition. If it's not
        // within valid range, then throw an exception now so that statement execution is safely canceled.
        // 157 is the calculated day of year from 06-06, 1440 is minutes since midnight for (23:59:30)
        if ((daysSinceSQLBaseDate < DDC.daysSinceBaseDate(1900, 1, TDS.BASE_YEAR_1900)
                || daysSinceSQLBaseDate > DDC.daysSinceBaseDate(2079, 157, TDS.BASE_YEAR_1900))
                || (daysSinceSQLBaseDate == DDC.daysSinceBaseDate(2079, 157, TDS.BASE_YEAR_1900)
                        && minutesSinceMidnight >= maxMinutesSinceMidnight_SmallDateTime)) {
            MessageFormat form = new MessageFormat(SQLServerException.getErrString("R_valueOutOfRange"));
            Object[] msgArgs = {SSType.SMALLDATETIME};
            throw new SQLServerException(form.format(msgArgs), SQLState.DATA_EXCEPTION_DATETIME_FIELD_OVERFLOW, DriverError.NOT_SET, null);
        }

        // SMALLDATETIME wire layout: 2-byte day count + 2-byte minute count, both little-endian.
        ByteBuffer days = ByteBuffer.allocate(2).order(ByteOrder.LITTLE_ENDIAN);
        days.putShort((short) daysSinceSQLBaseDate);
        ByteBuffer seconds = ByteBuffer.allocate(2).order(ByteOrder.LITTLE_ENDIAN);
        seconds.putShort((short) minutesSinceMidnight);

        byte[] value = new byte[4];
        System.arraycopy(days.array(), 0, value, 0, 2);
        System.arraycopy(seconds.array(), 0, value, 2, 2);
        return SQLServerSecurityUtility.encryptWithKey(value, cryptoMeta, con, statement);
    }
    else if (JDBCType.DATETIME == jdbcType) {
        // Last-ditch verification that the value is in the valid range for the
        // DATETIMEN TDS data type (1/1/1753 to 12/31/9999). If it's not, then
        // throw an exception now so that statement execution is safely canceled.
        // Attempting to put an invalid value on the wire would result in a TDS
        // exception, which would close the connection.
        // These are based on SQL Server algorithms
        // And put it all on the wire...
if (daysSinceSQLBaseDate < DDC.daysSinceBaseDate(1753, 1, TDS.BASE_YEAR_1900)
                || daysSinceSQLBaseDate >= DDC.daysSinceBaseDate(10000, 1, TDS.BASE_YEAR_1900)) {
            MessageFormat form = new MessageFormat(SQLServerException.getErrString("R_valueOutOfRange"));
            Object[] msgArgs = {SSType.DATETIME};
            throw new SQLServerException(form.format(msgArgs), SQLState.DATA_EXCEPTION_DATETIME_FIELD_OVERFLOW, DriverError.NOT_SET, null);
        }

        // Number of days since the SQL Server Base Date (January 1, 1900)
        // DATETIME wire layout: 4-byte day count + 4-byte count of 1/300ths of a second, little-endian.
        ByteBuffer days = ByteBuffer.allocate(4).order(ByteOrder.LITTLE_ENDIAN);
        days.putInt(daysSinceSQLBaseDate);
        ByteBuffer seconds = ByteBuffer.allocate(4).order(ByteOrder.LITTLE_ENDIAN);
        seconds.putInt((3 * millisSinceMidnight + 5) / 10);

        byte[] value = new byte[8];
        System.arraycopy(days.array(), 0, value, 0, 4);
        System.arraycopy(seconds.array(), 0, value, 4, 4);
        return SQLServerSecurityUtility.encryptWithKey(value, cryptoMeta, con, statement);
    }

    assert false : "Unexpected JDBCType type " + jdbcType;
    return null;
}

/**
 * Appends an Always Encrypted DATETIME2 value: encrypted bytes as BIGVARBINARY
 * followed by the plaintext type info (DATETIME2N + scale) and crypto metadata.
 */
void writeEncryptedRPCDateTime2(String sName, GregorianCalendar localCalendar, int subSecondNanos, int scale, boolean bOut, SQLServerStatement statement) throws SQLServerException {
    writeRPCNameValType(sName, bOut, TDSType.BIGVARBINARY);

    if (null == localCalendar)
        writeEncryptedRPCByteArray(null);
    else
        writeEncryptedRPCByteArray(writeEncryptedScaledTemporal(localCalendar, subSecondNanos, scale, SSType.DATETIME2, (short) 0, statement));

    writeByte(TDSType.DATETIME2N.byteValue());
    writeByte((byte) (scale));
    writeCryptoMetaData();
}

/**
 * Appends an Always Encrypted DATETIMEOFFSET value: encrypted bytes as
 * BIGVARBINARY followed by the plaintext type info (DATETIMEOFFSETN + scale)
 * and crypto metadata. The calendar must already be normalized to UTC.
 */
void writeEncryptedRPCDateTimeOffset(String sName, GregorianCalendar utcCalendar, int minutesOffset, int subSecondNanos, int scale, boolean bOut, SQLServerStatement statement) throws SQLServerException {
    writeRPCNameValType(sName, bOut, TDSType.BIGVARBINARY);

    if (null == utcCalendar)
        writeEncryptedRPCByteArray(null);
    else {
        assert 0 == utcCalendar.get(Calendar.ZONE_OFFSET);
        writeEncryptedRPCByteArray(writeEncryptedScaledTemporal(utcCalendar, subSecondNanos, scale, SSType.DATETIMEOFFSET, (short) minutesOffset, statement));
    }

    writeByte(TDSType.DATETIMEOFFSETN.byteValue());
    writeByte((byte) (scale));
    writeCryptoMetaData();
}

/**
 * Appends a DATETIME2 value in RPC transmission format (TDSType.DATETIME2N)
 * with the given scale. A null calendar is sent as a zero-length (NULL) value.
 */
void writeRPCDateTime2(String sName, GregorianCalendar localCalendar, int subSecondNanos, int scale, boolean bOut) throws SQLServerException {
    writeRPCNameValType(sName, bOut, TDSType.DATETIME2N);
    writeByte((byte) scale);

    if (null == localCalendar) {
        writeByte((byte) 0);
        return;
    }

    writeByte((byte) TDS.datetime2ValueLength(scale));
    writeScaledTemporal(localCalendar, subSecondNanos, scale, SSType.DATETIME2);
}

/**
 * Appends a DATETIMEOFFSET value in RPC transmission format
 * (TDSType.DATETIMEOFFSETN): scaled UTC temporal followed by the minutes offset.
 * A null calendar is sent as a zero-length (NULL) value.
 */
void writeRPCDateTimeOffset(String sName, GregorianCalendar utcCalendar, int minutesOffset, int subSecondNanos, int scale, boolean bOut) throws SQLServerException {
    writeRPCNameValType(sName, bOut, TDSType.DATETIMEOFFSETN);
    writeByte((byte) scale);

    if (null == utcCalendar) {
        writeByte((byte) 0);
        return;
    }

    assert 0 == utcCalendar.get(Calendar.ZONE_OFFSET);

    writeByte((byte) TDS.datetimeoffsetValueLength(scale));
    writeScaledTemporal(utcCalendar, subSecondNanos, scale, SSType.DATETIMEOFFSET);
    writeShort((short) minutesOffset);
}

/**
 * Returns subSecondNanos rounded to the maximum precision supported. The maximum fractional scale is
 * MAX_FRACTIONAL_SECONDS_SCALE(7). Eg1: if you pass 456,790,123 the function would return 456,790,100
 * Eg2: if you pass 456,790,150 the function would return 456,790,200
 * Eg3: if you pass 999,999,951 the function would return 1,000,000,000
 * This is done to ensure that we have consistent rounding behaviour in setters and getters. Bug #507919
 */
private int getRoundedSubSecondNanos(int subSecondNanos) {
    int roundedNanos = ((subSecondNanos + (Nanos.PER_MAX_SCALE_INTERVAL / 2)) / Nanos.PER_MAX_SCALE_INTERVAL) * Nanos.PER_MAX_SCALE_INTERVAL;
    return roundedNanos;
}

/**
 * Writes to the TDS channel a temporal value as an instance of one of the scaled temporal SQL types: DATE,
 * TIME, DATETIME2, or DATETIMEOFFSET.
*
 * @param cal
 *            Calendar representing the value to write, except for any sub-second nanoseconds
 * @param subSecondNanos
 *            the sub-second nanoseconds (0 - 999,999,999)
 * @param scale
 *            the scale (in digits: 0 - 7) to use for the sub-second nanos component
 * @param ssType
 *            the SQL Server data type (DATE, TIME, DATETIME2, or DATETIMEOFFSET)
 *
 * @throws SQLServerException
 *             if an I/O error occurs or if the value is not in the valid range
 */
private void writeScaledTemporal(GregorianCalendar cal, int subSecondNanos, int scale, SSType ssType) throws SQLServerException {
    assert con.isKatmaiOrLater();

    assert SSType.DATE == ssType || SSType.TIME == ssType || SSType.DATETIME2 == ssType
            || SSType.DATETIMEOFFSET == ssType : "Unexpected SSType: " + ssType;

    // First, for types with a time component, write the scaled nanos since midnight
    if (SSType.TIME == ssType || SSType.DATETIME2 == ssType || SSType.DATETIMEOFFSET == ssType) {
        assert subSecondNanos >= 0;
        assert subSecondNanos < Nanos.PER_SECOND;
        assert scale >= 0;
        assert scale <= TDS.MAX_FRACTIONAL_SECONDS_SCALE;

        int secondsSinceMidnight = cal.get(Calendar.SECOND) + 60 * cal.get(Calendar.MINUTE) + 60 * 60 * cal.get(Calendar.HOUR_OF_DAY);

        // Scale nanos since midnight to the desired scale, rounding the value as necessary
        long divisor = Nanos.PER_MAX_SCALE_INTERVAL * (long) Math.pow(10, TDS.MAX_FRACTIONAL_SECONDS_SCALE - scale);

        // The scaledNanos variable represents the fractional seconds of the value at the scale
        // indicated by the scale variable. So, for example, scaledNanos = 3 means 300 nanoseconds
        // at scale TDS.MAX_FRACTIONAL_SECONDS_SCALE, but 3000 nanoseconds at
        // TDS.MAX_FRACTIONAL_SECONDS_SCALE - 1
        long scaledNanos = ((long) Nanos.PER_SECOND * secondsSinceMidnight + getRoundedSubSecondNanos(subSecondNanos) + divisor / 2) / divisor;

        // SQL Server rounding behavior indicates that it always rounds up unless
        // we are at the max value of the type(NOT every day), in which case it truncates.
        // If rounding nanos to the specified scale rolls the value to the next day ...
        if (Nanos.PER_DAY / divisor == scaledNanos) {
            // If the type is time, always truncate
            if (SSType.TIME == ssType) {
                --scaledNanos;
            }
            // If the type is datetime2 or datetimeoffset, truncate only if its the max value supported
            else {
                assert SSType.DATETIME2 == ssType || SSType.DATETIMEOFFSET == ssType : "Unexpected SSType: " + ssType;

                // ... then bump the date, provided that the resulting date is still within
                // the valid date range.
                //
                // Extreme edge case (literally, the VERY edge...):
                // If nanos overflow rolls the date value out of range (that is, we have a value
                // a few nanoseconds later than 9999-12-31 23:59:59) then truncate the nanos
                // instead of rolling.
                //
                // This case is very likely never hit by "real world" applications, but exists
                // here as a security measure to ensure that such values don't result in a
                // connection-closing TDS exception.
                cal.add(Calendar.SECOND, 1);

                if (cal.get(Calendar.YEAR) <= 9999) {
                    scaledNanos = 0;
                }
                else {
                    cal.add(Calendar.SECOND, -1);
                    --scaledNanos;
                }
            }
        }

        // Encode the scaled nanos to TDS
        int encodedLength = TDS.nanosSinceMidnightLength(scale);
        byte[] encodedBytes = scaledNanosToEncodedBytes(scaledNanos, encodedLength);

        writeBytes(encodedBytes);
    }

    // Second, for types with a date component, write the days into the Common Era
    if (SSType.DATE == ssType || SSType.DATETIME2 == ssType || SSType.DATETIMEOFFSET == ssType) {
        // Computation of the number of days into the Common Era assumes that
        // the DAY_OF_YEAR field reflects a pure Gregorian calendar - one that
        // uses Gregorian leap year rules across the entire range of dates.
        //
        // For the DAY_OF_YEAR field to accurately reflect pure Gregorian behavior,
        // we need to use a pure Gregorian calendar for dates that are Julian dates
        // under a standard Gregorian calendar and for (Gregorian) dates later than
        // the cutover date in the cutover year.
        if (cal.getTimeInMillis() < GregorianChange.STANDARD_CHANGE_DATE.getTime()
                || cal.getActualMaximum(Calendar.DAY_OF_YEAR) < TDS.DAYS_PER_YEAR) {
            int year = cal.get(Calendar.YEAR);
            int month = cal.get(Calendar.MONTH);
            int date = cal.get(Calendar.DATE);

            // Set the cutover as early as possible (pure Gregorian behavior)
            cal.setGregorianChange(GregorianChange.PURE_CHANGE_DATE);

            // Initialize the date field by field (preserving the "wall calendar" value)
            cal.set(year, month, date);
        }

        int daysIntoCE = DDC.daysSinceBaseDate(cal.get(Calendar.YEAR), cal.get(Calendar.DAY_OF_YEAR), 1);

        // Last-ditch verification that the value is in the valid range for the
        // DATE/DATETIME2/DATETIMEOFFSET TDS data type (1/1/0001 to 12/31/9999).
        // If it's not, then throw an exception now so that statement execution
        // is safely canceled. Attempting to put an invalid value on the wire
        // would result in a TDS exception, which would close the connection.
        if (daysIntoCE < 0 || daysIntoCE >= DDC.daysSinceBaseDate(10000, 1, 1)) {
            MessageFormat form = new MessageFormat(SQLServerException.getErrString("R_valueOutOfRange"));
            Object[] msgArgs = {ssType};
            throw new SQLServerException(form.format(msgArgs), SQLState.DATA_EXCEPTION_DATETIME_FIELD_OVERFLOW, DriverError.NOT_SET, null);
        }

        // Days into the Common Era are encoded as a 3-byte little-endian value.
        byte encodedBytes[] = new byte[3];
        encodedBytes[0] = (byte) ((daysIntoCE >> 0) & 0xFF);
        encodedBytes[1] = (byte) ((daysIntoCE >> 8) & 0xFF);
        encodedBytes[2] = (byte) ((daysIntoCE >> 16) & 0xFF);
        writeBytes(encodedBytes);
    }
}

/**
 * Writes to the TDS channel a temporal value as an instance of one of the scaled temporal SQL types: DATE,
 * TIME, DATETIME2, or DATETIMEOFFSET.
     * @param cal
     *        Calendar representing the value to write, except for any sub-second nanoseconds
     * @param subSecondNanos
     *        the sub-second nanoseconds (0 - 999,999,999)
     * @param scale
     *        the scale (in digits: 0 - 7) to use for the sub-second nanos component
     * @param ssType
     *        the SQL Server data type (DATE, TIME, DATETIME2, or DATETIMEOFFSET)
     * @param minutesOffset
     *        the offset value for DATETIMEOFFSET
     * @param statement
     *        the SQLServerStatement used for encryption
     * @throws SQLServerException
     *         if an I/O error occurs or if the value is not in the valid range
     */
    byte[] writeEncryptedScaledTemporal(GregorianCalendar cal, int subSecondNanos, int scale, SSType ssType,
            short minutesOffset, SQLServerStatement statement) throws SQLServerException {
        // Encrypted temporal values are only produced for Katmai (SQL Server 2008) and later.
        assert con.isKatmaiOrLater();

        assert SSType.DATE == ssType || SSType.TIME == ssType || SSType.DATETIME2 == ssType
                || SSType.DATETIMEOFFSET == ssType : "Unexpected SSType: " + ssType;

        // store the time and minutesOffset portion of DATETIME2 and DATETIMEOFFSET to be used with date portion
        byte encodedBytesForEncryption[] = null;

        int secondsSinceMidnight = 0;
        long divisor = 0;
        long scaledNanos = 0;

        // First, for types with a time component, write the scaled nanos since midnight
        if (SSType.TIME == ssType || SSType.DATETIME2 == ssType || SSType.DATETIMEOFFSET == ssType) {
            assert subSecondNanos >= 0;
            assert subSecondNanos < Nanos.PER_SECOND;
            assert scale >= 0;
            assert scale <= TDS.MAX_FRACTIONAL_SECONDS_SCALE;

            secondsSinceMidnight = cal.get(Calendar.SECOND) + 60 * cal.get(Calendar.MINUTE)
                    + 60 * 60 * cal.get(Calendar.HOUR_OF_DAY);

            // Scale nanos since midnight to the desired scale, rounding the value as necessary
            divisor = Nanos.PER_MAX_SCALE_INTERVAL * (long) Math.pow(10, TDS.MAX_FRACTIONAL_SECONDS_SCALE - scale);

            // The scaledNanos variable represents the fractional seconds of the value at the scale
            // indicated by the scale variable. So, for example, scaledNanos = 3 means 300 nanoseconds
            // at scale TDS.MAX_FRACTIONAL_SECONDS_SCALE, but 3000 nanoseconds at
            // TDS.MAX_FRACTIONAL_SECONDS_SCALE - 1
            scaledNanos = (((long) Nanos.PER_SECOND * secondsSinceMidnight + getRoundedSubSecondNanos(subSecondNanos)
                    + divisor / 2) / divisor) * divisor / 100;

            // for encrypted time value, SQL server cannot do rounding or casting,
            // So, driver needs to cast it before encryption.
            // (864000000000L is 24:00:00 expressed in the same scaled units as scaledNanos.)
            if (SSType.TIME == ssType && 864000000000L <= scaledNanos) {
                // Recompute without the +divisor/2 rounding term, i.e. truncate instead of rounding up.
                scaledNanos = (((long) Nanos.PER_SECOND * secondsSinceMidnight
                        + getRoundedSubSecondNanos(subSecondNanos)) / divisor) * divisor / 100;
            }

            // SQL Server rounding behavior indicates that it always rounds up unless
            // we are at the max value of the type(NOT every day), in which case it truncates.
            // If rounding nanos to the specified scale rolls the value to the next day ...
            if (Nanos.PER_DAY / divisor == scaledNanos) {
                // If the type is time, always truncate
                if (SSType.TIME == ssType) {
                    --scaledNanos;
                }
                // If the type is datetime2 or datetimeoffset, truncate only if its the max value supported
                else {
                    assert SSType.DATETIME2 == ssType || SSType.DATETIMEOFFSET == ssType : "Unexpected SSType: "
                            + ssType;

                    // ... then bump the date, provided that the resulting date is still within
                    // the valid date range.
                    //
                    // Extreme edge case (literally, the VERY edge...):
                    // If nanos overflow rolls the date value out of range (that is, we have a value
                    // a few nanoseconds later than 9999-12-31 23:59:59) then truncate the nanos
                    // instead of rolling.
                    //
                    // This case is very likely never hit by "real world" applications, but exists
                    // here as a security measure to ensure that such values don't result in a
                    // connection-closing TDS exception.
                    cal.add(Calendar.SECOND, 1);

                    if (cal.get(Calendar.YEAR) <= 9999) {
                        scaledNanos = 0;
                    } else {
                        cal.add(Calendar.SECOND, -1);
                        --scaledNanos;
                    }
                }
            }

            // Encode the scaled nanos to TDS
            int encodedLength = TDS.nanosSinceMidnightLength(TDS.MAX_FRACTIONAL_SECONDS_SCALE);
            byte[] encodedBytes = scaledNanosToEncodedBytes(scaledNanos, encodedLength);

            if (SSType.TIME == ssType) {
                // TIME has no date part; encrypt and return the time bytes alone.
                byte[] cipherText = SQLServerSecurityUtility.encryptWithKey(encodedBytes, cryptoMeta, con, statement);
                return cipherText;
            } else if (SSType.DATETIME2 == ssType) {
                // for DATETIME2 sends both date and time part together for encryption
                // (3 extra bytes reserved for the date, filled in below)
                encodedBytesForEncryption = new byte[encodedLength + 3];
                System.arraycopy(encodedBytes, 0, encodedBytesForEncryption, 0, encodedBytes.length);
            } else if (SSType.DATETIMEOFFSET == ssType) {
                // for DATETIMEOFFSET sends date, time and offset part together for encryption
                // (3 extra bytes for the date + 2 for the minutes offset, filled in below)
                encodedBytesForEncryption = new byte[encodedLength + 5];
                System.arraycopy(encodedBytes, 0, encodedBytesForEncryption, 0, encodedBytes.length);
            }
        }

        // Second, for types with a date component, write the days into the Common Era
        if (SSType.DATE == ssType || SSType.DATETIME2 == ssType || SSType.DATETIMEOFFSET == ssType) {
            // Computation of the number of days into the Common Era assumes that
            // the DAY_OF_YEAR field reflects a pure Gregorian calendar - one that
            // uses Gregorian leap year rules across the entire range of dates.
            //
            // For the DAY_OF_YEAR field to accurately reflect pure Gregorian behavior,
            // we need to use a pure Gregorian calendar for dates that are Julian dates
            // under a standard Gregorian calendar and for (Gregorian) dates later than
            // the cutover date in the cutover year.
            if (cal.getTimeInMillis() < GregorianChange.STANDARD_CHANGE_DATE.getTime()
                    || cal.getActualMaximum(Calendar.DAY_OF_YEAR) < TDS.DAYS_PER_YEAR) {
                int year = cal.get(Calendar.YEAR);
                int month = cal.get(Calendar.MONTH);
                int date = cal.get(Calendar.DATE);

                // Set the cutover as early as possible (pure Gregorian behavior)
                cal.setGregorianChange(GregorianChange.PURE_CHANGE_DATE);

                // Initialize the date field by field (preserving the "wall calendar" value)
                cal.set(year, month, date);
            }

            int daysIntoCE = DDC.daysSinceBaseDate(cal.get(Calendar.YEAR), cal.get(Calendar.DAY_OF_YEAR), 1);

            // Last-ditch verification that the value is in the valid range for the
            // DATE/DATETIME2/DATETIMEOFFSET TDS data type (1/1/0001 to 12/31/9999).
            // If it's not, then throw an exception now so that statement execution
            // is safely canceled. Attempting to put an invalid value on the wire
            // would result in a TDS exception, which would close the connection.
            if (daysIntoCE < 0 || daysIntoCE >= DDC.daysSinceBaseDate(10000, 1, 1)) {
                MessageFormat form = new MessageFormat(SQLServerException.getErrString("R_valueOutOfRange"));
                Object[] msgArgs = {ssType};
                throw new SQLServerException(form.format(msgArgs), SQLState.DATA_EXCEPTION_DATETIME_FIELD_OVERFLOW,
                        DriverError.NOT_SET, null);
            }

            // Days into the Common Era are encoded as a 3-byte little-endian integer.
            byte encodedBytes[] = new byte[3];
            encodedBytes[0] = (byte) ((daysIntoCE >> 0) & 0xFF);
            encodedBytes[1] = (byte) ((daysIntoCE >> 8) & 0xFF);
            encodedBytes[2] = (byte) ((daysIntoCE >> 16) & 0xFF);

            byte[] cipherText;
            if (SSType.DATE == ssType) {
                cipherText = SQLServerSecurityUtility.encryptWithKey(encodedBytes, cryptoMeta, con, statement);
            } else if (SSType.DATETIME2 == ssType) {
                // for Max value, does not round up, do casting instead.
                if (3652058 == daysIntoCE) { // 9999-12-31
                    if (864000000000L == scaledNanos) { // 24:00:00 in nanoseconds
                        // does not round up
                        scaledNanos = (((long) Nanos.PER_SECOND * secondsSinceMidnight
                                + getRoundedSubSecondNanos(subSecondNanos)) / divisor) * divisor / 100;

                        int encodedLength = TDS.nanosSinceMidnightLength(TDS.MAX_FRACTIONAL_SECONDS_SCALE);
                        byte[] encodedNanoBytes = scaledNanosToEncodedBytes(scaledNanos, encodedLength);

                        // for DATETIME2 sends both date and time part together for encryption
                        encodedBytesForEncryption = new byte[encodedLength + 3];
                        System.arraycopy(encodedNanoBytes, 0, encodedBytesForEncryption, 0, encodedNanoBytes.length);
                    }
                }

                if (encodedBytesForEncryption == null) {
                    MessageFormat form = new MessageFormat(SQLServerException.getErrString("R_NullValue"));
                    Object[] msgArgs1 = {"encodedBytesForEncryption"};
                    throw new SQLServerException(form.format(msgArgs1), null);
                }

                // Copy the 3 byte date value
                System.arraycopy(encodedBytes, 0, encodedBytesForEncryption, (encodedBytesForEncryption.length - 3), 3);

                cipherText = SQLServerSecurityUtility.encryptWithKey(encodedBytesForEncryption, cryptoMeta, con,
                        statement);
            } else {
                // for Max value, does not round up, do casting instead.
                if (3652058 == daysIntoCE) { // 9999-12-31
                    if (864000000000L == scaledNanos) { // 24:00:00 in nanoseconds
                        // does not round up
                        scaledNanos = (((long) Nanos.PER_SECOND * secondsSinceMidnight
                                + getRoundedSubSecondNanos(subSecondNanos)) / divisor) * divisor / 100;

                        int encodedLength = TDS.nanosSinceMidnightLength(TDS.MAX_FRACTIONAL_SECONDS_SCALE);
                        byte[] encodedNanoBytes = scaledNanosToEncodedBytes(scaledNanos, encodedLength);

                        // for DATETIMEOFFSET sends date, time and offset part together for encryption
                        encodedBytesForEncryption = new byte[encodedLength + 5];
                        System.arraycopy(encodedNanoBytes, 0, encodedBytesForEncryption, 0, encodedNanoBytes.length);
                    }
                }

                if (encodedBytesForEncryption == null) {
                    MessageFormat form = new MessageFormat(SQLServerException.getErrString("R_NullValue"));
                    Object[] msgArgs1 = {"encodedBytesForEncryption"};
                    throw new SQLServerException(form.format(msgArgs1), null);
                }

                // Copy the 3 byte date value
                System.arraycopy(encodedBytes, 0, encodedBytesForEncryption, (encodedBytesForEncryption.length - 5), 3);

                // Copy the 2 byte minutesOffset value
                System.arraycopy(
                        ByteBuffer.allocate(Short.SIZE / Byte.SIZE).order(ByteOrder.LITTLE_ENDIAN)
                                .putShort(minutesOffset).array(),
                        0, encodedBytesForEncryption, (encodedBytesForEncryption.length - 2), 2);

                cipherText = SQLServerSecurityUtility.encryptWithKey(encodedBytesForEncryption, cryptoMeta, con,
                        statement);
            }
            return cipherText;
        }

        // Invalid type ssType. This condition should never happen.
MessageFormat form = new MessageFormat(SQLServerException.getErrString("R_unknownSSType")); Object[] msgArgs = {ssType}; SQLServerException.makeFromDriverError(null, null, form.format(msgArgs), null, true); return null; } private byte[] scaledNanosToEncodedBytes(long scaledNanos, int encodedLength) { byte encodedBytes[] = new byte[encodedLength]; for (int i = 0; i < encodedLength; i++) encodedBytes[i] = (byte) ((scaledNanos >> (8 * i)) & 0xFF); return encodedBytes; } /** * Append the data in a stream in RPC transmission format. * * @param sName * the optional parameter name * @param stream * is the stream * @param streamLength * length of the stream (may be unknown) * @param bOut * boolean true if the data value is being registered as an output parameter * @param jdbcType * The JDBC type used to determine whether the value is textual or non-textual. * @param collation * The SQL collation associated with the value. Null for non-textual SQL Server types. * @throws SQLServerException */ void writeRPCInputStream(String sName, InputStream stream, long streamLength, boolean bOut, JDBCType jdbcType, SQLCollation collation) throws SQLServerException { assert null != stream; assert DataTypes.UNKNOWN_STREAM_LENGTH == streamLength || streamLength >= 0; // Send long values and values with unknown length // using PLP chunking on Yukon and later. boolean usePLP = (DataTypes.UNKNOWN_STREAM_LENGTH == streamLength || streamLength > DataTypes.SHORT_VARTYPE_MAX_BYTES); if (usePLP) { assert DataTypes.UNKNOWN_STREAM_LENGTH == streamLength || streamLength <= DataTypes.MAX_VARTYPE_MAX_BYTES; writeRPCNameValType(sName, bOut, jdbcType.isTextual() ? TDSType.BIGVARCHAR : TDSType.BIGVARBINARY); // Handle Yukon v*max type header here. writeVMaxHeader(streamLength, false, jdbcType.isTextual() ? 
collation : null); } // Send non-PLP in all other cases else { // If the length of the InputStream is unknown then we need to buffer the entire stream // in memory so that we can determine its length and send that length to the server // before the stream data itself. if (DataTypes.UNKNOWN_STREAM_LENGTH == streamLength) { // Create ByteArrayOutputStream with initial buffer size of 8K to handle typical // binary field sizes more efficiently. Note we can grow beyond 8000 bytes. ByteArrayOutputStream baos = new ByteArrayOutputStream(8000); streamLength = 0L; // Since Shiloh is limited to 64K TDS packets, that's a good upper bound on the maximum // length of InputStream we should try to handle before throwing an exception. long maxStreamLength = 65535L * con.getTDSPacketSize(); try { byte buff[] = new byte[8000]; int bytesRead; while (streamLength < maxStreamLength && -1 != (bytesRead = stream.read(buff, 0, buff.length))) { baos.write(buff); streamLength += bytesRead; } } catch (IOException e) { throw new SQLServerException(e.getMessage(), SQLState.DATA_EXCEPTION_NOT_SPECIFIC, DriverError.NOT_SET, e); } if (streamLength >= maxStreamLength) { MessageFormat form = new MessageFormat(SQLServerException.getErrString("R_invalidLength")); Object[] msgArgs = {streamLength}; SQLServerException.makeFromDriverError(null, null, form.format(msgArgs), "", true); } assert streamLength <= Integer.MAX_VALUE; stream = new ByteArrayInputStream(baos.toByteArray(), 0, (int) streamLength); } assert 0 <= streamLength && streamLength <= DataTypes.IMAGE_TEXT_MAX_BYTES; boolean useVarType = streamLength <= DataTypes.SHORT_VARTYPE_MAX_BYTES; writeRPCNameValType(sName, bOut, jdbcType.isTextual() ? (useVarType ? TDSType.BIGVARCHAR : TDSType.TEXT) : (useVarType ? 
TDSType.BIGVARBINARY : TDSType.IMAGE)); // Write maximum length, optional collation, and actual length if (useVarType) { writeShort((short) DataTypes.SHORT_VARTYPE_MAX_BYTES); if (jdbcType.isTextual()) collation.writeCollation(this); writeShort((short) streamLength); } else { writeInt(DataTypes.IMAGE_TEXT_MAX_BYTES); if (jdbcType.isTextual()) collation.writeCollation(this); writeInt((int) streamLength); } } // Write the data writeStream(stream, streamLength, usePLP); } /** * Append the XML data in a stream in RPC transmission format. * * @param sName * the optional parameter name * @param stream * is the stream * @param streamLength * length of the stream (may be unknown) * @param bOut * boolean true if the data value is being registered as an output parameter * @throws SQLServerException */ void writeRPCXML(String sName, InputStream stream, long streamLength, boolean bOut) throws SQLServerException { assert DataTypes.UNKNOWN_STREAM_LENGTH == streamLength || streamLength >= 0; assert DataTypes.UNKNOWN_STREAM_LENGTH == streamLength || streamLength <= DataTypes.MAX_VARTYPE_MAX_BYTES; writeRPCNameValType(sName, bOut, TDSType.XML); writeByte((byte) 0); // No schema // Handle null here and return, we're done here if it's null. if (null == stream) { // Null header for v*max types is 0xFFFFFFFFFFFFFFFF. writeLong(0xFFFFFFFFFFFFFFFFL); } else if (DataTypes.UNKNOWN_STREAM_LENGTH == streamLength) { // Append v*max length. // UNKNOWN_PLP_LEN is 0xFFFFFFFFFFFFFFFE writeLong(0xFFFFFFFFFFFFFFFEL); // NOTE: Don't send the first chunk length, this will be calculated by caller. } else { // For v*max types with known length, length is <totallength8><chunklength4> // We're sending same total length as chunk length (as we're sending 1 chunk). writeLong(streamLength); } if (null != stream) // Write the data writeStream(stream, streamLength, true); } /** * Append the data in a character reader in RPC transmission format. 
     *
     * @param sName
     *        the optional parameter name
     * @param re
     *        the reader
     * @param reLength
     *        the reader data length (in characters)
     * @param bOut
     *        boolean true if the data value is being registered as an output parameter
     * @param collation
     *        The SQL collation associated with the value. Null for non-textual SQL Server types.
     * @throws SQLServerException
     *         if the data cannot be written to the TDS stream
     */
    void writeRPCReaderUnicode(String sName, Reader re, long reLength, boolean bOut,
            SQLCollation collation) throws SQLServerException {
        assert null != re;
        assert DataTypes.UNKNOWN_STREAM_LENGTH == reLength || reLength >= 0;

        // Textual RPC requires a collation. If none is provided, as is the case when
        // the SSType is non-textual, then use the database collation by default.
        if (null == collation)
            collation = con.getDatabaseCollation();

        // Send long values and values with unknown length
        // using PLP chunking on Yukon and later.
        boolean usePLP = (DataTypes.UNKNOWN_STREAM_LENGTH == reLength || reLength > DataTypes.SHORT_VARTYPE_MAX_CHARS);
        if (usePLP) {
            assert DataTypes.UNKNOWN_STREAM_LENGTH == reLength || reLength <= DataTypes.MAX_VARTYPE_MAX_CHARS;

            writeRPCNameValType(sName, bOut, TDSType.NVARCHAR);

            // Handle Yukon v*max type header here.
            // Length is in bytes: 2 bytes per character for NVARCHAR.
            writeVMaxHeader(
                    (DataTypes.UNKNOWN_STREAM_LENGTH == reLength) ? DataTypes.UNKNOWN_STREAM_LENGTH : 2 * reLength, // Length
                    // bytes)
                    false, collation);
        }

        // Send non-PLP in all other cases
        else {
            // Length must be known if we're not sending PLP-chunked data. Yukon is handled above.
            // For Shiloh, this is enforced in DTV by converting the Reader to some other length-
            // prefixed value in the setter.
            assert 0 <= reLength && reLength <= DataTypes.NTEXT_MAX_CHARS;

            // For non-PLP types, use the long TEXT type rather than the short VARCHAR
            // type if the stream is too long to fit in the latter or if we don't know the length up
            // front so we have to assume that it might be too long.
            boolean useVarType = reLength <= DataTypes.SHORT_VARTYPE_MAX_CHARS;

            writeRPCNameValType(sName, bOut, useVarType ? TDSType.NVARCHAR : TDSType.NTEXT);

            // Write maximum length, collation, and actual length of the data
            if (useVarType) {
                writeShort((short) DataTypes.SHORT_VARTYPE_MAX_BYTES);
                collation.writeCollation(this);
                writeShort((short) (2 * reLength)); // actual length in bytes (2 bytes per character)
            } else {
                writeInt(DataTypes.NTEXT_MAX_CHARS);
                collation.writeCollation(this);
                writeInt((int) (2 * reLength)); // actual length in bytes (2 bytes per character)
            }
        }

        // Write the data
        writeReader(re, reLength, usePLP);
    }

    /**
     * Writes the length-prefixed enclave package for Always Encrypted with secure enclaves (AE v2),
     * or a zero-length marker when there is nothing to send. No-op unless the connection reports AE v2.
     *
     * @param sql the SQL text used to generate the enclave package
     * @param enclaveCEKs the column encryption keys to include in the package
     * @throws SQLServerException if generating or writing the package fails
     */
    void sendEnclavePackage(String sql, ArrayList<byte[]> enclaveCEKs) throws SQLServerException {
        if (null != con && con.isAEv2()) {
            if (null != sql && !sql.isEmpty() && null != enclaveCEKs && 0 < enclaveCEKs.size()
                    && con.enclaveEstablished()) {
                byte[] b = con.generateEnclavePackage(sql, enclaveCEKs);
                if (null != b && 0 != b.length) {
                    this.writeShort((short) b.length);
                    this.writeBytes(b);
                } else {
                    // Empty package: send a zero length so the server knows nothing follows.
                    this.writeShort((short) 0);
                }
            } else {
                this.writeShort((short) 0);
            }
        }
    }
}


/**
 * TDSPacket provides a mechanism for chaining TDS response packets together in a singly-linked list.
 *
 * Having both the link and the data in the same class allows TDSReader marks (see below) to automatically hold onto
 * exactly as much response data as they need, and no more. Java reference semantics ensure that a mark holds onto its
 * referenced packet and subsequent packets (through next references). When all marked references to a packet go away,
 * the packet, and any linked unmarked packets, can be reclaimed by GC.
 */
final class TDSPacket {
    // Raw TDS packet header bytes as received from the channel.
    final byte[] header = new byte[TDS.PACKET_HEADER_SIZE];
    // Packet payload buffer; only the first payloadLength bytes are valid.
    final byte[] payload;
    int payloadLength;
    // Link to the next packet in the response chain; volatile so a reader thread
    // sees links published by the thread that buffers packets.
    volatile TDSPacket next;

    final public String toString() {
        return "TDSPacket(SPID:" + Util.readUnsignedShortBigEndian(header, TDS.PACKET_HEADER_SPID) + " Seq:"
                + header[TDS.PACKET_HEADER_SEQUENCE_NUM] + ")";
    }

    TDSPacket(int size) {
        payload = new byte[size];
        payloadLength = 0;
        next = null;
    }

    // True if this packet carries the end-of-message status bit.
    final boolean isEOM() {
        return TDS.STATUS_BIT_EOM == (header[TDS.PACKET_HEADER_MESSAGE_STATUS] & TDS.STATUS_BIT_EOM);
    }
}


/**
 * TDSReaderMark encapsulates a fixed position in the response data stream.
 *
 * Response data is quantized into a linked chain of packets. A mark refers to a specific location in a specific packet
 * and relies on Java's reference semantics to automatically keep all subsequent packets accessible until the mark is
 * destroyed.
 */
final class TDSReaderMark {
    final TDSPacket packet;
    final int payloadOffset;

    TDSReaderMark(TDSPacket packet, int payloadOffset) {
        this.packet = packet;
        this.payloadOffset = payloadOffset;
    }
}


/**
 * TDSReader encapsulates the TDS response data stream.
 *
 * Bytes are read from SQL Server into a FIFO of packets. Reader methods traverse the packets to access the data.
 */
final class TDSReader implements Serializable {
    /**
     * Always update serialVersionUID when prompted.
     */
    private static final long serialVersionUID = -392905303734809731L;

    private static final Logger logger = Logger.getLogger("com.microsoft.sqlserver.jdbc.internals.TDS.Reader");
    final private String traceID;
    // Pending connection-level timeout task, if any; canceled once a packet header arrives.
    private ScheduledFuture<?> timeout;

    final public String toString() {
        return traceID;
    }

    private final TDSChannel tdsChannel;
    private final SQLServerConnection con;

    private final TDSCommand command;

    final TDSCommand getCommand() {
        assert null != command;
        return command;
    }

    final SQLServerConnection getConnection() {
        return con;
    }

    // Head of the packet chain currently being read; starts as an empty sentinel packet.
    private TDSPacket currentPacket = new TDSPacket(0);
    private TDSPacket lastPacket = currentPacket;
    private int payloadOffset = 0;
    private int packetNum = 0;

    // While streaming, consumed packets are unlinked so GC can reclaim them;
    // a mark() switches this off to keep packets reachable for reset().
    private boolean isStreaming = true;
    private boolean useColumnEncryption = false;
    private boolean serverSupportsColumnEncryption = false;
    private boolean serverSupportsDataClassification = false;
    private byte serverSupportedDataClassificationVersion = TDS.DATA_CLASSIFICATION_NOT_ENABLED;
    private ColumnEncryptionVersion columnEncryptionVersion;

    // Scratch buffer for reading fixed-size values that may span packet boundaries.
    private final byte valueBytes[] = new byte[256];

    protected SensitivityClassification sensitivityClassification;

    private static final AtomicInteger lastReaderID = new AtomicInteger(0);

    private static int nextReaderID() {
        return lastReaderID.incrementAndGet();
    }

    TDSReader(TDSChannel tdsChannel, SQLServerConnection con, TDSCommand command) {
        this.tdsChannel = tdsChannel;
        this.con = con;
        this.command = command; // may be null
        // if the logging level is not detailed than fine or more we will not have proper reader IDs.
        if (logger.isLoggable(Level.FINE))
            traceID = "TDSReader@" + nextReaderID() + " (" + con.toString() + ")";
        else
            traceID = con.toString();
        if (con.isColumnEncryptionSettingEnabled()) {
            useColumnEncryption = true;
        }
        serverSupportsColumnEncryption = con.getServerSupportsColumnEncryption();
        columnEncryptionVersion = con.getServerColumnEncryptionVersion();
        serverSupportsDataClassification = con.getServerSupportsDataClassification();
        serverSupportedDataClassificationVersion = con.getServerSupportedDataClassificationVersion();
    }

    final boolean isColumnEncryptionSettingEnabled() {
        return useColumnEncryption;
    }

    final boolean getServerSupportsColumnEncryption() {
        return serverSupportsColumnEncryption;
    }

    final boolean getServerSupportsDataClassification() {
        return serverSupportsDataClassification;
    }

    final byte getServerSupportedDataClassificationVersion() {
        return serverSupportedDataClassificationVersion;
    }

    // Logs and raises an invalid-TDS error at the current payload offset; terminates the connection.
    final void throwInvalidTDS() throws SQLServerException {
        if (logger.isLoggable(Level.SEVERE))
            logger.severe(toString() + " got unexpected value in TDS response at offset:" + payloadOffset);
        con.throwInvalidTDS();
    }

    // Same as throwInvalidTDS, but reports the specific token that was unexpected.
    final void throwInvalidTDSToken(String tokenName) throws SQLServerException {
        if (logger.isLoggable(Level.SEVERE))
            logger.severe(toString() + " got unexpected value in TDS response at offset:" + payloadOffset);
        con.throwInvalidTDSToken(tokenName);
    }

    /**
     * Ensures that payload data is available to be read, automatically advancing to (and possibly reading) the next
     * packet.
     *
     * @return true if additional data is available to be read false if no more data is available
     */
    private boolean ensurePayload() throws SQLServerException {
        if (payloadOffset == currentPacket.payloadLength)
            if (!nextPacket())
                return false;

        assert payloadOffset < currentPacket.payloadLength;
        return true;
    }

    /**
     * Advance (and possibly read) the next packet.
     *
     * @return true if additional data is available to be read false if no more data is available
     */
    private boolean nextPacket() throws SQLServerException {
        assert null != currentPacket;

        // Shouldn't call this function unless we're at the end of the current packet...
        TDSPacket consumedPacket = currentPacket;
        assert payloadOffset == consumedPacket.payloadLength;

        // If no buffered packets are left then maybe we can read one...
        // This action must be synchronized against another thread calling
        // readAllPackets() to read in ALL of the remaining packets of the current response.
        if (null == consumedPacket.next) {
            // if the read comes from getNext() and responseBuffering is Adaptive (in this place is), then reset Counter
            // State
            if (null != command && command.getTDSWriter().checkIfTdsMessageTypeIsBatchOrRPC()) {
                command.getCounter().resetCounter();
            }
            readPacket();

            if (null == consumedPacket.next)
                return false;
        }

        // Advance to that packet. If we are streaming through the
        // response, then unlink the current packet from the next
        // before moving to allow the packet to be reclaimed.
        TDSPacket nextPacket = consumedPacket.next;
        if (isStreaming) {
            if (logger.isLoggable(Level.FINEST))
                logger.finest(toString() + " Moving to next packet -- unlinking consumed packet");

            consumedPacket.next = null;
        }

        currentPacket = nextPacket;
        payloadOffset = 0;
        return true;
    }

    /**
     * Reads the next packet of the TDS channel.
     *
     * This method is synchronized to guard against simultaneously reading packets from one thread that is processing
     * the response and another thread that is trying to buffer it with TDSCommand.detach().
     */
    synchronized final boolean readPacket() throws SQLServerException {
        if (null != command && !command.readingResponse())
            return false;

        // Number of packets in should always be less than number of packets out.
        // If the server has been notified for an interrupt, it may be less by
        // more than one packet.
        assert tdsChannel.numMsgsRcvd < tdsChannel.numMsgsSent : "numMsgsRcvd:" + tdsChannel.numMsgsRcvd
                + " should be less than numMsgsSent:" + tdsChannel.numMsgsSent;

        TDSPacket newPacket = new TDSPacket(con.getTDSPacketSize());
        if (null != command) {
            // if cancelQueryTimeout is set, we should wait for the total amount of
            // queryTimeout + cancelQueryTimeout to
            // terminate the connection.
            if ((command.getCancelQueryTimeoutSeconds() > 0 && command.getQueryTimeoutSeconds() > 0)) {
                // if a timeout is configured with this object, add it to the timeout poller
                int seconds = command.getCancelQueryTimeoutSeconds() + command.getQueryTimeoutSeconds();
                this.timeout = con.getSharedTimer().schedule(new TDSTimeoutTask(command, con), seconds);
            }
        }

        // First, read the packet header.
        for (int headerBytesRead = 0; headerBytesRead < TDS.PACKET_HEADER_SIZE;) {
            int bytesRead = tdsChannel.read(newPacket.header, headerBytesRead,
                    TDS.PACKET_HEADER_SIZE - headerBytesRead);
            if (bytesRead < 0) {
                if (logger.isLoggable(Level.FINER))
                    logger.finer(toString() + " Premature EOS in response. packetNum:" + packetNum
                            + " headerBytesRead:" + headerBytesRead);

                con.terminate(SQLServerException.DRIVER_ERROR_IO_FAILED,
                        ((0 == packetNum && 0 == headerBytesRead) ? SQLServerException.getErrString(
                                "R_noServerResponse") : SQLServerException.getErrString("R_truncatedServerResponse")));
            }

            headerBytesRead += bytesRead;
        }

        // if execution was subject to timeout then stop timing
        if (this.timeout != null) {
            this.timeout.cancel(false);
            this.timeout = null;
        }

        // Header size is a 2 byte unsigned short integer in big-endian order.
        int packetLength = Util.readUnsignedShortBigEndian(newPacket.header, TDS.PACKET_HEADER_MESSAGE_LENGTH);

        // Make sure header size is properly bounded and compute length of the packet payload.
        if (packetLength < TDS.PACKET_HEADER_SIZE || packetLength > con.getTDSPacketSize()) {
            if (logger.isLoggable(Level.WARNING)) {
                logger.warning(toString() + " TDS header contained invalid packet length:" + packetLength
                        + "; packet size:" + con.getTDSPacketSize());
            }
            throwInvalidTDS();
        }

        newPacket.payloadLength = packetLength - TDS.PACKET_HEADER_SIZE;

        // Just grab the SPID for logging (another big-endian unsigned short).
        tdsChannel.setSPID(Util.readUnsignedShortBigEndian(newPacket.header, TDS.PACKET_HEADER_SPID));

        // Packet header looks good enough.
        // When logging, copy the packet header to the log buffer.
        byte[] logBuffer = null;
        if (tdsChannel.isLoggingPackets()) {
            logBuffer = new byte[packetLength];
            System.arraycopy(newPacket.header, 0, logBuffer, 0, TDS.PACKET_HEADER_SIZE);
        }

        // if messageType is RPC or QUERY, then increment Counter's state
        if (tdsChannel.getWriter().checkIfTdsMessageTypeIsBatchOrRPC()) {
            command.getCounter().increaseCounter(packetLength);
        }

        // Now for the payload...
        for (int payloadBytesRead = 0; payloadBytesRead < newPacket.payloadLength;) {
            int bytesRead = tdsChannel.read(newPacket.payload, payloadBytesRead,
                    newPacket.payloadLength - payloadBytesRead);
            if (bytesRead < 0)
                con.terminate(SQLServerException.DRIVER_ERROR_IO_FAILED,
                        SQLServerException.getErrString("R_truncatedServerResponse"));

            payloadBytesRead += bytesRead;
        }

        ++packetNum;

        // Link the fully-read packet onto the end of the response chain.
        lastPacket.next = newPacket;
        lastPacket = newPacket;

        // When logging, append the payload to the log buffer and write out the whole thing.
        if (tdsChannel.isLoggingPackets() && logBuffer != null) {
            System.arraycopy(newPacket.payload, 0, logBuffer, TDS.PACKET_HEADER_SIZE, newPacket.payloadLength);
            tdsChannel.logPacket(logBuffer, 0, packetLength,
                    this.toString() + " received Packet:" + packetNum + " (" + newPacket.payloadLength + " bytes)");
        }

        // If end of message, then bump the count of messages received and disable
        // interrupts. If an interrupt happened prior to disabling, then expect
        // to read the attention ack packet as well.
        if (newPacket.isEOM()) {
            ++tdsChannel.numMsgsRcvd;

            // Notify the command (if any) that we've reached the end of the response.
            if (null != command)
                command.onResponseEOM();
        }

        return true;
    }

    // Captures the current read position and switches off streaming so buffered
    // packets remain reachable until reset() or stream() is called.
    final TDSReaderMark mark() {
        TDSReaderMark mark = new TDSReaderMark(currentPacket, payloadOffset);
        isStreaming = false;

        if (logger.isLoggable(Level.FINEST))
            logger.finest(this.toString() + ": Buffering from: " + mark.toString());

        return mark;
    }

    // Rewinds the reader to a previously captured mark.
    final void reset(TDSReaderMark mark) {
        if (logger.isLoggable(Level.FINEST))
            logger.finest(this.toString() + ": Resetting to: " + mark.toString());

        currentPacket = mark.packet;
        payloadOffset = mark.payloadOffset;
    }

    // Resumes streaming mode: consumed packets may again be unlinked and reclaimed.
    final void stream() {
        isStreaming = true;
    }

    /**
     * Returns the number of bytes that can be read (or skipped over) from this TDSReader without blocking by the next
     * caller of a method for this TDSReader.
     *
     * @return the actual number of bytes available.
     */
    final int available() {
        // The number of bytes that can be read without blocking is just the number
        // of bytes that are currently buffered. That is the number of bytes left
        // in the current packet plus the number of bytes in the remaining packets.
        int available = currentPacket.payloadLength - payloadOffset;
        for (TDSPacket packet = currentPacket.next; null != packet; packet = packet.next)
            available += packet.payloadLength;
        return available;
    }

    /**
     *
     * @return number of bytes available in the current packet
     */
    final int availableCurrentPacket() {
        /*
         * The number of bytes that can be read from the current chunk, without including the next chunk that is
         * buffered.
This is so the driver can confirm if the next chunk sent is new packet or just continuation */ int available = currentPacket.payloadLength - payloadOffset; return available; } final int peekTokenType() throws SQLServerException { // Check whether we're at EOF if (!ensurePayload()) return -1; // Peek at the current byte (don't increment payloadOffset!) return currentPacket.payload[payloadOffset] & 0xFF; } final short peekStatusFlag() throws SQLServerException { // skip the current packet(i.e, TDS packet type) and peek into the status flag (USHORT) if (payloadOffset + 3 <= currentPacket.payloadLength) { short value = Util.readShort(currentPacket.payload, payloadOffset + 1); return value; } return 0; } final int readUnsignedByte() throws SQLServerException { // Ensure that we have a packet to read from. if (!ensurePayload()) throwInvalidTDS(); return currentPacket.payload[payloadOffset++] & 0xFF; } final short readShort() throws SQLServerException { if (payloadOffset + 2 <= currentPacket.payloadLength) { short value = Util.readShort(currentPacket.payload, payloadOffset); payloadOffset += 2; return value; } return Util.readShort(readWrappedBytes(2), 0); } final int readUnsignedShort() throws SQLServerException { if (payloadOffset + 2 <= currentPacket.payloadLength) { int value = Util.readUnsignedShort(currentPacket.payload, payloadOffset); payloadOffset += 2; return value; } return Util.readUnsignedShort(readWrappedBytes(2), 0); } final String readUnicodeString(int length) throws SQLServerException { int byteLength = 2 * length; byte bytes[] = new byte[byteLength]; readBytes(bytes, 0, byteLength); return Util.readUnicodeString(bytes, 0, byteLength, con); } final char readChar() throws SQLServerException { return (char) readShort(); } final int readInt() throws SQLServerException { if (payloadOffset + 4 <= currentPacket.payloadLength) { int value = Util.readInt(currentPacket.payload, payloadOffset); payloadOffset += 4; return value; } return 
Util.readInt(readWrappedBytes(4), 0); } final int readIntBigEndian() throws SQLServerException { if (payloadOffset + 4 <= currentPacket.payloadLength) { int value = Util.readIntBigEndian(currentPacket.payload, payloadOffset); payloadOffset += 4; return value; } return Util.readIntBigEndian(readWrappedBytes(4), 0); } final long readUnsignedInt() throws SQLServerException { return readInt() & 0xFFFFFFFFL; } final long readLong() throws SQLServerException { if (payloadOffset + 8 <= currentPacket.payloadLength) { long value = Util.readLong(currentPacket.payload, payloadOffset); payloadOffset += 8; return value; } return Util.readLong(readWrappedBytes(8), 0); } final void readBytes(byte[] value, int valueOffset, int valueLength) throws SQLServerException { for (int bytesRead = 0; bytesRead < valueLength;) { // Ensure that we have a packet to read from. if (!ensurePayload()) throwInvalidTDS(); // Figure out how many bytes to copy from the current packet // (the lesser of the remaining value bytes and the bytes left in the packet). int bytesToCopy = valueLength - bytesRead; if (bytesToCopy > currentPacket.payloadLength - payloadOffset) bytesToCopy = currentPacket.payloadLength - payloadOffset; // Copy some bytes from the current packet to the destination value. if (logger.isLoggable(Level.FINEST)) logger.finest(toString() + " Reading " + bytesToCopy + " bytes from offset " + payloadOffset); System.arraycopy(currentPacket.payload, payloadOffset, value, valueOffset + bytesRead, bytesToCopy); bytesRead += bytesToCopy; payloadOffset += bytesToCopy; } } /** * This function reads valueLength no. of bytes from input buffer without storing them in any array * * @param valueLength * @throws SQLServerException */ final void readSkipBytes(int valueLength) throws SQLServerException { for (int bytesSkipped = 0; bytesSkipped < valueLength;) { // Ensure that we have a packet to read from. 
if (!ensurePayload()) throwInvalidTDS(); int bytesToSkip = valueLength - bytesSkipped; if (bytesToSkip > currentPacket.payloadLength - payloadOffset) bytesToSkip = currentPacket.payloadLength - payloadOffset; if (logger.isLoggable(Level.FINEST)) logger.finest(toString() + " Skipping " + bytesToSkip + " bytes from offset " + payloadOffset); bytesSkipped += bytesToSkip; payloadOffset += bytesToSkip; } } final byte[] readWrappedBytes(int valueLength) throws SQLServerException { assert valueLength <= valueBytes.length; readBytes(valueBytes, 0, valueLength); return valueBytes; } final Object readDecimal(int valueLength, TypeInfo typeInfo, JDBCType jdbcType, StreamType streamType) throws SQLServerException { if (valueLength > valueBytes.length) { if (logger.isLoggable(Level.WARNING)) { logger.warning(toString() + " Invalid value length:" + valueLength); } throwInvalidTDS(); } readBytes(valueBytes, 0, valueLength); return DDC.convertBigDecimalToObject(Util.readBigDecimal(valueBytes, valueLength, typeInfo.getScale()), jdbcType, streamType); } final Object readMoney(int valueLength, JDBCType jdbcType, StreamType streamType) throws SQLServerException { BigInteger bi; switch (valueLength) { case 8: // money { int intBitsHi = readInt(); int intBitsLo = readInt(); if (JDBCType.BINARY == jdbcType) { byte value[] = new byte[8]; Util.writeIntBigEndian(intBitsHi, value, 0); Util.writeIntBigEndian(intBitsLo, value, 4); return value; } bi = BigInteger.valueOf(((long) intBitsHi << 32) | (intBitsLo & 0xFFFFFFFFL)); break; } case 4: // smallmoney if (JDBCType.BINARY == jdbcType) { byte value[] = new byte[4]; Util.writeIntBigEndian(readInt(), value, 0); return value; } bi = BigInteger.valueOf(readInt()); break; default: throwInvalidTDS(); return null; } return DDC.convertBigDecimalToObject(new BigDecimal(bi, 4), jdbcType, streamType); } final Object readReal(int valueLength, JDBCType jdbcType, StreamType streamType) throws SQLServerException { if (4 != valueLength) throwInvalidTDS(); 
return DDC.convertFloatToObject(Float.intBitsToFloat(readInt()), jdbcType, streamType);
    }

    /** Reads an 8-byte FLOAT value and converts it per jdbcType/streamType. */
    final Object readFloat(int valueLength, JDBCType jdbcType, StreamType streamType) throws SQLServerException {
        if (8 != valueLength)
            throwInvalidTDS();

        return DDC.convertDoubleToObject(Double.longBitsToDouble(readLong()), jdbcType, streamType);
    }

    /**
     * Reads a DATETIME (8 bytes) or SMALLDATETIME (4 bytes) value and converts it to the
     * requested Java type, interpreting the value in the application time zone.
     */
    final Object readDateTime(int valueLength, Calendar appTimeZoneCalendar, JDBCType jdbcType,
            StreamType streamType) throws SQLServerException {
        // Build and return the right kind of temporal object.
        int daysSinceSQLBaseDate;
        int ticksSinceMidnight;
        int msecSinceMidnight;

        switch (valueLength) {
            case 8:
                // SQL datetime is 4 bytes for days since SQL Base Date
                // (January 1, 1900 00:00:00 GMT) and 4 bytes for
                // the number of three hundredths (1/300) of a second
                // since midnight.
                daysSinceSQLBaseDate = readInt();
                ticksSinceMidnight = readInt();

                if (JDBCType.BINARY == jdbcType) {
                    byte value[] = new byte[8];
                    Util.writeIntBigEndian(daysSinceSQLBaseDate, value, 0);
                    Util.writeIntBigEndian(ticksSinceMidnight, value, 4);
                    return value;
                }

                // Convert to msec (1 tick = 1 300th of a sec = 3 msec)
                msecSinceMidnight = (ticksSinceMidnight * 10 + 1) / 3;
                break;

            case 4:
                // SQL smalldatetime has less precision. It stores 2 bytes
                // for the days since SQL Base Date and 2 bytes for minutes
                // after midnight.
                daysSinceSQLBaseDate = readUnsignedShort();
                ticksSinceMidnight = readUnsignedShort();

                if (JDBCType.BINARY == jdbcType) {
                    byte value[] = new byte[4];
                    Util.writeShortBigEndian((short) daysSinceSQLBaseDate, value, 0);
                    Util.writeShortBigEndian((short) ticksSinceMidnight, value, 2);
                    return value;
                }

                // Convert to msec (1 tick = 1 min = 60,000 msec)
                msecSinceMidnight = ticksSinceMidnight * 60 * 1000;
                break;

            default:
                throwInvalidTDS();
                return null;
        }

        // Convert the DATETIME/SMALLDATETIME value to the desired Java type.
return DDC.convertTemporalToObject(jdbcType, SSType.DATETIME, appTimeZoneCalendar, daysSinceSQLBaseDate,
                msecSinceMidnight, 0); // scale (ignored for fixed-scale DATETIME/SMALLDATETIME types)
    }

    /** Reads a DATE value (days into the Common Era) and converts it to the requested Java type. */
    final Object readDate(int valueLength, Calendar appTimeZoneCalendar, JDBCType jdbcType) throws SQLServerException {
        if (TDS.DAYS_INTO_CE_LENGTH != valueLength)
            throwInvalidTDS();

        // Initialize the date fields to their appropriate values.
        int localDaysIntoCE = readDaysIntoCE();

        // Convert the DATE value to the desired Java type.
        return DDC.convertTemporalToObject(jdbcType, SSType.DATE, appTimeZoneCalendar, localDaysIntoCE,
                0, // midnight local to app time zone
                0); // scale (ignored for DATE)
    }

    /**
     * Reads a TIME value, whose wire length is a function of the column's scale, and converts
     * it to the requested Java type.
     */
    final Object readTime(int valueLength, TypeInfo typeInfo, Calendar appTimeZoneCalendar,
            JDBCType jdbcType) throws SQLServerException {
        if (TDS.timeValueLength(typeInfo.getScale()) != valueLength)
            throwInvalidTDS();

        // Read the value from the server
        long localNanosSinceMidnight = readNanosSinceMidnight(typeInfo.getScale());

        // Convert the TIME value to the desired Java type.
        return DDC.convertTemporalToObject(jdbcType, SSType.TIME, appTimeZoneCalendar, 0, localNanosSinceMidnight,
                typeInfo.getScale());
    }

    /**
     * Reads a DATETIME2 value (time-of-day component first, then the date component) and
     * converts it to the requested Java type.
     */
    final Object readDateTime2(int valueLength, TypeInfo typeInfo, Calendar appTimeZoneCalendar,
            JDBCType jdbcType) throws SQLServerException {
        if (TDS.datetime2ValueLength(typeInfo.getScale()) != valueLength)
            throwInvalidTDS();

        // Read the value's constituent components
        long localNanosSinceMidnight = readNanosSinceMidnight(typeInfo.getScale());
        int localDaysIntoCE = readDaysIntoCE();

        // Convert the DATETIME2 value to the desired Java type.
return DDC.convertTemporalToObject(jdbcType, SSType.DATETIME2, appTimeZoneCalendar, localDaysIntoCE,
                localNanosSinceMidnight, typeInfo.getScale());
    }

    /**
     * Reads a DATETIMEOFFSET value and converts it to the requested Java type.
     *
     * The time and date components arrive in UTC; the trailing signed minutes-offset field is
     * used to build a calendar in the value's own time zone for the conversion.
     */
    final Object readDateTimeOffset(int valueLength, TypeInfo typeInfo, JDBCType jdbcType) throws SQLServerException {
        if (TDS.datetimeoffsetValueLength(typeInfo.getScale()) != valueLength)
            throwInvalidTDS();

        // The nanos since midnight and days into Common Era parts of DATETIMEOFFSET values
        // are in UTC. Use the minutes offset part to convert to local.
        long utcNanosSinceMidnight = readNanosSinceMidnight(typeInfo.getScale());
        int utcDaysIntoCE = readDaysIntoCE();
        int localMinutesOffset = readShort();

        // Convert the DATETIMEOFFSET value to the desired Java type.
        return DDC.convertTemporalToObject(jdbcType, SSType.DATETIMEOFFSET,
                new GregorianCalendar(new SimpleTimeZone(localMinutesOffset * 60 * 1000, ""), Locale.US),
                utcDaysIntoCE, utcNanosSinceMidnight, typeInfo.getScale());
    }

    /**
     * Reads the little-endian "days into the Common Era" field shared by DATE, DATETIME2 and
     * DATETIMEOFFSET values.
     */
    private int readDaysIntoCE() throws SQLServerException {
        byte value[] = new byte[TDS.DAYS_INTO_CE_LENGTH];
        readBytes(value, 0, value.length);

        // Accumulate the bytes little-endian into a non-negative int.
        int daysIntoCE = 0;
        for (int i = 0; i < value.length; i++)
            daysIntoCE |= ((value[i] & 0xFF) << (8 * i));

        // Theoretically should never encounter a value that is outside of the valid date range
        if (daysIntoCE < 0)
            throwInvalidTDS();

        return daysIntoCE;
    }

    // Scale multipliers used to convert variable-scaled temporal values to a fixed 100ns scale.
    // Using this array is measurably faster than using Math.pow(10, ...)
private final static int[] SCALED_MULTIPLIERS = {10000000, 1000000, 100000, 10000, 1000, 100, 10, 1};

    /**
     * Reads a scaled time-of-day value and normalizes it to nanoseconds since midnight.
     *
     * The wire value is a little-endian integer in units of 10^-scale seconds; it is scaled
     * up to 100ns units via SCALED_MULTIPLIERS and range-checked against one full day.
     */
    private long readNanosSinceMidnight(int scale) throws SQLServerException {
        assert 0 <= scale && scale <= TDS.MAX_FRACTIONAL_SECONDS_SCALE;

        byte value[] = new byte[TDS.nanosSinceMidnightLength(scale)];
        readBytes(value, 0, value.length);

        long hundredNanosSinceMidnight = 0;
        for (int i = 0; i < value.length; i++)
            hundredNanosSinceMidnight |= (value[i] & 0xFFL) << (8 * i);
        hundredNanosSinceMidnight *= SCALED_MULTIPLIERS[scale];

        if (!(0 <= hundredNanosSinceMidnight && hundredNanosSinceMidnight < Nanos.PER_DAY / 100))
            throwInvalidTDS();

        return 100 * hundredNanosSinceMidnight;
    }

    // Template showing the canonical textual layout of a rendered GUID.
    final static String guidTemplate = "NNNNNNNN-NNNN-NNNN-NNNN-NNNNNNNNNNNN";

    /**
     * Reads a 16-byte GUID value.
     *
     * For character conversions the GUID is rendered in canonical string form. The first three
     * groups are emitted byte-reversed relative to wire order (indices 3..0, 5..4, 7..6); the
     * last two groups are emitted in wire order (8..9, 10..15).
     */
    final Object readGUID(int valueLength, JDBCType jdbcType, StreamType streamType) throws SQLServerException {
        // GUIDs must be exactly 16 bytes
        if (16 != valueLength)
            throwInvalidTDS();

        // Read in the GUID's binary value
        byte guid[] = new byte[16];
        readBytes(guid, 0, 16);

        switch (jdbcType) {
            case CHAR:
            case VARCHAR:
            case LONGVARCHAR:
            case GUID: {
                StringBuilder sb = new StringBuilder(guidTemplate.length());
                for (int i = 0; i < 4; i++) {
                    sb.append(Util.hexChars[(guid[3 - i] & 0xF0) >> 4]);
                    sb.append(Util.hexChars[guid[3 - i] & 0x0F]);
                }
                sb.append('-');
                for (int i = 0; i < 2; i++) {
                    sb.append(Util.hexChars[(guid[5 - i] & 0xF0) >> 4]);
                    sb.append(Util.hexChars[guid[5 - i] & 0x0F]);
                }
                sb.append('-');
                for (int i = 0; i < 2; i++) {
                    sb.append(Util.hexChars[(guid[7 - i] & 0xF0) >> 4]);
                    sb.append(Util.hexChars[guid[7 - i] & 0x0F]);
                }
                sb.append('-');
                for (int i = 0; i < 2; i++) {
                    sb.append(Util.hexChars[(guid[8 + i] & 0xF0) >> 4]);
                    sb.append(Util.hexChars[guid[8 + i] & 0x0F]);
                }
                sb.append('-');
                for (int i = 0; i < 6; i++) {
                    sb.append(Util.hexChars[(guid[10 + i] & 0xF0) >> 4]);
                    sb.append(Util.hexChars[guid[10 + i] & 0x0F]);
                }

                try {
                    return DDC.convertStringToObject(sb.toString(), Encoding.UNICODE.charset(), jdbcType, streamType);
                } catch
(UnsupportedEncodingException e) {
                    MessageFormat form = new MessageFormat(SQLServerException.getErrString("R_errorConvertingValue"));
                    throw new SQLServerException(form.format(new Object[] {"UNIQUEIDENTIFIER", jdbcType}), null, 0, e);
                }
            }

            default: {
                // Stream requests get the raw bytes wrapped in a stream;
                // everything else gets the raw byte array.
                if (StreamType.BINARY == streamType || StreamType.ASCII == streamType)
                    return new ByteArrayInputStream(guid);

                return guid;
            }
        }
    }

    /**
     * Reads a multi-part table name from TDS and returns it as an array of Strings.
     */
    final SQLIdentifier readSQLIdentifier() throws SQLServerException {
        // Multi-part names should have between 1 and 4 parts
        int numParts = readUnsignedByte();
        if (!(1 <= numParts && numParts <= 4))
            throwInvalidTDS();

        // Each part is a length-prefixed Unicode string
        String[] nameParts = new String[numParts];
        for (int i = 0; i < numParts; i++)
            nameParts[i] = readUnicodeString(readUnsignedShort());

        // Build the identifier from the name parts, working backward from the object
        // name: the last part is the object, then schema, database, and server.
        SQLIdentifier identifier = new SQLIdentifier();
        identifier.setObjectName(nameParts[numParts - 1]);
        if (numParts >= 2)
            identifier.setSchemaName(nameParts[numParts - 2]);
        if (numParts >= 3)
            identifier.setDatabaseName(nameParts[numParts - 3]);
        if (4 == numParts)
            identifier.setServerName(nameParts[numParts - 4]);

        return identifier;
    }

    /**
     * Reads a collation from the response stream, terminating the connection if the
     * collation's encoding is unsupported.
     */
    final SQLCollation readCollation() throws SQLServerException {
        SQLCollation collation = null;

        try {
            collation = new SQLCollation(this);
        } catch (UnsupportedEncodingException e) {
            con.terminate(SQLServerException.DRIVER_ERROR_INVALID_TDS, e.getMessage(), e);
            // not reached
        }

        return collation;
    }

    /** Skips over bytesToSkip bytes of response payload, advancing through packets as needed. */
    final void skip(int bytesToSkip) throws SQLServerException {
        assert bytesToSkip >= 0;

        while (bytesToSkip > 0) {
            // Ensure that we have a packet to read from.
if (!ensurePayload())
                throwInvalidTDS();

            // Skip the lesser of the remaining bytes and the bytes left in this packet.
            int bytesSkipped = bytesToSkip;
            if (bytesSkipped > currentPacket.payloadLength - payloadOffset)
                bytesSkipped = currentPacket.payloadLength - payloadOffset;

            bytesToSkip -= bytesSkipped;
            payloadOffset += bytesSkipped;
        }
    }

    /**
     * Verifies that the server acknowledged the feature extension when column encryption is
     * enabled on the connection; skipped entirely when the connection was redirected.
     */
    final void tryProcessFeatureExtAck(boolean featureExtAckReceived) throws SQLServerException {
        // in case of redirection, do not check if TDS_FEATURE_EXTENSION_ACK is received or not.
        if (null != this.con.getRoutingInfo()) {
            return;
        }

        if (isColumnEncryptionSettingEnabled() && !featureExtAckReceived)
            throw new SQLServerException(this, SQLServerException.getErrString("R_AE_NotSupportedByServer"), null, 0,
                    false);
    }

    /** Records the data sensitivity classification received from the server. */
    final void trySetSensitivityClassification(SensitivityClassification sensitivityClassification) {
        this.sensitivityClassification = sensitivityClassification;
    }
}


/**
 * The tds default implementation of a timeout command
 */
class TdsTimeoutCommand extends TimeoutCommand<TDSCommand> {
    protected TdsTimeoutCommand(int timeout, TDSCommand command, SQLServerConnection sqlServerConnection) {
        super(timeout, command, sqlServerConnection);
    }

    /**
     * Invoked when the timeout period expires: interrupts the registered command, or, when no
     * command is registered, terminates the connection.
     */
    protected void interrupt() throws Exception {
        TDSCommand command = getCommand();
        SQLServerConnection sqlServerConnection = getSqlServerConnection();
        try {
            // If TCP Connection to server is silently dropped, exceeding the query timeout
            // on the same connection does not throw SQLTimeoutException.
            // The application stops responding instead until SocketTimeoutException is
            // thrown. In this case, we must manually terminate the connection.
            if (null == command && null != sqlServerConnection) {
                sqlServerConnection.terminate(SQLServerException.DRIVER_ERROR_IO_FAILED,
                        SQLServerException.getErrString("R_connectionIsClosed"));
            } else {
                // If the timer wasn't canceled before it ran out of
                // time then interrupt the registered command.
if (null != command) command.interrupt(SQLServerException.getErrString("R_queryTimedOut")); } } catch (SQLServerException e) { // Request failed to time out and SQLServerConnection does not exist if (null != command) command.log(Level.FINE, "Command could not be timed out. Reason: " + e.getMessage()); throw new SQLServerException(SQLServerException.getErrString("R_crCommandCannotTimeOut"), e); } } } /** * TDSCommand encapsulates an interruptable TDS conversation. * * A conversation may consist of one or more TDS request and response messages. A command may be interrupted at any * point, from any thread, and for any reason. Acknowledgement and handling of an interrupt is fully encapsulated by * this class. * * Commands may be created with an optional timeout (in seconds). Timeouts are implemented as a form of interrupt, where * the interrupt event occurs when the timeout period expires. Currently, only the time to receive the response from the * channel counts against the timeout period. */ abstract class TDSCommand implements Serializable { /** * Always update serialVersionUID when prompted. */ private static final long serialVersionUID = 5485075546328951857L; abstract boolean doExecute() throws SQLServerException; final static Logger logger = Logger.getLogger("com.microsoft.sqlserver.jdbc.internals.TDS.Command"); private final String logContext; final String getLogContext() { return logContext; } private String traceID; final public String toString() { if (traceID == null) traceID = "TDSCommand@" + Integer.toHexString(hashCode()) + " (" + logContext + ")"; return traceID; } final void log(Level level, String message) { logger.log(level, toString() + ": " + message); } // TDS channel accessors // These are set/reset at command execution time. 
// Volatile ensures visibility to execution thread and interrupt thread private volatile TDSWriter tdsWriter; private volatile TDSReader tdsReader; protected TDSWriter getTDSWriter() { return tdsWriter; } // Lock to ensure atomicity when manipulating more than one of the following // shared interrupt state variables below. private final Object interruptLock = new Object(); // Flag set when this command starts execution, indicating that it is // ready to respond to interrupts; and cleared when its last response packet is // received, indicating that it is no longer able to respond to interrupts. // If the command is interrupted after interrupts have been disabled, then the // interrupt is ignored. private volatile boolean interruptsEnabled = false; protected boolean getInterruptsEnabled() { return interruptsEnabled; } protected void setInterruptsEnabled(boolean interruptsEnabled) { synchronized (interruptLock) { this.interruptsEnabled = interruptsEnabled; } } // Flag set to indicate that an interrupt has happened. private volatile boolean wasInterrupted = false; private boolean wasInterrupted() { return wasInterrupted; } // The reason for the interrupt. private volatile String interruptReason = null; // Flag set when this command's request to the server is complete. // If a command is interrupted before its request is complete, it is the executing // thread's responsibility to send the attention signal to the server if necessary. // After the request is complete, the interrupting thread must send the attention signal. private volatile boolean requestComplete; protected boolean getRequestComplete() { return requestComplete; } protected void setRequestComplete(boolean requestComplete) { synchronized (interruptLock) { this.requestComplete = requestComplete; } } // Flag set when an attention signal has been sent to the server, indicating that a // TDS packet containing the attention ack message is to be expected in the response. 
// This flag is cleared after the attention ack message has been received and processed. private volatile boolean attentionPending = false; boolean attentionPending() { return attentionPending; } // Flag set when this command's response has been processed. Until this flag is set, // there may be unprocessed information left in the response, such as transaction // ENVCHANGE notifications. private volatile boolean processedResponse; protected boolean getProcessedResponse() { return processedResponse; } protected void setProcessedResponse(boolean processedResponse) { synchronized (interruptLock) { this.processedResponse = processedResponse; } } // Flag set when this command's response is ready to be read from the server and cleared // after its response has been received, but not necessarily processed, up to and including // any attention ack. The command's response is read either on demand as it is processed, // or by detaching. private volatile boolean readingResponse; private int queryTimeoutSeconds; private int cancelQueryTimeoutSeconds; private ScheduledFuture<?> timeout; private TdsTimeoutCommand timeoutCommand; /* * Some flags for Connection Resiliency. We need to know if a command has already been registered in the poller, or * if it was actually executed. 
*/ private boolean isRegisteredInPoller = false; private boolean isExecuted = false; protected int getQueryTimeoutSeconds() { return this.queryTimeoutSeconds; } protected int getCancelQueryTimeoutSeconds() { return this.cancelQueryTimeoutSeconds; } final boolean readingResponse() { return readingResponse; } protected ArrayList<byte[]> enclaveCEKs; // Counter reference, so maxResultBuffer property can by acknowledged private ICounter counter; ICounter getCounter() { return counter; } void createCounter(ICounter previousCounter, Properties activeConnectionProperties) { if (null == previousCounter) { String maxResultBuffer = activeConnectionProperties .getProperty(SQLServerDriverStringProperty.MAX_RESULT_BUFFER.toString()); counter = new MaxResultBufferCounter(Long.parseLong(maxResultBuffer)); } else { counter = previousCounter; } } synchronized void addToPoller() { if (!isRegisteredInPoller) { // If command execution is subject to timeout then start timing until // the server returns the first response packet. if (queryTimeoutSeconds > 0) { this.timeoutCommand = new TdsTimeoutCommand(queryTimeoutSeconds, this, null); TimeoutPoller.getTimeoutPoller().addTimeoutCommand(this.timeoutCommand); isRegisteredInPoller = true; } } } boolean wasExecuted() { return isExecuted; } /** * Creates this command with an optional timeout. * * @param logContext * the string describing the context for this command. * @param queryTimeoutSeconds * the time before which the command must complete before it is interrupted. A value of 0 means no timeout. * @param cancelQueryTimeoutSeconds * the time to cancel the query timeout A value of 0 means no timeout. */ TDSCommand(String logContext, int queryTimeoutSeconds, int cancelQueryTimeoutSeconds) { this.logContext = logContext; this.queryTimeoutSeconds = queryTimeoutSeconds; this.cancelQueryTimeoutSeconds = cancelQueryTimeoutSeconds; } /** * Executes this command. 
* * @param tdsWriter * @param tdsReader * @throws SQLServerException * on any error executing the command, including cancel or timeout. */ boolean execute(TDSWriter tdsWriter, TDSReader tdsReader) throws SQLServerException { isExecuted = true; this.tdsWriter = tdsWriter; this.tdsReader = tdsReader; assert null != tdsReader; try { return doExecute(); // Derived classes implement the execution details } catch (SQLServerException e) { try { // If command execution threw an exception for any reason before the request // was complete then interrupt the command (it may already be interrupted) // and close it out to ensure that any response to the error/interrupt // is processed. // no point in trying to cancel on a closed connection. if (!requestComplete && !tdsReader.getConnection().isClosed()) { interrupt(e.getMessage()); onRequestComplete(); close(); } } catch (SQLServerException interruptException) { if (logger.isLoggable(Level.FINE)) logger.fine(this.toString() + ": Ignoring error in sending attention: " + interruptException.getMessage()); } // throw the original exception even if trying to interrupt fails even in the case // of trying to send a cancel to the server. throw e; } } /** * Provides sane default response handling. * * This default implementation just consumes everything in the response message. */ void processResponse(TDSReader tdsReader) throws SQLServerException { if (logger.isLoggable(Level.FINEST)) logger.finest(this.toString() + ": Processing response"); try { TDSParser.parse(tdsReader, getLogContext()); } catch (SQLServerException e) { if (SQLServerException.DRIVER_ERROR_FROM_DATABASE != e.getDriverErrorCode()) throw e; if (logger.isLoggable(Level.FINEST)) logger.finest(this.toString() + ": Ignoring error from database: " + e.getMessage()); } } /** * Clears this command from the TDS channel so that another command can execute. * * This method does not process the response. 
It just buffers it in memory, including any attention ack that may be * present. */ final void detach() throws SQLServerException { if (logger.isLoggable(Level.FINEST)) logger.finest(this + ": detaching..."); // Read any remaining response packets from the server. // This operation may be timed out or cancelled from another thread. while (tdsReader.readPacket()); // Postcondition: the entire response has been read assert !readingResponse; } final void close() { if (logger.isLoggable(Level.FINEST)) logger.finest(this + ": closing..."); if (logger.isLoggable(Level.FINEST)) logger.finest(this + ": processing response..."); while (!processedResponse) { try { processResponse(tdsReader); } catch (SQLServerException e) { if (logger.isLoggable(Level.FINEST)) logger.finest(this + ": close ignoring error processing response: " + e.getMessage()); if (tdsReader.getConnection().isSessionUnAvailable()) { processedResponse = true; attentionPending = false; } } } if (attentionPending) { if (logger.isLoggable(Level.FINEST)) logger.finest(this + ": processing attention ack..."); try { TDSParser.parse(tdsReader, "attention ack"); } catch (SQLServerException e) { if (tdsReader.getConnection().isSessionUnAvailable()) { if (logger.isLoggable(Level.FINEST)) logger.finest(this + ": giving up on attention ack after connection closed by exception: " + e); attentionPending = false; } else { if (logger.isLoggable(Level.FINEST)) logger.finest(this + ": ignored exception: " + e); } } // If the parser returns to us without processing the expected attention ack, // then assume that no attention ack is forthcoming from the server and // terminate the connection to prevent any other command from executing. 
if (attentionPending) { if (logger.isLoggable(Level.SEVERE)) { logger.severe(this.toString() + ": expected attn ack missing or not processed; terminating connection..."); } try { tdsReader.throwInvalidTDS(); } catch (SQLServerException e) { if (logger.isLoggable(Level.FINEST)) logger.finest(this + ": ignored expected invalid TDS exception: " + e); assert tdsReader.getConnection().isSessionUnAvailable(); attentionPending = false; } } } // Postcondition: // Response has been processed and there is no attention pending -- the command is closed. // Of course the connection may be closed too, but the command is done regardless... assert processedResponse && !attentionPending; } /** * Interrupts execution of this command, typically from another thread. * * Only the first interrupt has any effect. Subsequent interrupts are ignored. Interrupts are also ignored until * enabled. If interrupting the command requires an attention signal to be sent to the server, then this method * sends that signal if the command's request is already complete. * * Signalling mechanism is "fire and forget". It is up to either the execution thread or, possibly, a detaching * thread, to ensure that any pending attention ack later will be received and processed. * * @param reason * the reason for the interrupt, typically cancel or timeout. * @throws SQLServerException * if interrupting fails for some reason. This call does not throw the reason for the interrupt. */ void interrupt(String reason) throws SQLServerException { // Multiple, possibly simultaneous, interrupts may occur. // Only the first one should be recognized and acted upon. 
synchronized (interruptLock) { if (interruptsEnabled && !wasInterrupted()) { if (logger.isLoggable(Level.FINEST)) logger.finest(this + ": Raising interrupt for reason:" + reason); wasInterrupted = true; interruptReason = reason; if (requestComplete) attentionPending = tdsWriter.sendAttention(); if (correspondingThread != null) { this.correspondingThread.interrupt(); this.correspondingThread = null; } } } } private boolean interruptChecked = false; private Thread correspondingThread = null; /** * Checks once whether an interrupt has occurred, and, if it has, throws an exception indicating that fact. * * Any calls after the first to check for interrupts are no-ops. This method is called periodically from this * command's execution thread to notify the app when an interrupt has happened. * * It should only be called from places where consistent behavior can be ensured after the exception is thrown. For * example, it should not be called at arbitrary times while processing the response, as doing so could leave the * response token stream in an inconsistent state. Currently, response processing only checks for interrupts after * every result or OUT parameter. * * Request processing checks for interrupts before writing each packet. * * @throws SQLServerException * if this command was interrupted, throws the reason for the interrupt. */ final void checkForInterrupt() throws SQLServerException { // Throw an exception with the interrupt reason if this command was interrupted. // Note that the interrupt reason may be null. Checking whether the // command was interrupted does not require the interrupt lock since only one // of the shared state variables is being manipulated; interruptChecked is not // shared with the interrupt thread. 
if (wasInterrupted() && !interruptChecked) { interruptChecked = true; if (logger.isLoggable(Level.FINEST)) logger.finest(this + ": throwing interrupt exception, reason: " + interruptReason); throw new SQLServerException(interruptReason, SQLState.STATEMENT_CANCELED, DriverError.NOT_SET, null); } } /** * Notifies this command when no more request packets are to be sent to the server. * * After the last packet has been sent, the only way to interrupt the request is to send an attention signal from * the interrupt() method. * * Note that this method is called when the request completes normally (last packet sent with EOM bit) or when it * completes after being interrupted (0 or more packets sent with no EOM bit). */ final void onRequestComplete() throws SQLServerException { synchronized (interruptLock) { assert !requestComplete; if (logger.isLoggable(Level.FINEST)) logger.finest(this + ": request complete"); requestComplete = true; // If this command was interrupted before its request was complete then // we need to send the attention signal if necessary. Note that if no // attention signal is sent (i.e. no packets were sent to the server before // the interrupt happened), then don't expect an attention ack or any // other response. if (!interruptsEnabled) { assert !attentionPending; assert !processedResponse; assert !readingResponse; processedResponse = true; } else if (wasInterrupted()) { if (tdsWriter.isEOMSent()) { attentionPending = tdsWriter.sendAttention(); readingResponse = attentionPending; } else { assert !attentionPending; readingResponse = tdsWriter.ignoreMessage(); } processedResponse = !readingResponse; } else { assert !attentionPending; assert !processedResponse; readingResponse = true; } } } /** * Notifies this command when the last packet of the response has been read. * * When the last packet is read, interrupts are disabled. 
If an interrupt occurred prior to disabling that caused an * attention signal to be sent to the server, then an extra packet containing the attention ack is read. * * This ensures that on return from this method, the TDS channel is clear of all response packets for this command. * * Note that this method is called for the attention ack message itself as well, so we need to be sure not to expect * more than one attention ack... */ final void onResponseEOM() throws SQLServerException { boolean readAttentionAck = false; // Atomically disable interrupts and check for a previous interrupt requiring // an attention ack to be read. synchronized (interruptLock) { if (interruptsEnabled) { if (logger.isLoggable(Level.FINEST)) logger.finest(this + ": disabling interrupts"); // Determine whether we still need to read the attention ack packet. // When a command is interrupted, Yukon (and later) always sends a response // containing at least a DONE(ERROR) token before it sends the attention ack, // even if the command's request was not complete. readAttentionAck = attentionPending; interruptsEnabled = false; } } // If an attention packet needs to be read then read it. This should // be done outside of the interrupt lock to avoid unnecessarily blocking // interrupting threads. Note that it is remotely possible that the call // to readPacket won't actually read anything if the attention ack was // already read by TDSCommand.detach(), in which case this method could // be called from multiple threads, leading to a benign followup process // to clear the readingResponse flag. if (readAttentionAck) tdsReader.readPacket(); readingResponse = false; } /** * Notifies this command when the end of its response token stream has been reached. * * After this call, we are guaranteed that tokens in the response have been processed. */ final void onTokenEOF() { processedResponse = true; } /** * Notifies this command when the attention ack (a DONE token with a special flag) has been processed. 
* * After this call, the attention ack should no longer be expected. */ final void onAttentionAck() { assert attentionPending; attentionPending = false; } /** * Starts sending this command's TDS request to the server. * * @param tdsMessageType * the type of the TDS message (RPC, QUERY, etc.) * @return the TDS writer used to write the request. * @throws SQLServerException * on any error, including acknowledgement of an interrupt. */ final TDSWriter startRequest(byte tdsMessageType) throws SQLServerException { if (logger.isLoggable(Level.FINEST)) logger.finest(this + ": starting request..."); // Start this command's request message try { tdsWriter.startMessage(this, tdsMessageType); } catch (SQLServerException e) { if (logger.isLoggable(Level.FINEST)) logger.finest(this + ": starting request: exception: " + e.getMessage()); throw e; } // (Re)initialize this command's interrupt state for its current execution. // To ensure atomically consistent behavior, do not leave the interrupt lock // until interrupts have been (re)enabled. synchronized (interruptLock) { requestComplete = false; readingResponse = false; processedResponse = false; attentionPending = false; wasInterrupted = false; interruptReason = null; interruptsEnabled = true; } return tdsWriter; } /** * Finishes the TDS request and then starts reading the TDS response from the server. * * @return the TDS reader used to read the response. * @throws SQLServerException * if there is any kind of error. */ final TDSReader startResponse() throws SQLServerException { return startResponse(false); } final TDSReader startResponse(boolean isAdaptive) throws SQLServerException { // Finish sending the request message. If this command was interrupted // at any point before endMessage() returns, then endMessage() throws an // exception with the reason for the interrupt. Request interrupts // are disabled by the time endMessage() returns. 
if (logger.isLoggable(Level.FINEST)) logger.finest(this + ": finishing request"); try { tdsWriter.endMessage(); } catch (SQLServerException e) { if (logger.isLoggable(Level.FINEST)) logger.finest(this + ": finishing request: endMessage threw exception: " + e.getMessage()); throw e; } // If command execution is subject to timeout then start timing until // the server returns the first response packet. if (queryTimeoutSeconds > 0) { SQLServerConnection conn = tdsReader != null ? tdsReader.getConnection() : null; this.timeout = tdsWriter.getSharedTimer().schedule(new TDSTimeoutTask(this, conn), queryTimeoutSeconds); } addToPoller(); if (logger.isLoggable(Level.FINEST)) logger.finest(this.toString() + ": Reading response..."); try { // Wait for the server to execute the request and read the first packet // (responseBuffering=adaptive) or all packets (responseBuffering=full) // of the response. if (isAdaptive) { tdsReader.readPacket(); } else { while (tdsReader.readPacket()); } } catch (SQLServerException e) { if (logger.isLoggable(Level.FINEST)) logger.finest(this.toString() + ": Exception reading response: " + e.getMessage()); throw e; } finally { // If command execution was subject to timeout then stop timing as soon // as the server returns the first response packet or errors out. if (this.timeout != null) { this.timeout.cancel(false); this.timeout = null; } } // A new response is received hence increment unprocessed response count. // but do not increment when sending fedauth tokens as that is an extra request if (!(this instanceof FedAuthTokenCommand)) { tdsReader.getConnection().getSessionRecovery().incrementUnprocessedResponseCount(); } return tdsReader; } /* * Currently only used in Connection Resiliency scenarios. This thread reference allows the current command to * interrupt the thread if it's sleeping. This is useful in timeout cases. 
void attachThread(Thread reconnectThread) {
        this.correspondingThread = reconnectThread;
    }
}


/**
 * UninterruptableTDSCommand encapsulates an uninterruptable TDS conversation.
 *
 * TDSCommands have interruptability built in. However, some TDSCommands such as DTC commands, connection commands,
 * cursor close and prepared statement handle close shouldn't be interruptable. This class provides a base
 * implementation for such commands.
 */
abstract class UninterruptableTDSCommand extends TDSCommand {
    /**
     * Always update serialVersionUID when prompted.
     */
    private static final long serialVersionUID = -6457195977162963793L;

    UninterruptableTDSCommand(String logContext) {
        super(logContext, 0, 0);
    }

    /**
     * Interrupting an uninterruptable command is a no-op. That is, it can happen,
     * but it should have no effect.
     */
    final void interrupt(String reason) throws SQLServerException {
        if (logger.isLoggable(Level.FINEST)) {
            logger.finest(toString() + " Ignoring interrupt of uninterruptable TDS command; Reason:" + reason);
        }
    }
}
package com.microsoft.sqlserver.jdbc; import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; import java.io.FileInputStream; import java.io.FileNotFoundException; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; import java.io.Reader; import java.io.UnsupportedEncodingException; import java.math.BigDecimal; import java.math.BigInteger; import java.math.RoundingMode; import java.net.Inet4Address; import java.net.Inet6Address; import java.net.InetAddress; import java.net.InetSocketAddress; import java.net.Socket; import java.net.SocketAddress; import java.net.SocketException; import java.net.SocketTimeoutException; import java.nio.ByteBuffer; import java.nio.ByteOrder; import java.nio.channels.SelectionKey; import java.nio.channels.Selector; import java.nio.channels.SocketChannel; import java.nio.charset.Charset; import java.security.KeyStore; import java.security.Provider; import java.security.Security; import java.security.cert.CertificateException; import java.security.cert.X509Certificate; import java.sql.Timestamp; import java.text.MessageFormat; import java.time.OffsetDateTime; import java.time.OffsetTime; import java.util.Arrays; import java.util.Calendar; import java.util.Collection; import java.util.GregorianCalendar; import java.util.Iterator; import java.util.LinkedList; import java.util.List; import java.util.Locale; import java.util.Map; import java.util.Map.Entry; import java.util.Set; import java.util.SimpleTimeZone; import java.util.TimeZone; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.Future; import java.util.concurrent.SynchronousQueue; import java.util.concurrent.ThreadFactory; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; import java.util.logging.Level; import java.util.logging.Logger; import javax.net.ssl.SSLContext; import 
javax.net.ssl.SSLSocket;
import javax.net.ssl.TrustManager;
import javax.net.ssl.TrustManagerFactory;
import javax.net.ssl.X509TrustManager;
import javax.xml.bind.DatatypeConverter;


/**
 * Wire-level constants for the TDS (Tabular Data Stream) protocol: protocol versions, response
 * token types, feature-extension IDs, TVP flags, RPC proc IDs, cursor constants, transaction
 * manager request types and packet types. TDS is a namespace only and cannot be instantiated.
 */
final class TDS {
    // TDS protocol versions
    static final int VER_DENALI = 0x74000004; // TDS 7.4
    static final int VER_KATMAI = 0x730B0003; // TDS 7.3B(includes null bit compression)
    static final int VER_YUKON = 0x72090002; // TDS 7.2
    static final int VER_UNKNOWN = 0x00000000; // Unknown/uninitialized

    // Token types that may appear in a server response stream
    static final int TDS_RET_STAT = 0x79;
    static final int TDS_COLMETADATA = 0x81;
    static final int TDS_TABNAME = 0xA4;
    static final int TDS_COLINFO = 0xA5;
    static final int TDS_ORDER = 0xA9;
    static final int TDS_ERR = 0xAA;
    static final int TDS_MSG = 0xAB;
    static final int TDS_RETURN_VALUE = 0xAC;
    static final int TDS_LOGIN_ACK = 0xAD;
    static final int TDS_FEATURE_EXTENSION_ACK = 0xAE;
    static final int TDS_ROW = 0xD1;
    static final int TDS_NBCROW = 0xD2;
    static final int TDS_ENV_CHG = 0xE3;
    static final int TDS_SSPI = 0xED;
    static final int TDS_DONE = 0xFD;
    static final int TDS_DONEPROC = 0xFE;
    static final int TDS_DONEINPROC = 0xFF;
    static final int TDS_FEDAUTHINFO = 0xEE;

    // FedAuth
    static final int TDS_FEATURE_EXT_FEDAUTH = 0x02;
    static final int TDS_FEDAUTH_LIBRARY_SECURITYTOKEN = 0x01;
    static final int TDS_FEDAUTH_LIBRARY_ADAL = 0x02;
    static final int TDS_FEDAUTH_LIBRARY_RESERVED = 0x7F;
    static final byte ADALWORKFLOW_ACTIVEDIRECTORYPASSWORD = 0x01;
    static final byte ADALWORKFLOW_ACTIVEDIRECTORYINTEGRATED = 0x02;
    static final byte FEDAUTH_INFO_ID_STSURL = 0x01; // FedAuthInfoData is token endpoint URL from which to acquire fed auth token
    static final byte FEDAUTH_INFO_ID_SPN = 0x02; // FedAuthInfoData is the SPN to use for acquiring fed auth token

    // AE (Always Encrypted) constants
    static final int TDS_FEATURE_EXT_AE = 0x04;
    static final int MAX_SUPPORTED_TCE_VERSION = 0x01; // max version
    static final int CUSTOM_CIPHER_ALGORITHM_ID = 0; // custom cipher algorithm id (original "// max version" comment looked copy-pasted — TODO confirm semantics)
    static final int AES_256_CBC = 1;
    static final int AEAD_AES_256_CBC_HMAC_SHA256 = 2;
    static final int AE_METADATA = 0x08;

    // Table-valued parameter (TVP) tokens
    static final int TDS_TVP = 0xF3;
    static final int TVP_ROW = 0x01;
    static final int TVP_NULL_TOKEN = 0xFFFF;
    static final int TVP_STATUS_DEFAULT = 0x02;
    static final int TVP_ORDER_UNIQUE_TOKEN = 0x10;

    // TVP_ORDER_UNIQUE_TOKEN flags
    static final byte TVP_ORDERASC_FLAG = 0x1;
    static final byte TVP_ORDERDESC_FLAG = 0x2;
    static final byte TVP_UNIQUE_FLAG = 0x4;

    // TVP flags, may be used in other places
    static final int FLAG_NULLABLE = 0x01;
    static final int FLAG_TVP_DEFAULT_COLUMN = 0x200;

    static final int FEATURE_EXT_TERMINATOR = -1;

    /**
     * Maps a TDS response token type to a human-readable name (used in trace logging).
     */
    static final String getTokenName(int tdsTokenType) {
        switch (tdsTokenType) {
            case TDS_RET_STAT:
                return "TDS_RET_STAT (0x79)";
            case TDS_COLMETADATA:
                return "TDS_COLMETADATA (0x81)";
            case TDS_TABNAME:
                return "TDS_TABNAME (0xA4)";
            case TDS_COLINFO:
                return "TDS_COLINFO (0xA5)";
            case TDS_ORDER:
                return "TDS_ORDER (0xA9)";
            case TDS_ERR:
                return "TDS_ERR (0xAA)";
            case TDS_MSG:
                return "TDS_MSG (0xAB)";
            case TDS_RETURN_VALUE:
                return "TDS_RETURN_VALUE (0xAC)";
            case TDS_LOGIN_ACK:
                return "TDS_LOGIN_ACK (0xAD)";
            case TDS_FEATURE_EXTENSION_ACK:
                return "TDS_FEATURE_EXTENSION_ACK (0xAE)";
            case TDS_ROW:
                return "TDS_ROW (0xD1)";
            case TDS_NBCROW:
                return "TDS_NBCROW (0xD2)";
            case TDS_ENV_CHG:
                return "TDS_ENV_CHG (0xE3)";
            case TDS_SSPI:
                return "TDS_SSPI (0xED)";
            case TDS_DONE:
                return "TDS_DONE (0xFD)";
            case TDS_DONEPROC:
                return "TDS_DONEPROC (0xFE)";
            case TDS_DONEINPROC:
                return "TDS_DONEINPROC (0xFF)";
            case TDS_FEDAUTHINFO:
                return "TDS_FEDAUTHINFO (0xEE)";
            default:
                return "unknown token (0x" + Integer.toHexString(tdsTokenType).toUpperCase() + ")";
        }
    }

    // RPC ProcIDs for use with RPCRequest (PKT_RPC) calls
    static final short PROCID_SP_CURSOR = 1;
    static final short PROCID_SP_CURSOROPEN = 2;
    static final short PROCID_SP_CURSORPREPARE = 3;
    static final short PROCID_SP_CURSOREXECUTE = 4;
    static final short PROCID_SP_CURSORPREPEXEC = 5;
    static final short PROCID_SP_CURSORUNPREPARE = 6;
    static final short PROCID_SP_CURSORFETCH = 7;
    static final short PROCID_SP_CURSOROPTION = 8;
    static final short PROCID_SP_CURSORCLOSE = 9;
    static final short PROCID_SP_EXECUTESQL = 10;
    static final short PROCID_SP_PREPARE = 11;
    static final short PROCID_SP_EXECUTE = 12;
    static final short PROCID_SP_PREPEXEC = 13;
    static final short PROCID_SP_PREPEXECRPC = 14;
    static final short PROCID_SP_UNPREPARE = 15;

    // Constants for use with cursor RPCs
    static final short SP_CURSOR_OP_UPDATE = 1;
    static final short SP_CURSOR_OP_DELETE = 2;
    static final short SP_CURSOR_OP_INSERT = 4;
    static final short SP_CURSOR_OP_REFRESH = 8;
    static final short SP_CURSOR_OP_LOCK = 16;
    static final short SP_CURSOR_OP_SETPOSITION = 32;
    static final short SP_CURSOR_OP_ABSOLUTE = 64;

    // Constants for server-cursored result sets.
    // See the Engine Cursors Functional Specification for details.
    static final int FETCH_FIRST = 1;
    static final int FETCH_NEXT = 2;
    static final int FETCH_PREV = 4;
    static final int FETCH_LAST = 8;
    static final int FETCH_ABSOLUTE = 16;
    static final int FETCH_RELATIVE = 32;
    static final int FETCH_REFRESH = 128;
    static final int FETCH_INFO = 256;
    static final int FETCH_PREV_NOADJUST = 512;
    static final byte RPC_OPTION_NO_METADATA = (byte) 0x02;

    // Transaction manager request types
    static final short TM_GET_DTC_ADDRESS = 0;
    static final short TM_PROPAGATE_XACT = 1;
    static final short TM_BEGIN_XACT = 5;
    static final short TM_PROMOTE_PROMOTABLE_XACT = 6;
    static final short TM_COMMIT_XACT = 7;
    static final short TM_ROLLBACK_XACT = 8;
    static final short TM_SAVE_XACT = 9;

    // TDS packet (message) types
    static final byte PKT_QUERY = 1;
    static final byte PKT_RPC = 3;
    static final byte PKT_REPLY = 4;
    static final byte PKT_CANCEL_REQ = 6;
    static final byte PKT_BULK = 7;
    static final byte PKT_DTC = 14;
    static final byte PKT_LOGON70 = 16; // 0x10
    static final byte PKT_SSPI = 17;
    static final byte PKT_PRELOGIN = 18; // 0x12
    static final byte PKT_FEDAUTH_TOKEN_MESSAGE = 8; // Authentication token for federated authentication
    // Packet header status byte flags
    static final byte STATUS_NORMAL = 0x00;
    static final byte STATUS_BIT_EOM = 0x01;
    static final byte STATUS_BIT_ATTENTION = 0x02;// this is called ignore bit in TDS spec
    static final byte STATUS_BIT_RESET_CONN = 0x08;

    // Various TDS packet size constants
    static final int INVALID_PACKET_SIZE = -1;
    static final int INITIAL_PACKET_SIZE = 4096;
    static final int MIN_PACKET_SIZE = 512;
    static final int MAX_PACKET_SIZE = 32767;
    static final int DEFAULT_PACKET_SIZE = 8000;
    static final int SERVER_PACKET_SIZE = 0; // Accept server's configured packet size

    // TDS packet header size and offsets
    static final int PACKET_HEADER_SIZE = 8;
    static final int PACKET_HEADER_MESSAGE_TYPE = 0;
    static final int PACKET_HEADER_MESSAGE_STATUS = 1;
    static final int PACKET_HEADER_MESSAGE_LENGTH = 2;
    static final int PACKET_HEADER_SPID = 4;
    static final int PACKET_HEADER_SEQUENCE_NUM = 6;
    static final int PACKET_HEADER_WINDOW = 7; // Reserved/Not used

    // MARS header length:
    // 2 byte header type
    // 8 byte transaction descriptor
    // 4 byte outstanding request count
    static final int MARS_HEADER_LENGTH = 18; // 2 byte header type, 8 byte transaction descriptor,
    static final int TRACE_HEADER_LENGTH = 26; // header length (4) + header type (2) + guid (16) + Sequence number size (4)
    static final short HEADERTYPE_TRACE = 3; // trace header type

    // Message header length
    static final int MESSAGE_HEADER_LENGTH = MARS_HEADER_LENGTH + 4; // length includes message header itself

    // PRELOGIN option tokens
    static final byte B_PRELOGIN_OPTION_VERSION = 0x00;
    static final byte B_PRELOGIN_OPTION_ENCRYPTION = 0x01;
    static final byte B_PRELOGIN_OPTION_INSTOPT = 0x02;
    static final byte B_PRELOGIN_OPTION_THREADID = 0x03;
    static final byte B_PRELOGIN_OPTION_MARS = 0x04;
    static final byte B_PRELOGIN_OPTION_TRACEID = 0x05;
    static final byte B_PRELOGIN_OPTION_FEDAUTHREQUIRED = 0x06;
    static final byte B_PRELOGIN_OPTION_TERMINATOR = (byte) 0xFF;

    // Login option byte 1
    static final byte LOGIN_OPTION1_ORDER_X86 = 0x00;
    static final byte LOGIN_OPTION1_ORDER_6800 = 0x01;
    static final byte LOGIN_OPTION1_CHARSET_ASCII = 0x00;
    static final byte LOGIN_OPTION1_CHARSET_EBCDIC = 0x02;
    static final byte LOGIN_OPTION1_FLOAT_IEEE_754 = 0x00;
    static final byte LOGIN_OPTION1_FLOAT_VAX = 0x04;
    static final byte LOGIN_OPTION1_FLOAT_ND5000 = 0x08;
    static final byte LOGIN_OPTION1_DUMPLOAD_ON = 0x00;
    static final byte LOGIN_OPTION1_DUMPLOAD_OFF = 0x10;
    static final byte LOGIN_OPTION1_USE_DB_ON = 0x00;
    static final byte LOGIN_OPTION1_USE_DB_OFF = 0x20;
    static final byte LOGIN_OPTION1_INIT_DB_WARN = 0x00;
    static final byte LOGIN_OPTION1_INIT_DB_FATAL = 0x40;
    static final byte LOGIN_OPTION1_SET_LANG_OFF = 0x00;
    static final byte LOGIN_OPTION1_SET_LANG_ON = (byte) 0x80;

    // Login option byte 2
    static final byte LOGIN_OPTION2_INIT_LANG_WARN = 0x00;
    static final byte LOGIN_OPTION2_INIT_LANG_FATAL = 0x01;
    static final byte LOGIN_OPTION2_ODBC_OFF = 0x00;
    static final byte LOGIN_OPTION2_ODBC_ON = 0x02;
    static final byte LOGIN_OPTION2_TRAN_BOUNDARY_OFF = 0x00;
    static final byte LOGIN_OPTION2_TRAN_BOUNDARY_ON = 0x04;
    static final byte LOGIN_OPTION2_CACHE_CONNECTION_OFF = 0x00;
    static final byte LOGIN_OPTION2_CACHE_CONNECTION_ON = 0x08;
    static final byte LOGIN_OPTION2_USER_NORMAL = 0x00;
    static final byte LOGIN_OPTION2_USER_SERVER = 0x10;
    static final byte LOGIN_OPTION2_USER_REMUSER = 0x20;
    static final byte LOGIN_OPTION2_USER_SQLREPL = 0x30;
    static final byte LOGIN_OPTION2_INTEGRATED_SECURITY_OFF = 0x00;
    static final byte LOGIN_OPTION2_INTEGRATED_SECURITY_ON = (byte) 0x80;

    // Login option byte 3
    static final byte LOGIN_OPTION3_DEFAULT = 0x00;
    static final byte LOGIN_OPTION3_CHANGE_PASSWORD = 0x01;
    static final byte LOGIN_OPTION3_SEND_YUKON_BINARY_XML = 0x02;
    static final byte LOGIN_OPTION3_USER_INSTANCE = 0x04;
    static final byte LOGIN_OPTION3_UNKNOWN_COLLATION_HANDLING = 0x08;
    static final byte LOGIN_OPTION3_FEATURE_EXTENSION = 0x10;

    // Login type flag (bits 5 - 7 reserved for future use)
    static final byte LOGIN_SQLTYPE_DEFAULT = 0x00;
    static final byte LOGIN_SQLTYPE_TSQL = 0x01;
    static final byte LOGIN_SQLTYPE_ANSI_V1 = 0x02;
    static final byte LOGIN_SQLTYPE_ANSI89_L1 = 0x03;
    static final byte LOGIN_SQLTYPE_ANSI89_L2 = 0x04;
    static final byte LOGIN_SQLTYPE_ANSI89_IEF = 0x05;
    static final byte LOGIN_SQLTYPE_ANSI89_ENTRY = 0x06;
    static final byte LOGIN_SQLTYPE_ANSI89_TRANS = 0x07;
    static final byte LOGIN_SQLTYPE_ANSI89_INTER = 0x08;
    static final byte LOGIN_SQLTYPE_ANSI89_FULL = 0x09;

    static final byte LOGIN_OLEDB_OFF = 0x00;
    static final byte LOGIN_OLEDB_ON = 0x10;

    // Application intent (read-only routing)
    static final byte LOGIN_READ_ONLY_INTENT = 0x20;
    static final byte LOGIN_READ_WRITE_INTENT = 0x00;

    // PRELOGIN encryption negotiation levels
    static final byte ENCRYPT_OFF = 0x00;
    static final byte ENCRYPT_ON = 0x01;
    static final byte ENCRYPT_NOT_SUP = 0x02;
    static final byte ENCRYPT_REQ = 0x03;
    static final byte ENCRYPT_INVALID = (byte) 0xFF;

    /**
     * Maps a PRELOGIN encryption level to a human-readable name (used in trace logging).
     */
    static final String getEncryptionLevel(int level) {
        switch (level) {
            case ENCRYPT_OFF:
                return "OFF";
            case ENCRYPT_ON:
                return "ON";
            case ENCRYPT_NOT_SUP:
                return "NOT SUPPORTED";
            case ENCRYPT_REQ:
                return "REQUIRED";
            default:
                return "unknown encryption level (0x" + Integer.toHexString(level).toUpperCase() + ")";
        }
    }

    // Prelogin packet length, including the tds header,
    // version, encryption, and traceid data sessions.
    // For detailed info, please check the definition of
    // preloginRequest in Prelogin function.
    static final byte B_PRELOGIN_MESSAGE_LENGTH = 67;
    static final byte B_PRELOGIN_MESSAGE_LENGTH_WITH_FEDAUTH = 73;

    // Scroll options and concurrency options lifted out
    // of the Yukon cursors spec for sp_cursoropen.
    final static int SCROLLOPT_KEYSET = 1;
    final static int SCROLLOPT_DYNAMIC = 2;
    final static int SCROLLOPT_FORWARD_ONLY = 4;
    final static int SCROLLOPT_STATIC = 8;
    final static int SCROLLOPT_FAST_FORWARD = 16;
    final static int SCROLLOPT_PARAMETERIZED_STMT = 4096;
    final static int SCROLLOPT_AUTO_FETCH = 8192;
    final static int SCROLLOPT_AUTO_CLOSE = 16384;

    final static int CCOPT_READ_ONLY = 1;
    final static int CCOPT_SCROLL_LOCKS = 2;
    final static int CCOPT_OPTIMISTIC_CC = 4;
    final static int CCOPT_OPTIMISTIC_CCVAL = 8;
    final static int CCOPT_ALLOW_DIRECT = 8192;
    final static int CCOPT_UPDT_IN_PLACE = 16384;

    // Result set rows include an extra, "hidden" ROWSTAT column which indicates
    // the overall success or failure of the row fetch operation. With a keyset
    // cursor, the value in the ROWSTAT column indicates whether the row has been
    // deleted from the database.
    static final int ROWSTAT_FETCH_SUCCEEDED = 1;
    static final int ROWSTAT_FETCH_MISSING = 2;

    // ColumnInfo status
    final static int COLINFO_STATUS_EXPRESSION = 0x04;
    final static int COLINFO_STATUS_KEY = 0x08;
    final static int COLINFO_STATUS_HIDDEN = 0x10;
    final static int COLINFO_STATUS_DIFFERENT_NAME = 0x20;

    final static int MAX_FRACTIONAL_SECONDS_SCALE = 7;

    // Bounds of the SQL Server datetime type, which JDBC Timestamp values must fit within.
    final static Timestamp MAX_TIMESTAMP = Timestamp.valueOf("2079-06-06 23:59:59");
    final static Timestamp MIN_TIMESTAMP = Timestamp.valueOf("1900-01-01 00:00:00");

    /**
     * Returns the number of bytes used on the wire for the time-of-day portion of a temporal
     * value at the given fractional-seconds scale (0..7).
     */
    static int nanosSinceMidnightLength(int scale) {
        final int[] scaledLengths = {3, 3, 3, 4, 4, 5, 5, 5};
        assert scale >= 0;
        assert scale <= MAX_FRACTIONAL_SECONDS_SCALE;
        return scaledLengths[scale];
    }

    final static int DAYS_INTO_CE_LENGTH = 3;
    final static int MINUTES_OFFSET_LENGTH = 2;

    // Number of days in a "normal" (non-leap) year according to SQL Server.
    final static int DAYS_PER_YEAR = 365;

    final static int BASE_YEAR_1900 = 1900;
    final static int BASE_YEAR_1970 = 1970;
    final static String BASE_DATE_1970 = "1970-01-01";

    /** Wire length of a TIME value at the given scale. */
    static int timeValueLength(int scale) {
        return nanosSinceMidnightLength(scale);
    }

    /** Wire length of a DATETIME2 value at the given scale (date + time parts). */
    static int datetime2ValueLength(int scale) {
        return DAYS_INTO_CE_LENGTH + nanosSinceMidnightLength(scale);
    }

    /** Wire length of a DATETIMEOFFSET value at the given scale (date + offset + time parts). */
    static int datetimeoffsetValueLength(int scale) {
        return DAYS_INTO_CE_LENGTH + MINUTES_OFFSET_LENGTH + nanosSinceMidnightLength(scale);
    }

    // TDS is just a namespace - it can't be instantiated.
    private TDS() {
    }
}


/**
 * Nanosecond conversion constants used when scaling temporal values.
 */
class Nanos {
    static final int PER_SECOND = 1000000000;
    static final int PER_MAX_SCALE_INTERVAL = PER_SECOND / (int) Math.pow(10, TDS.MAX_FRACTIONAL_SECONDS_SCALE);
    static final int PER_MILLISECOND = PER_SECOND / 1000;
    static final long PER_DAY = 24 * 60 * 60 * (long) PER_SECOND;

    // Nanos is just a namespace - it can't be instantiated.
    private Nanos() {
    }
}

// Constants relating to the historically accepted Julian-Gregorian calendar cutover date (October 15, 1582).
// Used in processing SQL Server temporal data types whose date component may precede that date.
// Scoping these constants to a class defers their initialization to first use.
class GregorianChange {
    // Cutover date for a pure Gregorian calendar - that is, a proleptic Gregorian calendar with
    // Gregorian leap year behavior throughout its entire range. This is the cutover date that is used
    // with temporal server values, which are represented in terms of number of days relative to a
    // base date.
    static final java.util.Date PURE_CHANGE_DATE = new java.util.Date(Long.MIN_VALUE);

    // The standard Julian to Gregorian cutover date (October 15, 1582) that the JDBC temporal
    // classes (Time, Date, Timestamp) assume when converting to and from their UTC milliseconds
    // representations.
    static final java.util.Date STANDARD_CHANGE_DATE = (new GregorianCalendar(Locale.US)).getGregorianChange();

    // A hint as to the number of days since 1/1/0001, past which we do not need to
    // rationalize the difference between SQL Server behavior (pure Gregorian)
    // and Java behavior (standard Gregorian).
    // Not having to rationalize the difference has a substantial (measured) performance benefit
    // for temporal getters.
    // The hint does not need to be exact, as long as it's later than the actual change date.
    static final int DAYS_SINCE_BASE_DATE_HINT = DDC.daysSinceBaseDate(1583, 1, 1);

    // Extra days that need to be added to a pure Gregorian date, post the Gregorian
    // cut over date, to match the default Julian-Gregorian calendar date of java.
    static final int EXTRA_DAYS_TO_BE_ADDED;

    static {
        // This issue refers to the following bugs in java(same issue).
        // The issue is fixed in JRE 1.7
        // and exists in all the older versions.
        // Due to the above bug, in older JVM versions(1.6 and before),
        // the date calculation is incorrect at the Gregorian cut over date.
        // i.e. the next date after Oct 4th 1582 is Oct 17th 1582, where as
        // it should have been Oct 15th 1582.
        // We intentionally do not make a check based on JRE version.
        // If we do so, our code would break if the bug is fixed in a later update
        // to an older JRE. So, we check for the existence of the bug instead.
        GregorianCalendar cal = new GregorianCalendar(Locale.US);
        cal.clear();
        cal.set(1, 1, 577738, 0, 0, 0);// 577738 = 1+577737(no of days since epoch that brings us to oct 15th 1582)
        if (cal.get(Calendar.DAY_OF_MONTH) == 15) {
            // If the date calculation is correct(the above bug is fixed),
            // post the default gregorian cut over date, the pure gregorian date
            // falls short by two days for all dates compared to julian-gregorian date.
            // so, we add two extra days for functional correctness.
            // Note: other ways, in which this issue can be fixed instead of
            // trying to detect the JVM bug is
            // a) use unoptimized code path in the function convertTemporalToObject
            // b) use cal.add api instead of cal.set api in the current optimized code path
            // In both the above approaches, the code is about 6-8 times slower,
            // resulting in an overall perf regression of about (10-30)% for perf test cases
            EXTRA_DAYS_TO_BE_ADDED = 2;
        } else
            EXTRA_DAYS_TO_BE_ADDED = 0;
    }

    // GregorianChange is just a namespace - it can't be instantiated.
    private GregorianChange() {
    }
}


/**
 * Holder for the UTC time zone singleton.
 */
final class UTC {
    // UTC/GMT time zone singleton.
    static final TimeZone timeZone = new SimpleTimeZone(0, "UTC");

    private UTC() {
    }
}


/**
 * TDSChannel manages the physical communications channel to SQL Server: the TCP/IP socket and
 * streams, and optionally an SSLSocket layered over them for encrypted connections.
 */
final class TDSChannel {
    private static final Logger logger = Logger.getLogger("com.microsoft.sqlserver.jdbc.internals.TDS.Channel");

    final Logger getLogger() {
        return logger;
    }

    // Fixed identifier used to tag log output from this channel.
    private final String traceID;

    final public String toString() {
        return traceID;
    }

    private final SQLServerConnection con;

    private final TDSWriter tdsWriter;

    final TDSWriter getWriter() {
        return tdsWriter;
    }

    final TDSReader getReader(TDSCommand command) {
        return new TDSReader(this, con, command);
    }

    // Socket for raw TCP/IP communications with SQL Server
    private Socket tcpSocket;

    // Socket for SSL-encrypted communications with SQL Server
    private SSLSocket sslSocket;

    // Socket providing the communications interface to the driver.
    // For SSL-encrypted connections, this is the SSLSocket wrapped
    // around the TCP socket. For unencrypted connections, it is
    // just the TCP socket itself.
    private Socket channelSocket;

    // Implementation of a Socket proxy that can switch from TDS-wrapped I/O
    // (using the TDSChannel itself) during SSL handshake to raw I/O over
    // the TCP/IP socket.
    ProxySocket proxySocket = null;

    // I/O streams for raw TCP/IP communications with SQL Server
    private InputStream tcpInputStream;
    private OutputStream tcpOutputStream;

    // I/O streams providing the communications interface to the driver.
// For SSL-encrypted connections, these are streams obtained from // the SSL socket above. They wrap the underlying TCP streams. // For unencrypted connections, they are just the TCP streams themselves. private InputStream inputStream; private OutputStream outputStream; /** TDS packet payload logger */ private static Logger packetLogger = Logger.getLogger("com.microsoft.sqlserver.jdbc.internals.TDS.DATA"); private final boolean isLoggingPackets = packetLogger.isLoggable(Level.FINEST); final boolean isLoggingPackets() { return isLoggingPackets; } // Number of TDS messages sent to and received from the server int numMsgsSent = 0; int numMsgsRcvd = 0; // Last SPID received from the server. Used for logging and to tag subsequent outgoing // packets to facilitate diagnosing problems from the server side. private int spid = 0; void setSPID(int spid) { this.spid = spid; } int getSPID() { return spid; } void resetPooledConnection() { tdsWriter.resetPooledConnection(); } TDSChannel(SQLServerConnection con) { this.con = con; traceID = "TDSChannel (" + con.toString() + ")"; this.tcpSocket = null; this.sslSocket = null; this.channelSocket = null; this.tcpInputStream = null; this.tcpOutputStream = null; this.inputStream = null; this.outputStream = null; this.tdsWriter = new TDSWriter(this, con); } /** * Opens the physical communications channel (TCP/IP socket and I/O streams) to the SQL Server. 
*/ final void open(String host, int port, int timeoutMillis, boolean useParallel, boolean useTnir, boolean isTnirFirstAttempt, int timeoutMillisForFullTimeout) throws SQLServerException { if (logger.isLoggable(Level.FINER)) logger.finer(this.toString() + ": Opening TCP socket..."); SocketFinder socketFinder = new SocketFinder(traceID, con); channelSocket = tcpSocket = socketFinder.findSocket(host, port, timeoutMillis, useParallel, useTnir, isTnirFirstAttempt, timeoutMillisForFullTimeout); try { // Set socket options tcpSocket.setTcpNoDelay(true); tcpSocket.setKeepAlive(true); // set SO_TIMEOUT int socketTimeout = con.getSocketTimeoutMilliseconds(); tcpSocket.setSoTimeout(socketTimeout); inputStream = tcpInputStream = tcpSocket.getInputStream(); outputStream = tcpOutputStream = tcpSocket.getOutputStream(); } catch (IOException ex) { SQLServerException.ConvertConnectExceptionToSQLServerException(host, port, con, ex); } } /** * Disables SSL on this TDS channel. */ void disableSSL() { if (logger.isLoggable(Level.FINER)) logger.finer(toString() + " Disabling SSL..."); /* * The mission: To close the SSLSocket and release everything that it is holding onto other than the TCP/IP socket and streams. * * The challenge: Simply closing the SSLSocket tries to do additional, unnecessary shutdown I/O over the TCP/IP streams that are bound to the * socket proxy, resulting in a hang and confusing SQL Server. * * Solution: Rewire the ProxySocket's input and output streams (one more time) to closed streams. SSLSocket sees that the streams are already * closed and does not attempt to do any further I/O on them before closing itself. */ // Create a couple of cheap closed streams InputStream is = new ByteArrayInputStream(new byte[0]); try { is.close(); } catch (IOException e) { // No reason to expect a brand new ByteArrayInputStream not to close, // but just in case... 
logger.fine("Ignored error closing InputStream: " + e.getMessage()); } OutputStream os = new ByteArrayOutputStream(); try { os.close(); } catch (IOException e) { // No reason to expect a brand new ByteArrayOutputStream not to close, // but just in case... logger.fine("Ignored error closing OutputStream: " + e.getMessage()); } // Rewire the proxy socket to the closed streams if (logger.isLoggable(Level.FINEST)) logger.finest(toString() + " Rewiring proxy streams for SSL socket close"); proxySocket.setStreams(is, os); // Now close the SSL socket. It will see that the proxy socket's streams // are closed and not try to do any further I/O over them. try { if (logger.isLoggable(Level.FINER)) logger.finer(toString() + " Closing SSL socket"); sslSocket.close(); } catch (IOException e) { // Don't care if we can't close the SSL socket. We're done with it anyway. logger.fine("Ignored error closing SSLSocket: " + e.getMessage()); } // Do not close the proxy socket. Doing so would close our TCP socket // to which the proxy socket is bound. Instead, just null out the reference // to free up the few resources it holds onto. proxySocket = null; // Finally, with all of the SSL support out of the way, put the TDSChannel // back to using the TCP/IP socket and streams directly. inputStream = tcpInputStream; outputStream = tcpOutputStream; channelSocket = tcpSocket; sslSocket = null; if (logger.isLoggable(Level.FINER)) logger.finer(toString() + " SSL disabled"); } /** * Used during SSL handshake, this class implements an InputStream that reads SSL handshake response data (framed in TDS messages) from the TDS * channel. 
*/ private class SSLHandshakeInputStream extends InputStream { private final TDSReader tdsReader; private final SSLHandshakeOutputStream sslHandshakeOutputStream; private final Logger logger; private final String logContext; SSLHandshakeInputStream(TDSChannel tdsChannel, SSLHandshakeOutputStream sslHandshakeOutputStream) { this.tdsReader = tdsChannel.getReader(null); this.sslHandshakeOutputStream = sslHandshakeOutputStream; this.logger = tdsChannel.getLogger(); this.logContext = tdsChannel.toString() + " (SSLHandshakeInputStream):"; } /** * If there is no handshake response data available to be read from existing packets then this method ensures that the SSL handshake output * stream has been flushed to the server, and reads another packet (starting the next TDS response message). * * Note that simply using TDSReader.ensurePayload isn't sufficient as it does not automatically start the new response message. */ private void ensureSSLPayload() throws IOException { if (0 == tdsReader.available()) { if (logger.isLoggable(Level.FINEST)) logger.finest(logContext + " No handshake response bytes available. 
Flushing SSL handshake output stream."); try { sslHandshakeOutputStream.endMessage(); } catch (SQLServerException e) { logger.finer(logContext + " Ending TDS message threw exception:" + e.getMessage()); throw new IOException(e.getMessage()); } if (logger.isLoggable(Level.FINEST)) logger.finest(logContext + " Reading first packet of SSL handshake response"); try { tdsReader.readPacket(); } catch (SQLServerException e) { logger.finer(logContext + " Reading response packet threw exception:" + e.getMessage()); throw new IOException(e.getMessage()); } } } public long skip(long n) throws IOException { if (logger.isLoggable(Level.FINEST)) logger.finest(logContext + " Skipping " + n + " bytes..."); if (n <= 0) return 0; if (n > Integer.MAX_VALUE) n = Integer.MAX_VALUE; ensureSSLPayload(); try { tdsReader.skip((int) n); } catch (SQLServerException e) { logger.finer(logContext + " Skipping bytes threw exception:" + e.getMessage()); throw new IOException(e.getMessage()); } return n; } private final byte oneByte[] = new byte[1]; public int read() throws IOException { int bytesRead; while (0 == (bytesRead = readInternal(oneByte, 0, oneByte.length))) ; assert 1 == bytesRead || -1 == bytesRead; return 1 == bytesRead ? 
oneByte[0] : -1; } public int read(byte[] b) throws IOException { return readInternal(b, 0, b.length); } public int read(byte b[], int offset, int maxBytes) throws IOException { return readInternal(b, offset, maxBytes); } private int readInternal(byte b[], int offset, int maxBytes) throws IOException { if (logger.isLoggable(Level.FINEST)) logger.finest(logContext + " Reading " + maxBytes + " bytes..."); ensureSSLPayload(); try { tdsReader.readBytes(b, offset, maxBytes); } catch (SQLServerException e) { logger.finer(logContext + " Reading bytes threw exception:" + e.getMessage()); throw new IOException(e.getMessage()); } return maxBytes; } } /** * Used during SSL handshake, this class implements an OutputStream that writes SSL handshake request data (framed in TDS messages) to the TDS * channel. */ private class SSLHandshakeOutputStream extends OutputStream { private final TDSWriter tdsWriter; /** Flag indicating when it is necessary to start a new prelogin TDS message */ private boolean messageStarted; private final Logger logger; private final String logContext; SSLHandshakeOutputStream(TDSChannel tdsChannel) { this.tdsWriter = tdsChannel.getWriter(); this.messageStarted = false; this.logger = tdsChannel.getLogger(); this.logContext = tdsChannel.toString() + " (SSLHandshakeOutputStream):"; } public void flush() throws IOException { // It seems that the security provider implementation in some JVMs // (notably SunJSSE in the 6.0 JVM) likes to add spurious calls to // flush the SSL handshake output stream during SSL handshaking. // We need to ignore these calls because the SSL handshake payload // needs to be completely encapsulated in TDS. The SSL handshake // input stream always ensures that this output stream has been flushed // before trying to read the response. 
if (logger.isLoggable(Level.FINEST)) logger.finest(logContext + " Ignored a request to flush the stream"); } void endMessage() throws SQLServerException { // We should only be asked to end the message if we have started one assert messageStarted; if (logger.isLoggable(Level.FINEST)) logger.finest(logContext + " Finishing TDS message"); // Flush any remaining bytes through the writer. Since there may be fewer bytes // ready to send than a full TDS packet, we end the message here and start a new // one later if additional handshake data needs to be sent. tdsWriter.endMessage(); messageStarted = false; } private final byte singleByte[] = new byte[1]; public void write(int b) throws IOException { singleByte[0] = (byte) (b & 0xFF); writeInternal(singleByte, 0, singleByte.length); } public void write(byte[] b) throws IOException { writeInternal(b, 0, b.length); } public void write(byte[] b, int off, int len) throws IOException { writeInternal(b, off, len); } private void writeInternal(byte[] b, int off, int len) throws IOException { try { // Start out the handshake request in a new prelogin message. Subsequent // writes just add handshake data to the request until flushed. if (!messageStarted) { if (logger.isLoggable(Level.FINEST)) logger.finest(logContext + " Starting new TDS packet..."); tdsWriter.startMessage(null, TDS.PKT_PRELOGIN); messageStarted = true; } if (logger.isLoggable(Level.FINEST)) logger.finest(logContext + " Writing " + len + " bytes..."); tdsWriter.writeBytes(b, off, len); } catch (SQLServerException e) { logger.finer(logContext + " Writing bytes threw exception:" + e.getMessage()); throw new IOException(e.getMessage()); } } } /** * This class implements an InputStream that just forwards all of its methods to an underlying InputStream. * * It is more predictable than FilteredInputStream which forwards some of its read methods directly to the underlying stream, but not others. 
*/ private final class ProxyInputStream extends InputStream { private InputStream filteredStream; ProxyInputStream(InputStream is) { filteredStream = is; } final void setFilteredStream(InputStream is) { filteredStream = is; } public long skip(long n) throws IOException { long bytesSkipped; if (logger.isLoggable(Level.FINEST)) logger.finest(toString() + " Skipping " + n + " bytes"); bytesSkipped = filteredStream.skip(n); if (logger.isLoggable(Level.FINEST)) logger.finest(toString() + " Skipped " + n + " bytes"); return bytesSkipped; } public int available() throws IOException { int bytesAvailable = filteredStream.available(); if (logger.isLoggable(Level.FINEST)) logger.finest(toString() + " " + bytesAvailable + " bytes available"); return bytesAvailable; } private final byte oneByte[] = new byte[1]; public int read() throws IOException { int bytesRead; while (0 == (bytesRead = readInternal(oneByte, 0, oneByte.length))) ; assert 1 == bytesRead || -1 == bytesRead; return 1 == bytesRead ? oneByte[0] : -1; } public int read(byte[] b) throws IOException { return readInternal(b, 0, b.length); } public int read(byte b[], int offset, int maxBytes) throws IOException { return readInternal(b, offset, maxBytes); } private int readInternal(byte b[], int offset, int maxBytes) throws IOException { int bytesRead; if (logger.isLoggable(Level.FINEST)) logger.finest(toString() + " Reading " + maxBytes + " bytes"); try { bytesRead = filteredStream.read(b, offset, maxBytes); } catch (IOException e) { if (logger.isLoggable(Level.FINER)) logger.finer(toString() + " " + e.getMessage()); logger.finer(toString() + " Reading bytes threw exception:" + e.getMessage()); throw e; } if (logger.isLoggable(Level.FINEST)) logger.finest(toString() + " Read " + bytesRead + " bytes"); return bytesRead; } public boolean markSupported() { boolean markSupported = filteredStream.markSupported(); if (logger.isLoggable(Level.FINEST)) logger.finest(toString() + " Returning markSupported: " + markSupported); 
return markSupported;
    }

    public void mark(int readLimit) {
        if (logger.isLoggable(Level.FINEST))
            logger.finest(toString() + " Marking next " + readLimit + " bytes");

        filteredStream.mark(readLimit);
    }

    public void reset() throws IOException {
        if (logger.isLoggable(Level.FINEST))
            logger.finest(toString() + " Resetting to previous mark");

        filteredStream.reset();
    }

    public void close() throws IOException {
        if (logger.isLoggable(Level.FINEST))
            logger.finest(toString() + " Closing");

        filteredStream.close();
    }
}

/**
 * This class implements an OutputStream that just forwards all of its methods to an underlying OutputStream.
 *
 * This class essentially does what FilteredOutputStream does, but is more efficient for our usage. FilteredOutputStream transforms block writes
 * to sequences of single-byte writes.
 */
final class ProxyOutputStream extends OutputStream {
    // Stream writes are currently delegated to; swapped via setFilteredStream when SSL is enabled.
    private OutputStream filteredStream;

    ProxyOutputStream(OutputStream os) {
        filteredStream = os;
    }

    final void setFilteredStream(OutputStream os) {
        filteredStream = os;
    }

    public void close() throws IOException {
        if (logger.isLoggable(Level.FINEST))
            logger.finest(toString() + " Closing");

        filteredStream.close();
    }

    public void flush() throws IOException {
        if (logger.isLoggable(Level.FINEST))
            logger.finest(toString() + " Flushing");

        filteredStream.flush();
    }

    // Scratch buffer so that single-byte writes reuse the block write path without allocating.
    private final byte singleByte[] = new byte[1];

    public void write(int b) throws IOException {
        singleByte[0] = (byte) (b & 0xFF);
        writeInternal(singleByte, 0, singleByte.length);
    }

    public void write(byte[] b) throws IOException {
        writeInternal(b, 0, b.length);
    }

    public void write(byte[] b, int off, int len) throws IOException {
        writeInternal(b, off, len);
    }

    private void writeInternal(byte[] b, int off, int len) throws IOException {
        if (logger.isLoggable(Level.FINEST))
            logger.finest(toString() + " Writing " + len + " bytes");

        filteredStream.write(b, off, len);
    }
}

/**
 * This class implements a Socket whose I/O streams can be switched from using a TDSChannel for I/O to using
* its underlying TCP/IP socket.
 *
 * The SSL socket binds to a ProxySocket. The initial SSL handshake is done over TDSChannel I/O streams so that the handshake payload is framed in
 * TDS packets. The I/O streams are then switched to TCP/IP I/O streams using setStreams, and SSL communications continue directly over the TCP/IP
 * I/O streams.
 *
 * Most methods other than those for getting the I/O streams are simply forwarded to the TDSChannel's underlying TCP/IP socket. Methods that
 * change the socket binding or provide direct channel access are disallowed.
 */
private class ProxySocket extends Socket {
    private final TDSChannel tdsChannel;
    private final Logger logger;
    private final String logContext;
    private final ProxyInputStream proxyInputStream;
    private final ProxyOutputStream proxyOutputStream;

    ProxySocket(TDSChannel tdsChannel) {
        this.tdsChannel = tdsChannel;
        this.logger = tdsChannel.getLogger();
        this.logContext = tdsChannel.toString() + " (ProxySocket):";

        // Create the I/O streams
        // Initially the proxy streams wrap the TDS-encapsulating handshake streams;
        // setStreams later rewires them to the raw TCP/IP streams.
        SSLHandshakeOutputStream sslHandshakeOutputStream = new SSLHandshakeOutputStream(tdsChannel);
        SSLHandshakeInputStream sslHandshakeInputStream = new SSLHandshakeInputStream(tdsChannel, sslHandshakeOutputStream);
        this.proxyOutputStream = new ProxyOutputStream(sslHandshakeOutputStream);
        this.proxyInputStream = new ProxyInputStream(sslHandshakeInputStream);
    }

    // Rewires the proxy streams (e.g. from the TDS-encapsulated handshake streams to the raw TCP/IP streams).
    void setStreams(InputStream is, OutputStream os) {
        proxyInputStream.setFilteredStream(is);
        proxyOutputStream.setFilteredStream(os);
    }

    public InputStream getInputStream() throws IOException {
        if (logger.isLoggable(Level.FINEST))
            logger.finest(logContext + " Getting input stream");

        return proxyInputStream;
    }

    public OutputStream getOutputStream() throws IOException {
        if (logger.isLoggable(Level.FINEST))
            logger.finest(logContext + " Getting output stream");

        return proxyOutputStream;
    }

    // Allow methods that should just forward to the underlying TCP socket or return fixed values

    public InetAddress getInetAddress() {
        return
tdsChannel.tcpSocket.getInetAddress();
    }

    public boolean getKeepAlive() throws SocketException {
        return tdsChannel.tcpSocket.getKeepAlive();
    }

    public InetAddress getLocalAddress() {
        return tdsChannel.tcpSocket.getLocalAddress();
    }

    public int getLocalPort() {
        return tdsChannel.tcpSocket.getLocalPort();
    }

    public SocketAddress getLocalSocketAddress() {
        return tdsChannel.tcpSocket.getLocalSocketAddress();
    }

    public boolean getOOBInline() throws SocketException {
        return tdsChannel.tcpSocket.getOOBInline();
    }

    public int getPort() {
        return tdsChannel.tcpSocket.getPort();
    }

    public int getReceiveBufferSize() throws SocketException {
        return tdsChannel.tcpSocket.getReceiveBufferSize();
    }

    public SocketAddress getRemoteSocketAddress() {
        return tdsChannel.tcpSocket.getRemoteSocketAddress();
    }

    public boolean getReuseAddress() throws SocketException {
        return tdsChannel.tcpSocket.getReuseAddress();
    }

    public int getSendBufferSize() throws SocketException {
        return tdsChannel.tcpSocket.getSendBufferSize();
    }

    public int getSoLinger() throws SocketException {
        return tdsChannel.tcpSocket.getSoLinger();
    }

    public int getSoTimeout() throws SocketException {
        return tdsChannel.tcpSocket.getSoTimeout();
    }

    public boolean getTcpNoDelay() throws SocketException {
        return tdsChannel.tcpSocket.getTcpNoDelay();
    }

    public int getTrafficClass() throws SocketException {
        return tdsChannel.tcpSocket.getTrafficClass();
    }

    // Hard-coded state answers.
    // NOTE(review): these assume the proxy exists only while the channel is connected -- confirm.
    public boolean isBound() {
        return true;
    }

    public boolean isClosed() {
        return false;
    }

    public boolean isConnected() {
        return true;
    }

    public boolean isInputShutdown() {
        return false;
    }

    public boolean isOutputShutdown() {
        return false;
    }

    public String toString() {
        return tdsChannel.tcpSocket.toString();
    }

    // No direct channel access; the SSL layer must go through the proxy streams.
    public SocketChannel getChannel() {
        return null;
    }

    // Disallow calls to methods that would change the underlying TCP socket

    public void bind(SocketAddress bindPoint) throws IOException {
        logger.finer(logContext + " Disallowed call to bind.
Throwing IOException.");
        throw new IOException();
    }

    public void connect(SocketAddress endpoint) throws IOException {
        logger.finer(logContext + " Disallowed call to connect (without timeout). Throwing IOException.");
        throw new IOException();
    }

    public void connect(SocketAddress endpoint, int timeout) throws IOException {
        logger.finer(logContext + " Disallowed call to connect (with timeout). Throwing IOException.");
        throw new IOException();
    }

    // Ignore calls to methods that would otherwise allow the SSL socket
    // to directly manipulate the underlying TCP socket

    public void close() throws IOException {
        if (logger.isLoggable(Level.FINER))
            logger.finer(logContext + " Ignoring close");
    }

    public void setReceiveBufferSize(int size) throws SocketException {
        if (logger.isLoggable(Level.FINER))
            logger.finer(toString() + " Ignoring setReceiveBufferSize size:" + size);
    }

    public void setSendBufferSize(int size) throws SocketException {
        if (logger.isLoggable(Level.FINER))
            logger.finer(toString() + " Ignoring setSendBufferSize size:" + size);
    }

    public void setReuseAddress(boolean on) throws SocketException {
        if (logger.isLoggable(Level.FINER))
            logger.finer(toString() + " Ignoring setReuseAddress");
    }

    public void setSoLinger(boolean on, int linger) throws SocketException {
        if (logger.isLoggable(Level.FINER))
            logger.finer(toString() + " Ignoring setSoLinger");
    }

    public void setSoTimeout(int timeout) throws SocketException {
        if (logger.isLoggable(Level.FINER))
            logger.finer(toString() + " Ignoring setSoTimeout");
    }

    public void setTcpNoDelay(boolean on) throws SocketException {
        if (logger.isLoggable(Level.FINER))
            logger.finer(toString() + " Ignoring setTcpNoDelay");
    }

    public void setTrafficClass(int tc) throws SocketException {
        if (logger.isLoggable(Level.FINER))
            logger.finer(toString() + " Ignoring setTrafficClass");
    }

    public void shutdownInput() throws IOException {
        if (logger.isLoggable(Level.FINER))
            logger.finer(toString() + " Ignoring shutdownInput");
    }

    public void shutdownOutput()
throws IOException {
        if (logger.isLoggable(Level.FINER))
            logger.finer(toString() + " Ignoring shutdownOutput");
    }

    public void sendUrgentData(int data) throws IOException {
        if (logger.isLoggable(Level.FINER))
            logger.finer(toString() + " Ignoring sendUrgentData");
    }

    public void setKeepAlive(boolean on) throws SocketException {
        if (logger.isLoggable(Level.FINER))
            logger.finer(toString() + " Ignoring setKeepAlive");
    }

    public void setOOBInline(boolean on) throws SocketException {
        if (logger.isLoggable(Level.FINER))
            logger.finer(toString() + " Ignoring setOOBInline");
    }
}

/**
 * This class implements an X509TrustManager that always accepts the X509Certificate chain offered to it.
 *
 * A PermissiveX509TrustManager is used to "verify" the authenticity of the server when the trustServerCertificate connection property is set to
 * true.
 */
private final class PermissiveX509TrustManager extends Object implements X509TrustManager {
    private final TDSChannel tdsChannel;
    private final Logger logger;
    private final String logContext;

    PermissiveX509TrustManager(TDSChannel tdsChannel) {
        this.tdsChannel = tdsChannel;
        this.logger = tdsChannel.getLogger();
        this.logContext = tdsChannel.toString() + " (PermissiveX509TrustManager):";
    }

    // SECURITY NOTE: both check methods accept any chain without validation. This is by
    // design for trustServerCertificate=true, per the class Javadoc above.
    public void checkClientTrusted(X509Certificate[] chain, String authType) throws CertificateException {
        if (logger.isLoggable(Level.FINER))
            logger.finer(logContext + " Trusting client certificate (!)");
    }

    public void checkServerTrusted(X509Certificate[] chain, String authType) throws CertificateException {
        if (logger.isLoggable(Level.FINER))
            logger.finer(logContext + " Trusting server certificate");
    }

    public X509Certificate[] getAcceptedIssuers() {
        return new X509Certificate[0];
    }
}

/**
 * This class implements an X509TrustManager that overrides the host name used for validation.
*
 * This validates the subject name in the certificate with the host name
 */
private final class HostNameOverrideX509TrustManager extends Object implements X509TrustManager {
    private final Logger logger;
    private final String logContext;
    // Real trust manager that performs chain validation before the host name check.
    private final X509TrustManager defaultTrustManager;
    // Expected server host name, lower-cased to compare against the canonical (lower-case) DN.
    private String hostName;

    HostNameOverrideX509TrustManager(TDSChannel tdsChannel, X509TrustManager tm, String hostName) {
        this.logger = tdsChannel.getLogger();
        this.logContext = tdsChannel.toString() + " (HostNameOverrideX509TrustManager):";
        defaultTrustManager = tm;

        // canonical name is in lower case so convert this to lowercase too.
        // Use a fixed locale, as the SAN comparison elsewhere in this class does with Locale.US,
        // so that locale-sensitive case mapping (e.g. the Turkish dotless i) cannot break the
        // match. (Was: default-locale toLowerCase() followed by a stray empty statement.)
        this.hostName = hostName.toLowerCase(Locale.US);
    }

    // Parse name in RFC 2253 format
    // Returns the common name if successful, null if failed to find the common name.
    // The parser is tuned to be safe rather than sorry: if it sees something it cannot parse
    // correctly it returns null.
    // NOTE(review): indexOf("cn=") can also match inside another attribute's value or type
    // (e.g. a value containing "cn=") -- confirm the canonical DN form rules this out.
    private String parseCommonName(String distinguishedName) {
        int index;
        // canonical name converts entire name to lowercase
        index = distinguishedName.indexOf("cn=");

        if (index == -1) {
            return null;
        }

        distinguishedName = distinguishedName.substring(index + 3);

        // Parse until a comma or end is reached
        // Note the parser will handle gracefully (essentially will return empty string) , inside the quotes (e.g cn="Foo, bar") however
        // RFC 952 says that the hostName cant have commas however the parser should not (and will not) crash if it sees a , within quotes.
for (index = 0; index < distinguishedName.length(); index++) {
            if (distinguishedName.charAt(index) == ',') {
                break;
            }
        }

        String commonName = distinguishedName.substring(0, index);

        // strip any quotes
        if (commonName.length() > 1 && ('\"' == commonName.charAt(0))) {
            if ('\"' == commonName.charAt(commonName.length() - 1))
                commonName = commonName.substring(1, commonName.length() - 1);
            else {
                // Be safe the name is not ended in " return null so the common Name wont match
                commonName = null;
            }
        }

        return commonName;
    }

    // Compares a name extracted from the certificate against the expected host name.
    // NOTE(review): this is an exact, case-normalized match only -- wildcard certificate
    // names (e.g. *.example.com) are not matched here; confirm that is intended.
    private boolean validateServerName(String nameInCert) throws CertificateException {
        // Failed to get the common name from DN or empty CN
        if (null == nameInCert) {
            if (logger.isLoggable(Level.FINER))
                logger.finer(logContext + " Failed to parse the name from the certificate or name is empty.");
            return false;
        }

        // Verify that the name in certificate matches exactly with the host name
        if (!nameInCert.equals(hostName)) {
            if (logger.isLoggable(Level.FINER))
                logger.finer(logContext + " The name in certificate " + nameInCert + " does not match with the server name " + hostName + ".");
            return false;
        }

        if (logger.isLoggable(Level.FINER))
            logger.finer(logContext + " The name in certificate:" + nameInCert + " validated against server name " + hostName + ".");

        return true;
    }

    // Client-side checks are simply forwarded to the real trust manager.
    public void checkClientTrusted(X509Certificate[] chain, String authType) throws CertificateException {
        if (logger.isLoggable(Level.FINEST))
            logger.finest(logContext + " Forwarding ClientTrusted.");
        defaultTrustManager.checkClientTrusted(chain, authType);
    }

    // Chain validation is delegated to the real trust manager first; only on success is the
    // leaf certificate's name checked against the expected host name.
    public void checkServerTrusted(X509Certificate[] chain, String authType) throws CertificateException {
        if (logger.isLoggable(Level.FINEST))
            logger.finest(logContext + " Forwarding Trusting server certificate");
        defaultTrustManager.checkServerTrusted(chain, authType);
        if (logger.isLoggable(Level.FINEST))
            logger.finest(logContext + " default serverTrusted succeeded proceeding with server name validation");
        validateServerNameInCertificate(chain[0]);
    }
// Validates the certificate's subject CN against the expected host name, falling back to
    // subjectAlternativeName dNSName entries when the CN does not match. Throws
    // CertificateException when no name matches.
    private void validateServerNameInCertificate(X509Certificate cert) throws CertificateException {
        String nameInCertDN = cert.getSubjectX500Principal().getName("canonical");

        if (logger.isLoggable(Level.FINER)) {
            logger.finer(logContext + " Validating the server name:" + hostName);
            logger.finer(logContext + " The DN name in certificate:" + nameInCertDN);
        }

        boolean isServerNameValidated;

        // the name in cert is in RFC2253 format parse it to get the actual subject name
        String subjectCN = parseCommonName(nameInCertDN);

        isServerNameValidated = validateServerName(subjectCN);

        // CN did not match; try the subjectAlternativeName (dNSName) entries.
        if (!isServerNameValidated) {
            Collection<List<?>> sanCollection = cert.getSubjectAlternativeNames();

            if (sanCollection != null) {
                // find a subjectAlternateName entry corresponding to DNS Name
                for (List<?> sanEntry : sanCollection) {
                    if (sanEntry != null && sanEntry.size() >= 2) {
                        Object key = sanEntry.get(0);
                        Object value = sanEntry.get(1);

                        if (logger.isLoggable(Level.FINER)) {
                            logger.finer(logContext + "Key: " + key + "; KeyClass:" + (key != null ? key.getClass() : null) + ";value: " + value
                                    + "; valueClass:" + (value != null ? value.getClass() : null));
                        }

                        // "Note that the Collection returned may contain
                        // more than one name of the same type."
                        // So, more than one entry of dnsNameType can be present.
                        // Java docs guarantee that the first entry in the list will be an integer.
                        // 2 is the sequence no of a dnsName
                        if ((key != null) && (key instanceof Integer) && ((Integer) key == 2)) {
                            // As per RFC2459, the DNSName will be in the
                            // "preferred name syntax" as specified by RFC
                            // 1034 and the name can be in upper or lower case.
                            // And no significance is attached to case.
                            // Java docs guarantee that the second entry in the list
                            // will be a string for dnsName
                            if (value != null && value instanceof String) {
                                String dnsNameInSANCert = (String) value;

                                // convert to upper case and then to lower case in english locale
                                // to avoid Turkish i issues.
// Note that, this conversion was not necessary for // cert.getSubjectX500Principal().getName("canonical"); // as the above API already does this by default as per documentation. dnsNameInSANCert = dnsNameInSANCert.toUpperCase(Locale.US); dnsNameInSANCert = dnsNameInSANCert.toLowerCase(Locale.US); isServerNameValidated = validateServerName(dnsNameInSANCert); if (isServerNameValidated) { if (logger.isLoggable(Level.FINER)) { logger.finer(logContext + " found a valid name in certificate: " + dnsNameInSANCert); } break; } } if (logger.isLoggable(Level.FINER)) { logger.finer(logContext + " the following name in certificate does not match the serverName: " + value); } } } else { if (logger.isLoggable(Level.FINER)) { logger.finer(logContext + " found an invalid san entry: " + sanEntry); } } } } } if (!isServerNameValidated) { String msg = SQLServerException.getErrString("R_certNameFailed"); throw new CertificateException(msg); } } public X509Certificate[] getAcceptedIssuers() { return defaultTrustManager.getAcceptedIssuers(); } } enum SSLHandhsakeState { SSL_HANDHSAKE_NOT_STARTED, SSL_HANDHSAKE_STARTED, SSL_HANDHSAKE_COMPLETE }; /** * Enables SSL Handshake. * * @param host * Server Host Name for SSL Handshake * @param port * Server Port for SSL Handshake * @throws SQLServerException */ void enableSSL(String host, int port) throws SQLServerException { // If enabling SSL fails, which it can for a number of reasons, the following items // are used in logging information to the TDS channel logger to help diagnose the problem. 
Provider tmfProvider = null; // TrustManagerFactory provider
Provider sslContextProvider = null; // SSLContext provider
Provider ksProvider = null; // KeyStore provider
String tmfDefaultAlgorithm = null; // Default algorithm (typically X.509) used by the TrustManagerFactory
SSLHandhsakeState handshakeState = SSLHandhsakeState.SSL_HANDHSAKE_NOT_STARTED;
boolean isFips = false;
String trustStoreType = null;
String fipsProvider = null;

// If anything in here fails, terminate the connection and throw an exception
try {
    if (logger.isLoggable(Level.FINER))
        logger.finer(toString() + " Enabling SSL...");

    String trustStoreFileName = con.activeConnectionProperties.getProperty(SQLServerDriverStringProperty.TRUST_STORE.toString());
    String trustStorePassword = con.activeConnectionProperties.getProperty(SQLServerDriverStringProperty.TRUST_STORE_PASSWORD.toString());
    String hostNameInCertificate = con.activeConnectionProperties
            .getProperty(SQLServerDriverStringProperty.HOSTNAME_IN_CERTIFICATE.toString());

    trustStoreType = con.activeConnectionProperties.getProperty(SQLServerDriverStringProperty.TRUST_STORE_TYPE.toString());

    // Fall back to the default trust store type (e.g. JKS) when none was specified.
    if (StringUtils.isEmpty(trustStoreType)) {
        trustStoreType = SQLServerDriverStringProperty.TRUST_STORE_TYPE.getDefaultValue();
    }

    fipsProvider = con.activeConnectionProperties.getProperty(SQLServerDriverStringProperty.FIPS_PROVIDER.toString());
    isFips = Boolean.valueOf(con.activeConnectionProperties.getProperty(SQLServerDriverBooleanProperty.FIPS.toString()));

    // FIPS mode imposes extra constraints on the connection settings; fail fast if violated.
    if (isFips) {
        validateFips(fipsProvider, trustStoreType, trustStoreFileName);
    }

    assert TDS.ENCRYPT_OFF == con.getRequestedEncryptionLevel() || // Login only SSL
            TDS.ENCRYPT_ON == con.getRequestedEncryptionLevel(); // Full SSL

    assert TDS.ENCRYPT_OFF == con.getNegotiatedEncryptionLevel() || // Login only SSL
            TDS.ENCRYPT_ON == con.getNegotiatedEncryptionLevel() || // Full SSL
            TDS.ENCRYPT_REQ == con.getNegotiatedEncryptionLevel(); // Full SSL

    // If we requested login only SSL or full SSL without server certificate validation,
    // then we'll "validate" the server certificate using a naive TrustManager that trusts
    // everything it sees.
    TrustManager[] tm = null;
    if (TDS.ENCRYPT_OFF == con.getRequestedEncryptionLevel()
            || (TDS.ENCRYPT_ON == con.getRequestedEncryptionLevel() && con.trustServerCertificate())) {
        if (logger.isLoggable(Level.FINER))
            logger.finer(toString() + " SSL handshake will trust any certificate");

        tm = new TrustManager[] {new PermissiveX509TrustManager(this)};
    }
    // Otherwise, we'll validate the certificate using a real TrustManager obtained
    // from a security provider that is capable of validating X.509 certificates.
    else {
        if (logger.isLoggable(Level.FINER))
            logger.finer(toString() + " SSL handshake will validate server certificate");

        KeyStore ks = null;

        // If we are using the system default trustStore and trustStorePassword
        // then we can skip all of the KeyStore loading logic below.
        // The security provider's implementation takes care of everything for us.
        if (null == trustStoreFileName && null == trustStorePassword) {
            if (logger.isLoggable(Level.FINER))
                logger.finer(toString() + " Using system default trust store and password");
        }

        // Otherwise either the trustStore, trustStorePassword, or both was specified.
        // In that case, we need to load up a KeyStore ourselves.
        else {
            // First, obtain an interface to a KeyStore that can load trust material
            // stored in Java Key Store (JKS) format.
            if (logger.isLoggable(Level.FINEST))
                logger.finest(toString() + " Finding key store interface");

            if (isFips) {
                ks = KeyStore.getInstance(trustStoreType, fipsProvider);
            }
            else {
                ks = KeyStore.getInstance(trustStoreType);
            }

            ksProvider = ks.getProvider();

            // Next, load up the trust store file from the specified location.
            // Note: This function returns a null InputStream if the trust store cannot
            // be loaded. This is by design. See the method comment and documentation
            // for KeyStore.load for details.
            InputStream is = loadTrustStore(trustStoreFileName);

            // Finally, load the KeyStore with the trust material (if any) from the
            // InputStream and close the stream.
            if (logger.isLoggable(Level.FINEST))
                logger.finest(toString() + " Loading key store");

            try {
                ks.load(is, (null == trustStorePassword) ? null : trustStorePassword.toCharArray());
            }
            finally {
                // We are done with the trustStorePassword (if set). Clear it for better security.
                con.activeConnectionProperties.remove(SQLServerDriverStringProperty.TRUST_STORE_PASSWORD.toString());

                // We are also done with the trust store input stream.
                if (null != is) {
                    try {
                        is.close();
                    }
                    catch (IOException e) {
                        if (logger.isLoggable(Level.FINE))
                            logger.fine(toString() + " Ignoring error closing trust material InputStream...");
                    }
                }
            }
        }

        // Either we now have a KeyStore populated with trust material or we are using the
        // default source of trust material (cacerts). Either way, we are now ready to
        // use a TrustManagerFactory to create a TrustManager that uses the trust material
        // to validate the server certificate.

        // Next step is to get a TrustManagerFactory that can produce TrustManagers
        // that understands X.509 certificates.
        TrustManagerFactory tmf = null;

        if (logger.isLoggable(Level.FINEST))
            logger.finest(toString() + " Locating X.509 trust manager factory");

        tmfDefaultAlgorithm = TrustManagerFactory.getDefaultAlgorithm();
        tmf = TrustManagerFactory.getInstance(tmfDefaultAlgorithm);
        tmfProvider = tmf.getProvider();

        // Tell the TrustManagerFactory to give us TrustManagers that we can use to
        // validate the server certificate using the trust material in the KeyStore.
if (logger.isLoggable(Level.FINEST))
            logger.finest(toString() + " Getting trust manager");

        tmf.init(ks);
        tm = tmf.getTrustManagers();

        // if the host name in cert provided use it or use the host name Only if it is not FIPS
        if (!isFips) {
            if (null != hostNameInCertificate) {
                tm = new TrustManager[] {new HostNameOverrideX509TrustManager(this, (X509TrustManager) tm[0], hostNameInCertificate)};
            }
            else {
                tm = new TrustManager[] {new HostNameOverrideX509TrustManager(this, (X509TrustManager) tm[0], host)};
            }
        }
    } // end if (!con.trustServerCertificate())

    // Now, with a real or fake TrustManager in hand, get a context for creating a
    // SSL sockets through a SSL socket factory. We require at least TLS support.
    SSLContext sslContext = null;

    if (logger.isLoggable(Level.FINEST))
        logger.finest(toString() + " Getting TLS or better SSL context");

    sslContext = SSLContext.getInstance("TLS");
    sslContextProvider = sslContext.getProvider();

    if (logger.isLoggable(Level.FINEST))
        logger.finest(toString() + " Initializing SSL context");

    sslContext.init(null, tm, null);

    // Got the SSL context. Now create an SSL socket over our own proxy socket
    // which we can toggle between TDS-encapsulated and raw communications.
    // Initially, the proxy is set to encapsulate the SSL handshake in TDS packets.
    proxySocket = new ProxySocket(this);

    if (logger.isLoggable(Level.FINEST))
        logger.finest(toString() + " Creating SSL socket");

    sslSocket = (SSLSocket) sslContext.getSocketFactory().createSocket(proxySocket, host, port, false); // don't close proxy when SSL socket
                                                                                                        // is closed

    // At long last, start the SSL handshake ...
    if (logger.isLoggable(Level.FINER))
        logger.finer(toString() + " Starting SSL handshake");

    // TLS 1.2 intermittent exception happens here.
    // handshakeState lets the catch block below distinguish that failure mode.
    handshakeState = SSLHandhsakeState.SSL_HANDHSAKE_STARTED;
    sslSocket.startHandshake();
    handshakeState = SSLHandhsakeState.SSL_HANDHSAKE_COMPLETE;

    // After SSL handshake is complete, rewire proxy socket to use raw TCP/IP streams ...
    if (logger.isLoggable(Level.FINEST))
        logger.finest(toString() + " Rewiring proxy streams after handshake");

    proxySocket.setStreams(inputStream, outputStream);

    // ... and rewire TDSChannel to use SSL streams.
    if (logger.isLoggable(Level.FINEST))
        logger.finest(toString() + " Getting SSL InputStream");

    inputStream = sslSocket.getInputStream();

    if (logger.isLoggable(Level.FINEST))
        logger.finest(toString() + " Getting SSL OutputStream");

    outputStream = sslSocket.getOutputStream();

    // SSL is now enabled; switch over the channel socket
    channelSocket = sslSocket;

    if (logger.isLoggable(Level.FINER))
        logger.finer(toString() + " SSL enabled");
}
catch (Exception e) {
    // Log the original exception and its source at FINER level
    if (logger.isLoggable(Level.FINER))
        logger.log(Level.FINER, e.getMessage(), e);

    // If enabling SSL fails, the following information may help diagnose the problem.
    // Do not use Level INFO or above which is sent to standard output/error streams.
    // This is because due to an intermittent TLS 1.2 connection issue, we will be retrying the connection and
    // do not want to print this message in console.
    if (logger.isLoggable(Level.FINER))
        logger.log(Level.FINER,
                "java.security path: " + JAVA_SECURITY + "\n" + "Security providers: " + Arrays.asList(Security.getProviders()) + "\n"
                        + ((null != sslContextProvider) ? ("SSLContext provider info: " + sslContextProvider.getInfo() + "\n"
                                + "SSLContext provider services:\n" + sslContextProvider.getServices() + "\n") : "")
                        + ((null != tmfProvider) ? ("TrustManagerFactory provider info: " + tmfProvider.getInfo() + "\n") : "")
                        + ((null != tmfDefaultAlgorithm) ? ("TrustManagerFactory default algorithm: " + tmfDefaultAlgorithm + "\n") : "")
                        + ((null != ksProvider) ? ("KeyStore provider info: " + ksProvider.getInfo() + "\n") : "")
                        + "java.ext.dirs: " + System.getProperty("java.ext.dirs"));

    MessageFormat form = new MessageFormat(SQLServerException.getErrString("R_sslFailed"));
    Object[] msgArgs = {e.getMessage()};

    // It is important to get the localized message here, otherwise error messages won't match for different locales.
    String errMsg = e.getLocalizedMessage();

    // The error message may have a connection id appended to it. Extract the message only for comparison.
    // This client connection id is appended in method checkAndAppendClientConnId().
    if (errMsg.contains(SQLServerException.LOG_CLIENT_CONNECTION_ID_PREFIX)) {
        errMsg = errMsg.substring(0, errMsg.indexOf(SQLServerException.LOG_CLIENT_CONNECTION_ID_PREFIX));
    }

    // Isolate the TLS1.2 intermittent connection error: an IOException thrown while the
    // handshake was in flight with the "truncated server response" message is retried by
    // the caller under a distinct error code.
    if (e instanceof IOException && (SSLHandhsakeState.SSL_HANDHSAKE_STARTED == handshakeState)
            && (errMsg.equals(SQLServerException.getErrString("R_truncatedServerResponse")))) {
        con.terminate(SQLServerException.DRIVER_ERROR_INTERMITTENT_TLS_FAILED, form.format(msgArgs), e);
    }
    else {
        con.terminate(SQLServerException.DRIVER_ERROR_SSL_FAILED, form.format(msgArgs), e);
    }
}
}

/**
 * Validate FIPS if fips set as true
 *
 * Valid FIPS settings:
 * <LI>Encrypt should be true
 * <LI>trustServerCertificate should be false
 * <LI>if certificate is not installed FIPSProvider & TrustStoreType should be present.
*
 * @param fipsProvider
 *            FIPS Provider
 * @param trustStoreType
 *            trust store type (e.g. PKCS12); must be present when a trust store file is given
 * @param trustStoreFileName
 *            trust store file name, if a certificate is installed
 * @throws SQLServerException
 *             when the combination of settings is not valid for FIPS mode
 * @since 6.1.4
 */
private void validateFips(final String fipsProvider, final String trustStoreType, final String trustStoreFileName) throws SQLServerException {
    boolean isValid = false;
    boolean isEncryptOn;
    boolean isValidTrustStoreType;
    boolean isValidTrustStore;
    boolean isTrustServerCertificate;
    boolean isValidFipsProvider;

    String strError = SQLServerException.getErrString("R_invalidFipsConfig");

    isEncryptOn = (TDS.ENCRYPT_ON == con.getRequestedEncryptionLevel());

    // Here different FIPS provider supports different KeyStore type along with different JVM Implementation.
    isValidFipsProvider = !StringUtils.isEmpty(fipsProvider);
    isValidTrustStoreType = !StringUtils.isEmpty(trustStoreType);
    isValidTrustStore = !StringUtils.isEmpty(trustStoreFileName);
    isTrustServerCertificate = con.trustServerCertificate();

    // FIPS requires full encryption with real certificate validation.
    if (isEncryptOn && !isTrustServerCertificate) {
        if (logger.isLoggable(Level.FINER))
            logger.finer(toString() + " Found parameters are encrypt is true & trustServerCertificate false");

        isValid = true;

        if (isValidTrustStore) {
            // In case of valid trust store we need to check fipsProvider and TrustStoreType.
            if (!isValidFipsProvider || !isValidTrustStoreType) {
                isValid = false;
                strError = SQLServerException.getErrString("R_invalidFipsProviderConfig");

                if (logger.isLoggable(Level.FINER))
                    logger.finer(toString() + " FIPS provider & TrustStoreType should pass with TrustStore.");
            }

            if (logger.isLoggable(Level.FINER))
                logger.finer(toString() + " Found FIPS parameters seems to be valid.");
        }
    }
    else {
        strError = SQLServerException.getErrString("R_invalidFipsEncryptConfig");
    }

    if (!isValid) {
        throw new SQLServerException(strError, null, 0, null);
    }
}

// Locations probed for the default trust store, mirroring the JSSE default lookup order.
private final static String SEPARATOR = System.getProperty("file.separator");
private final static String JAVA_HOME = System.getProperty("java.home");
private final static String JAVA_SECURITY = JAVA_HOME + SEPARATOR + "lib" + SEPARATOR + "security";
private final static String JSSECACERTS = JAVA_SECURITY + SEPARATOR + "jssecacerts";
private final static String CACERTS = JAVA_SECURITY + SEPARATOR + "cacerts";

/**
 * Loads the contents of a trust store into an InputStream.
 *
 * When a location to a trust store is specified, this method attempts to load that store. Otherwise, it looks for and attempts to load the
 * default trust store using essentially the same logic (outlined in the JSSE Reference Guide) as the default X.509 TrustManagerFactory.
 *
 * @return an InputStream containing the contents of the loaded trust store, or null if the trust store cannot be loaded.
 *
 * Note: It is by design that this function returns null when the trust store cannot be loaded rather than throwing an exception. The
 * reason is that KeyStore.load, which uses the returned InputStream, interprets a null InputStream to mean that there are no trusted
 * certificates, which mirrors the behavior of the default (no trust store, no password specified) path.
*/ final InputStream loadTrustStore(String trustStoreFileName) { FileInputStream is = null; // First case: Trust store filename was specified if (null != trustStoreFileName) { try { if (logger.isLoggable(Level.FINEST)) logger.finest(toString() + " Opening specified trust store: " + trustStoreFileName); is = new FileInputStream(trustStoreFileName); } catch (FileNotFoundException e) { if (logger.isLoggable(Level.FINE)) logger.fine(toString() + " Trust store not found: " + e.getMessage()); // If the trustStoreFileName connection property is set, but the file is not found, // then treat it as if the file was empty so that the TrustManager reports // that no certificate is found. } } // Second case: Trust store filename derived from javax.net.ssl.trustStore system property else if (null != (trustStoreFileName = System.getProperty("javax.net.ssl.trustStore"))) { try { if (logger.isLoggable(Level.FINEST)) logger.finest(toString() + " Opening default trust store (from javax.net.ssl.trustStore): " + trustStoreFileName); is = new FileInputStream(trustStoreFileName); } catch (FileNotFoundException e) { if (logger.isLoggable(Level.FINE)) logger.fine(toString() + " Trust store not found: " + e.getMessage()); // If the javax.net.ssl.trustStore property is set, but the file is not found, // then treat it as if the file was empty so that the TrustManager reports // that no certificate is found. } } // Third case: No trust store specified and no system property set. Use jssecerts/cacerts. else { try { if (logger.isLoggable(Level.FINEST)) logger.finest(toString() + " Opening default trust store: " + JSSECACERTS); is = new FileInputStream(JSSECACERTS); } catch (FileNotFoundException e) { if (logger.isLoggable(Level.FINE)) logger.fine(toString() + " Trust store not found: " + e.getMessage()); } // No jssecerts. Try again with cacerts... 
if (null == is) {
    try {
        if (logger.isLoggable(Level.FINEST))
            logger.finest(toString() + " Opening default trust store: " + CACERTS);

        is = new FileInputStream(CACERTS);
    }
    catch (FileNotFoundException e) {
        if (logger.isLoggable(Level.FINE))
            logger.fine(toString() + " Trust store not found: " + e.getMessage());

        // No jssecerts or cacerts. Treat it as if the trust store is empty so that
        // the TrustManager reports that no certificate is found.
    }
}
}

return is;
}

// Reads up to length bytes from the TDS socket into data[offset..].
// On I/O failure the connection is terminated (terminate throws), mapping socket
// timeouts to ERROR_SOCKET_TIMEOUT and everything else to DRIVER_ERROR_IO_FAILED.
final int read(byte[] data, int offset, int length) throws SQLServerException {
    try {
        return inputStream.read(data, offset, length);
    }
    catch (IOException e) {
        if (logger.isLoggable(Level.FINE))
            logger.fine(toString() + " read failed:" + e.getMessage());

        if (e instanceof SocketTimeoutException) {
            con.terminate(SQLServerException.ERROR_SOCKET_TIMEOUT, e.getMessage(), e);
        }
        else {
            con.terminate(SQLServerException.DRIVER_ERROR_IO_FAILED, e.getMessage(), e);
        }

        return 0; // Keep the compiler happy.
    }
}

// Writes length bytes from data[offset..] to the TDS socket; terminates the
// connection on I/O failure.
final void write(byte[] data, int offset, int length) throws SQLServerException {
    try {
        outputStream.write(data, offset, length);
    }
    catch (IOException e) {
        if (logger.isLoggable(Level.FINER))
            logger.finer(toString() + " write failed:" + e.getMessage());

        con.terminate(SQLServerException.DRIVER_ERROR_IO_FAILED, e.getMessage(), e);
    }
}

// Flushes buffered output to the socket; terminates the connection on I/O failure.
final void flush() throws SQLServerException {
    try {
        outputStream.flush();
    }
    catch (IOException e) {
        if (logger.isLoggable(Level.FINER))
            logger.finer(toString() + " flush failed:" + e.getMessage());

        con.terminate(SQLServerException.DRIVER_ERROR_IO_FAILED, e.getMessage(), e);
    }
}

// Closes the channel: tears down SSL first (if active), then the streams and the
// TCP socket. Errors while closing are logged and ignored — close is best-effort.
final void close() {
    if (null != sslSocket)
        disableSSL();

    if (null != inputStream) {
        if (logger.isLoggable(Level.FINEST))
            logger.finest(this.toString() + ": Closing inputStream...");

        try {
            inputStream.close();
        }
        catch (IOException e) {
            if (logger.isLoggable(Level.FINE))
                logger.log(Level.FINE, this.toString() + ": Ignored error closing inputStream", e);
        }
    }

    if (null != outputStream) {
        if
(logger.isLoggable(Level.FINEST)) logger.finest(this.toString() + ": Closing outputStream..."); try { outputStream.close(); } catch (IOException e) { if (logger.isLoggable(Level.FINE)) logger.log(Level.FINE, this.toString() + ": Ignored error closing outputStream", e); } } if (null != tcpSocket) { if (logger.isLoggable(Level.FINER)) logger.finer(this.toString() + ": Closing TCP socket..."); try { tcpSocket.close(); } catch (IOException e) { if (logger.isLoggable(Level.FINE)) logger.log(Level.FINE, this.toString() + ": Ignored error closing socket", e); } } } /** * Logs TDS packet data to the com.microsoft.sqlserver.jdbc.TDS.DATA logger * * @param data * the buffer containing the TDS packet payload data to log * @param nStartOffset * offset into the above buffer from where to start logging * @param nLength * length (in bytes) of payload * @param messageDetail * other loggable details about the payload */ void logPacket(byte data[], int nStartOffset, int nLength, String messageDetail) { assert 0 <= nLength && nLength <= data.length; assert 0 <= nStartOffset && nStartOffset <= data.length; final char hexChars[] = {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F'}; final char printableChars[] = {'.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', ' ', '!', '\"', ' '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', ':', ';', '<', '=', '>', '?', '@', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', '[', '\\', ']', '^', '_', '`', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', '{', '|', '}', '~', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', 
'.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.'}; // Log message body lines have this form: // "XX XX XX XX XX XX XX XX XX XX XX XX XX XX XX XX ................" // 012345678911111111112222222222333333333344444444445555555555666666 // 01234567890123456789012345678901234567890123456789012345 final char lineTemplate[] = {' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.'}; char logLine[] = new char[lineTemplate.length]; System.arraycopy(lineTemplate, 0, logLine, 0, lineTemplate.length); // Logging builds up a string buffer for the entire log trace // before writing it out. So use an initial size large enough // that the buffer doesn't have to resize itself. StringBuilder logMsg = new StringBuilder(messageDetail.length() + // Message detail 4 * nLength + // 2-digit hex + space + ASCII, per byte 4 * (1 + nLength / 16) + // 2 extra spaces + CR/LF, per line (16 bytes per line) 80); // Extra fluff: IP:Port, Connection #, SPID, ... // Format the headline like so: // /157.55.121.182:2983 Connection 1, SPID 53, Message info here ... // Note: the log formatter itself timestamps what we write so we don't have // to do it again here. 
// Headline: local address/port, SPID, and the caller-supplied detail.
logMsg.append(tcpSocket.getLocalAddress().toString() + ":" + tcpSocket.getLocalPort() + " SPID:" + spid + " " + messageDetail + "\r\n");

// Fill in the body of the log message, line by line, 16 bytes per line.
int nBytesLogged = 0;
int nBytesThisLine;
while (true) {
    // Fill up the line with as many bytes as we can (up to 16 bytes)
    for (nBytesThisLine = 0; nBytesThisLine < 16 && nBytesLogged < nLength; nBytesThisLine++, nBytesLogged++) {
        // (b + 256) % 256 maps the signed byte to its unsigned 0..255 value.
        int nUnsignedByteVal = (data[nStartOffset + nBytesLogged] + 256) % 256;
        logLine[3 * nBytesThisLine] = hexChars[nUnsignedByteVal / 16];
        logLine[3 * nBytesThisLine + 1] = hexChars[nUnsignedByteVal % 16];
        logLine[50 + nBytesThisLine] = printableChars[nUnsignedByteVal];
    }

    // Pad out the remainder with whitespace
    for (int nBytesJustified = nBytesThisLine; nBytesJustified < 16; nBytesJustified++) {
        logLine[3 * nBytesJustified] = ' ';
        logLine[3 * nBytesJustified + 1] = ' ';
    }

    logMsg.append(logLine, 0, 50 + nBytesThisLine);
    if (nBytesLogged == nLength)
        break;

    logMsg.append("\r\n");
}

if (packetLogger.isLoggable(Level.FINEST)) {
    packetLogger.finest(logMsg.toString());
}
}

/**
 * Get the current socket SO_TIMEOUT value.
 *
 * @return the current socket timeout value
 * @throws IOException thrown if the socket timeout cannot be read
 */
final int getNetworkTimeout() throws IOException {
    return tcpSocket.getSoTimeout();
}

/**
 * Set the socket SO_TIMEOUT value.
 *
 * @param timeout the socket timeout in milliseconds
 * @throws IOException thrown if the socket timeout cannot be set
 */
final void setNetworkTimeout(int timeout) throws IOException {
    tcpSocket.setSoTimeout(timeout);
}
}

/**
 * SocketFinder is used to find a server socket to which a connection can be made. This class abstracts the logic of finding a socket from TDSChannel
 * class.
 *
 * In the case when useParallel is set to true, this is achieved by trying to make parallel connections to multiple IP addresses.
 This class is
 * responsible for spawning multiple threads and keeping track of the search result and the connected socket or exception to be thrown.
 *
 * In the case where multiSubnetFailover is false, we try our old logic of trying to connect to the first ip address
 *
 * Typical usage of this class is SocketFinder sf = new SocketFinder(traceId, conn); Socket = sf.getSocket(hostName, port, timeout);
 */
final class SocketFinder {
    /**
     * Indicates the result of a search
     */
    enum Result {
        UNKNOWN,// search is still in progress
        SUCCESS,// found a socket
        FAILURE// failed in finding a socket
    }

    // Thread pool - the values in the constructor are chosen based on the
    // explanation given in design_connection_director_multisubnet.doc
    private static final ThreadPoolExecutor threadPoolExecutor = new ThreadPoolExecutor(0, Integer.MAX_VALUE, 5, TimeUnit.SECONDS,
            new SynchronousQueue<Runnable>());

    // When parallel connections are to be used, use minimum timeout slice of 1500 milliseconds.
    private static final int minTimeoutForParallelConnections = 1500;

    // lock used for synchronization while updating
    // data within a socketFinder object
    private final Object socketFinderlock = new Object();

    // lock on which the parent thread would wait
    // after spawning threads.
    private final Object parentThreadLock = new Object();

    // indicates whether the socketFinder has succeeded or failed
    // in finding a socket or is still trying to find a socket
    private volatile Result result = Result.UNKNOWN;

    // total no of socket connector threads
    // spawned by a socketFinder object
    private int noOfSpawnedThreads = 0;

    // no of threads that finished their socket connection
    // attempts and notified socketFinder about their result
    private volatile int noOfThreadsThatNotified = 0;

    // If a valid connected socket is found, this value would be non-null,
    // else this would be null
    private volatile Socket selectedSocket = null;

    // This would be one of the exceptions returned by the
    // socketConnector threads
    private volatile IOException selectedException = null;

    // Logging variables
    private static final Logger logger = Logger.getLogger("com.microsoft.sqlserver.jdbc.internals.SocketFinder");
    private final String traceID;

    // maximum number of IP Addresses supported
    private static final int ipAddressLimit = 64;

    // necessary for raising exceptions so that the connection pool can be notified
    private final SQLServerConnection conn;

    /**
     * Constructs a new SocketFinder object with appropriate traceId
     *
     * @param callerTraceID
     *            traceID of the caller
     * @param sqlServerConnection
     *            the SQLServer connection
     */
    SocketFinder(String callerTraceID, SQLServerConnection sqlServerConnection) {
        traceID = "SocketFinder(" + callerTraceID + ")";
        conn = sqlServerConnection;
    }

    /**
     * Used to find a socket to which a connection can be made
     *
     * @param hostName
     * @param portNumber
     * @param timeoutInMilliSeconds
     * @return connected socket
     * @throws IOException
     */
    Socket findSocket(String hostName,
            int portNumber,
            int timeoutInMilliSeconds,
            boolean useParallel,
            boolean useTnir,
            boolean isTnirFirstAttempt,
            int timeoutInMilliSecondsForFullTimeout) throws SQLServerException {
        assert timeoutInMilliSeconds != 0 : "The driver does not allow a time out of 0";

        try {
            InetAddress[] inetAddrs =
null; // inetAddrs is only used if useParallel is true or TNIR is true. Skip resolving address if that's not the case.
if (useParallel || useTnir) {
    // Ignore TNIR if host resolves to more than 64 IPs. Make sure we are using original timeout for this.
    inetAddrs = InetAddress.getAllByName(hostName);

    if ((useTnir) && (inetAddrs.length > ipAddressLimit)) {
        useTnir = false;
        timeoutInMilliSeconds = timeoutInMilliSecondsForFullTimeout;
    }
}

if (!useParallel) {
    // MSF is false. TNIR could be true or false. DBMirroring could be true or false.
    // For TNIR first attempt, we should do existing behavior including how host name is resolved.
    if (useTnir && isTnirFirstAttempt) {
        return getDefaultSocket(hostName, portNumber, SQLServerConnection.TnirFirstAttemptTimeoutMs);
    }
    else if (!useTnir) {
        return getDefaultSocket(hostName, portNumber, timeoutInMilliSeconds);
    }
}

// Code reaches here only if MSF = true or (TNIR = true and not TNIR first attempt)
if (logger.isLoggable(Level.FINER)) {
    StringBuilder loggingString = new StringBuilder(this.toString());
    loggingString.append(" Total no of InetAddresses: ");
    loggingString.append(inetAddrs.length);
    loggingString.append(". They are: ");

    for (InetAddress inetAddr : inetAddrs) {
        loggingString.append(inetAddr.toString() + ";");
    }

    logger.finer(loggingString.toString());
}

if (inetAddrs.length > ipAddressLimit) {
    MessageFormat form = new MessageFormat(SQLServerException.getErrString("R_ipAddressLimitWithMultiSubnetFailover"));
    Object[] msgArgs = {Integer.toString(ipAddressLimit)};
    String errorStr = form.format(msgArgs);

    // we do not want any retry to happen here. So, terminate the connection
    // as the config is unsupported.
    conn.terminate(SQLServerException.DRIVER_ERROR_UNSUPPORTED_CONFIG, errorStr);
}

if (Util.isIBM()) {
    // IBM JVMs take the NIO path for all address families.
    timeoutInMilliSeconds = Math.max(timeoutInMilliSeconds, minTimeoutForParallelConnections);
    if (logger.isLoggable(Level.FINER)) {
        logger.finer(this.toString() + "Using Java NIO with timeout:" + timeoutInMilliSeconds);
    }
    findSocketUsingJavaNIO(inetAddrs, portNumber, timeoutInMilliSeconds);
}
else {
    // Non-IBM JVMs: NIO for IPv4 addresses, thread-per-address for IPv6.
    LinkedList<Inet4Address> inet4Addrs = new LinkedList<Inet4Address>();
    LinkedList<Inet6Address> inet6Addrs = new LinkedList<Inet6Address>();

    for (InetAddress inetAddr : inetAddrs) {
        if (inetAddr instanceof Inet4Address) {
            inet4Addrs.add((Inet4Address) inetAddr);
        }
        else {
            assert inetAddr instanceof Inet6Address : "Unexpected IP address " + inetAddr.toString();
            inet6Addrs.add((Inet6Address) inetAddr);
        }
    }

    // use half timeout only if both IPv4 and IPv6 addresses are present
    int timeoutForEachIPAddressType;
    if ((!inet4Addrs.isEmpty()) && (!inet6Addrs.isEmpty())) {
        timeoutForEachIPAddressType = Math.max(timeoutInMilliSeconds / 2, minTimeoutForParallelConnections);
    }
    else
        timeoutForEachIPAddressType = Math.max(timeoutInMilliSeconds, minTimeoutForParallelConnections);

    if (!inet4Addrs.isEmpty()) {
        if (logger.isLoggable(Level.FINER)) {
            logger.finer(this.toString() + "Using Java NIO with timeout:" + timeoutForEachIPAddressType);
        }

        // inet4Addrs.toArray(new InetAddress[0]) is java style of converting a linked list to an array of reqd size
        findSocketUsingJavaNIO(inet4Addrs.toArray(new InetAddress[0]), portNumber, timeoutForEachIPAddressType);
    }

    if (!result.equals(Result.SUCCESS)) {
        // try threading logic
        if (!inet6Addrs.isEmpty()) {
            // do not start any threads if there is only one ipv6 address
            if (inet6Addrs.size() == 1) {
                return getConnectedSocket(inet6Addrs.get(0), portNumber, timeoutForEachIPAddressType);
            }

            if (logger.isLoggable(Level.FINER)) {
                logger.finer(this.toString() + "Using Threading with timeout:" + timeoutForEachIPAddressType);
            }

            findSocketUsingThreading(inet6Addrs, portNumber, timeoutForEachIPAddressType);
        }
    }
}

// If the thread continued execution due to timeout, the result may not be known.
// In that case, update the result to failure. Note that this case is possible
// for both IPv4 and IPv6.
// Using double-checked locking for performance reasons.
if (result.equals(Result.UNKNOWN)) {
    synchronized (socketFinderlock) {
        if (result.equals(Result.UNKNOWN)) {
            result = Result.FAILURE;
            if (logger.isLoggable(Level.FINER)) {
                logger.finer(this.toString() + " The parent thread updated the result to failure");
            }
        }
    }
}

// After we reach this point, there is no need for synchronization any more.
// Because, the result would be known(success/failure).
// And no threads would update SocketFinder
// as their function calls would now be no-ops.
if (result.equals(Result.FAILURE)) {
    if (selectedException == null) {
        if (logger.isLoggable(Level.FINER)) {
            logger.finer(this.toString() + " There is no selectedException. The wait calls timed out before any connect call returned or timed out.");
        }
        String message = SQLServerException.getErrString("R_connectionTimedOut");
        selectedException = new IOException(message);
    }

    throw selectedException;
}
}
catch (InterruptedException ex) {
    // re-interrupt the current thread, in order to restore the thread's interrupt status.
    Thread.currentThread().interrupt();

    close(selectedSocket);
    SQLServerException.ConvertConnectExceptionToSQLServerException(hostName, portNumber, conn, ex);
}
catch (IOException ex) {
    close(selectedSocket);
    // The code below has been moved from connectHelper.
    // If we do not move it, the functions open(caller of findSocket)
    // and findSocket will have to
    // declare both IOException and SQLServerException in the throws clause
    // as we throw custom SQLServerExceptions(eg:IPAddressLimit, wrapping other exceptions
    // like interruptedException) in findSocket.
    // That would be a bit awkward, because connecthelper(the caller of open)
    // just wraps IOException into SQLServerException and throws SQLServerException.
    // Instead, it would be good to wrap all exceptions at one place - Right here, their origin.
    SQLServerException.ConvertConnectExceptionToSQLServerException(hostName, portNumber, conn, ex);
}

assert result.equals(Result.SUCCESS);
assert selectedSocket != null : "Bug in code. Selected Socket cannot be null here.";

return selectedSocket;
}

/**
 * This function uses java NIO to connect to all the addresses in inetAddrs with in a specified timeout. If it succeeds in connecting, it closes
 * all the other open sockets and updates the result to success.
 *
 * @param inetAddrs
 *            the array of inetAddress to which connection should be made
 * @param portNumber
 *            the port number at which connection should be made
 * @param timeoutInMilliSeconds
 * @throws IOException
 */
private void findSocketUsingJavaNIO(InetAddress[] inetAddrs, int portNumber, int timeoutInMilliSeconds) throws IOException {
    // The driver does not allow a time out of zero.
    // Also, the unit of time the user can specify in the driver is seconds.
    // So, even if the user specifies 1 second(least value), the least possible
    // value that can come here as timeoutInMilliSeconds is 500 milliseconds.
    assert timeoutInMilliSeconds != 0 : "The timeout cannot be zero";
    assert inetAddrs.length != 0 : "Number of inetAddresses should not be zero in this function";

    Selector selector = null;
    LinkedList<SocketChannel> socketChannels = new LinkedList<SocketChannel>();
    SocketChannel selectedChannel = null;

    try {
        selector = Selector.open();

        // Kick off a non-blocking connect on every candidate address and register
        // each channel with the selector for the OP_CONNECT event.
        for (int i = 0; i < inetAddrs.length; i++) {
            SocketChannel sChannel = SocketChannel.open();
            socketChannels.add(sChannel);

            // make the channel non-blocking
            sChannel.configureBlocking(false);

            // register the channel for connect event
            int ops = SelectionKey.OP_CONNECT;
            SelectionKey key = sChannel.register(selector, ops);

            sChannel.connect(new InetSocketAddress(inetAddrs[i], portNumber));

            if (logger.isLoggable(Level.FINER))
                logger.finer(this.toString() + " initiated connection to address: " + inetAddrs[i] + ", portNumber: " + portNumber);
        }

        long timerNow = System.currentTimeMillis();
        long timerExpire = timerNow + timeoutInMilliSeconds;

        // Denotes the no of channels that still need to processed
        int noOfOutstandingChannels = inetAddrs.length;

        while (true) {
            long timeRemaining = timerExpire - timerNow;
            // if the timeout expired or a channel is selected or there are no more channels left to processes
            if ((timeRemaining <= 0) || (selectedChannel != null) || (noOfOutstandingChannels <= 0))
                break;

            // denotes the no of channels that are ready to be processed. i.e. they are either connected
            // or encountered an exception while trying to connect
            int readyChannels = selector.select(timeRemaining);

            if (logger.isLoggable(Level.FINER))
                logger.finer(this.toString() + " no of channels ready: " + readyChannels);

            // There are no real time guarantees on the time out of the select API used above.
            // This check is necessary
            // a) to guard against cases where the select returns faster than expected.
// b) for cases where no channels could connect with in the time out if (readyChannels != 0) { Set<SelectionKey> selectedKeys = selector.selectedKeys(); Iterator<SelectionKey> keyIterator = selectedKeys.iterator(); while (keyIterator.hasNext()) { SelectionKey key = keyIterator.next(); SocketChannel ch = (SocketChannel) key.channel(); if (logger.isLoggable(Level.FINER)) logger.finer(this.toString() + " processing the channel :" + ch);// this traces the IP by default boolean connected = false; try { connected = ch.finishConnect(); // ch.finishConnect should either return true or throw an exception // as we have subscribed for OP_CONNECT. assert connected == true : "finishConnect on channel:" + ch + " cannot be false"; selectedChannel = ch; if (logger.isLoggable(Level.FINER)) logger.finer(this.toString() + " selected the channel :" + selectedChannel); break; } catch (IOException ex) { if (logger.isLoggable(Level.FINER)) logger.finer(this.toString() + " the exception: " + ex.getClass() + " with message: " + ex.getMessage() + " occured while processing the channel: " + ch); updateSelectedException(ex, this.toString()); // close the channel pro-actively so that we do not // hang on to network resources ch.close(); } // unregister the key and remove from the selector's selectedKeys key.cancel(); keyIterator.remove(); noOfOutstandingChannels } } timerNow = System.currentTimeMillis(); } } catch (IOException ex) { // in case of an exception, close the selected channel. // All other channels will be closed in the finally block, // as they need to be closed irrespective of a success/failure close(selectedChannel); throw ex; } finally { // close the selector // As per java docs, on selector.close(), any uncancelled keys still // associated with this // selector are invalidated, their channels are deregistered, and any other // resources associated with this selector are released. 
    // So, its not necessary to cancel each key again
    close(selector);

    // Close all channels except the selected one.
    // As we close channels pro-actively in the try block,
    // its possible that we close a channel twice.
    // Closing a channel second time is a no-op.
    // This code is should be in the finally block to guard against cases where
    // we pre-maturely exit try block due to an exception in selector or other places.
    for (SocketChannel s : socketChannels) {
        if (s != selectedChannel) {
            close(s);
        }
    }
}

// if a channel was selected, make the necessary updates
if (selectedChannel != null) {
    // the selectedChannel has the address that is connected successfully
    // convert it to a java.net.Socket object with the address
    SocketAddress iadd = selectedChannel.getRemoteAddress();
    selectedSocket = new Socket();
    selectedSocket.connect(iadd);

    result = Result.SUCCESS;

    // close the channel since it is not used anymore
    selectedChannel.close();
}
}

// This method contains the old logic of connecting to
// a socket of one of the IPs corresponding to a given host name.
// In the old code below, the logic around 0 timeout has been removed as
// 0 timeout is not allowed. The code has been re-factored so that the logic
// is common for hostName or InetAddress.
private Socket getDefaultSocket(String hostName, int portNumber, int timeoutInMilliSeconds) throws IOException {
    // Open the socket, with or without a timeout, throwing an UnknownHostException
    // if there is a failure to resolve the host name to an InetSocketAddress.
    //
    // Note that Socket(host, port) throws an UnknownHostException if the host name
    // cannot be resolved, but that InetSocketAddress(host, port) does not - it sets
    // the returned InetSocketAddress as unresolved.
    InetSocketAddress addr = new InetSocketAddress(hostName, portNumber);
    return getConnectedSocket(addr, timeoutInMilliSeconds);
}

// Convenience overload: connect to an already-resolved InetAddress.
private Socket getConnectedSocket(InetAddress inetAddr, int portNumber, int timeoutInMilliSeconds) throws IOException {
    InetSocketAddress addr = new InetSocketAddress(inetAddr, portNumber);
    return getConnectedSocket(addr, timeoutInMilliSeconds);
}

// Performs the actual blocking connect with the given timeout; also records the
// connected socket in selectedSocket.
private Socket getConnectedSocket(InetSocketAddress addr, int timeoutInMilliSeconds) throws IOException {
    assert timeoutInMilliSeconds != 0 : "timeout cannot be zero";
    if (addr.isUnresolved())
        throw new java.net.UnknownHostException();
    selectedSocket = new Socket();
    selectedSocket.connect(addr, timeoutInMilliSeconds);
    return selectedSocket;
}

// Spawns one SocketConnector thread per IPv6 address and waits (bounded by the
// timeout) for one of them to report success via updateResult.
private void findSocketUsingThreading(LinkedList<Inet6Address> inetAddrs, int portNumber, int timeoutInMilliSeconds)
        throws IOException, InterruptedException {
    assert timeoutInMilliSeconds != 0 : "The timeout cannot be zero";

    assert inetAddrs.isEmpty() == false : "Number of inetAddresses should not be zero in this function";

    LinkedList<Socket> sockets = new LinkedList<Socket>();
    LinkedList<SocketConnector> socketConnectors = new LinkedList<SocketConnector>();

    try {
        // create a socket, inetSocketAddress and a corresponding socketConnector per inetAddress
        noOfSpawnedThreads = inetAddrs.size();
        for (InetAddress inetAddress : inetAddrs) {
            Socket s = new Socket();
            sockets.add(s);

            InetSocketAddress inetSocketAddress = new InetSocketAddress(inetAddress, portNumber);

            SocketConnector socketConnector = new SocketConnector(s, inetSocketAddress, timeoutInMilliSeconds, this);
            socketConnectors.add(socketConnector);
        }

        // acquire parent lock and spawn all threads
        synchronized (parentThreadLock) {
            for (SocketConnector sc : socketConnectors) {
                threadPoolExecutor.execute(sc);
            }

            long timerNow = System.currentTimeMillis();
            long timerExpire = timerNow + timeoutInMilliSeconds;

            // The below loop is to guard against the spurious wake up problem
            while (true) {
                long timeRemaining = timerExpire - timerNow;

                if (logger.isLoggable(Level.FINER)) {
                    logger.finer(this.toString() + " TimeRemaining:" + timeRemaining + "; Result:" + result + "; Max. open thread count: "
                            + threadPoolExecutor.getLargestPoolSize() + "; Current open thread count:" + threadPoolExecutor.getActiveCount());
                }

                // if there is no time left or if the result is determined, break.
                // Note that a dirty read of result is totally fine here.
                // Since this thread holds the parentThreadLock, even if we do a dirty
                // read here, the child thread, after updating the result, would not be
                // able to call notify on the parentThreadLock
                // (and thus finish execution) as it would be waiting on parentThreadLock
                // held by this thread(the parent thread).
                // So, this thread will wait again and then be notified by the childThread.
                // On the other hand, if we try to take socketFinderLock here to avoid
                // dirty read, we would introduce a dead lock due to the
                // reverse order of locking in updateResult method.
                if (timeRemaining <= 0 || (!result.equals(Result.UNKNOWN)))
                    break;

                parentThreadLock.wait(timeRemaining);

                if (logger.isLoggable(Level.FINER)) {
                    logger.finer(this.toString() + " The parent thread wokeup.");
                }

                timerNow = System.currentTimeMillis();
            }
        }
    }
    finally {
        // Close all sockets except the selected one.
        // As we close sockets pro-actively in the child threads,
        // its possible that we close a socket twice.
        // Closing a socket second time is a no-op.
        // If a child thread is waiting on the connect call on a socket s,
        // closing the socket s here ensures that an exception is thrown
        // in the child thread immediately.
        // This mitigates the problem
        // of thread explosion by ensuring that unnecessary threads die
        // quickly without waiting for "min(timeOut, 21)" seconds
        for (Socket s : sockets) {
            if (s != selectedSocket) {
                close(s);
            }
        }
    }
}

/**
 * search result
 */
Result getResult() {
    return result;
}

// Best-effort close of a Selector; errors are logged and ignored.
void close(Selector selector) {
    if (null != selector) {
        if (logger.isLoggable(Level.FINER))
            logger.finer(this.toString() + ": Closing Selector");

        try {
            selector.close();
        }
        catch (IOException e) {
            if (logger.isLoggable(Level.FINE))
                logger.log(Level.FINE, this.toString() + ": Ignored the following error while closing Selector", e);
        }
    }
}

// Best-effort close of a Socket; errors are logged and ignored.
void close(Socket socket) {
    if (null != socket) {
        if (logger.isLoggable(Level.FINER))
            logger.finer(this.toString() + ": Closing TCP socket:" + socket);

        try {
            socket.close();
        }
        catch (IOException e) {
            if (logger.isLoggable(Level.FINE))
                logger.log(Level.FINE, this.toString() + ": Ignored the following error while closing socket", e);
        }
    }
}

// Best-effort close of a SocketChannel; errors are logged and ignored.
void close(SocketChannel socketChannel) {
    if (null != socketChannel) {
        if (logger.isLoggable(Level.FINER))
            logger.finer(this.toString() + ": Closing TCP socket channel:" + socketChannel);

        try {
            socketChannel.close();
        }
        catch (IOException e) {
            if (logger.isLoggable(Level.FINE))
                logger.log(Level.FINE, this.toString() + "Ignored the following error while closing socketChannel", e);
        }
    }
}

/**
 * Used by socketConnector threads to notify the socketFinder of their connection attempt result(a connected socket or exception). It updates the
 * result, socket and exception variables of socketFinder object. This method notifies the parent thread if a socket is found or if all the
 * spawned threads have notified. It also closes a socket if it is not selected for use by socketFinder.
 *
 * @param socket
 *            the SocketConnector's socket
 * @param exception
 *            Exception that occurred in socket connector thread
 * @param threadId
 *            Id of the calling Thread for diagnosis
 */
void updateResult(Socket socket, IOException exception, String threadId) {
    // Double-checked: the unsynchronized read lets late child threads exit as
    // no-ops once the result is already decided.
    if (result.equals(Result.UNKNOWN)) {
        if (logger.isLoggable(Level.FINER)) {
            logger.finer("The following child thread is waiting for socketFinderLock:" + threadId);
        }

        synchronized (socketFinderlock) {
            if (logger.isLoggable(Level.FINER)) {
                logger.finer("The following child thread acquired socketFinderLock:" + threadId);
            }

            if (result.equals(Result.UNKNOWN)) {
                // if the connection was successful and no socket has been
                // selected yet
                if (exception == null && selectedSocket == null) {
                    selectedSocket = socket;
                    result = Result.SUCCESS;
                    if (logger.isLoggable(Level.FINER)) {
                        logger.finer("The socket of the following thread has been chosen:" + threadId);
                    }
                }

                // if an exception occurred
                if (exception != null) {
                    updateSelectedException(exception, threadId);
                }
            }

            noOfThreadsThatNotified++;

            // if all threads notified, but the result is still unknown,
            // update the result to failure
            if ((noOfThreadsThatNotified >= noOfSpawnedThreads) && result.equals(Result.UNKNOWN)) {
                result = Result.FAILURE;
            }

            if (!result.equals(Result.UNKNOWN)) {
                // 1) Note that at any point of time, there is only one
                // thread(parent/child thread) competing for parentThreadLock.
                // 2) The only time where a child thread could be waiting on
                // parentThreadLock is before the wait call in the parentThread
                // 3) After the above happens, the parent thread waits to be
                // notified on parentThreadLock. After being notified,
                // it would be the ONLY thread competing for the lock.
                // for the following reasons
                // a) The parentThreadLock is taken while holding the socketFinderLock.
                // So, all child threads, except one, block on socketFinderLock
                // (not parentThreadLock)
                // b) After parentThreadLock is notified by a child thread, the result
                // would be known(Refer the double-checked locking done at the
                // start of this method). So, all child threads would exit
                // as no-ops and would never compete with parent thread
                // for acquiring parentThreadLock
                // 4) As the parent thread is the only thread that competes for the
                // parentThreadLock, it need not wait to acquire the lock once it wakes
                // up and gets scheduled.
                // This results in better performance as it would close unnecessary
                // sockets and thus help child threads die quickly.
                if (logger.isLoggable(Level.FINER)) {
                    logger.finer("The following child thread is waiting for parentThreadLock:" + threadId);
                }

                synchronized (parentThreadLock) {
                    if (logger.isLoggable(Level.FINER)) {
                        logger.finer("The following child thread acquired parentThreadLock:" + threadId);
                    }

                    parentThreadLock.notify();
                }

                if (logger.isLoggable(Level.FINER)) {
                    logger.finer("The following child thread released parentThreadLock and notified the parent thread:" + threadId);
                }
            }
        }

        if (logger.isLoggable(Level.FINER)) {
            logger.finer("The following child thread released socketFinderLock:" + threadId);
        }
    }
}

/**
 * Updates the selectedException if
 * <p>
 * a) selectedException is null
 * <p>
 * b) ex is a non-socketTimeoutException and selectedException is a socketTimeoutException
 * <p>
 * If there are multiple exceptions, that are not related to socketTimeout the first non-socketTimeout exception is picked. If all exceptions are
 * related to socketTimeout, the first exception is picked. Note: This method is not thread safe. The caller should ensure thread safety.
* * @param ex * the IOException * @param traceId * the traceId of the thread */ public void updateSelectedException(IOException ex, String traceId) { boolean updatedException = false; if (selectedException == null) { selectedException = ex; updatedException = true; } else if ((!(ex instanceof SocketTimeoutException)) && (selectedException instanceof SocketTimeoutException)) { selectedException = ex; updatedException = true; } if (updatedException) { if (logger.isLoggable(Level.FINER)) { logger.finer("The selected exception is updated to the following: ExceptionType:" + ex.getClass() + "; ExceptionMessage:" + ex.getMessage() + "; by the following thread:" + traceId); } } } /** * Used fof tracing * * @return traceID string */ public String toString() { return traceID; } } /** * This is used to connect a socket in a separate thread */ final class SocketConnector implements Runnable { // socket on which connection attempt would be made private final Socket socket; // the socketFinder associated with this connector private final SocketFinder socketFinder; // inetSocketAddress to connect to private final InetSocketAddress inetSocketAddress; // timeout in milliseconds private final int timeoutInMilliseconds; // Logging variables private static final Logger logger = Logger.getLogger("com.microsoft.sqlserver.jdbc.internals.SocketConnector"); private final String traceID; // Id of the thread. used for diagnosis private final String threadID; // a counter used to give unique IDs to each connector thread. // this will have the id of the thread that was last created. 
private static long lastThreadID = 0;

/**
 * Constructs a new SocketConnector object with the associated socket and socketFinder
 */
SocketConnector(Socket socket, InetSocketAddress inetSocketAddress, int timeOutInMilliSeconds, SocketFinder socketFinder) {
    this.socket = socket;
    this.inetSocketAddress = inetSocketAddress;
    this.timeoutInMilliseconds = timeOutInMilliSeconds;
    this.socketFinder = socketFinder;
    this.threadID = Long.toString(nextThreadID());
    this.traceID = "SocketConnector:" + this.threadID + "(" + socketFinder.toString() + ")";
}

/**
 * If the search for a socket has not finished, tries to connect a socket (with a timeout)
 * synchronously, then notifies the socketFinder of the result of the connection attempt.
 */
public void run() {
    IOException exception = null;

    // Note that we do not need the socketFinder lock here, as we update nothing in
    // socketFinder based on this condition. A dirty read is perfectly fine: at worst we
    // attempt a connection whose result will simply be discarded by updateResult.
    SocketFinder.Result result = socketFinder.getResult();
    if (result.equals(SocketFinder.Result.UNKNOWN)) {
        try {
            if (logger.isLoggable(Level.FINER)) {
                logger.finer(
                        this.toString() + " connecting to InetSocketAddress:" + inetSocketAddress + " with timeout:" + timeoutInMilliseconds);
            }
            socket.connect(inetSocketAddress, timeoutInMilliseconds);
        }
        catch (IOException ex) {
            if (logger.isLoggable(Level.FINER)) {
                logger.finer(this.toString() + " exception:" + ex.getClass() + " with message:" + ex.getMessage()
                        + " occured while connecting to InetSocketAddress:" + inetSocketAddress);
            }
            exception = ex;
        }
        // Report either the connected socket (exception == null) or the failure.
        socketFinder.updateResult(socket, exception, this.toString());
    }
}

/**
 * Used for tracing.
 *
 * @return traceID string
 */
public String toString() {
    return traceID;
}

/**
 * Generates the next unique thread id.
 * Synchronized: lastThreadID is shared across all connector threads.
 */
private static synchronized long nextThreadID() {
    if (lastThreadID == Long.MAX_VALUE) {
        // Wrap around rather than overflow; ids only need to be unique for diagnosis.
        if (logger.isLoggable(Level.FINER))
            logger.finer("Resetting the Id count");
        lastThreadID = 1;
    }
    else {
        lastThreadID++;
    }
    return lastThreadID;
}
}

/**
 * TDSWriter implements the client to server TDS data pipe.
 */
final class TDSWriter {
private static Logger logger = Logger.getLogger("com.microsoft.sqlserver.jdbc.internals.TDS.Writer");
private final String traceID;

final public String toString() {
    return traceID;
}

private final TDSChannel tdsChannel;
private final SQLServerConnection con;

// Flag to indicate whether data written via writeXXX() calls is loggable.
// Data is normally loggable, but sensitive data such as user credentials
// should never be logged for security reasons.
private boolean dataIsLoggable = true;

void setDataLoggable(boolean value) {
    dataIsLoggable = value;
}

private TDSCommand command = null;

// TDS message type (Query, RPC, DTC, etc.) sent at the beginning of every TDS message
// header. Value is set when starting a new TDS message of the specified type.
private byte tdsMessageType;

private volatile int sendResetConnection = 0;

// Size (in bytes) of the TDS packets to/from the server. This size is normally fixed
// for the life of the connection, but it can change once after the logon packet
// because packet size negotiation happens at logon time.
private int currentPacketSize = 0;

// Size of the TDS packet header, which is:
// byte type
// byte status
// short length
// short SPID
// byte packet
// byte window
private final static int TDS_PACKET_HEADER_SIZE = 8;
private final static byte[] placeholderHeader = new byte[TDS_PACKET_HEADER_SIZE];

// Intermediate array used to convert typically "small" values such as fixed-length types
// (byte, int, long, etc.) and Strings from their native form to bytes for sending to
// the channel buffers.
private byte valueBytes[] = new byte[256];

// Monotonically increasing packet number associated with the current message
private volatile int packetNum = 0;

// Byte lengths for sending decimal/numeric data
private final static int BYTES4 = 4;
private final static int BYTES8 = 8;
private final static int BYTES12 = 12;
private final static int BYTES16 = 16;

public final static int BIGDECIMAL_MAX_LENGTH = 0x11;

// Set to true when EOM is sent for the current message.
// Note that this variable will never be accessed from multiple threads
// simultaneously and so it need not be volatile.
private boolean isEOMSent = false;

boolean isEOMSent() {
    return isEOMSent;
}

// Packet data buffers
private ByteBuffer stagingBuffer;
private ByteBuffer socketBuffer;
private ByteBuffer logBuffer;

private CryptoMetadata cryptoMeta = null;

TDSWriter(TDSChannel tdsChannel, SQLServerConnection con) {
    this.tdsChannel = tdsChannel;
    this.con = con;
    traceID = "TDSWriter@" + Integer.toHexString(hashCode()) + " (" + con.toString() + ")";
}

// TDS message start/end operations

/**
 * Begins a new physical packet: resets the packet-log buffer (when packet logging is
 * enabled) and reserves header space in the staging buffer.
 */
void preparePacket() throws SQLServerException {
    if (tdsChannel.isLoggingPackets()) {
        // 0xFE filler makes unwritten log bytes easy to spot in a packet dump.
        Arrays.fill(logBuffer.array(), (byte) 0xFE);
        logBuffer.clear();
    }

    // Write a placeholder packet header. This will be replaced
    // with the real packet header when the packet is flushed.
    writeBytes(placeholderHeader);
}

/**
 * Start a new TDS message: writes the ALL_HEADERS (MARS, and optionally trace) prelude
 * required for Query/DTC/RPC messages.
 */
void writeMessageHeader() throws SQLServerException {
    // TDS 7.2 & later:
    // Include ALL_Headers/MARS header in message's first packet.
    // Note: the PKT_BULK message does not need this ALL_HEADERS.
    if ((TDS.PKT_QUERY == tdsMessageType || TDS.PKT_DTC == tdsMessageType || TDS.PKT_RPC == tdsMessageType)) {
        boolean includeTraceHeader = false;
        int totalHeaderLength = TDS.MESSAGE_HEADER_LENGTH;
        if (TDS.PKT_QUERY == tdsMessageType || TDS.PKT_RPC == tdsMessageType) {
            // Only attach the activity-trace header once per ActivityId, and only when
            // the server supports it (Denali+) and tracing is switched on.
            if (con.isDenaliOrLater() && !ActivityCorrelator.getCurrent().IsSentToServer() && Util.IsActivityTraceOn()) {
                includeTraceHeader = true;
                totalHeaderLength += TDS.TRACE_HEADER_LENGTH;
            }
        }
        writeInt(totalHeaderLength); // allHeaders.TotalLength (DWORD)
        writeInt(TDS.MARS_HEADER_LENGTH); // MARS header length (DWORD)
        writeShort((short) 2); // allHeaders.HeaderType(MARS header) (USHORT)
        writeBytes(con.getTransactionDescriptor());
        writeInt(1); // marsHeader.OutstandingRequestCount
        if (includeTraceHeader) {
            writeInt(TDS.TRACE_HEADER_LENGTH); // trace header length (DWORD)
            writeTraceHeaderData();
            ActivityCorrelator.setCurrentActivityIdSentFlag(); // set the flag to indicate this ActivityId is sent
        }
    }
}

/**
 * Writes the activity-trace header: type, ActivityId GUID, and sequence number.
 */
void writeTraceHeaderData() throws SQLServerException {
    ActivityId activityId = ActivityCorrelator.getCurrent();
    final byte[] actIdByteArray = Util.asGuidByteArray(activityId.getId());
    long seqNum = activityId.getSequence();
    writeShort(TDS.HEADERTYPE_TRACE); // trace header type
    writeBytes(actIdByteArray, 0, actIdByteArray.length); // guid part of ActivityId
    writeInt((int) seqNum); // sequence number of ActivityId
    if (logger.isLoggable(Level.FINER))
        logger.finer("Send Trace Header - ActivityID: " + activityId.toString());
}

/**
 * Convenience method to prepare the TDS channel for writing and start a new TDS message.
 *
 * @param command
 *        The TDS command
 * @param tdsMessageType
 *        The TDS message type (PKT_QUERY, PKT_RPC, etc.)
 */
void startMessage(TDSCommand command, byte tdsMessageType) throws SQLServerException {
    this.command = command;
    this.tdsMessageType = tdsMessageType;
    this.packetNum = 0;
    this.isEOMSent = false;
    this.dataIsLoggable = true;

    // If the TDS packet size has changed since the last request
    // (which should really only happen after the login packet)
    // then allocate new buffers that are the correct size.
    int negotiatedPacketSize = con.getTDSPacketSize();
    if (currentPacketSize != negotiatedPacketSize) {
        socketBuffer = ByteBuffer.allocate(negotiatedPacketSize).order(ByteOrder.LITTLE_ENDIAN);
        stagingBuffer = ByteBuffer.allocate(negotiatedPacketSize).order(ByteOrder.LITTLE_ENDIAN);
        logBuffer = ByteBuffer.allocate(negotiatedPacketSize).order(ByteOrder.LITTLE_ENDIAN);
        currentPacketSize = negotiatedPacketSize;
    }

    // Mark the socket buffer fully consumed and the staging buffer empty.
    socketBuffer.position(socketBuffer.limit());
    stagingBuffer.clear();

    preparePacket();
    writeMessageHeader();
}

final void endMessage() throws SQLServerException {
    if (logger.isLoggable(Level.FINEST))
        logger.finest(toString() + " Finishing TDS message");
    writePacket(TDS.STATUS_BIT_EOM);
}

// If a complete request has not been sent to the server,
// the client MUST send the next packet with both ignore bit (0x02) and EOM bit (0x01)
// set in the status to cancel the request.
final boolean ignoreMessage() throws SQLServerException { if (packetNum > 0) { assert !isEOMSent; if (logger.isLoggable(Level.FINER)) logger.finest(toString() + " Finishing TDS message by sending ignore bit and end of message"); writePacket(TDS.STATUS_BIT_EOM | TDS.STATUS_BIT_ATTENTION); return true; } return false; } final void resetPooledConnection() { if (logger.isLoggable(Level.FINEST)) logger.finest(toString() + " resetPooledConnection"); sendResetConnection = TDS.STATUS_BIT_RESET_CONN; } // Primitive write operations void writeByte(byte value) throws SQLServerException { if (stagingBuffer.remaining() >= 1) { stagingBuffer.put(value); if (tdsChannel.isLoggingPackets()) { if (dataIsLoggable) logBuffer.put(value); else logBuffer.position(logBuffer.position() + 1); } } else { valueBytes[0] = value; writeWrappedBytes(valueBytes, 1); } } /** * writing sqlCollation information for sqlVariant type when sending character types. * * @param variantType * @throws SQLServerException */ void writeCollationForSqlVariant(SqlVariant variantType) throws SQLServerException { writeInt(variantType.getCollation().getCollationInfo()); writeByte((byte) (variantType.getCollation().getCollationSortID() & 0xFF)); } void writeChar(char value) throws SQLServerException { if (stagingBuffer.remaining() >= 2) { stagingBuffer.putChar(value); if (tdsChannel.isLoggingPackets()) { if (dataIsLoggable) logBuffer.putChar(value); else logBuffer.position(logBuffer.position() + 2); } } else { Util.writeShort((short) value, valueBytes, 0); writeWrappedBytes(valueBytes, 2); } } void writeShort(short value) throws SQLServerException { if (stagingBuffer.remaining() >= 2) { stagingBuffer.putShort(value); if (tdsChannel.isLoggingPackets()) { if (dataIsLoggable) logBuffer.putShort(value); else logBuffer.position(logBuffer.position() + 2); } } else { Util.writeShort(value, valueBytes, 0); writeWrappedBytes(valueBytes, 2); } } void writeInt(int value) throws SQLServerException { if (stagingBuffer.remaining() 
>= 4) { stagingBuffer.putInt(value); if (tdsChannel.isLoggingPackets()) { if (dataIsLoggable) logBuffer.putInt(value); else logBuffer.position(logBuffer.position() + 4); } } else { Util.writeInt(value, valueBytes, 0); writeWrappedBytes(valueBytes, 4); } } /** * Append a real value in the TDS stream. * * @param value * the data value */ void writeReal(Float value) throws SQLServerException { writeInt(Float.floatToRawIntBits(value.floatValue())); } /** * Append a double value in the TDS stream. * * @param value * the data value */ void writeDouble(double value) throws SQLServerException { if (stagingBuffer.remaining() >= 8) { stagingBuffer.putDouble(value); if (tdsChannel.isLoggingPackets()) { if (dataIsLoggable) logBuffer.putDouble(value); else logBuffer.position(logBuffer.position() + 8); } } else { long bits = Double.doubleToLongBits(value); long mask = 0xFF; int nShift = 0; for (int i = 0; i < 8; i++) { writeByte((byte) ((bits & mask) >> nShift)); nShift += 8; mask = mask << 8; } } } /** * Append a big decimal in the TDS stream. * * @param bigDecimalVal * the big decimal data value * @param srcJdbcType * the source JDBCType * @param precision * the precision of the data value * @param scale * the scale of the column * @throws SQLServerException */ void writeBigDecimal(BigDecimal bigDecimalVal, int srcJdbcType, int precision, int scale) throws SQLServerException { /* * Length including sign byte One 1-byte unsigned integer that represents the sign of the decimal value (0 => Negative, 1 => positive) One 4-, * 8-, 12-, or 16-byte signed integer that represents the decimal value multiplied by 10^scale. */ /* * setScale of all BigDecimal value based on metadata as scale is not sent seperately for individual value. Use the rounding used in Server. 
* Say, for BigDecimal("0.1"), if scale in metdadata is 0, then ArithmeticException would be thrown if RoundingMode is not set */ bigDecimalVal = bigDecimalVal.setScale(scale, RoundingMode.HALF_UP); // data length + 1 byte for sign int bLength = BYTES16 + 1; writeByte((byte) (bLength)); // Byte array to hold all the data and padding bytes. byte[] bytes = new byte[bLength]; byte[] valueBytes = DDC.convertBigDecimalToBytes(bigDecimalVal, scale); // removing the precision and scale information from the valueBytes array System.arraycopy(valueBytes, 2, bytes, 0, valueBytes.length - 2); writeBytes(bytes); } /** * Append a big decimal inside sql_variant in the TDS stream. * * @param bigDecimalVal * the big decimal data value * @param srcJdbcType * the source JDBCType */ void writeSqlVariantInternalBigDecimal(BigDecimal bigDecimalVal, int srcJdbcType) throws SQLServerException { /* * Length including sign byte One 1-byte unsigned integer that represents the sign of the decimal value (0 => Negative, 1 => positive) One * 16-byte signed integer that represents the decimal value multiplied by 10^scale. In sql_variant, we send the bigdecimal with precision 38, * therefore we use 16 bytes for the maximum size of this integer. */ boolean isNegative = (bigDecimalVal.signum() < 0); BigInteger bi = bigDecimalVal.unscaledValue(); if (isNegative) bi = bi.negate(); int bLength; bLength = BYTES16; writeByte((byte) (isNegative ? 0 : 1)); // Get the bytes of the BigInteger value. It is in reverse order, with // most significant byte in 0-th element. We need to reverse it first before sending over TDS. 
byte[] unscaledBytes = bi.toByteArray(); if (unscaledBytes.length > bLength) { // If precession of input is greater than maximum allowed (p><= 38) throw Exception MessageFormat form = new MessageFormat(SQLServerException.getErrString("R_valueOutOfRange")); Object[] msgArgs = {JDBCType.of(srcJdbcType)}; throw new SQLServerException(form.format(msgArgs), SQLState.DATA_EXCEPTION_LENGTH_MISMATCH, DriverError.NOT_SET, null); } // Byte array to hold all the reversed and padding bytes. byte[] bytes = new byte[bLength]; // We need to fill up the rest of the array with zeros, as unscaledBytes may have less bytes // than the required size for TDS. int remaining = bLength - unscaledBytes.length; // Reverse the bytes. int i, j; for (i = 0, j = unscaledBytes.length - 1; i < unscaledBytes.length;) bytes[i++] = unscaledBytes[j // Fill the rest of the array with zeros. for (; i < remaining; i++) bytes[i] = (byte) 0x00; writeBytes(bytes); } void writeSmalldatetime(String value) throws SQLServerException { GregorianCalendar calendar = initializeCalender(TimeZone.getDefault()); long utcMillis; // Value to which the calendar is to be set (in milliseconds 1/1/1970 00:00:00 GMT) java.sql.Timestamp timestampValue = java.sql.Timestamp.valueOf(value); utcMillis = timestampValue.getTime(); // Load the calendar with the desired value calendar.setTimeInMillis(utcMillis); // Number of days since the SQL Server Base Date (January 1, 1900) int daysSinceSQLBaseDate = DDC.daysSinceBaseDate(calendar.get(Calendar.YEAR), calendar.get(Calendar.DAY_OF_YEAR), TDS.BASE_YEAR_1900); // Next, figure out the number of milliseconds since midnight of the current day. 
int millisSinceMidnight = 1000 * calendar.get(Calendar.SECOND) + // Seconds into the current minute 60 * 1000 * calendar.get(Calendar.MINUTE) + // Minutes into the current hour 60 * 60 * 1000 * calendar.get(Calendar.HOUR_OF_DAY); // Hours into the current day // The last millisecond of the current day is always rounded to the first millisecond // of the next day because DATETIME is only accurate to 1/300th of a second. if (1000 * 60 * 60 * 24 - 1 <= millisSinceMidnight) { ++daysSinceSQLBaseDate; millisSinceMidnight = 0; } // Number of days since the SQL Server Base Date (January 1, 1900) writeShort((short) daysSinceSQLBaseDate); int secondsSinceMidnight = (millisSinceMidnight / 1000); int minutesSinceMidnight = (secondsSinceMidnight / 60); // Values that are 29.998 seconds or less are rounded down to the nearest minute minutesSinceMidnight = ((secondsSinceMidnight % 60) > 29.998) ? minutesSinceMidnight + 1 : minutesSinceMidnight; // Minutes since midnight writeShort((short) minutesSinceMidnight); } void writeDatetime(String value) throws SQLServerException { GregorianCalendar calendar = initializeCalender(TimeZone.getDefault()); long utcMillis; // Value to which the calendar is to be set (in milliseconds 1/1/1970 00:00:00 GMT) int subSecondNanos; java.sql.Timestamp timestampValue = java.sql.Timestamp.valueOf(value); utcMillis = timestampValue.getTime(); subSecondNanos = timestampValue.getNanos(); // Load the calendar with the desired value calendar.setTimeInMillis(utcMillis); // Number of days there have been since the SQL Base Date. // These are based on SQL Server algorithms int daysSinceSQLBaseDate = DDC.daysSinceBaseDate(calendar.get(Calendar.YEAR), calendar.get(Calendar.DAY_OF_YEAR), TDS.BASE_YEAR_1900); // Number of milliseconds since midnight of the current day. 
int millisSinceMidnight = (subSecondNanos + Nanos.PER_MILLISECOND / 2) / Nanos.PER_MILLISECOND + // Millis into the current second 1000 * calendar.get(Calendar.SECOND) + // Seconds into the current minute 60 * 1000 * calendar.get(Calendar.MINUTE) + // Minutes into the current hour 60 * 60 * 1000 * calendar.get(Calendar.HOUR_OF_DAY); // Hours into the current day // The last millisecond of the current day is always rounded to the first millisecond // of the next day because DATETIME is only accurate to 1/300th of a second. if (1000 * 60 * 60 * 24 - 1 <= millisSinceMidnight) { ++daysSinceSQLBaseDate; millisSinceMidnight = 0; } // Last-ditch verification that the value is in the valid range for the // DATETIMEN TDS data type (1/1/1753 to 12/31/9999). If it's not, then // throw an exception now so that statement execution is safely canceled. // Attempting to put an invalid value on the wire would result in a TDS // exception, which would close the connection. // These are based on SQL Server algorithms if (daysSinceSQLBaseDate < DDC.daysSinceBaseDate(1753, 1, TDS.BASE_YEAR_1900) || daysSinceSQLBaseDate >= DDC.daysSinceBaseDate(10000, 1, TDS.BASE_YEAR_1900)) { MessageFormat form = new MessageFormat(SQLServerException.getErrString("R_valueOutOfRange")); Object[] msgArgs = {SSType.DATETIME}; throw new SQLServerException(form.format(msgArgs), SQLState.DATA_EXCEPTION_DATETIME_FIELD_OVERFLOW, DriverError.NOT_SET, null); } // Number of days since the SQL Server Base Date (January 1, 1900) writeInt(daysSinceSQLBaseDate); // Milliseconds since midnight (at a resolution of three hundredths of a second) writeInt((3 * millisSinceMidnight + 5) / 10); } void writeDate(String value) throws SQLServerException { GregorianCalendar calendar = initializeCalender(TimeZone.getDefault()); long utcMillis; java.sql.Date dateValue = java.sql.Date.valueOf(value); utcMillis = dateValue.getTime(); // Load the calendar with the desired value calendar.setTimeInMillis(utcMillis); 
writeScaledTemporal(calendar, 0, // subsecond nanos (none for a date value) 0, // scale (dates are not scaled) SSType.DATE); } void writeTime(java.sql.Timestamp value, int scale) throws SQLServerException { GregorianCalendar calendar = initializeCalender(TimeZone.getDefault()); long utcMillis; // Value to which the calendar is to be set (in milliseconds 1/1/1970 00:00:00 GMT) int subSecondNanos; utcMillis = value.getTime(); subSecondNanos = value.getNanos(); // Load the calendar with the desired value calendar.setTimeInMillis(utcMillis); writeScaledTemporal(calendar, subSecondNanos, scale, SSType.TIME); } void writeDateTimeOffset(Object value, int scale, SSType destSSType) throws SQLServerException { GregorianCalendar calendar; TimeZone timeZone; // Time zone to associate with the value in the Gregorian calendar long utcMillis; // Value to which the calendar is to be set (in milliseconds 1/1/1970 00:00:00 GMT) int subSecondNanos; int minutesOffset; microsoft.sql.DateTimeOffset dtoValue = (microsoft.sql.DateTimeOffset) value; utcMillis = dtoValue.getTimestamp().getTime(); subSecondNanos = dtoValue.getTimestamp().getNanos(); minutesOffset = dtoValue.getMinutesOffset(); // If the target data type is DATETIMEOFFSET, then use UTC for the calendar that // will hold the value, since writeRPCDateTimeOffset expects a UTC calendar. // Otherwise, when converting from DATETIMEOFFSET to other temporal data types, // use a local time zone determined by the minutes offset of the value, since // the writers for those types expect local calendars. timeZone = (SSType.DATETIMEOFFSET == destSSType) ? 
UTC.timeZone : new SimpleTimeZone(minutesOffset * 60 * 1000, ""); calendar = new GregorianCalendar(timeZone, Locale.US); calendar.setLenient(true); calendar.clear(); calendar.setTimeInMillis(utcMillis); writeScaledTemporal(calendar, subSecondNanos, scale, SSType.DATETIMEOFFSET); writeShort((short) minutesOffset); } void writeOffsetDateTimeWithTimezone(OffsetDateTime offsetDateTimeValue, int scale) throws SQLServerException { GregorianCalendar calendar; TimeZone timeZone; long utcMillis; int subSecondNanos; int minutesOffset = 0; try { // offsetTimeValue.getOffset() returns a ZoneOffset object which has only hours and minutes // components. So the result of the division will be an integer always. SQL Server also supports // offsets in minutes precision. minutesOffset = offsetDateTimeValue.getOffset().getTotalSeconds() / 60; } catch (Exception e) { throw new SQLServerException(SQLServerException.getErrString("R_zoneOffsetError"), null, // SQLState is null as this error is generated in // the driver 0, // Use 0 instead of DriverError.NOT_SET to use the correct constructor e); } subSecondNanos = offsetDateTimeValue.getNano(); // writeScaledTemporal() expects subSecondNanos in 9 digits precssion // but getNano() used in OffsetDateTime returns precession based on nanoseconds read from csv // padding zeros to match the expectation of writeScaledTemporal() int padding = 9 - String.valueOf(subSecondNanos).length(); while (padding > 0) { subSecondNanos = subSecondNanos * 10; padding } // For TIME_WITH_TIMEZONE, use UTC for the calendar that will hold the value timeZone = UTC.timeZone; // The behavior is similar to microsoft.sql.DateTimeOffset // In Timestamp format, only YEAR needs to have 4 digits. The leading zeros for the rest of the fields can be omitted. 
String offDateTimeStr = String.format("%04d", offsetDateTimeValue.getYear()) + '-' + offsetDateTimeValue.getMonthValue() + '-' + offsetDateTimeValue.getDayOfMonth() + ' ' + offsetDateTimeValue.getHour() + ':' + offsetDateTimeValue.getMinute() + ':' + offsetDateTimeValue.getSecond(); utcMillis = Timestamp.valueOf(offDateTimeStr).getTime(); calendar = initializeCalender(timeZone); calendar.setTimeInMillis(utcMillis); // Local timezone value in minutes int minuteAdjustment = ((TimeZone.getDefault().getRawOffset()) / (60 * 1000)); // check if date is in day light savings and add daylight saving minutes if (TimeZone.getDefault().inDaylightTime(calendar.getTime())) minuteAdjustment += (TimeZone.getDefault().getDSTSavings()) / (60 * 1000); // If the local time is negative then positive minutesOffset must be subtracted from calender minuteAdjustment += (minuteAdjustment < 0) ? (minutesOffset * (-1)) : minutesOffset; calendar.add(Calendar.MINUTE, minuteAdjustment); writeScaledTemporal(calendar, subSecondNanos, scale, SSType.DATETIMEOFFSET); writeShort((short) minutesOffset); } void writeOffsetTimeWithTimezone(OffsetTime offsetTimeValue, int scale) throws SQLServerException { GregorianCalendar calendar; TimeZone timeZone; long utcMillis; int subSecondNanos; int minutesOffset = 0; try { // offsetTimeValue.getOffset() returns a ZoneOffset object which has only hours and minutes // components. So the result of the division will be an integer always. SQL Server also supports // offsets in minutes precision. 
minutesOffset = offsetTimeValue.getOffset().getTotalSeconds() / 60; } catch (Exception e) { throw new SQLServerException(SQLServerException.getErrString("R_zoneOffsetError"), null, // SQLState is null as this error is generated in // the driver 0, // Use 0 instead of DriverError.NOT_SET to use the correct constructor e); } subSecondNanos = offsetTimeValue.getNano(); // writeScaledTemporal() expects subSecondNanos in 9 digits precssion // but getNano() used in OffsetDateTime returns precession based on nanoseconds read from csv // padding zeros to match the expectation of writeScaledTemporal() int padding = 9 - String.valueOf(subSecondNanos).length(); while (padding > 0) { subSecondNanos = subSecondNanos * 10; padding } // For TIME_WITH_TIMEZONE, use UTC for the calendar that will hold the value timeZone = UTC.timeZone; // Using TDS.BASE_YEAR_1900, based on SQL server behavious // If date only contains a time part, the return value is 1900, the base year. // In Timestamp format, leading zeros for the fields can be omitted. String offsetTimeStr = TDS.BASE_YEAR_1900 + "-01-01" + ' ' + offsetTimeValue.getHour() + ':' + offsetTimeValue.getMinute() + ':' + offsetTimeValue.getSecond(); utcMillis = Timestamp.valueOf(offsetTimeStr).getTime(); calendar = initializeCalender(timeZone); calendar.setTimeInMillis(utcMillis); int minuteAdjustment = (TimeZone.getDefault().getRawOffset()) / (60 * 1000); // check if date is in day light savings and add daylight saving minutes to Local timezone(in minutes) if (TimeZone.getDefault().inDaylightTime(calendar.getTime())) minuteAdjustment += ((TimeZone.getDefault().getDSTSavings()) / (60 * 1000)); // If the local time is negative then positive minutesOffset must be subtracted from calender minuteAdjustment += (minuteAdjustment < 0) ? 
(minutesOffset * (-1)) : minutesOffset; calendar.add(Calendar.MINUTE, minuteAdjustment); writeScaledTemporal(calendar, subSecondNanos, scale, SSType.DATETIMEOFFSET); writeShort((short) minutesOffset); } void writeLong(long value) throws SQLServerException { if (stagingBuffer.remaining() >= 8) { stagingBuffer.putLong(value); if (tdsChannel.isLoggingPackets()) { if (dataIsLoggable) logBuffer.putLong(value); else logBuffer.position(logBuffer.position() + 8); } } else { valueBytes[0] = (byte) ((value >> 0) & 0xFF); valueBytes[1] = (byte) ((value >> 8) & 0xFF); valueBytes[2] = (byte) ((value >> 16) & 0xFF); valueBytes[3] = (byte) ((value >> 24) & 0xFF); valueBytes[4] = (byte) ((value >> 32) & 0xFF); valueBytes[5] = (byte) ((value >> 40) & 0xFF); valueBytes[6] = (byte) ((value >> 48) & 0xFF); valueBytes[7] = (byte) ((value >> 56) & 0xFF); writeWrappedBytes(valueBytes, 8); } } void writeBytes(byte[] value) throws SQLServerException { writeBytes(value, 0, value.length); } void writeBytes(byte[] value, int offset, int length) throws SQLServerException { assert length <= value.length; int bytesWritten = 0; int bytesToWrite; if (logger.isLoggable(Level.FINEST)) logger.finest(toString() + " Writing " + length + " bytes"); while ((bytesToWrite = length - bytesWritten) > 0) { if (0 == stagingBuffer.remaining()) writePacket(TDS.STATUS_NORMAL); if (bytesToWrite > stagingBuffer.remaining()) bytesToWrite = stagingBuffer.remaining(); stagingBuffer.put(value, offset + bytesWritten, bytesToWrite); if (tdsChannel.isLoggingPackets()) { if (dataIsLoggable) logBuffer.put(value, offset + bytesWritten, bytesToWrite); else logBuffer.position(logBuffer.position() + bytesToWrite); } bytesWritten += bytesToWrite; } } void writeWrappedBytes(byte value[], int valueLength) throws SQLServerException { // This function should only be used to write a value that is longer than // what remains in the current staging buffer. However, the value must // be short enough to fit in an empty buffer. 
assert valueLength <= value.length; int remaining = stagingBuffer.remaining(); assert remaining < valueLength; assert valueLength <= stagingBuffer.capacity(); // Fill any remaining space in the staging buffer remaining = stagingBuffer.remaining(); if (remaining > 0) { stagingBuffer.put(value, 0, remaining); if (tdsChannel.isLoggingPackets()) { if (dataIsLoggable) logBuffer.put(value, 0, remaining); else logBuffer.position(logBuffer.position() + remaining); } } writePacket(TDS.STATUS_NORMAL); // After swapping, the staging buffer should once again be empty, so the // remainder of the value can be written to it. stagingBuffer.put(value, remaining, valueLength - remaining); if (tdsChannel.isLoggingPackets()) { if (dataIsLoggable) logBuffer.put(value, remaining, valueLength - remaining); else logBuffer.position(logBuffer.position() + remaining); } } void writeString(String value) throws SQLServerException { int charsCopied = 0; int length = value.length(); while (charsCopied < length) { int bytesToCopy = 2 * (length - charsCopied); if (bytesToCopy > valueBytes.length) bytesToCopy = valueBytes.length; int bytesCopied = 0; while (bytesCopied < bytesToCopy) { char ch = value.charAt(charsCopied++); valueBytes[bytesCopied++] = (byte) ((ch >> 0) & 0xFF); valueBytes[bytesCopied++] = (byte) ((ch >> 8) & 0xFF); } writeBytes(valueBytes, 0, bytesCopied); } } void writeStream(InputStream inputStream, long advertisedLength, boolean writeChunkSizes) throws SQLServerException { assert DataTypes.UNKNOWN_STREAM_LENGTH == advertisedLength || advertisedLength >= 0; long actualLength = 0; final byte[] streamByteBuffer = new byte[4 * currentPacketSize]; int bytesRead = 0; int bytesToWrite; do { // Read in next chunk for (bytesToWrite = 0; -1 != bytesRead && bytesToWrite < streamByteBuffer.length; bytesToWrite += bytesRead) { try { bytesRead = inputStream.read(streamByteBuffer, bytesToWrite, streamByteBuffer.length - bytesToWrite); } catch (IOException e) { MessageFormat form = new 
MessageFormat(SQLServerException.getErrString("R_errorReadingStream"));
                    Object[] msgArgs = {e.toString()};
                    error(form.format(msgArgs), SQLState.DATA_EXCEPTION_NOT_SPECIFIC, DriverError.NOT_SET);
                }

                // End of stream reached.
                if (-1 == bytesRead)
                    break;

                // Check for invalid bytesRead returned from InputStream.read
                if (bytesRead < 0 || bytesRead > streamByteBuffer.length - bytesToWrite) {
                    MessageFormat form = new MessageFormat(SQLServerException.getErrString("R_errorReadingStream"));
                    Object[] msgArgs = {SQLServerException.getErrString("R_streamReadReturnedInvalidValue")};
                    error(form.format(msgArgs), SQLState.DATA_EXCEPTION_NOT_SPECIFIC, DriverError.NOT_SET);
                }
            }

            // Write it out. Note: when writeChunkSizes is set, the final iteration writes
            // a zero-length chunk, which serves as the PLP terminator.
            if (writeChunkSizes)
                writeInt(bytesToWrite);

            writeBytes(streamByteBuffer, 0, bytesToWrite);
            actualLength += bytesToWrite;
        } while (-1 != bytesRead || bytesToWrite > 0);

        // If we were given an input stream length that we had to match and
        // the actual stream length did not match then cancel the request.
        if (DataTypes.UNKNOWN_STREAM_LENGTH != advertisedLength && actualLength != advertisedLength) {
            MessageFormat form = new MessageFormat(SQLServerException.getErrString("R_mismatchedStreamLength"));
            Object[] msgArgs = {Long.valueOf(advertisedLength), Long.valueOf(actualLength)};
            error(form.format(msgArgs), SQLState.DATA_EXCEPTION_LENGTH_MISMATCH, DriverError.NOT_SET);
        }
    }

    /*
     * Adding another function for writing non-unicode reader instead of re-factoring the writeReader() for performance efficiency. As this method
     * will only be used in bulk copy, it needs to be efficient. Note: Any changes in algorithm/logic should propagate to both writeReader() and
     * writeNonUnicodeReader().
*/
    void writeNonUnicodeReader(Reader reader, long advertisedLength, boolean isDestBinary, Charset charSet) throws SQLServerException {
        assert DataTypes.UNKNOWN_STREAM_LENGTH == advertisedLength || advertisedLength >= 0;

        long actualLength = 0;
        char[] streamCharBuffer = new char[currentPacketSize];
        // The unicode version, writeReader() allocates a byte buffer that is 4 times the currentPacketSize, not sure why.
        byte[] streamByteBuffer = new byte[currentPacketSize];
        int charsRead = 0;
        int charsToWrite;
        int bytesToWrite;
        String streamString;

        do {
            // Read in next chunk
            for (charsToWrite = 0; -1 != charsRead && charsToWrite < streamCharBuffer.length; charsToWrite += charsRead) {
                try {
                    charsRead = reader.read(streamCharBuffer, charsToWrite, streamCharBuffer.length - charsToWrite);
                }
                catch (IOException e) {
                    MessageFormat form = new MessageFormat(SQLServerException.getErrString("R_errorReadingStream"));
                    Object[] msgArgs = {e.toString()};
                    error(form.format(msgArgs), SQLState.DATA_EXCEPTION_NOT_SPECIFIC, DriverError.NOT_SET);
                }

                // End of stream reached.
                if (-1 == charsRead)
                    break;

                // Check for invalid charsRead returned from Reader.read
                if (charsRead < 0 || charsRead > streamCharBuffer.length - charsToWrite) {
                    MessageFormat form = new MessageFormat(SQLServerException.getErrString("R_errorReadingStream"));
                    Object[] msgArgs = {SQLServerException.getErrString("R_streamReadReturnedInvalidValue")};
                    error(form.format(msgArgs), SQLState.DATA_EXCEPTION_NOT_SPECIFIC, DriverError.NOT_SET);
                }
            }

            if (!isDestBinary) {
                // Write it out
                // This also writes the PLP_TERMINATOR token after all the data in the stream are sent.
                // The Do-While loop goes on one more time as charsToWrite is greater than 0 for the last chunk, and
                // in this last round the only thing that is written is an int value of 0, which is the PLP Terminator token(0x00000000).
                writeInt(charsToWrite);

                for (int charsCopied = 0; charsCopied < charsToWrite; ++charsCopied) {
                    if (null == charSet) {
                        // No collation charset: truncate each char to its low byte.
                        streamByteBuffer[charsCopied] = (byte) (streamCharBuffer[charsCopied] & 0xFF);
                    }
                    else {
                        // encoding as per collation
                        // NOTE(review): only the first encoded byte is kept, so any character that
                        // encodes to more than one byte in charSet is truncated — presumably this
                        // path is only used with single-byte collations; confirm against callers.
                        streamByteBuffer[charsCopied] = new String(streamCharBuffer[charsCopied] + "").getBytes(charSet)[0];
                    }
                }
                writeBytes(streamByteBuffer, 0, charsToWrite);
            }
            else {
                // Destination is binary: the reader supplies hex text, two hex digits per
                // output byte, so the byte count is half the char count.
                bytesToWrite = charsToWrite;
                if (0 != charsToWrite)
                    bytesToWrite = charsToWrite / 2;

                streamString = new String(streamCharBuffer);
                byte[] bytes = ParameterUtils.HexToBin(streamString.trim());
                writeInt(bytesToWrite);
                writeBytes(bytes, 0, bytesToWrite);
            }
            actualLength += charsToWrite;
        } while (-1 != charsRead || charsToWrite > 0);

        // If we were given an input stream length that we had to match and
        // the actual stream length did not match then cancel the request.
        if (DataTypes.UNKNOWN_STREAM_LENGTH != advertisedLength && actualLength != advertisedLength) {
            MessageFormat form = new MessageFormat(SQLServerException.getErrString("R_mismatchedStreamLength"));
            Object[] msgArgs = {Long.valueOf(advertisedLength), Long.valueOf(actualLength)};
            error(form.format(msgArgs), SQLState.DATA_EXCEPTION_LENGTH_MISMATCH, DriverError.NOT_SET);
        }
    }

    /*
     * Note: There is another method with same code logic for non unicode reader, writeNonUnicodeReader(), implemented for performance efficiency. Any
     * changes in algorithm/logic should propagate to both writeReader() and writeNonUnicodeReader().
*/
    void writeReader(Reader reader, long advertisedLength, boolean writeChunkSizes) throws SQLServerException {
        assert DataTypes.UNKNOWN_STREAM_LENGTH == advertisedLength || advertisedLength >= 0;

        long actualLength = 0;
        char[] streamCharBuffer = new char[2 * currentPacketSize];
        byte[] streamByteBuffer = new byte[4 * currentPacketSize];
        int charsRead = 0;
        int charsToWrite;
        do {
            // Read in next chunk
            for (charsToWrite = 0; -1 != charsRead && charsToWrite < streamCharBuffer.length; charsToWrite += charsRead) {
                try {
                    charsRead = reader.read(streamCharBuffer, charsToWrite, streamCharBuffer.length - charsToWrite);
                }
                catch (IOException e) {
                    MessageFormat form = new MessageFormat(SQLServerException.getErrString("R_errorReadingStream"));
                    Object[] msgArgs = {e.toString()};
                    error(form.format(msgArgs), SQLState.DATA_EXCEPTION_NOT_SPECIFIC, DriverError.NOT_SET);
                }

                // End of stream reached.
                if (-1 == charsRead)
                    break;

                // Check for invalid charsRead returned from Reader.read
                if (charsRead < 0 || charsRead > streamCharBuffer.length - charsToWrite) {
                    MessageFormat form = new MessageFormat(SQLServerException.getErrString("R_errorReadingStream"));
                    Object[] msgArgs = {SQLServerException.getErrString("R_streamReadReturnedInvalidValue")};
                    error(form.format(msgArgs), SQLState.DATA_EXCEPTION_NOT_SPECIFIC, DriverError.NOT_SET);
                }
            }

            // Write it out (chunk length is in bytes: 2 bytes per char)
            if (writeChunkSizes)
                writeInt(2 * charsToWrite);

            // Convert from Unicode characters to bytes
            // Note: The following inlined code is much faster than the equivalent
            // call to (new String(streamCharBuffer)).getBytes("UTF-16LE") because it
            // saves a conversion to String and use of Charset in that conversion.
            for (int charsCopied = 0; charsCopied < charsToWrite; ++charsCopied) {
                streamByteBuffer[2 * charsCopied] = (byte) ((streamCharBuffer[charsCopied] >> 0) & 0xFF);
                streamByteBuffer[2 * charsCopied + 1] = (byte) ((streamCharBuffer[charsCopied] >> 8) & 0xFF);
            }

            writeBytes(streamByteBuffer, 0, 2 * charsToWrite);
            actualLength += charsToWrite;
        } while (-1 != charsRead || charsToWrite > 0);

        // If we were given an input stream length that we had to match and
        // the actual stream length did not match then cancel the request.
        if (DataTypes.UNKNOWN_STREAM_LENGTH != advertisedLength && actualLength != advertisedLength) {
            MessageFormat form = new MessageFormat(SQLServerException.getErrString("R_mismatchedStreamLength"));
            Object[] msgArgs = {Long.valueOf(advertisedLength), Long.valueOf(actualLength)};
            error(form.format(msgArgs), SQLState.DATA_EXCEPTION_LENGTH_MISMATCH, DriverError.NOT_SET);
        }
    }

    /**
     * Creates a cleared, lenient GregorianCalendar in the given time zone (US locale),
     * ready to receive field values for temporal serialization.
     */
    GregorianCalendar initializeCalender(TimeZone timeZone) {
        GregorianCalendar calendar;

        // Create the calendar that will hold the value. For DateTimeOffset values, the calendar's
        // time zone is UTC. For other values, the calendar's time zone is a local time zone.
        calendar = new GregorianCalendar(timeZone, Locale.US);

        // Set the calendar lenient to allow setting the DAY_OF_YEAR and MILLISECOND fields
        // to roll other fields to their correct values.
        calendar.setLenient(true);

        // Clear the calendar of any existing state. The state of a new Calendar object always
        // reflects the current date, time, DST offset, etc.
        calendar.clear();

        return calendar;
    }

    /** Interrupts the current command with the given reason and throws a SQLServerException. */
    final void error(String reason, SQLState sqlState, DriverError driverError) throws SQLServerException {
        assert null != command;
        command.interrupt(reason);
        throw new SQLServerException(reason, sqlState, driverError, null);
    }

    /**
     * Sends an attention signal to the server, if necessary, to tell it to stop processing the current command on this connection.
*
     * If no packets of the command's request have yet been sent to the server, then no attention signal needs to be sent. The interrupt will be
     * handled entirely by the driver.
     *
     * This method does not need synchronization as it does not manipulate interrupt state and writing is guaranteed to occur only from one thread at
     * a time.
     */
    final boolean sendAttention() throws SQLServerException {
        // If any request packets were already written to the server then send an
        // attention signal to the server to tell it to ignore the request or
        // cancel its execution.
        if (packetNum > 0) {
            // Ideally, we would want to add the following assert here.
            // But to add that the variable isEOMSent would have to be made
            // volatile as this piece of code would be reached from multiple
            // threads. So, not doing it to avoid perf hit. Note that
            // isEOMSent would be updated in writePacket everytime an EOM is sent
            // assert isEOMSent;

            if (logger.isLoggable(Level.FINE))
                logger.fine(this + ": sending attention...");

            ++tdsChannel.numMsgsSent;

            startMessage(command, TDS.PKT_CANCEL_REQ);
            endMessage();

            return true;
        }

        return false;
    }

    /**
     * Stamps the header onto and sends the current staging-buffer contents as one TDS
     * packet with the given message status, checking for an interrupt on the owning
     * command first (except for cancel/attention packets).
     */
    private void writePacket(int tdsMessageStatus) throws SQLServerException {
        final boolean atEOM = (TDS.STATUS_BIT_EOM == (TDS.STATUS_BIT_EOM & tdsMessageStatus));
        final boolean isCancelled = ((TDS.PKT_CANCEL_REQ == tdsMessageType)
                || ((tdsMessageStatus & TDS.STATUS_BIT_ATTENTION) == TDS.STATUS_BIT_ATTENTION));

        // Before writing each packet to the channel, check if an interrupt has occurred.
        if (null != command && (!isCancelled))
            command.checkForInterrupt();

        writePacketHeader(tdsMessageStatus | sendResetConnection);
        sendResetConnection = 0; // reset-connection bit is sent at most once per message
        flush(atEOM);

        // If this is the last packet then flush the remainder of the request
        // through the socket. The first flush() call ensured that data currently
        // waiting in the socket buffer was sent, flipped the buffers, and started
        // sending data from the staging buffer (flipped to be the new socket buffer).
// This flush() call ensures that all remaining data in the socket buffer is sent.
        if (atEOM) {
            flush(atEOM);
            isEOMSent = true;
            ++tdsChannel.numMsgsSent;
        }

        // If we just sent the first login request packet and SSL encryption was enabled
        // for login only, then disable SSL now.
        if (TDS.PKT_LOGON70 == tdsMessageType && 1 == packetNum && TDS.ENCRYPT_OFF == con.getNegotiatedEncryptionLevel()) {
            tdsChannel.disableSSL();
        }

        // Notify the currently associated command (if any) that we have written the last
        // of the response packets to the channel.
        if (null != command && (!isCancelled) && atEOM)
            command.onRequestComplete();
    }

    /**
     * Stamps the 8-byte TDS packet header (type, status, big-endian length and SPID,
     * sequence number, window) at the start of the staging buffer, and mirrors the
     * same header into the log buffer when packet logging is enabled.
     */
    private void writePacketHeader(int tdsMessageStatus) {
        int tdsMessageLength = stagingBuffer.position();
        ++packetNum;

        // Write the TDS packet header back at the start of the staging buffer
        stagingBuffer.put(TDS.PACKET_HEADER_MESSAGE_TYPE, tdsMessageType);
        stagingBuffer.put(TDS.PACKET_HEADER_MESSAGE_STATUS, (byte) tdsMessageStatus);
        stagingBuffer.put(TDS.PACKET_HEADER_MESSAGE_LENGTH, (byte) ((tdsMessageLength >> 8) & 0xFF)); // Note: message length is 16 bits,
        stagingBuffer.put(TDS.PACKET_HEADER_MESSAGE_LENGTH + 1, (byte) ((tdsMessageLength >> 0) & 0xFF)); // written BIG ENDIAN
        stagingBuffer.put(TDS.PACKET_HEADER_SPID, (byte) ((tdsChannel.getSPID() >> 8) & 0xFF)); // Note: SPID is 16 bits,
        stagingBuffer.put(TDS.PACKET_HEADER_SPID + 1, (byte) ((tdsChannel.getSPID() >> 0) & 0xFF)); // written BIG ENDIAN
        stagingBuffer.put(TDS.PACKET_HEADER_SEQUENCE_NUM, (byte) (packetNum % 256));
        stagingBuffer.put(TDS.PACKET_HEADER_WINDOW, (byte) 0); // Window (Reserved/Not used)

        // Write the header to the log buffer too if logging.
        if (tdsChannel.isLoggingPackets()) {
            logBuffer.put(TDS.PACKET_HEADER_MESSAGE_TYPE, tdsMessageType);
            logBuffer.put(TDS.PACKET_HEADER_MESSAGE_STATUS, (byte) tdsMessageStatus);
            logBuffer.put(TDS.PACKET_HEADER_MESSAGE_LENGTH, (byte) ((tdsMessageLength >> 8) & 0xFF)); // Note: message length is 16 bits,
            logBuffer.put(TDS.PACKET_HEADER_MESSAGE_LENGTH + 1, (byte) ((tdsMessageLength >> 0) & 0xFF)); // written BIG ENDIAN
            logBuffer.put(TDS.PACKET_HEADER_SPID, (byte) ((tdsChannel.getSPID() >> 8) & 0xFF)); // Note: SPID is 16 bits,
            logBuffer.put(TDS.PACKET_HEADER_SPID + 1, (byte) ((tdsChannel.getSPID() >> 0) & 0xFF)); // written BIG ENDIAN
            logBuffer.put(TDS.PACKET_HEADER_SEQUENCE_NUM, (byte) (packetNum % 256));
            logBuffer.put(TDS.PACKET_HEADER_WINDOW, (byte) 0); // Window (Reserved/Not used)
        }
    }

    /**
     * Sends buffered data: first drains whatever remains in the socket buffer, then (if a
     * full header's worth of data is staged) swaps the staging and socket buffers and
     * begins sending from the former staging buffer.
     */
    void flush(boolean atEOM) throws SQLServerException {
        // First, flush any data left in the socket buffer.
        tdsChannel.write(socketBuffer.array(), socketBuffer.position(), socketBuffer.remaining());
        socketBuffer.position(socketBuffer.limit());

        // If there is data in the staging buffer that needs to be written
        // to the socket, the socket buffer is now empty, so swap buffers
        // and start writing data from the staging buffer.
        if (stagingBuffer.position() >= TDS_PACKET_HEADER_SIZE) {
            // Swap the packet buffers ...
            ByteBuffer swapBuffer = stagingBuffer;
            stagingBuffer = socketBuffer;
            socketBuffer = swapBuffer;

            // ... and prepare to send data from the new socket
            // buffer (the old staging buffer).
            // We need to use flip() rather than rewind() here so that
            // the socket buffer's limit is properly set for the last
            // packet, which may be shorter than the other packets.
            socketBuffer.flip();
            stagingBuffer.clear();

            // If we are logging TDS packets then log the packet we're about
            // to send over the wire now.
if (tdsChannel.isLoggingPackets()) {
                tdsChannel.logPacket(logBuffer.array(), 0, socketBuffer.limit(),
                        this.toString() + " sending packet (" + socketBuffer.limit() + " bytes)");
            }

            // Prepare for the next packet
            if (!atEOM)
                preparePacket();

            // Finally, start sending data from the new socket buffer.
            tdsChannel.write(socketBuffer.array(), socketBuffer.position(), socketBuffer.remaining());
            socketBuffer.position(socketBuffer.limit());
        }
    }

    // Composite write operations

    /**
     * Write out elements common to all RPC values: the optional '@'-prefixed parameter
     * name, the status byte (output flag, plus AE metadata flag when column encryption
     * metadata is present), and the TDS type byte.
     *
     * @param sName
     *            the optional parameter name
     * @param bOut
     *            boolean true if the value that follows is being registered as an output parameter
     * @param tdsType
     *            TDS type of the value that follows
     */
    void writeRPCNameValType(String sName, boolean bOut, TDSType tdsType) throws SQLServerException {
        int nNameLen = 0;

        if (null != sName)
            nNameLen = sName.length() + 1; // The @ prefix is required for the param

        writeByte((byte) nNameLen); // param name len
        if (nNameLen > 0) {
            writeChar('@');
            writeString(sName);
        }

        if (null != cryptoMeta)
            // Always Encrypted: the status byte also carries the AE metadata flag.
            writeByte((byte) (bOut ? 1 | TDS.AE_METADATA : 0 | TDS.AE_METADATA)); // status
        else
            writeByte((byte) (bOut ? 1 : 0)); // status
        writeByte(tdsType.byteValue()); // type
    }

    /**
     * Append a boolean value in RPC transmission format.
     *
     * @param sName
     *            the optional parameter name
     * @param booleanValue
     *            the data value
     * @param bOut
     *            boolean true if the data value is being registered as an output parameter
     */
    void writeRPCBit(String sName, Boolean booleanValue, boolean bOut) throws SQLServerException {
        writeRPCNameValType(sName, bOut, TDSType.BITN);
        writeByte((byte) 1); // max length of datatype
        if (null == booleanValue) {
            writeByte((byte) 0); // len of data bytes
        }
        else {
            writeByte((byte) 1); // length of datatype
            writeByte((byte) (booleanValue.booleanValue() ? 1 : 0));
        }
    }

    /**
     * Append a byte value in RPC transmission format.
*
     * @param sName
     *            the optional parameter name
     * @param byteValue
     *            the data value
     * @param bOut
     *            boolean true if the data value is being registered as an output parameter
     */
    void writeRPCByte(String sName, Byte byteValue, boolean bOut) throws SQLServerException {
        writeRPCNameValType(sName, bOut, TDSType.INTN);
        writeByte((byte) 1); // max length of datatype
        if (null == byteValue) {
            writeByte((byte) 0); // len of data bytes
        }
        else {
            writeByte((byte) 1); // length of datatype
            writeByte(byteValue.byteValue());
        }
    }

    /**
     * Append a short value in RPC transmission format.
     *
     * @param sName
     *            the optional parameter name
     * @param shortValue
     *            the data value
     * @param bOut
     *            boolean true if the data value is being registered as an output parameter
     */
    void writeRPCShort(String sName, Short shortValue, boolean bOut) throws SQLServerException {
        writeRPCNameValType(sName, bOut, TDSType.INTN);
        writeByte((byte) 2); // max length of datatype
        if (null == shortValue) {
            writeByte((byte) 0); // len of data bytes
        }
        else {
            writeByte((byte) 2); // length of datatype
            writeShort(shortValue.shortValue());
        }
    }

    /**
     * Append an int value in RPC transmission format.
     *
     * @param sName
     *            the optional parameter name
     * @param intValue
     *            the data value
     * @param bOut
     *            boolean true if the data value is being registered as an output parameter
     */
    void writeRPCInt(String sName, Integer intValue, boolean bOut) throws SQLServerException {
        writeRPCNameValType(sName, bOut, TDSType.INTN);
        writeByte((byte) 4); // max length of datatype
        if (null == intValue) {
            writeByte((byte) 0); // len of data bytes
        }
        else {
            writeByte((byte) 4); // length of datatype
            writeInt(intValue.intValue());
        }
    }

    /**
     * Append a long value in RPC transmission format.
*
     * @param sName
     *            the optional parameter name
     * @param longValue
     *            the data value
     * @param bOut
     *            boolean true if the data value is being registered as an output parameter
     */
    void writeRPCLong(String sName, Long longValue, boolean bOut) throws SQLServerException {
        writeRPCNameValType(sName, bOut, TDSType.INTN);
        writeByte((byte) 8); // max length of datatype
        if (null == longValue) {
            writeByte((byte) 0); // len of data bytes
        }
        else {
            writeByte((byte) 8); // length of datatype
            writeLong(longValue.longValue());
        }
    }

    /**
     * Append a real value in RPC transmission format.
     *
     * @param sName
     *            the optional parameter name
     * @param floatValue
     *            the data value
     * @param bOut
     *            boolean true if the data value is being registered as an output parameter
     */
    void writeRPCReal(String sName, Float floatValue, boolean bOut) throws SQLServerException {
        writeRPCNameValType(sName, bOut, TDSType.FLOATN);

        // Data and length
        if (null == floatValue) {
            writeByte((byte) 4); // max length
            writeByte((byte) 0); // actual length (0 == null)
        }
        else {
            writeByte((byte) 4); // max length
            writeByte((byte) 4); // actual length
            writeInt(Float.floatToRawIntBits(floatValue.floatValue()));
        }
    }

    /**
     * Appends a sql_variant RPC value header; only the null case is written here
     * (zero max length and zero actual length).
     */
    void writeRPCSqlVariant(String sName, SqlVariant sqlVariantValue, boolean bOut) throws SQLServerException {
        writeRPCNameValType(sName, bOut, TDSType.SQL_VARIANT);

        // Data and length
        if (null == sqlVariantValue) {
            writeInt(0); // max length
            writeInt(0); // actual length
        }
    }

    /**
     * Append a double value in RPC transmission format.
*
     * @param sName
     *            the optional parameter name
     * @param doubleValue
     *            the data value
     * @param bOut
     *            boolean true if the data value is being registered as an output parameter
     */
    void writeRPCDouble(String sName, Double doubleValue, boolean bOut) throws SQLServerException {
        writeRPCNameValType(sName, bOut, TDSType.FLOATN);

        int l = 8;
        writeByte((byte) l); // max length of datatype

        // Data and length
        if (null == doubleValue) {
            writeByte((byte) 0); // len of data bytes
        }
        else {
            writeByte((byte) l); // len of data bytes
            // Emit the IEEE 754 bit pattern one byte at a time, least-significant byte first.
            long bits = Double.doubleToLongBits(doubleValue.doubleValue());
            long mask = 0xFF;
            int nShift = 0;
            for (int i = 0; i < 8; i++) {
                writeByte((byte) ((bits & mask) >> nShift));
                nShift += 8;
                mask = mask << 8;
            }
        }
    }

    /**
     * Append a big decimal in RPC transmission format.
     *
     * @param sName
     *            the optional parameter name
     * @param bdValue
     *            the data value
     * @param nScale
     *            the desired scale
     * @param bOut
     *            boolean true if the data value is being registered as an output parameter
     */
    void writeRPCBigDecimal(String sName, BigDecimal bdValue, int nScale, boolean bOut) throws SQLServerException {
        writeRPCNameValType(sName, bOut, TDSType.DECIMALN);
        writeByte((byte) 0x11); // maximum length
        writeByte((byte) SQLServerConnection.maxDecimalPrecision); // precision

        byte[] valueBytes = DDC.convertBigDecimalToBytes(bdValue, nScale);
        writeBytes(valueBytes, 0, valueBytes.length);
    }

    /**
     * Appends a standard v*max header for RPC parameter transmission.
     *
     * @param headerLength
     *            the total length of the PLP data block.
     * @param isNull
     *            true if the value is NULL.
     * @param collation
     *            The SQL collation associated with the value that follows the v*max header. Null for non-textual types.
     */
    void writeVMaxHeader(long headerLength, boolean isNull, SQLCollation collation) throws SQLServerException {
        // Send v*max length indicator 0xFFFF.
        writeShort((short) 0xFFFF);

        // Send collation if requested.
if (null != collation)
            collation.writeCollation(this);

        // Handle null here and return, we're done here if it's null.
        if (isNull) {
            // Null header for v*max types is 0xFFFFFFFFFFFFFFFF.
            writeLong(0xFFFFFFFFFFFFFFFFL);
        }
        else if (DataTypes.UNKNOWN_STREAM_LENGTH == headerLength) {
            // Append v*max length.
            // UNKNOWN_PLP_LEN is 0xFFFFFFFFFFFFFFFE
            writeLong(0xFFFFFFFFFFFFFFFEL);

            // NOTE: Don't send the first chunk length, this will be calculated by caller.
        }
        else {
            // For v*max types with known length, length is <totallength8><chunklength4>
            // We're sending same total length as chunk length (as we're sending 1 chunk).
            writeLong(headerLength);
        }
    }

    /**
     * Utility for internal writeRPCString calls: unnamed, non-output, default collation.
     */
    void writeRPCStringUnicode(String sValue) throws SQLServerException {
        writeRPCStringUnicode(null, sValue, false, null);
    }

    /**
     * Writes a string value as Unicode for RPC.
     *
     * @param sName
     *            the optional parameter name
     * @param sValue
     *            the data value
     * @param bOut
     *            boolean true if the data value is being registered as an output parameter
     * @param collation
     *            the collation of the data value
     */
    void writeRPCStringUnicode(String sName, String sValue, boolean bOut, SQLCollation collation) throws SQLServerException {
        boolean bValueNull = (sValue == null);
        int nValueLen = bValueNull ? 0 : (2 * sValue.length());
        boolean isShortValue = nValueLen <= DataTypes.SHORT_VARTYPE_MAX_BYTES;

        // Textual RPC requires a collation. If none is provided, as is the case when
        // the SSType is non-textual, then use the database collation by default.
        if (null == collation)
            collation = con.getDatabaseCollation();

        // Use PLP encoding on Yukon and later with long values and OUT parameters
        boolean usePLP = (!isShortValue || bOut);
        if (usePLP) {
            writeRPCNameValType(sName, bOut, TDSType.NVARCHAR);

            // Handle Yukon v*max type header here.
            writeVMaxHeader(nValueLen, // Length
                    bValueNull, // Is null?
                    collation);

            // Send the data.
if (!bValueNull) {
                if (nValueLen > 0) {
                    writeInt(nValueLen);
                    writeString(sValue);
                }

                // Send the terminator PLP chunk.
                writeInt(0);
            }
        }
        else // non-PLP type
        {
            // Write maximum length of data
            if (isShortValue) {
                writeRPCNameValType(sName, bOut, TDSType.NVARCHAR);
                writeShort((short) DataTypes.SHORT_VARTYPE_MAX_BYTES);
            }
            else {
                writeRPCNameValType(sName, bOut, TDSType.NTEXT);
                writeInt(DataTypes.IMAGE_TEXT_MAX_BYTES);
            }

            collation.writeCollation(this);

            // Data and length
            if (bValueNull) {
                writeShort((short) -1); // actual len
            }
            else {
                // Write actual length of data
                if (isShortValue)
                    writeShort((short) nValueLen);
                else
                    writeInt(nValueLen);

                // If length is zero, we're done.
                if (0 != nValueLen)
                    writeString(sValue); // data
            }
        }
    }

    /**
     * Writes a table-valued parameter: status byte, TDS_TVP token, the three-part type
     * name (database, schema, type), column metadata (or TVP_NULL_TOKEN for a null TVP),
     * optional order-unique metadata, then the rows via writeTVPRows.
     */
    void writeTVP(TVP value) throws SQLServerException {
        if (!value.isNull()) {
            writeByte((byte) 0); // status
        }
        else {
            // Default TVP
            writeByte((byte) TDS.TVP_STATUS_DEFAULT); // default TVP
        }

        writeByte((byte) TDS.TDS_TVP);

        /*
         * TVP_TYPENAME = DbName OwningSchema TypeName
         */
        // Database where TVP type resides
        if (null != value.getDbNameTVP()) {
            writeByte((byte) value.getDbNameTVP().length());
            writeString(value.getDbNameTVP());
        }
        else
            writeByte((byte) 0x00); // empty DB name

        // Schema where TVP type resides
        if (null != value.getOwningSchemaNameTVP()) {
            writeByte((byte) value.getOwningSchemaNameTVP().length());
            writeString(value.getOwningSchemaNameTVP());
        }
        else
            writeByte((byte) 0x00); // empty Schema name

        // TVP type name
        if (null != value.getTVPName()) {
            writeByte((byte) value.getTVPName().length());
            writeString(value.getTVPName());
        }
        else
            writeByte((byte) 0x00); // empty TVP name

        if (!value.isNull()) {
            writeTVPColumnMetaData(value);

            // optional OrderUnique metadata
            writeTvpOrderUnique(value);
        }
        else {
            writeShort((short) TDS.TVP_NULL_TOKEN);
        }

        // TVP_END_TOKEN
        writeByte((byte) 0x00);

        try {
            writeTVPRows(value);
        }
        catch (NumberFormatException e) {
            throw new SQLServerException(SQLServerException.getErrString("R_TVPInvalidColumnValue"), e);
        }
        catch
(ClassCastException e) {
            throw new SQLServerException(SQLServerException.getErrString("R_TVPInvalidColumnValue"), e);
        }
    }

    /**
     * Sends the rows of a TVP. For a ResultSet-backed TVP driven by a server cursor on
     * this same connection, the already-written TDS headers and command state are cached
     * and each row is sent as its own EOM message (with its response checked for errors);
     * otherwise rows are streamed into the current message and a single TVP_END_TOKEN
     * terminates them.
     */
    void writeTVPRows(TVP value) throws SQLServerException {
        boolean tdsWritterCached = false;
        ByteBuffer cachedTVPHeaders = null;
        TDSCommand cachedCommand = null;

        boolean cachedRequestComplete = false;
        boolean cachedInterruptsEnabled = false;
        boolean cachedProcessedResponse = false;

        if (!value.isNull()) {

            // NOTE: comment appears truncated in source — presumably: when a server cursor
            // is used, the tdsWriter of the calling preparedStatement is overwritten by the
            // SQLServerResultSet#next() method when fetching new rows.
            // Therefore, we need to send TVP data row by row before fetching new row.
            if (TVPType.ResultSet == value.tvpType) {
                if ((null != value.sourceResultSet) && (value.sourceResultSet instanceof SQLServerResultSet)) {
                    SQLServerResultSet sourceResultSet = (SQLServerResultSet) value.sourceResultSet;
                    SQLServerStatement src_stmt = (SQLServerStatement) sourceResultSet.getStatement();
                    int resultSetServerCursorId = sourceResultSet.getServerCursorId();

                    // Only needed when the source rows come from a server cursor on this
                    // same connection (fetching would clobber this writer's state).
                    if (con.equals(src_stmt.getConnection()) && 0 != resultSetServerCursorId) {
                        cachedTVPHeaders = ByteBuffer.allocate(stagingBuffer.capacity()).order(stagingBuffer.order());
                        cachedTVPHeaders.put(stagingBuffer.array(), 0, stagingBuffer.position());

                        cachedCommand = this.command;

                        cachedRequestComplete = command.getRequestComplete();
                        cachedInterruptsEnabled = command.getInterruptsEnabled();
                        cachedProcessedResponse = command.getProcessedResponse();

                        tdsWritterCached = true;

                        if (sourceResultSet.isForwardOnly()) {
                            sourceResultSet.setFetchSize(1);
                        }
                    }
                }
            }

            Map<Integer, SQLServerMetaData> columnMetadata = value.getColumnMetadata();
            Iterator<Entry<Integer, SQLServerMetaData>> columnsIterator;

            while (value.next()) {

                // restore command and TDS header, which have been overwritten by value.next()
                if (tdsWritterCached) {
                    command = cachedCommand;

                    stagingBuffer.clear();
                    logBuffer.clear();
                    writeBytes(cachedTVPHeaders.array(), 0, cachedTVPHeaders.position());
                }

                Object[] rowData = value.getRowData();

                // ROW
                writeByte((byte) TDS.TVP_ROW);
                columnsIterator = columnMetadata.entrySet().iterator();
                int currentColumn = 0;
                while (columnsIterator.hasNext()) {
                    Map.Entry<Integer, SQLServerMetaData> columnPair = columnsIterator.next();

                    // If useServerDefault is set, client MUST NOT emit TvpColumnData for the associated column
                    if (columnPair.getValue().useServerDefault) {
                        currentColumn++;
                        continue;
                    }

                    JDBCType jdbcType = JDBCType.of(columnPair.getValue().javaSqlType);
                    String currentColumnStringValue = null;

                    Object currentObject = null;
                    if (null != rowData) {
                        // if rowData has value for the current column, retrieve it. If not, current column will stay null.
                        if (rowData.length > currentColumn) {
                            currentObject = rowData[currentColumn];
                            if (null != currentObject) {
                                currentColumnStringValue = String.valueOf(currentObject);
                            }
                        }
                    }
                    writeInternalTVPRowValues(jdbcType, currentColumnStringValue, currentObject, columnPair, false);
                    currentColumn++;
                }

                // send this row, read its response (throw exception in case of errors) and reset command status
                if (tdsWritterCached) {
                    // TVP_END_TOKEN
                    writeByte((byte) 0x00);

                    writePacket(TDS.STATUS_BIT_EOM);

                    TDSReader tdsReader = tdsChannel.getReader(command);
                    int tokenType = tdsReader.peekTokenType();

                    if (TDS.TDS_ERR == tokenType) {
                        StreamError databaseError = new StreamError();
                        databaseError.setFromTDS(tdsReader);

                        SQLServerException.makeFromDatabaseError(con, null, databaseError.getMessage(), databaseError, false);
                    }

                    command.setInterruptsEnabled(true);
                    command.setRequestComplete(false);
                }
            }
        }

        // reset command status which have been overwritten
        if (tdsWritterCached) {
            command.setRequestComplete(cachedRequestComplete);
            command.setInterruptsEnabled(cachedInterruptsEnabled);
            command.setProcessedResponse(cachedProcessedResponse);
        }
        else {
            // TVP_END_TOKEN
            writeByte((byte) 0x00);
        }
    }

    private void writeInternalTVPRowValues(JDBCType jdbcType, String currentColumnStringValue, Object currentObject,
            Map.Entry<Integer, SQLServerMetaData> columnPair, boolean
isSqlVariant) throws SQLServerException { boolean isShortValue, isNull; int dataLength; switch (jdbcType) { case BIGINT: if (null == currentColumnStringValue) writeByte((byte) 0); else { if (isSqlVariant) { writeSqlVariantHeader(10, TDSType.INT8.byteValue(), (byte) 0); } else { writeByte((byte) 8); } writeLong(Long.valueOf(currentColumnStringValue).longValue()); } break; case BIT: if (null == currentColumnStringValue) writeByte((byte) 0); else { if (isSqlVariant) writeSqlVariantHeader(3, TDSType.BIT1.byteValue(), (byte) 0); else writeByte((byte) 1); writeByte((byte) (Boolean.valueOf(currentColumnStringValue).booleanValue() ? 1 : 0)); } break; case INTEGER: if (null == currentColumnStringValue) writeByte((byte) 0); else { if (!isSqlVariant) writeByte((byte) 4); else writeSqlVariantHeader(6, TDSType.INT4.byteValue(), (byte) 0); writeInt(Integer.valueOf(currentColumnStringValue).intValue()); } break; case SMALLINT: case TINYINT: if (null == currentColumnStringValue) writeByte((byte) 0); else { if (isSqlVariant) { writeSqlVariantHeader(6, TDSType.INT4.byteValue(), (byte) 0); writeInt(Integer.valueOf(currentColumnStringValue)); } else { writeByte((byte) 2); // length of datatype writeShort(Short.valueOf(currentColumnStringValue).shortValue()); } } break; case DECIMAL: case NUMERIC: if (null == currentColumnStringValue) writeByte((byte) 0); else { if (isSqlVariant) { writeSqlVariantHeader(21, TDSType.DECIMALN.byteValue(), (byte) 2); writeByte((byte) 38); // scale (byte)variantType.getScale() writeByte((byte) 4); // scale (byte)variantType.getScale() } else { writeByte((byte) TDSWriter.BIGDECIMAL_MAX_LENGTH); // maximum length } BigDecimal bdValue = new BigDecimal(currentColumnStringValue); /* * setScale of all BigDecimal value based on metadata as scale is not sent seperately for individual value. Use the rounding used * in Server. 
Say, for BigDecimal("0.1"), if scale in metdadata is 0, then ArithmeticException would be thrown if RoundingMode is * not set */ bdValue = bdValue.setScale(columnPair.getValue().scale, RoundingMode.HALF_UP); byte[] valueBytes = DDC.convertBigDecimalToBytes(bdValue, bdValue.scale()); // 1-byte for sign and 16-byte for integer byte[] byteValue = new byte[17]; // removing the precision and scale information from the valueBytes array System.arraycopy(valueBytes, 2, byteValue, 0, valueBytes.length - 2); writeBytes(byteValue); } break; case DOUBLE: if (null == currentColumnStringValue) writeByte((byte) 0); // len of data bytes else { if (isSqlVariant) { writeSqlVariantHeader(10, TDSType.FLOAT8.byteValue(), (byte) 0); writeDouble(Double.valueOf(currentColumnStringValue.toString())); break; } writeByte((byte) 8); // len of data bytes long bits = Double.doubleToLongBits(Double.valueOf(currentColumnStringValue).doubleValue()); long mask = 0xFF; int nShift = 0; for (int i = 0; i < 8; i++) { writeByte((byte) ((bits & mask) >> nShift)); nShift += 8; mask = mask << 8; } } break; case FLOAT: case REAL: if (null == currentColumnStringValue) writeByte((byte) 0); else { if (isSqlVariant) { writeSqlVariantHeader(6, TDSType.FLOAT4.byteValue(), (byte) 0); writeInt(Float.floatToRawIntBits(Float.valueOf(currentColumnStringValue).floatValue())); } else { writeByte((byte) 4); writeInt(Float.floatToRawIntBits(Float.valueOf(currentColumnStringValue).floatValue())); } } break; case DATE: case TIME: case TIMESTAMP: case DATETIMEOFFSET: case TIMESTAMP_WITH_TIMEZONE: case TIME_WITH_TIMEZONE: case CHAR: case VARCHAR: case NCHAR: case NVARCHAR: case LONGVARCHAR: case LONGNVARCHAR: case SQLXML: isShortValue = (2L * columnPair.getValue().precision) <= DataTypes.SHORT_VARTYPE_MAX_BYTES; isNull = (null == currentColumnStringValue); dataLength = isNull ? 0 : currentColumnStringValue.length() * 2; if (!isShortValue) { // check null if (isNull) // Null header for v*max types is 0xFFFFFFFFFFFFFFFF. 
writeLong(0xFFFFFFFFFFFFFFFFL); if (isSqlVariant) { // for now we send as bigger type, but is sendStringParameterAsUnicoe is set to false we can't send nvarchar // since we are writing as nvarchar we need to write as tdstype.bigvarchar value because if we // want to supprot varchar(8000) it becomes as nvarchar, 8000*2 therefore we should send as longvarchar, // but we cannot send more than 8000 cause sql_variant datatype in sql server does not support it. // then throw exception if user is sending more than that if (dataLength > 2 * DataTypes.SHORT_VARTYPE_MAX_BYTES) { MessageFormat form = new MessageFormat(SQLServerException.getErrString("R_invalidStringValue")); throw new SQLServerException(null, form.format(new Object[] {}), null, 0, false); } int length = currentColumnStringValue.length(); writeSqlVariantHeader(9 + length, TDSType.BIGVARCHAR.byteValue(), (byte) 0x07); SQLCollation col = con.getDatabaseCollation(); // write collation for sql variant writeInt(col.getCollationInfo()); writeByte((byte) col.getCollationSortID()); writeShort((short) (length)); writeBytes(currentColumnStringValue.getBytes()); break; } else if (DataTypes.UNKNOWN_STREAM_LENGTH == dataLength) // Append v*max length. // UNKNOWN_PLP_LEN is 0xFFFFFFFFFFFFFFFE writeLong(0xFFFFFFFFFFFFFFFEL); else // For v*max types with known length, length is <totallength8><chunklength4> writeLong(dataLength); if (!isNull) { if (dataLength > 0) { writeInt(dataLength); writeString(currentColumnStringValue); } // Send the terminator PLP chunk. 
writeInt(0); } } else { if (isNull) writeShort((short) -1); // actual len else { if (isSqlVariant) { // for now we send as bigger type, but is sendStringParameterAsUnicoe is set to false we can't send nvarchar // check for this int length = currentColumnStringValue.length() * 2; writeSqlVariantHeader(9 + length, TDSType.NVARCHAR.byteValue(), (byte) 7); SQLCollation col = con.getDatabaseCollation(); // write collation for sql variant writeInt(col.getCollationInfo()); writeByte((byte) col.getCollationSortID()); int stringLength = currentColumnStringValue.length(); byte[] typevarlen = new byte[2]; typevarlen[0] = (byte) (2 * stringLength & 0xFF); typevarlen[1] = (byte) ((2 * stringLength >> 8) & 0xFF); writeBytes(typevarlen); writeString(currentColumnStringValue); break; } else { writeShort((short) dataLength); writeString(currentColumnStringValue); } } } break; case BINARY: case VARBINARY: case LONGVARBINARY: // Handle conversions as done in other types. isShortValue = columnPair.getValue().precision <= DataTypes.SHORT_VARTYPE_MAX_BYTES; isNull = (null == currentObject); if (currentObject instanceof String) dataLength = isNull ? 0 : (toByteArray(currentObject.toString())).length; else dataLength = isNull ? 0 : ((byte[]) currentObject).length; if (!isShortValue) { // check null if (isNull) // Null header for v*max types is 0xFFFFFFFFFFFFFFFF. writeLong(0xFFFFFFFFFFFFFFFFL); else if (DataTypes.UNKNOWN_STREAM_LENGTH == dataLength) // Append v*max length. // UNKNOWN_PLP_LEN is 0xFFFFFFFFFFFFFFFE writeLong(0xFFFFFFFFFFFFFFFEL); else // For v*max types with known length, length is <totallength8><chunklength4> writeLong(dataLength); if (!isNull) { if (dataLength > 0) { writeInt(dataLength); if (currentObject instanceof String) writeBytes(toByteArray(currentObject.toString())); else writeBytes((byte[]) currentObject); } // Send the terminator PLP chunk. 
writeInt(0); } } else { if (isNull) writeShort((short) -1); // actual len else { writeShort((short) dataLength); if (currentObject instanceof String) writeBytes(toByteArray(currentObject.toString())); else writeBytes((byte[]) currentObject); } } break; case SQL_VARIANT: boolean isShiloh = 8 >= con.getServerMajorVersion() ? true : false; if (isShiloh) { MessageFormat form = new MessageFormat(SQLServerException.getErrString("R_SQLVariantSupport")); throw new SQLServerException(null, form.format(new Object[] {}), null, 0, false); } JDBCType internalJDBCType; JavaType javaType = JavaType.of(currentObject); internalJDBCType = javaType.getJDBCType(SSType.UNKNOWN, jdbcType); writeInternalTVPRowValues(internalJDBCType, currentColumnStringValue, currentObject, columnPair, true); break; default: assert false : "Unexpected JDBC type " + jdbcType.toString(); } } /** * writes Header for sql_variant for TVP * @param length * @param tdsType * @param probBytes * @throws SQLServerException */ private void writeSqlVariantHeader(int length, byte tdsType, byte probBytes) throws SQLServerException { writeInt(length); writeByte(tdsType); writeByte(probBytes); } private static byte[] toByteArray(String s) { return DatatypeConverter.parseHexBinary(s); } void writeTVPColumnMetaData(TVP value) throws SQLServerException { boolean isShortValue; // TVP_COLMETADATA writeShort((short) value.getTVPColumnCount()); Map<Integer, SQLServerMetaData> columnMetadata = value.getColumnMetadata(); Iterator<Entry<Integer, SQLServerMetaData>> columnsIterator = columnMetadata.entrySet().iterator(); /* * TypeColumnMetaData = UserType Flags TYPE_INFO ColName ; */ while (columnsIterator.hasNext()) { Map.Entry<Integer, SQLServerMetaData> pair = columnsIterator.next(); JDBCType jdbcType = JDBCType.of(pair.getValue().javaSqlType); boolean useServerDefault = pair.getValue().useServerDefault; // ULONG ; UserType of column // The value will be 0x0000 with the exceptions of TIMESTAMP (0x0050) and alias types (greater 
    /**
     * Writes the TVP_COLMETADATA section for a table-valued parameter: column count followed by
     * one TypeColumnMetaData entry (UserType, Flags, TYPE_INFO, ColName) per column.
     *
     * @param value the TVP whose column metadata is serialized
     * @throws SQLServerException if an I/O error occurs while writing to the stream
     */
    void writeTVPColumnMetaData(TVP value) throws SQLServerException {
        boolean isShortValue;

        // TVP_COLMETADATA: column count first.
        writeShort((short) value.getTVPColumnCount());

        Map<Integer, SQLServerMetaData> columnMetadata = value.getColumnMetadata();
        Iterator<Entry<Integer, SQLServerMetaData>> columnsIterator = columnMetadata.entrySet().iterator();
        /*
         * TypeColumnMetaData = UserType Flags TYPE_INFO ColName ;
         */
        while (columnsIterator.hasNext()) {
            Map.Entry<Integer, SQLServerMetaData> pair = columnsIterator.next();
            JDBCType jdbcType = JDBCType.of(pair.getValue().javaSqlType);
            boolean useServerDefault = pair.getValue().useServerDefault;
            // ULONG ; UserType of column
            // The value will be 0x0000 with the exceptions of TIMESTAMP (0x0050) and alias types (greater than 0x00FF).
            writeInt(0);
            /*
             * Flags = fNullable ; Column is nullable - %x01 fCaseSen -- Ignored ; usUpdateable -- Ignored ; fIdentity ; Column is identity column -
             * %x10 fComputed ; Column is computed - %x20 usReservedODBC -- Ignored ; fFixedLenCLRType-- Ignored ; fDefault ; Column is default value
             * - %x200 usReserved -- Ignored ;
             */
            short flags = TDS.FLAG_NULLABLE;
            if (useServerDefault) {
                flags |= TDS.FLAG_TVP_DEFAULT_COLUMN;
            }
            writeShort(flags);

            // TYPE_INFO: TDS type byte plus type-specific length/precision/scale/collation.
            switch (jdbcType) {
                case BIGINT:
                    writeByte(TDSType.INTN.byteValue());
                    writeByte((byte) 8); // max length of datatype
                    break;
                case BIT:
                    writeByte(TDSType.BITN.byteValue());
                    writeByte((byte) 1); // max length of datatype
                    break;
                case INTEGER:
                    writeByte(TDSType.INTN.byteValue());
                    writeByte((byte) 4); // max length of datatype
                    break;
                case SMALLINT:
                case TINYINT:
                    writeByte(TDSType.INTN.byteValue());
                    writeByte((byte) 2); // max length of datatype
                    break;

                case DECIMAL:
                case NUMERIC:
                    writeByte(TDSType.NUMERICN.byteValue());
                    writeByte((byte) 0x11); // maximum length
                    writeByte((byte) pair.getValue().precision);
                    writeByte((byte) pair.getValue().scale);
                    break;

                case DOUBLE:
                    writeByte(TDSType.FLOATN.byteValue());
                    writeByte((byte) 8); // max length of datatype
                    break;

                case FLOAT:
                case REAL:
                    writeByte(TDSType.FLOATN.byteValue());
                    writeByte((byte) 4); // max length of datatype
                    break;

                case DATE:
                case TIME:
                case TIMESTAMP:
                case DATETIMEOFFSET:
                case TIMESTAMP_WITH_TIMEZONE:
                case TIME_WITH_TIMEZONE:
                case CHAR:
                case VARCHAR:
                case NCHAR:
                case NVARCHAR:
                case LONGVARCHAR:
                case LONGNVARCHAR:
                case SQLXML:
                    // All character/temporal types are declared as NVARCHAR (values are sent as strings).
                    writeByte(TDSType.NVARCHAR.byteValue());
                    isShortValue = (2L * pair.getValue().precision) <= DataTypes.SHORT_VARTYPE_MAX_BYTES;
                    // Use PLP encoding on Yukon and later with long values
                    if (!isShortValue) // PLP
                    {
                        // Handle Yukon v*max type header here: 0xFFFF marks nvarchar(max).
                        writeShort((short) 0xFFFF);
                        con.getDatabaseCollation().writeCollation(this);
                    }
                    else // non PLP
                    {
                        writeShort((short) DataTypes.SHORT_VARTYPE_MAX_BYTES);
                        con.getDatabaseCollation().writeCollation(this);
                    }

                    break;

                case BINARY:
                case VARBINARY:
                case LONGVARBINARY:
                    writeByte(TDSType.BIGVARBINARY.byteValue());
                    isShortValue = pair.getValue().precision <= DataTypes.SHORT_VARTYPE_MAX_BYTES;
                    // Use PLP encoding on Yukon and later with long values
                    if (!isShortValue) // PLP
                        // Handle Yukon v*max type header here: 0xFFFF marks varbinary(max).
                        writeShort((short) 0xFFFF);
                    else // non PLP
                        writeShort((short) DataTypes.SHORT_VARTYPE_MAX_BYTES);
                    break;

                case SQL_VARIANT:
                case OTHER:
                    writeByte(TDSType.SQL_VARIANT.byteValue());
                    writeInt(8009); // write length of sql_variant, 8009
                    break;

                default:
                    assert false : "Unexpected JDBC type " + jdbcType.toString();
            }
            // Column name - must be null (from TDS - TVP_COLMETADATA)
            writeByte((byte) 0x00);

            // [TVP_ORDER_UNIQUE]
            // [TVP_COLUMN_ORDERING]
        }
    }

    /**
     * Writes the optional TVP_ORDER_UNIQUE token listing columns flagged as sorted (asc/desc)
     * or unique. Emitted only when at least one column carries such a flag.
     *
     * @param value the TVP whose ordering/uniqueness metadata is serialized
     * @throws SQLServerException if an I/O error occurs while writing to the stream
     */
    void writeTvpOrderUnique(TVP value) throws SQLServerException {
        /*
         * TVP_ORDER_UNIQUE = TVP_ORDER_UNIQUE_TOKEN (Count <Count>(ColNum OrderUniqueFlags))
         */
        Map<Integer, SQLServerMetaData> columnMetadata = value.getColumnMetadata();
        Iterator<Entry<Integer, SQLServerMetaData>> columnsIterator = columnMetadata.entrySet().iterator();
        LinkedList<TdsOrderUnique> columnList = new LinkedList<TdsOrderUnique>();

        while (columnsIterator.hasNext()) {
            byte flags = 0;
            Map.Entry<Integer, SQLServerMetaData> pair = columnsIterator.next();
            SQLServerMetaData metaData = pair.getValue();

            if (SQLServerSortOrder.Ascending == metaData.sortOrder)
                flags = TDS.TVP_ORDERASC_FLAG;
            else if (SQLServerSortOrder.Descending == metaData.sortOrder)
                flags = TDS.TVP_ORDERDESC_FLAG;
            if (metaData.isUniqueKey)
                flags |= TDS.TVP_UNIQUE_FLAG;

            // Remember this column if any flags were set
            if (0 != flags)
                columnList.add(new TdsOrderUnique(pair.getKey(), flags));
        }

        // Write flagged columns
        if (!columnList.isEmpty()) {
            writeByte((byte) TDS.TVP_ORDER_UNIQUE_TOKEN);
            writeShort((short) columnList.size());
            for (TdsOrderUnique column : columnList) {
                // Column ordinals on the wire are 1-based.
                writeShort((short) (column.columnOrdinal + 1));
                writeByte(column.flags);
            }
        }
    }
columnList.size()); for (TdsOrderUnique column : columnList) { writeShort((short) (column.columnOrdinal + 1)); writeByte(column.flags); } } } private class TdsOrderUnique { int columnOrdinal; byte flags; TdsOrderUnique(int ordinal, byte flags) { this.columnOrdinal = ordinal; this.flags = flags; } } void setCryptoMetaData(CryptoMetadata cryptoMetaForBulk) { this.cryptoMeta = cryptoMetaForBulk; } CryptoMetadata getCryptoMetaData() { return cryptoMeta; } void writeEncryptedRPCByteArray(byte bValue[]) throws SQLServerException { boolean bValueNull = (bValue == null); long nValueLen = bValueNull ? 0 : bValue.length; boolean isShortValue = (nValueLen <= DataTypes.SHORT_VARTYPE_MAX_BYTES); boolean isPLP = (!isShortValue) && (nValueLen <= DataTypes.MAX_VARTYPE_MAX_BYTES); // Handle Shiloh types here. if (isShortValue) { writeShort((short) DataTypes.SHORT_VARTYPE_MAX_BYTES); } else if (isPLP) { writeShort((short) DataTypes.SQL_USHORTVARMAXLEN); } else { writeInt(DataTypes.IMAGE_TEXT_MAX_BYTES); } // Data and length if (bValueNull) { writeShort((short) -1); // actual len } else { if (isShortValue) { writeShort((short) nValueLen); // actual len } else if (isPLP) { writeLong(nValueLen); // actual length } else { writeInt((int) nValueLen); // actual len } // If length is zero, we're done. 
    /**
     * Writes an empty/NULL PLP value header for an encrypted RPC parameter:
     * USHORTVARMAXLEN max-length marker, zero actual length, and the PLP terminator.
     */
    void writeEncryptedRPCPLP() throws SQLServerException {
        writeShort((short) DataTypes.SQL_USHORTVARMAXLEN);
        writeLong((long) 0); // actual length
        writeInt(0); // PLP_TERMINATOR, 0x00000000
    }

    /**
     * Writes the encryption metadata that follows each encrypted parameter value:
     * algorithm id, encryption type, CEK database/id/version identifiers, CEK metadata
     * version, and the normalization rule version.
     *
     * NOTE(review): only the first entry of getColumnEncryptionKeyValues() is used here —
     * presumably one CEK value per parameter; confirm against callers.
     */
    void writeCryptoMetaData() throws SQLServerException {
        writeByte(cryptoMeta.cipherAlgorithmId);
        writeByte(cryptoMeta.encryptionType.getValue());
        writeInt(cryptoMeta.cekTableEntry.getColumnEncryptionKeyValues().get(0).databaseId);
        writeInt(cryptoMeta.cekTableEntry.getColumnEncryptionKeyValues().get(0).cekId);
        writeInt(cryptoMeta.cekTableEntry.getColumnEncryptionKeyValues().get(0).cekVersion);
        writeBytes(cryptoMeta.cekTableEntry.getColumnEncryptionKeyValues().get(0).cekMdVersion);
        writeByte(cryptoMeta.normalizationRuleVersion);
    }

    /**
     * Writes a byte array as an RPC parameter, choosing the TDS type from the JDBC type:
     * BIGVARBINARY/IMAGE for binary, BIGVARCHAR/TEXT for char, NVARCHAR/NTEXT for nchar.
     * Long values and OUT parameters use PLP (v*max) encoding; when crypto metadata is set,
     * the value is always sent as BIGVARBINARY with no collation.
     *
     * @param sName     the optional parameter name
     * @param bValue    the value bytes, or null for SQL NULL
     * @param bOut      true if the parameter is registered as an output parameter
     * @param jdbcType  the declared JDBC type of the parameter
     * @param collation collation to send for character types (database default if null)
     * @throws SQLServerException if an I/O error occurs while writing to the stream
     */
    void writeRPCByteArray(String sName, byte bValue[], boolean bOut, JDBCType jdbcType, SQLCollation collation) throws SQLServerException {
        boolean bValueNull = (bValue == null);
        int nValueLen = bValueNull ? 0 : bValue.length;
        boolean isShortValue = (nValueLen <= DataTypes.SHORT_VARTYPE_MAX_BYTES);

        // Use PLP encoding on Yukon and later with long values and OUT parameters
        boolean usePLP = (!isShortValue || bOut);

        TDSType tdsType;

        if (null != cryptoMeta) {
            // send encrypted data as BIGVARBINARY
            tdsType = (isShortValue || usePLP) ? TDSType.BIGVARBINARY : TDSType.IMAGE;
            collation = null;
        }
        else
            switch (jdbcType) {
                case BINARY:
                case VARBINARY:
                case LONGVARBINARY:
                case BLOB:
                default:
                    tdsType = (isShortValue || usePLP) ? TDSType.BIGVARBINARY : TDSType.IMAGE;
                    collation = null;
                    break;

                case CHAR:
                case VARCHAR:
                case LONGVARCHAR:
                case CLOB:
                    tdsType = (isShortValue || usePLP) ? TDSType.BIGVARCHAR : TDSType.TEXT;
                    if (null == collation)
                        collation = con.getDatabaseCollation();
                    break;

                case NCHAR:
                case NVARCHAR:
                case LONGNVARCHAR:
                case NCLOB:
                    tdsType = (isShortValue || usePLP) ? TDSType.NVARCHAR : TDSType.NTEXT;
                    if (null == collation)
                        collation = con.getDatabaseCollation();
                    break;
            }

        writeRPCNameValType(sName, bOut, tdsType);

        if (usePLP) {
            // Handle Yukon v*max type header here.
            writeVMaxHeader(nValueLen, bValueNull, collation);

            // Send the data.
            if (!bValueNull) {
                if (nValueLen > 0) {
                    writeInt(nValueLen);
                    writeBytes(bValue);
                }

                // Send the terminator PLP chunk.
                writeInt(0);
            }
        }
        else // non-PLP type
        {
            // Handle Shiloh types here.
            if (isShortValue) {
                writeShort((short) DataTypes.SHORT_VARTYPE_MAX_BYTES);
            }
            else {
                writeInt(DataTypes.IMAGE_TEXT_MAX_BYTES);
            }

            if (null != collation)
                collation.writeCollation(this);

            // Data and length
            if (bValueNull) {
                writeShort((short) -1); // actual len
            }
            else {
                if (isShortValue)
                    writeShort((short) nValueLen); // actual len
                else
                    writeInt(nValueLen); // actual len

                // If length is zero, we're done.
                if (0 != nValueLen)
                    writeBytes(bValue);
            }
        }
    }
    /**
     * Append a timestamp in RPC transmission format as a SQL Server DATETIME data type
     *
     * @param sName
     *            the optional parameter name
     * @param cal
     *            Pure Gregorian calendar containing the timestamp, including its associated time zone
     * @param subSecondNanos
     *            the sub-second nanoseconds (0 - 999,999,999)
     * @param bOut
     *            boolean true if the data value is being registered as an ouput parameter
     */
    void writeRPCDateTime(String sName, GregorianCalendar cal, int subSecondNanos, boolean bOut) throws SQLServerException {
        assert (subSecondNanos >= 0) && (subSecondNanos < Nanos.PER_SECOND) : "Invalid subNanoSeconds value: " + subSecondNanos;
        assert (cal != null) || (subSecondNanos == 0) : "Invalid subNanoSeconds value when calendar is null: " + subSecondNanos;

        writeRPCNameValType(sName, bOut, TDSType.DATETIMEN);
        writeByte((byte) 8); // max length of datatype

        if (null == cal) {
            writeByte((byte) 0); // len of data bytes
            return;
        }

        writeByte((byte) 8); // len of data bytes

        // We need to extract the Calendar's current date & time in terms
        // of the number of days since the SQL Base Date (1/1/1900) plus
        // the number of milliseconds since midnight in the current day.
        // We cannot rely on any pre-calculated value for the number of
        // milliseconds in a day or the number of milliseconds since the
        // base date to do this because days with DST changes are shorter
        // or longer than "normal" days.
        //
        // ASSUMPTION: We assume we are dealing with a GregorianCalendar here.
        // If not, we have no basis in which to compare dates. E.g. if we
        // are dealing with a Chinese Calendar implementation which does not
        // use the same value for Calendar.YEAR as the GregorianCalendar,
        // we cannot meaningfully compute a value relative to 1/1/1900.

        // First, figure out how many days there have been since the SQL Base Date.
        // These are based on SQL Server algorithms
        int daysSinceSQLBaseDate = DDC.daysSinceBaseDate(cal.get(Calendar.YEAR), cal.get(Calendar.DAY_OF_YEAR), TDS.BASE_YEAR_1900);

        // Next, figure out the number of milliseconds since midnight of the current day.
        int millisSinceMidnight = (subSecondNanos + Nanos.PER_MILLISECOND / 2) / Nanos.PER_MILLISECOND + // Millis into the current second
                1000 * cal.get(Calendar.SECOND) + // Seconds into the current minute
                60 * 1000 * cal.get(Calendar.MINUTE) + // Minutes into the current hour
                60 * 60 * 1000 * cal.get(Calendar.HOUR_OF_DAY); // Hours into the current day

        // The last millisecond of the current day is always rounded to the first millisecond
        // of the next day because DATETIME is only accurate to 1/300th of a second.
        if (millisSinceMidnight >= 1000 * 60 * 60 * 24 - 1) {
            ++daysSinceSQLBaseDate;
            millisSinceMidnight = 0;
        }

        // Last-ditch verification that the value is in the valid range for the
        // DATETIMEN TDS data type (1/1/1753 to 12/31/9999). If it's not, then
        // throw an exception now so that statement execution is safely canceled.
        // Attempting to put an invalid value on the wire would result in a TDS
        // exception, which would close the connection.
        // These are based on SQL Server algorithms
        if (daysSinceSQLBaseDate < DDC.daysSinceBaseDate(1753, 1, TDS.BASE_YEAR_1900)
                || daysSinceSQLBaseDate >= DDC.daysSinceBaseDate(10000, 1, TDS.BASE_YEAR_1900)) {
            MessageFormat form = new MessageFormat(SQLServerException.getErrString("R_valueOutOfRange"));
            Object[] msgArgs = {SSType.DATETIME};
            throw new SQLServerException(form.format(msgArgs), SQLState.DATA_EXCEPTION_DATETIME_FIELD_OVERFLOW, DriverError.NOT_SET, null);
        }

        // And put it all on the wire...

        // Number of days since the SQL Server Base Date (January 1, 1900)
        writeInt(daysSinceSQLBaseDate);

        // Milliseconds since midnight (at a resolution of three hundredths of a second)
        writeInt((3 * millisSinceMidnight + 5) / 10);
    }

    /**
     * Writes a TIME RPC parameter: TIMEN type, scale, and scaled nanos-since-midnight.
     *
     * @param sName         the optional parameter name
     * @param localCalendar calendar holding the time value, or null for SQL NULL
     * @param subSecondNanos sub-second nanoseconds (0 - 999,999,999)
     * @param scale         fractional-seconds scale (0 - 7)
     * @param bOut          true if registered as an output parameter
     */
    void writeRPCTime(String sName, GregorianCalendar localCalendar, int subSecondNanos, int scale, boolean bOut) throws SQLServerException {
        writeRPCNameValType(sName, bOut, TDSType.TIMEN);
        writeByte((byte) scale);

        if (null == localCalendar) {
            writeByte((byte) 0);
            return;
        }

        writeByte((byte) TDS.timeValueLength(scale));
        writeScaledTemporal(localCalendar, subSecondNanos, scale, SSType.TIME);
    }

    /**
     * Writes a DATE RPC parameter: DATEN type and 3-byte days-into-Common-Era value.
     *
     * @param sName         the optional parameter name
     * @param localCalendar calendar holding the date value, or null for SQL NULL
     * @param bOut          true if registered as an output parameter
     */
    void writeRPCDate(String sName, GregorianCalendar localCalendar, boolean bOut) throws SQLServerException {
        writeRPCNameValType(sName, bOut, TDSType.DATEN);

        if (null == localCalendar) {
            writeByte((byte) 0);
            return;
        }

        writeByte((byte) TDS.DAYS_INTO_CE_LENGTH);
        writeScaledTemporal(localCalendar, 0, // subsecond nanos (none for a date value)
                0, // scale (dates are not scaled)
                SSType.DATE);
    }

    /**
     * Writes an Always Encrypted TIME RPC parameter: the encrypted scaled-temporal bytes as
     * BIGVARBINARY, followed by the plaintext type info (TIMEN + scale) and crypto metadata.
     * Rejected when sendTimeAsDatetime is enabled, since that conversion is incompatible with AE.
     */
    void writeEncryptedRPCTime(String sName, GregorianCalendar localCalendar, int subSecondNanos, int scale, boolean bOut)
            throws SQLServerException {
        if (con.getSendTimeAsDatetime()) {
            throw new SQLServerException(SQLServerException.getErrString("R_sendTimeAsDateTimeForAE"), null);
        }
        writeRPCNameValType(sName, bOut, TDSType.BIGVARBINARY);

        if (null == localCalendar)
            writeEncryptedRPCByteArray(null);
        else
            writeEncryptedRPCByteArray(writeEncryptedScaledTemporal(localCalendar, subSecondNanos, scale, SSType.TIME, (short) 0));

        writeByte(TDSType.TIMEN.byteValue());
        writeByte((byte) scale);
        writeCryptoMetaData();
    }
subSecondNanos, scale, SSType.TIME, (short) 0)); writeByte(TDSType.TIMEN.byteValue()); writeByte((byte) scale); writeCryptoMetaData(); } void writeEncryptedRPCDate(String sName, GregorianCalendar localCalendar, boolean bOut) throws SQLServerException { writeRPCNameValType(sName, bOut, TDSType.BIGVARBINARY); if (null == localCalendar) writeEncryptedRPCByteArray(null); else writeEncryptedRPCByteArray(writeEncryptedScaledTemporal(localCalendar, 0, // subsecond nanos (none for a date value) 0, // scale (dates are not scaled) SSType.DATE, (short) 0)); writeByte(TDSType.DATEN.byteValue()); writeCryptoMetaData(); } void writeEncryptedRPCDateTime(String sName, GregorianCalendar cal, int subSecondNanos, boolean bOut, JDBCType jdbcType) throws SQLServerException { assert (subSecondNanos >= 0) && (subSecondNanos < Nanos.PER_SECOND) : "Invalid subNanoSeconds value: " + subSecondNanos; assert (cal != null) || (subSecondNanos == 0) : "Invalid subNanoSeconds value when calendar is null: " + subSecondNanos; writeRPCNameValType(sName, bOut, TDSType.BIGVARBINARY); if (null == cal) writeEncryptedRPCByteArray(null); else writeEncryptedRPCByteArray(getEncryptedDateTimeAsBytes(cal, subSecondNanos, jdbcType)); if (JDBCType.SMALLDATETIME == jdbcType) { writeByte(TDSType.DATETIMEN.byteValue()); writeByte((byte) 4); } else { writeByte(TDSType.DATETIMEN.byteValue()); writeByte((byte) 8); } writeCryptoMetaData(); } // getEncryptedDateTimeAsBytes is called if jdbcType/ssType is SMALLDATETIME or DATETIME byte[] getEncryptedDateTimeAsBytes(GregorianCalendar cal, int subSecondNanos, JDBCType jdbcType) throws SQLServerException { int daysSinceSQLBaseDate = DDC.daysSinceBaseDate(cal.get(Calendar.YEAR), cal.get(Calendar.DAY_OF_YEAR), TDS.BASE_YEAR_1900); // Next, figure out the number of milliseconds since midnight of the current day. 
    /**
     * Converts a calendar value to the plaintext little-endian DATETIME or SMALLDATETIME wire
     * layout and encrypts it with the current column encryption key.
     * Called only when jdbcType/ssType is SMALLDATETIME or DATETIME.
     *
     * @param cal            pure Gregorian calendar containing the timestamp
     * @param subSecondNanos sub-second nanoseconds (0 - 999,999,999)
     * @param jdbcType       SMALLDATETIME or DATETIME
     * @return the encrypted value bytes
     * @throws SQLServerException if the value is out of the type's valid range or encryption fails
     */
    byte[] getEncryptedDateTimeAsBytes(GregorianCalendar cal, int subSecondNanos, JDBCType jdbcType) throws SQLServerException {
        // Days since the SQL base date (1/1/1900), per SQL Server's algorithm.
        int daysSinceSQLBaseDate = DDC.daysSinceBaseDate(cal.get(Calendar.YEAR), cal.get(Calendar.DAY_OF_YEAR), TDS.BASE_YEAR_1900);

        // Next, figure out the number of milliseconds since midnight of the current day.
        int millisSinceMidnight = (subSecondNanos + Nanos.PER_MILLISECOND / 2) / Nanos.PER_MILLISECOND + // Millis into the current second
                1000 * cal.get(Calendar.SECOND) + // Seconds into the current minute
                60 * 1000 * cal.get(Calendar.MINUTE) + // Minutes into the current hour
                60 * 60 * 1000 * cal.get(Calendar.HOUR_OF_DAY); // Hours into the current day

        // The last millisecond of the current day is always rounded to the first millisecond
        // of the next day because DATETIME is only accurate to 1/300th of a second.
        if (millisSinceMidnight >= 1000 * 60 * 60 * 24 - 1) {
            ++daysSinceSQLBaseDate;
            millisSinceMidnight = 0;
        }

        if (JDBCType.SMALLDATETIME == jdbcType) {

            int secondsSinceMidnight = (millisSinceMidnight / 1000);
            int minutesSinceMidnight = (secondsSinceMidnight / 60);

            // Values that are 29.998 seconds or less are rounded down to the nearest minute.
            // NOTE(review): secondsSinceMidnight % 60 is an int, so "> 29.998" is effectively ">= 30";
            // presumably intentional rounding at the half-minute — confirm against server behavior.
            minutesSinceMidnight = ((secondsSinceMidnight % 60) > 29.998) ? minutesSinceMidnight + 1 : minutesSinceMidnight;

            // minutesSinceMidnight for (23:59:30)
            int maxMinutesSinceMidnight_SmallDateTime = 1440;

            // Verification for smalldatetime to be within valid range of (1900.01.01) to (2079.06.06).
            // smalldatetime for unencrypted does not allow insertion of 2079.06.06 23:59:59 and it is rounded up
            // to 2079.06.07 00:00:00; therefore we check minutesSinceMidnight for that condition. If it's not
            // within the valid range, throw an exception now so that statement execution is safely canceled.
            // 157 is the calculated day of year for 06-06; 1440 is minutes since midnight for (23:59:30).
            if ((daysSinceSQLBaseDate < DDC.daysSinceBaseDate(1900, 1, TDS.BASE_YEAR_1900)
                    || daysSinceSQLBaseDate > DDC.daysSinceBaseDate(2079, 157, TDS.BASE_YEAR_1900))
                    || (daysSinceSQLBaseDate == DDC.daysSinceBaseDate(2079, 157, TDS.BASE_YEAR_1900)
                            && minutesSinceMidnight >= maxMinutesSinceMidnight_SmallDateTime)) {
                MessageFormat form = new MessageFormat(SQLServerException.getErrString("R_valueOutOfRange"));
                Object[] msgArgs = {SSType.SMALLDATETIME};
                throw new SQLServerException(form.format(msgArgs), SQLState.DATA_EXCEPTION_DATETIME_FIELD_OVERFLOW, DriverError.NOT_SET, null);
            }

            // SMALLDATETIME layout: 2-byte days + 2-byte minutes, both little-endian.
            ByteBuffer days = ByteBuffer.allocate(2).order(ByteOrder.LITTLE_ENDIAN);
            days.putShort((short) daysSinceSQLBaseDate);
            ByteBuffer seconds = ByteBuffer.allocate(2).order(ByteOrder.LITTLE_ENDIAN);
            seconds.putShort((short) minutesSinceMidnight);

            byte[] value = new byte[4];
            System.arraycopy(days.array(), 0, value, 0, 2);
            System.arraycopy(seconds.array(), 0, value, 2, 2);
            return SQLServerSecurityUtility.encryptWithKey(value, cryptoMeta, con);
        }
        else if (JDBCType.DATETIME == jdbcType) {
            // Last-ditch verification that the value is in the valid range for the
            // DATETIMEN TDS data type (1/1/1753 to 12/31/9999). If it's not, then
            // throw an exception now so that statement execution is safely canceled.
            // Attempting to put an invalid value on the wire would result in a TDS
            // exception, which would close the connection.
            // These are based on SQL Server algorithms
            if (daysSinceSQLBaseDate < DDC.daysSinceBaseDate(1753, 1, TDS.BASE_YEAR_1900)
                    || daysSinceSQLBaseDate >= DDC.daysSinceBaseDate(10000, 1, TDS.BASE_YEAR_1900)) {
                MessageFormat form = new MessageFormat(SQLServerException.getErrString("R_valueOutOfRange"));
                Object[] msgArgs = {SSType.DATETIME};
                throw new SQLServerException(form.format(msgArgs), SQLState.DATA_EXCEPTION_DATETIME_FIELD_OVERFLOW, DriverError.NOT_SET, null);
            }

            // DATETIME layout: 4-byte days since 1/1/1900 + 4-byte three-hundredths of a second, little-endian.
            ByteBuffer days = ByteBuffer.allocate(4).order(ByteOrder.LITTLE_ENDIAN);
            days.putInt(daysSinceSQLBaseDate);
            ByteBuffer seconds = ByteBuffer.allocate(4).order(ByteOrder.LITTLE_ENDIAN);
            seconds.putInt((3 * millisSinceMidnight + 5) / 10);

            byte[] value = new byte[8];
            System.arraycopy(days.array(), 0, value, 0, 4);
            System.arraycopy(seconds.array(), 0, value, 4, 4);
            return SQLServerSecurityUtility.encryptWithKey(value, cryptoMeta, con);
        }

        assert false : "Unexpected JDBCType type " + jdbcType;
        return null;
    }

    /**
     * Writes an Always Encrypted DATETIME2 RPC parameter: encrypted scaled-temporal bytes as
     * BIGVARBINARY, then plaintext type info (DATETIME2N + scale) and crypto metadata.
     */
    void writeEncryptedRPCDateTime2(String sName, GregorianCalendar localCalendar, int subSecondNanos, int scale, boolean bOut)
            throws SQLServerException {
        writeRPCNameValType(sName, bOut, TDSType.BIGVARBINARY);

        if (null == localCalendar)
            writeEncryptedRPCByteArray(null);
        else
            writeEncryptedRPCByteArray(writeEncryptedScaledTemporal(localCalendar, subSecondNanos, scale, SSType.DATETIME2, (short) 0));

        writeByte(TDSType.DATETIME2N.byteValue());
        writeByte((byte) (scale));
        writeCryptoMetaData();
    }

    /**
     * Writes an Always Encrypted DATETIMEOFFSET RPC parameter: encrypted scaled-temporal bytes
     * (UTC calendar + minutes offset) as BIGVARBINARY, then plaintext type info
     * (DATETIMEOFFSETN + scale) and crypto metadata.
     */
    void writeEncryptedRPCDateTimeOffset(String sName, GregorianCalendar utcCalendar, int minutesOffset, int subSecondNanos, int scale, boolean bOut)
            throws SQLServerException {
        writeRPCNameValType(sName, bOut, TDSType.BIGVARBINARY);

        if (null == utcCalendar)
            writeEncryptedRPCByteArray(null);
        else {
            // The calendar must already be normalized to UTC; the offset is carried separately.
            assert 0 == utcCalendar.get(Calendar.ZONE_OFFSET);
            writeEncryptedRPCByteArray(
                    writeEncryptedScaledTemporal(utcCalendar, subSecondNanos, scale, SSType.DATETIMEOFFSET, (short) minutesOffset));
        }

        writeByte(TDSType.DATETIMEOFFSETN.byteValue());
        writeByte((byte) (scale));
        writeCryptoMetaData();
    }
writeByte(TDSType.DATETIMEOFFSETN.byteValue()); writeByte((byte) (scale)); writeCryptoMetaData(); } void writeRPCDateTime2(String sName, GregorianCalendar localCalendar, int subSecondNanos, int scale, boolean bOut) throws SQLServerException { writeRPCNameValType(sName, bOut, TDSType.DATETIME2N); writeByte((byte) scale); if (null == localCalendar) { writeByte((byte) 0); return; } writeByte((byte) TDS.datetime2ValueLength(scale)); writeScaledTemporal(localCalendar, subSecondNanos, scale, SSType.DATETIME2); } void writeRPCDateTimeOffset(String sName, GregorianCalendar utcCalendar, int minutesOffset, int subSecondNanos, int scale, boolean bOut) throws SQLServerException { writeRPCNameValType(sName, bOut, TDSType.DATETIMEOFFSETN); writeByte((byte) scale); if (null == utcCalendar) { writeByte((byte) 0); return; } assert 0 == utcCalendar.get(Calendar.ZONE_OFFSET); writeByte((byte) TDS.datetimeoffsetValueLength(scale)); writeScaledTemporal(utcCalendar, subSecondNanos, scale, SSType.DATETIMEOFFSET); writeShort((short) minutesOffset); } void writeRPCSQLVariant(String sName, String value, boolean bOut) throws SQLServerException { writeRPCStringUnicode(value); } /** * Returns subSecondNanos rounded to the maximum precision supported. The maximum fractional scale is MAX_FRACTIONAL_SECONDS_SCALE(7). Eg1: if you * pass 456,790,123 the function would return 456,790,100 Eg2: if you pass 456,790,150 the function would return 456,790,200 Eg3: if you pass * 999,999,951 the function would return 1,000,000,000 This is done to ensure that we have consistent rounding behaviour in setters and getters. * Bug #507919 */ private int getRoundedSubSecondNanos(int subSecondNanos) { int roundedNanos = ((subSecondNanos + (Nanos.PER_MAX_SCALE_INTERVAL / 2)) / Nanos.PER_MAX_SCALE_INTERVAL) * Nanos.PER_MAX_SCALE_INTERVAL; return roundedNanos; } /** * Writes to the TDS channel a temporal value as an instance instance of one of the scaled temporal SQL types: DATE, TIME, DATETIME2, or * DATETIMEOFFSET. 
 *
 * @param cal
 *        Calendar representing the value to write, except for any sub-second nanoseconds
 * @param subSecondNanos
 *        the sub-second nanoseconds (0 - 999,999,999)
 * @param scale
 *        the scale (in digits: 0 - 7) to use for the sub-second nanos component
 * @param ssType
 *        the SQL Server data type (DATE, TIME, DATETIME2, or DATETIMEOFFSET)
 *
 * @throws SQLServerException
 *         if an I/O error occurs or if the value is not in the valid range
 */
private void writeScaledTemporal(GregorianCalendar cal, int subSecondNanos, int scale, SSType ssType) throws SQLServerException {
    // Scaled temporal types exist only on Katmai (SQL Server 2008) and later.
    assert con.isKatmaiOrLater();

    assert SSType.DATE == ssType || SSType.TIME == ssType || SSType.DATETIME2 == ssType
            || SSType.DATETIMEOFFSET == ssType : "Unexpected SSType: " + ssType;

    // First, for types with a time component, write the scaled nanos since midnight
    if (SSType.TIME == ssType || SSType.DATETIME2 == ssType || SSType.DATETIMEOFFSET == ssType) {
        assert subSecondNanos >= 0;
        assert subSecondNanos < Nanos.PER_SECOND;
        assert scale >= 0;
        assert scale <= TDS.MAX_FRACTIONAL_SECONDS_SCALE;

        int secondsSinceMidnight = cal.get(Calendar.SECOND) + 60 * cal.get(Calendar.MINUTE) + 60 * 60 * cal.get(Calendar.HOUR_OF_DAY);

        // Scale nanos since midnight to the desired scale, rounding the value as necessary
        long divisor = Nanos.PER_MAX_SCALE_INTERVAL * (long) Math.pow(10, TDS.MAX_FRACTIONAL_SECONDS_SCALE - scale);

        // The scaledNanos variable represents the fractional seconds of the value at the scale
        // indicated by the scale variable. So, for example, scaledNanos = 3 means 300 nanoseconds
        // at scale TDS.MAX_FRACTIONAL_SECONDS_SCALE, but 3000 nanoseconds at
        // TDS.MAX_FRACTIONAL_SECONDS_SCALE - 1
        long scaledNanos = ((long) Nanos.PER_SECOND * secondsSinceMidnight + getRoundedSubSecondNanos(subSecondNanos) + divisor / 2) / divisor;

        // SQL Server rounding behavior indicates that it always rounds up unless
        // we are at the max value of the type(NOT every day), in which case it truncates.
        // If rounding nanos to the specified scale rolls the value to the next day ...
        if (Nanos.PER_DAY / divisor == scaledNanos) {
            // If the type is time, always truncate
            if (SSType.TIME == ssType) {
                --scaledNanos;
            }
            // If the type is datetime2 or datetimeoffset, truncate only if it's the max value supported
            else {
                assert SSType.DATETIME2 == ssType || SSType.DATETIMEOFFSET == ssType : "Unexpected SSType: " + ssType;

                // ... then bump the date, provided that the resulting date is still within
                // the valid date range.
                //
                // Extreme edge case (literally, the VERY edge...):
                // If nanos overflow rolls the date value out of range (that is, we have a value
                // a few nanoseconds later than 9999-12-31 23:59:59) then truncate the nanos
                // instead of rolling.
                //
                // This case is very likely never hit by "real world" applications, but exists
                // here as a security measure to ensure that such values don't result in a
                // connection-closing TDS exception.
                cal.add(Calendar.SECOND, 1);

                if (cal.get(Calendar.YEAR) <= 9999) {
                    scaledNanos = 0;
                }
                else {
                    cal.add(Calendar.SECOND, -1);
                    --scaledNanos;
                }
            }
        }

        // Encode the scaled nanos to TDS
        int encodedLength = TDS.nanosSinceMidnightLength(scale);
        byte[] encodedBytes = scaledNanosToEncodedBytes(scaledNanos, encodedLength);

        writeBytes(encodedBytes);
    }

    // Second, for types with a date component, write the days into the Common Era
    if (SSType.DATE == ssType || SSType.DATETIME2 == ssType || SSType.DATETIMEOFFSET == ssType) {
        // Computation of the number of days into the Common Era assumes that
        // the DAY_OF_YEAR field reflects a pure Gregorian calendar - one that
        // uses Gregorian leap year rules across the entire range of dates.
        //
        // For the DAY_OF_YEAR field to accurately reflect pure Gregorian behavior,
        // we need to use a pure Gregorian calendar for dates that are Julian dates
        // under a standard Gregorian calendar and for (Gregorian) dates later than
        // the cutover date in the cutover year.
        if (cal.getTimeInMillis() < GregorianChange.STANDARD_CHANGE_DATE.getTime()
                || cal.getActualMaximum(Calendar.DAY_OF_YEAR) < TDS.DAYS_PER_YEAR) {
            int year = cal.get(Calendar.YEAR);
            int month = cal.get(Calendar.MONTH);
            int date = cal.get(Calendar.DATE);

            // Set the cutover as early as possible (pure Gregorian behavior)
            cal.setGregorianChange(GregorianChange.PURE_CHANGE_DATE);

            // Initialize the date field by field (preserving the "wall calendar" value)
            cal.set(year, month, date);
        }

        int daysIntoCE = DDC.daysSinceBaseDate(cal.get(Calendar.YEAR), cal.get(Calendar.DAY_OF_YEAR), 1);

        // Last-ditch verification that the value is in the valid range for the
        // DATE/DATETIME2/DATETIMEOFFSET TDS data type (1/1/0001 to 12/31/9999).
        // If it's not, then throw an exception now so that statement execution
        // is safely canceled. Attempting to put an invalid value on the wire
        // would result in a TDS exception, which would close the connection.
        if (daysIntoCE < 0 || daysIntoCE >= DDC.daysSinceBaseDate(10000, 1, 1)) {
            MessageFormat form = new MessageFormat(SQLServerException.getErrString("R_valueOutOfRange"));
            Object[] msgArgs = {ssType};
            throw new SQLServerException(form.format(msgArgs), SQLState.DATA_EXCEPTION_DATETIME_FIELD_OVERFLOW, DriverError.NOT_SET, null);
        }

        // The day count goes on the wire as a 3-byte little-endian integer.
        byte encodedBytes[] = new byte[3];
        encodedBytes[0] = (byte) ((daysIntoCE >> 0) & 0xFF);
        encodedBytes[1] = (byte) ((daysIntoCE >> 8) & 0xFF);
        encodedBytes[2] = (byte) ((daysIntoCE >> 16) & 0xFF);
        writeBytes(encodedBytes);
    }
}

/**
 * Writes to the TDS channel a temporal value as an instance of one of the scaled temporal SQL types: DATE, TIME, DATETIME2, or
 * DATETIMEOFFSET.
 *
 * @param cal
 *        Calendar representing the value to write, except for any sub-second nanoseconds
 * @param subSecondNanos
 *        the sub-second nanoseconds (0 - 999,999,999)
 * @param scale
 *        the scale (in digits: 0 - 7) to use for the sub-second nanos component
 * @param ssType
 *        the SQL Server data type (DATE, TIME, DATETIME2, or DATETIMEOFFSET)
 * @param minutesOffset
 *        the offset value for DATETIMEOFFSET
 * @throws SQLServerException
 *         if an I/O error occurs or if the value is not in the valid range
 */
byte[] writeEncryptedScaledTemporal(GregorianCalendar cal, int subSecondNanos, int scale, SSType ssType, short minutesOffset) throws SQLServerException {
    assert con.isKatmaiOrLater();

    assert SSType.DATE == ssType || SSType.TIME == ssType || SSType.DATETIME2 == ssType
            || SSType.DATETIMEOFFSET == ssType : "Unexpected SSType: " + ssType;

    // store the time and minutesOffset portion of DATETIME2 and DATETIMEOFFSET to be used with date portion
    byte encodedBytesForEncryption[] = null;

    int secondsSinceMidnight = 0;
    long divisor = 0;
    long scaledNanos = 0;

    // First, for types with a time component, write the scaled nanos since midnight
    if (SSType.TIME == ssType || SSType.DATETIME2 == ssType || SSType.DATETIMEOFFSET == ssType) {
        assert subSecondNanos >= 0;
        assert subSecondNanos < Nanos.PER_SECOND;
        assert scale >= 0;
        assert scale <= TDS.MAX_FRACTIONAL_SECONDS_SCALE;

        secondsSinceMidnight = cal.get(Calendar.SECOND) + 60 * cal.get(Calendar.MINUTE) + 60 * 60 * cal.get(Calendar.HOUR_OF_DAY);

        // Scale nanos since midnight to the desired scale, rounding the value as necessary
        divisor = Nanos.PER_MAX_SCALE_INTERVAL * (long) Math.pow(10, TDS.MAX_FRACTIONAL_SECONDS_SCALE - scale);

        // The scaledNanos variable represents the fractional seconds of the value at the scale
        // indicated by the scale variable. So, for example, scaledNanos = 3 means 300 nanoseconds
        // at scale TDS.MAX_FRACTIONAL_SECONDS_SCALE, but 3000 nanoseconds at
        // TDS.MAX_FRACTIONAL_SECONDS_SCALE - 1
        // NOTE(review): unlike the plaintext path, this rounds at the requested scale and then
        // re-expresses the result in 100ns units (* divisor / 100), since the encrypted payload
        // is always encoded at the maximum scale.
        scaledNanos = (((long) Nanos.PER_SECOND * secondsSinceMidnight + getRoundedSubSecondNanos(subSecondNanos) + divisor / 2) / divisor)
                * divisor / 100;

        // for encrypted time value, SQL server cannot do rounding or casting,
        // So, driver needs to cast it before encryption.
        if (SSType.TIME == ssType && 864000000000L <= scaledNanos) {
            // Recompute without the rounding term so the value stays below 24:00:00.
            scaledNanos = (((long) Nanos.PER_SECOND * secondsSinceMidnight + getRoundedSubSecondNanos(subSecondNanos)) / divisor) * divisor / 100;
        }

        // SQL Server rounding behavior indicates that it always rounds up unless
        // we are at the max value of the type(NOT every day), in which case it truncates.
        // If rounding nanos to the specified scale rolls the value to the next day ...
        if (Nanos.PER_DAY / divisor == scaledNanos) {
            // If the type is time, always truncate
            if (SSType.TIME == ssType) {
                --scaledNanos;
            }
            // If the type is datetime2 or datetimeoffset, truncate only if it's the max value supported
            else {
                assert SSType.DATETIME2 == ssType || SSType.DATETIMEOFFSET == ssType : "Unexpected SSType: " + ssType;

                // ... then bump the date, provided that the resulting date is still within
                // the valid date range.
                //
                // Extreme edge case (literally, the VERY edge...):
                // If nanos overflow rolls the date value out of range (that is, we have a value
                // a few nanoseconds later than 9999-12-31 23:59:59) then truncate the nanos
                // instead of rolling.
                //
                // This case is very likely never hit by "real world" applications, but exists
                // here as a security measure to ensure that such values don't result in a
                // connection-closing TDS exception.
                cal.add(Calendar.SECOND, 1);

                if (cal.get(Calendar.YEAR) <= 9999) {
                    scaledNanos = 0;
                }
                else {
                    cal.add(Calendar.SECOND, -1);
                    --scaledNanos;
                }
            }
        }

        // Encode the scaled nanos to TDS; encrypted values are always encoded at max scale.
        int encodedLength = TDS.nanosSinceMidnightLength(TDS.MAX_FRACTIONAL_SECONDS_SCALE);
        byte[] encodedBytes = scaledNanosToEncodedBytes(scaledNanos, encodedLength);

        if (SSType.TIME == ssType) {
            // TIME is time-only: encrypt and return immediately.
            byte[] cipherText = SQLServerSecurityUtility.encryptWithKey(encodedBytes, cryptoMeta, con);
            return cipherText;
        }
        else if (SSType.DATETIME2 == ssType) {
            // for DATETIME2 sends both date and time part together for encryption
            encodedBytesForEncryption = new byte[encodedLength + 3];
            System.arraycopy(encodedBytes, 0, encodedBytesForEncryption, 0, encodedBytes.length);
        }
        else if (SSType.DATETIMEOFFSET == ssType) {
            // for DATETIMEOFFSET sends date, time and offset part together for encryption
            encodedBytesForEncryption = new byte[encodedLength + 5];
            System.arraycopy(encodedBytes, 0, encodedBytesForEncryption, 0, encodedBytes.length);
        }
    }

    // Second, for types with a date component, write the days into the Common Era
    if (SSType.DATE == ssType || SSType.DATETIME2 == ssType || SSType.DATETIMEOFFSET == ssType) {
        // Computation of the number of days into the Common Era assumes that
        // the DAY_OF_YEAR field reflects a pure Gregorian calendar - one that
        // uses Gregorian leap year rules across the entire range of dates.
        //
        // For the DAY_OF_YEAR field to accurately reflect pure Gregorian behavior,
        // we need to use a pure Gregorian calendar for dates that are Julian dates
        // under a standard Gregorian calendar and for (Gregorian) dates later than
        // the cutover date in the cutover year.
        if (cal.getTimeInMillis() < GregorianChange.STANDARD_CHANGE_DATE.getTime()
                || cal.getActualMaximum(Calendar.DAY_OF_YEAR) < TDS.DAYS_PER_YEAR) {
            int year = cal.get(Calendar.YEAR);
            int month = cal.get(Calendar.MONTH);
            int date = cal.get(Calendar.DATE);

            // Set the cutover as early as possible (pure Gregorian behavior)
            cal.setGregorianChange(GregorianChange.PURE_CHANGE_DATE);

            // Initialize the date field by field (preserving the "wall calendar" value)
            cal.set(year, month, date);
        }

        int daysIntoCE = DDC.daysSinceBaseDate(cal.get(Calendar.YEAR), cal.get(Calendar.DAY_OF_YEAR), 1);

        // Last-ditch verification that the value is in the valid range for the
        // DATE/DATETIME2/DATETIMEOFFSET TDS data type (1/1/0001 to 12/31/9999).
        // If it's not, then throw an exception now so that statement execution
        // is safely canceled. Attempting to put an invalid value on the wire
        // would result in a TDS exception, which would close the connection.
        if (daysIntoCE < 0 || daysIntoCE >= DDC.daysSinceBaseDate(10000, 1, 1)) {
            MessageFormat form = new MessageFormat(SQLServerException.getErrString("R_valueOutOfRange"));
            Object[] msgArgs = {ssType};
            throw new SQLServerException(form.format(msgArgs), SQLState.DATA_EXCEPTION_DATETIME_FIELD_OVERFLOW, DriverError.NOT_SET, null);
        }

        // 3-byte little-endian day count.
        byte encodedBytes[] = new byte[3];
        encodedBytes[0] = (byte) ((daysIntoCE >> 0) & 0xFF);
        encodedBytes[1] = (byte) ((daysIntoCE >> 8) & 0xFF);
        encodedBytes[2] = (byte) ((daysIntoCE >> 16) & 0xFF);

        byte[] cipherText;
        if (SSType.DATE == ssType) {
            // DATE is date-only: encrypt just the 3-byte day count.
            cipherText = SQLServerSecurityUtility.encryptWithKey(encodedBytes, cryptoMeta, con);
        }
        else if (SSType.DATETIME2 == ssType) {
            // for Max value, does not round up, do casting instead.
            if (3652058 == daysIntoCE) { // 9999-12-31
                if (864000000000L == scaledNanos) { // 24:00:00 in nanoseconds
                    // does not round up
                    scaledNanos = (((long) Nanos.PER_SECOND * secondsSinceMidnight + getRoundedSubSecondNanos(subSecondNanos)) / divisor)
                            * divisor / 100;

                    int encodedLength = TDS.nanosSinceMidnightLength(TDS.MAX_FRACTIONAL_SECONDS_SCALE);
                    byte[] encodedNanoBytes = scaledNanosToEncodedBytes(scaledNanos, encodedLength);

                    // for DATETIME2 sends both date and time part together for encryption
                    encodedBytesForEncryption = new byte[encodedLength + 3];
                    System.arraycopy(encodedNanoBytes, 0, encodedBytesForEncryption, 0, encodedNanoBytes.length);
                }
            }
            // Copy the 3 byte date value
            System.arraycopy(encodedBytes, 0, encodedBytesForEncryption, (encodedBytesForEncryption.length - 3), 3);

            cipherText = SQLServerSecurityUtility.encryptWithKey(encodedBytesForEncryption, cryptoMeta, con);
        }
        else {
            // for Max value, does not round up, do casting instead.
            if (3652058 == daysIntoCE) { // 9999-12-31
                if (864000000000L == scaledNanos) { // 24:00:00 in nanoseconds
                    // does not round up
                    scaledNanos = (((long) Nanos.PER_SECOND * secondsSinceMidnight + getRoundedSubSecondNanos(subSecondNanos)) / divisor)
                            * divisor / 100;

                    int encodedLength = TDS.nanosSinceMidnightLength(TDS.MAX_FRACTIONAL_SECONDS_SCALE);
                    byte[] encodedNanoBytes = scaledNanosToEncodedBytes(scaledNanos, encodedLength);

                    // for DATETIMEOFFSET sends date, time and offset part together for encryption
                    encodedBytesForEncryption = new byte[encodedLength + 5];
                    System.arraycopy(encodedNanoBytes, 0, encodedBytesForEncryption, 0, encodedNanoBytes.length);
                }
            }
            // Copy the 3 byte date value
            System.arraycopy(encodedBytes, 0, encodedBytesForEncryption, (encodedBytesForEncryption.length - 5), 3);

            // Copy the 2 byte minutesOffset value (little-endian)
            System.arraycopy(ByteBuffer.allocate(Short.SIZE / Byte.SIZE).order(ByteOrder.LITTLE_ENDIAN).putShort(minutesOffset).array(), 0,
                    encodedBytesForEncryption, (encodedBytesForEncryption.length - 2), 2);

            cipherText = SQLServerSecurityUtility.encryptWithKey(encodedBytesForEncryption, cryptoMeta, con);
        }
        return cipherText;
    }

    // Invalid type ssType. This condition should never happen.
    MessageFormat form = new MessageFormat(SQLServerException.getErrString("R_unknownSSType"));
    Object[] msgArgs = {ssType};
    SQLServerException.makeFromDriverError(null, null, form.format(msgArgs), null, true);
    return null;
}

/**
 * Encodes scaledNanos as a little-endian byte array of the given length.
 */
private byte[] scaledNanosToEncodedBytes(long scaledNanos, int encodedLength) {
    byte encodedBytes[] = new byte[encodedLength];
    for (int i = 0; i < encodedLength; i++)
        encodedBytes[i] = (byte) ((scaledNanos >> (8 * i)) & 0xFF);
    return encodedBytes;
}

/**
 * Append the data in a stream in RPC transmission format.
 *
 * @param sName
 *        the optional parameter name
 * @param stream
 *        is the stream
 * @param streamLength
 *        length of the stream (may be unknown)
 * @param bOut
 *        boolean true if the data value is being registered as an output parameter
 * @param jdbcType
 *        The JDBC type used to determine whether the value is textual or non-textual.
 * @param collation
 *        The SQL collation associated with the value. Null for non-textual SQL Server types.
 * @throws SQLServerException
 */
void writeRPCInputStream(String sName, InputStream stream, long streamLength, boolean bOut, JDBCType jdbcType, SQLCollation collation) throws SQLServerException {
    assert null != stream;
    assert DataTypes.UNKNOWN_STREAM_LENGTH == streamLength || streamLength >= 0;

    // Send long values and values with unknown length
    // using PLP chunking on Yukon and later.
    boolean usePLP = (DataTypes.UNKNOWN_STREAM_LENGTH == streamLength || streamLength > DataTypes.SHORT_VARTYPE_MAX_BYTES);
    if (usePLP) {
        assert DataTypes.UNKNOWN_STREAM_LENGTH == streamLength || streamLength <= DataTypes.MAX_VARTYPE_MAX_BYTES;

        writeRPCNameValType(sName, bOut, jdbcType.isTextual() ? TDSType.BIGVARCHAR : TDSType.BIGVARBINARY);

        // Handle Yukon v*max type header here.
        writeVMaxHeader(streamLength, false, jdbcType.isTextual() ?
collation : null); } // Send non-PLP in all other cases else { // If the length of the InputStream is unknown then we need to buffer the entire stream // in memory so that we can determine its length and send that length to the server // before the stream data itself. if (DataTypes.UNKNOWN_STREAM_LENGTH == streamLength) { // Create ByteArrayOutputStream with initial buffer size of 8K to handle typical // binary field sizes more efficiently. Note we can grow beyond 8000 bytes. ByteArrayOutputStream baos = new ByteArrayOutputStream(8000); streamLength = 0L; // Since Shiloh is limited to 64K TDS packets, that's a good upper bound on the maximum // length of InputStream we should try to handle before throwing an exception. long maxStreamLength = 65535L * con.getTDSPacketSize(); try { byte buff[] = new byte[8000]; int bytesRead; while (streamLength < maxStreamLength && -1 != (bytesRead = stream.read(buff, 0, buff.length))) { baos.write(buff); streamLength += bytesRead; } } catch (IOException e) { throw new SQLServerException(e.getMessage(), SQLState.DATA_EXCEPTION_NOT_SPECIFIC, DriverError.NOT_SET, e); } if (streamLength >= maxStreamLength) { MessageFormat form = new MessageFormat(SQLServerException.getErrString("R_invalidLength")); Object[] msgArgs = {Long.valueOf(streamLength)}; SQLServerException.makeFromDriverError(null, null, form.format(msgArgs), "", true); } assert streamLength <= Integer.MAX_VALUE; stream = new ByteArrayInputStream(baos.toByteArray(), 0, (int) streamLength); } assert 0 <= streamLength && streamLength <= DataTypes.IMAGE_TEXT_MAX_BYTES; boolean useVarType = streamLength <= DataTypes.SHORT_VARTYPE_MAX_BYTES; writeRPCNameValType(sName, bOut, jdbcType.isTextual() ? (useVarType ? TDSType.BIGVARCHAR : TDSType.TEXT) : (useVarType ? 
TDSType.BIGVARBINARY : TDSType.IMAGE)); // Write maximum length, optional collation, and actual length if (useVarType) { writeShort((short) DataTypes.SHORT_VARTYPE_MAX_BYTES); if (jdbcType.isTextual()) collation.writeCollation(this); writeShort((short) streamLength); } else { writeInt(DataTypes.IMAGE_TEXT_MAX_BYTES); if (jdbcType.isTextual()) collation.writeCollation(this); writeInt((int) streamLength); } } // Write the data writeStream(stream, streamLength, usePLP); } /** * Append the XML data in a stream in RPC transmission format. * * @param sName * the optional parameter name * @param stream * is the stream * @param streamLength * length of the stream (may be unknown) * @param bOut * boolean true if the data value is being registered as an ouput parameter * @throws SQLServerException */ void writeRPCXML(String sName, InputStream stream, long streamLength, boolean bOut) throws SQLServerException { assert DataTypes.UNKNOWN_STREAM_LENGTH == streamLength || streamLength >= 0; assert DataTypes.UNKNOWN_STREAM_LENGTH == streamLength || streamLength <= DataTypes.MAX_VARTYPE_MAX_BYTES; writeRPCNameValType(sName, bOut, TDSType.XML); writeByte((byte) 0); // No schema // Handle null here and return, we're done here if it's null. if (null == stream) { // Null header for v*max types is 0xFFFFFFFFFFFFFFFF. writeLong(0xFFFFFFFFFFFFFFFFL); } else if (DataTypes.UNKNOWN_STREAM_LENGTH == streamLength) { // Append v*max length. // UNKNOWN_PLP_LEN is 0xFFFFFFFFFFFFFFFE writeLong(0xFFFFFFFFFFFFFFFEL); // NOTE: Don't send the first chunk length, this will be calculated by caller. } else { // For v*max types with known length, length is <totallength8><chunklength4> // We're sending same total length as chunk length (as we're sending 1 chunk). writeLong(streamLength); } if (null != stream) // Write the data writeStream(stream, streamLength, true); } /** * Append the data in a character reader in RPC transmission format. 
 *
 * @param sName
 *        the optional parameter name
 * @param re
 *        the reader
 * @param reLength
 *        the reader data length (in characters)
 * @param bOut
 *        boolean true if the data value is being registered as an output parameter
 * @param collation
 *        The SQL collation associated with the value. Null for non-textual SQL Server types.
 * @throws SQLServerException
 */
void writeRPCReaderUnicode(String sName, Reader re, long reLength, boolean bOut, SQLCollation collation) throws SQLServerException {
    assert null != re;
    assert DataTypes.UNKNOWN_STREAM_LENGTH == reLength || reLength >= 0;

    // Textual RPC requires a collation. If none is provided, as is the case when
    // the SSType is non-textual, then use the database collation by default.
    if (null == collation)
        collation = con.getDatabaseCollation();

    // Send long values and values with unknown length
    // using PLP chunking on Yukon and later.
    boolean usePLP = (DataTypes.UNKNOWN_STREAM_LENGTH == reLength || reLength > DataTypes.SHORT_VARTYPE_MAX_CHARS);
    if (usePLP) {
        assert DataTypes.UNKNOWN_STREAM_LENGTH == reLength || reLength <= DataTypes.MAX_VARTYPE_MAX_CHARS;

        writeRPCNameValType(sName, bOut, TDSType.NVARCHAR);

        // Handle Yukon v*max type header here.
        writeVMaxHeader((DataTypes.UNKNOWN_STREAM_LENGTH == reLength) ? DataTypes.UNKNOWN_STREAM_LENGTH : 2 * reLength, // Length (in bytes)
                false, collation);
    }
    // Send non-PLP in all other cases
    else {
        // Length must be known if we're not sending PLP-chunked data. Yukon is handled above.
        // For Shiloh, this is enforced in DTV by converting the Reader to some other length-
        // prefixed value in the setter.
        assert 0 <= reLength && reLength <= DataTypes.NTEXT_MAX_CHARS;

        // For non-PLP types, use the long TEXT type rather than the short VARCHAR
        // type if the stream is too long to fit in the latter or if we don't know the length up
        // front so we have to assume that it might be too long.
        boolean useVarType = reLength <= DataTypes.SHORT_VARTYPE_MAX_CHARS;

        writeRPCNameValType(sName, bOut, useVarType ? TDSType.NVARCHAR : TDSType.NTEXT);

        // Write maximum length, collation, and actual length of the data
        // (character lengths are doubled on the wire: UTF-16 is 2 bytes per char).
        if (useVarType) {
            writeShort((short) DataTypes.SHORT_VARTYPE_MAX_BYTES);
            collation.writeCollation(this);
            writeShort((short) (2 * reLength));
        }
        else {
            writeInt(DataTypes.NTEXT_MAX_CHARS);
            collation.writeCollation(this);
            writeInt((int) (2 * reLength));
        }
    }

    // Write the data
    writeReader(re, reLength, usePLP);
}
}

/**
 * TDSPacket provides a mechanism for chaining TDS response packets together in a singly-linked list.
 *
 * Having both the link and the data in the same class allows TDSReader marks (see below) to automatically hold onto exactly as much response data as
 * they need, and no more. Java reference semantics ensure that a mark holds onto its referenced packet and subsequent packets (through next
 * references). When all marked references to a packet go away, the packet, and any linked unmarked packets, can be reclaimed by GC.
 */
final class TDSPacket {
    // Raw TDS packet header bytes, as read from the channel.
    final byte[] header = new byte[TDS.PACKET_HEADER_SIZE];

    // Payload bytes following the header; payloadLength is the valid prefix of this array.
    final byte[] payload;
    int payloadLength;

    // Link to the next packet in the response chain; volatile because a buffering
    // thread may append while another thread walks the chain.
    volatile TDSPacket next;

    final public String toString() {
        return "TDSPacket(SPID:" + Util.readUnsignedShortBigEndian(header, TDS.PACKET_HEADER_SPID) + " Seq:" + header[TDS.PACKET_HEADER_SEQUENCE_NUM] + ")";
    }

    TDSPacket(int size) {
        payload = new byte[size];
        payloadLength = 0;
        next = null;
    }

    // True when this packet's status byte carries the end-of-message bit.
    final boolean isEOM() {
        return TDS.STATUS_BIT_EOM == (header[TDS.PACKET_HEADER_MESSAGE_STATUS] & TDS.STATUS_BIT_EOM);
    }
};

/**
 * TDSReaderMark encapsulates a fixed position in the response data stream.
 *
 * Response data is quantized into a linked chain of packets. A mark refers to a specific location in a specific packet and relies on Java's reference
 * semantics to automatically keep all subsequent packets accessible until the mark is destroyed.
 */
final class TDSReaderMark {
    // The packet and offset within its payload that this mark pins.
    final TDSPacket packet;
    final int payloadOffset;

    TDSReaderMark(TDSPacket packet, int payloadOffset) {
        this.packet = packet;
        this.payloadOffset = payloadOffset;
    }
}

/**
 * TDSReader encapsulates the TDS response data stream.
 *
 * Bytes are read from SQL Server into a FIFO of packets. Reader methods traverse the packets to access the data.
 */
final class TDSReader {
    private final static Logger logger = Logger.getLogger("com.microsoft.sqlserver.jdbc.internals.TDS.Reader");
    final private String traceID;

    final public String toString() {
        return traceID;
    }

    private final TDSChannel tdsChannel;
    private final SQLServerConnection con;

    // Command whose response this reader is consuming; may be null (see ctor).
    private final TDSCommand command;

    final TDSCommand getCommand() {
        assert null != command;
        return command;
    }

    final SQLServerConnection getConnection() {
        return con;
    }

    // Head and tail of the packet chain; currentPacket starts as an empty sentinel.
    private TDSPacket currentPacket = new TDSPacket(0);
    private TDSPacket lastPacket = currentPacket;
    private int payloadOffset = 0;
    private int packetNum = 0;

    // When streaming, consumed packets are unlinked so GC can reclaim them;
    // a mark() switches this off to buffer the response instead.
    private boolean isStreaming = true;
    private boolean useColumnEncryption = false;
    private boolean serverSupportsColumnEncryption = false;

    // Scratch buffer for value reads.
    private final byte valueBytes[] = new byte[256];

    // Monotonically increasing ID used only to build a unique trace string.
    private static final AtomicInteger lastReaderID = new AtomicInteger(0);

    private static int nextReaderID() {
        return lastReaderID.incrementAndGet();
    }

    TDSReader(TDSChannel tdsChannel, SQLServerConnection con, TDSCommand command) {
        this.tdsChannel = tdsChannel;
        this.con = con;
        this.command = command; // may be null
        // If the logging level is not at least FINE, we will not have proper reader IDs
        // (the cheaper connection string is used as the trace ID instead).
        if (logger.isLoggable(Level.FINE))
            traceID = "TDSReader@" + nextReaderID() + " (" + con.toString() + ")";
        else
            traceID = con.toString();
        if (con.isColumnEncryptionSettingEnabled()) {
            useColumnEncryption = true;
        }
        serverSupportsColumnEncryption = con.getServerSupportsColumnEncryption();
    }

    final boolean isColumnEncryptionSettingEnabled() {
        return useColumnEncryption;
    }

    final boolean getServerSupportsColumnEncryption() {
        return serverSupportsColumnEncryption;
    }

    // Logs the bad offset and terminates the response via the connection.
    final void throwInvalidTDS() throws SQLServerException {
        if (logger.isLoggable(Level.SEVERE))
            logger.severe(toString() + " got unexpected value in TDS response at offset:" + payloadOffset);
        con.throwInvalidTDS();
    }

    // Same as throwInvalidTDS, but names the unexpected token.
    final void throwInvalidTDSToken(String tokenName) throws SQLServerException {
        if (logger.isLoggable(Level.SEVERE))
            logger.severe(toString() + " got unexpected value in TDS response at offset:" + payloadOffset);
        con.throwInvalidTDSToken(tokenName);
    }

    /**
     * Ensures that payload data is available to be read, automatically advancing to (and possibly reading) the next packet.
     *
     * @return true if additional data is available to be read false if no more data is available
     */
    private boolean ensurePayload() throws SQLServerException {
        if (payloadOffset == currentPacket.payloadLength)
            if (!nextPacket())
                return false;

        assert payloadOffset < currentPacket.payloadLength;
        return true;
    }

    /**
     * Advance (and possibly read) the next packet.
     *
     * @return true if additional data is available to be read false if no more data is available
     */
    private boolean nextPacket() throws SQLServerException {
        assert null != currentPacket;

        // Shouldn't call this function unless we're at the end of the current packet...
        TDSPacket consumedPacket = currentPacket;
        assert payloadOffset == consumedPacket.payloadLength;

        // If no buffered packets are left then maybe we can read one...
        // This action must be synchronized against another thread calling
        // readAllPackets() to read in ALL of the remaining packets of the current response.
        if (null == consumedPacket.next) {
            readPacket();
            if (null == consumedPacket.next)
                return false;
        }

        // Advance to that packet. If we are streaming through the
        // response, then unlink the current packet from the next
        // before moving to allow the packet to be reclaimed.
        TDSPacket nextPacket = consumedPacket.next;
        if (isStreaming) {
            if (logger.isLoggable(Level.FINEST))
                logger.finest(toString() + " Moving to next packet -- unlinking consumed packet");

            consumedPacket.next = null;
        }

        currentPacket = nextPacket;
        payloadOffset = 0;
        return true;
    }

    /**
     * Reads the next packet of the TDS channel.
     *
     * This method is synchronized to guard against simultaneously reading packets from one thread that is processing the response and another thread
     * that is trying to buffer it with TDSCommand.detach().
     */
    synchronized final boolean readPacket() throws SQLServerException {
        if (null != command && !command.readingResponse())
            return false;

        // Number of packets in should always be less than number of packets out.
        // If the server has been notified for an interrupt, it may be less by
        // more than one packet.
        assert tdsChannel.numMsgsRcvd < tdsChannel.numMsgsSent : "numMsgsRcvd:" + tdsChannel.numMsgsRcvd + " should be less than numMsgsSent:"
                + tdsChannel.numMsgsSent;

        TDSPacket newPacket = new TDSPacket(con.getTDSPacketSize());

        // First, read the packet header.
        for (int headerBytesRead = 0; headerBytesRead < TDS.PACKET_HEADER_SIZE;) {
            int bytesRead = tdsChannel.read(newPacket.header, headerBytesRead, TDS.PACKET_HEADER_SIZE - headerBytesRead);
            if (bytesRead < 0) {
                if (logger.isLoggable(Level.FINER))
                    logger.finer(toString() + " Premature EOS in response. packetNum:" + packetNum + " headerBytesRead:" + headerBytesRead);

                // EOS before a complete header: distinguish "no response at all" from a truncated one.
                con.terminate(SQLServerException.DRIVER_ERROR_IO_FAILED, ((0 == packetNum && 0 == headerBytesRead)
                        ? SQLServerException.getErrString("R_noServerResponse") : SQLServerException.getErrString("R_truncatedServerResponse")));
            }

            headerBytesRead += bytesRead;
        }

        // Header size is a 2 byte unsigned short integer in big-endian order.
        int packetLength = Util.readUnsignedShortBigEndian(newPacket.header, TDS.PACKET_HEADER_MESSAGE_LENGTH);

        // Make sure the header size is properly bounded and compute length of the packet payload.
        if (packetLength < TDS.PACKET_HEADER_SIZE || packetLength > con.getTDSPacketSize()) {
            if (logger.isLoggable(Level.WARNING)) {
                logger.warning(
                        toString() + " TDS header contained invalid packet length:" + packetLength + "; packet size:" + con.getTDSPacketSize());
            }
            throwInvalidTDS();
        }

        newPacket.payloadLength = packetLength - TDS.PACKET_HEADER_SIZE;

        // Just grab the SPID for logging (another big-endian unsigned short).
        tdsChannel.setSPID(Util.readUnsignedShortBigEndian(newPacket.header, TDS.PACKET_HEADER_SPID));

        // Packet header looks good enough.
        // When logging, copy the packet header to the log buffer.
        byte[] logBuffer = null;
        if (tdsChannel.isLoggingPackets()) {
            logBuffer = new byte[packetLength];
            System.arraycopy(newPacket.header, 0, logBuffer, 0, TDS.PACKET_HEADER_SIZE);
        }

        // Now for the payload...
        for (int payloadBytesRead = 0; payloadBytesRead < newPacket.payloadLength;) {
            int bytesRead = tdsChannel.read(newPacket.payload, payloadBytesRead, newPacket.payloadLength - payloadBytesRead);
            if (bytesRead < 0)
                con.terminate(SQLServerException.DRIVER_ERROR_IO_FAILED, SQLServerException.getErrString("R_truncatedServerResponse"));

            payloadBytesRead += bytesRead;
        }

        ++packetNum;

        // Append the completed packet to the response chain.
        lastPacket.next = newPacket;
        lastPacket = newPacket;

        // When logging, append the payload to the log buffer and write out the whole thing.
        if (tdsChannel.isLoggingPackets()) {
            System.arraycopy(newPacket.payload, 0, logBuffer, TDS.PACKET_HEADER_SIZE, newPacket.payloadLength);
            tdsChannel.logPacket(logBuffer, 0, packetLength,
                    this.toString() + " received Packet:" + packetNum + " (" + newPacket.payloadLength + " bytes)");
        }

        // If end of message, then bump the count of messages received and disable
        // interrupts. If an interrupt happened prior to disabling, then expect
        // to read the attention ack packet as well.
        if (newPacket.isEOM()) {
            ++tdsChannel.numMsgsRcvd;

            // Notify the command (if any) that we've reached the end of the response.
            if (null != command)
                command.onResponseEOM();
        }

        return true;
    }

    // Records the current read position and switches off streaming so that
    // packets from this point on stay linked (and reachable) until reset().
    final TDSReaderMark mark() {
        TDSReaderMark mark = new TDSReaderMark(currentPacket, payloadOffset);
        isStreaming = false;

        if (logger.isLoggable(Level.FINEST))
            logger.finest(this.toString() + ": Buffering from: " + mark.toString());

        return mark;
    }

    // Rewinds the read position to a previously taken mark.
    final void reset(TDSReaderMark mark) {
        if (logger.isLoggable(Level.FINEST))
            logger.finest(this.toString() + ": Resetting to: " + mark.toString());

        currentPacket = mark.packet;
        payloadOffset = mark.payloadOffset;
    }

    // Re-enables streaming (consumed packets become reclaimable again).
    final void stream() {
        isStreaming = true;
    }

    /**
     * Returns the number of bytes that can be read (or skipped over) from this TDSReader without blocking by the next caller of a method for this
     * TDSReader.
     *
     * @return the actual number of bytes available.
     */
    final int available() {
        // The number of bytes that can be read without blocking is just the number
        // of bytes that are currently buffered. That is the number of bytes left
        // in the current packet plus the number of bytes in the remaining packets.
int available = currentPacket.payloadLength - payloadOffset;
        for (TDSPacket packet = currentPacket.next; null != packet; packet = packet.next)
            available += packet.payloadLength;
        return available;
    }

    /**
     *
     * @return number of bytes available in the current packet
     */
    final int availableCurrentPacket() {
        /*
         * The number of bytes that can be read from the current chunk, without including the next chunk that is buffered. This is so the driver can
         * confirm if the next chunk sent is new packet or just continuation
         */
        int available = currentPacket.payloadLength - payloadOffset;
        return available;
    }

    /**
     * Peeks at the next TDS token type byte without consuming it.
     *
     * @return the unsigned token byte, or -1 if the response payload is exhausted.
     */
    final int peekTokenType() throws SQLServerException {
        // Check whether we're at EOF
        if (!ensurePayload())
            return -1;

        // Peek at the current byte (don't increment payloadOffset!)
        return currentPacket.payload[payloadOffset] & 0xFF;
    }

    /**
     * Peeks at the status flag (USHORT) that follows the current byte without consuming anything.
     * Returns 0 when fewer than 3 bytes remain in the current packet's payload.
     */
    final short peekStatusFlag() throws SQLServerException {
        // skip the current packet(i.e, TDS packet type) and peek into the status flag (USHORT)
        if (payloadOffset + 3 <= currentPacket.payloadLength) {
            short value = Util.readShort(currentPacket.payload, payloadOffset + 1);
            return value;
        }

        return 0;
    }

    /** Reads and consumes one unsigned byte; invalid TDS if the payload is exhausted. */
    final int readUnsignedByte() throws SQLServerException {
        // Ensure that we have a packet to read from.
        if (!ensurePayload())
            throwInvalidTDS();

        return currentPacket.payload[payloadOffset++] & 0xFF;
    }

    // The fixed-size readers below share a pattern: fast path when the value lies
    // entirely within the current packet; otherwise readWrappedBytes() assembles the
    // value across the packet boundary.
    final short readShort() throws SQLServerException {
        if (payloadOffset + 2 <= currentPacket.payloadLength) {
            short value = Util.readShort(currentPacket.payload, payloadOffset);
            payloadOffset += 2;
            return value;
        }

        return Util.readShort(readWrappedBytes(2), 0);
    }

    final int readUnsignedShort() throws SQLServerException {
        if (payloadOffset + 2 <= currentPacket.payloadLength) {
            int value = Util.readUnsignedShort(currentPacket.payload, payloadOffset);
            payloadOffset += 2;
            return value;
        }

        return Util.readUnsignedShort(readWrappedBytes(2), 0);
    }

    /** Reads a UTF-16 string of {@code length} characters (2 bytes per character). */
    final String readUnicodeString(int length) throws SQLServerException {
        int byteLength = 2 * length;
        byte bytes[] = new byte[byteLength];
        readBytes(bytes, 0, byteLength);

        return Util.readUnicodeString(bytes, 0, byteLength, con);
    }

    final char readChar() throws SQLServerException {
        return (char) readShort();
    }

    final int readInt() throws SQLServerException {
        if (payloadOffset + 4 <= currentPacket.payloadLength) {
            int value = Util.readInt(currentPacket.payload, payloadOffset);
            payloadOffset += 4;
            return value;
        }

        return Util.readInt(readWrappedBytes(4), 0);
    }

    final int readIntBigEndian() throws SQLServerException {
        if (payloadOffset + 4 <= currentPacket.payloadLength) {
            int value = Util.readIntBigEndian(currentPacket.payload, payloadOffset);
            payloadOffset += 4;
            return value;
        }

        return Util.readIntBigEndian(readWrappedBytes(4), 0);
    }

    final long readUnsignedInt() throws SQLServerException {
        // Mask to a long to get the unsigned 32-bit value.
        return readInt() & 0xFFFFFFFFL;
    }

    final long readLong() throws SQLServerException {
        if (payloadOffset + 8 <= currentPacket.payloadLength) {
            long value = Util.readLong(currentPacket.payload, payloadOffset);
            payloadOffset += 8;
            return value;
        }

        return Util.readLong(readWrappedBytes(8), 0);
    }

    /**
     * Copies {@code valueLength} bytes from the response stream into {@code value},
     * crossing packet boundaries as needed.
     */
    final void readBytes(byte[] value, int valueOffset, int valueLength) throws SQLServerException {
        for (int bytesRead = 0; bytesRead < valueLength;) {
            // Ensure that we have a packet to read from.
            if (!ensurePayload())
                throwInvalidTDS();

            // Figure out how many bytes to copy from the current packet
            // (the lesser of the remaining value bytes and the bytes left in the packet).
            int bytesToCopy = valueLength - bytesRead;
            if (bytesToCopy > currentPacket.payloadLength - payloadOffset)
                bytesToCopy = currentPacket.payloadLength - payloadOffset;

            // Copy some bytes from the current packet to the destination value.
            if (logger.isLoggable(Level.FINEST))
                logger.finest(toString() + " Reading " + bytesToCopy + " bytes from offset " + payloadOffset);

            System.arraycopy(currentPacket.payload, payloadOffset, value, valueOffset + bytesRead, bytesToCopy);
            bytesRead += bytesToCopy;
            payloadOffset += bytesToCopy;
        }
    }

    /**
     * Reads {@code valueLength} bytes into the shared scratch buffer {@code valueBytes}
     * and returns that buffer. Callers must consume the result before the next read;
     * the buffer is reused.
     */
    final byte[] readWrappedBytes(int valueLength) throws SQLServerException {
        assert valueLength <= valueBytes.length;
        readBytes(valueBytes, 0, valueLength);
        return valueBytes;
    }

    /**
     * Reads a DECIMAL/NUMERIC value of {@code valueLength} bytes and converts it
     * to the Java object appropriate for {@code jdbcType}/{@code streamType}.
     */
    final Object readDecimal(int valueLength, TypeInfo typeInfo, JDBCType jdbcType, StreamType streamType) throws SQLServerException {
        if (valueLength > valueBytes.length) {
            if (logger.isLoggable(Level.WARNING)) {
                logger.warning(toString() + " Invalid value length:" + valueLength);
            }
            throwInvalidTDS();
        }

        readBytes(valueBytes, 0, valueLength);
        return DDC.convertBigDecimalToObject(Util.readBigDecimal(valueBytes, valueLength, typeInfo.getScale()), jdbcType, streamType);
    }

    /**
     * Reads a MONEY (8 bytes) or SMALLMONEY (4 bytes) value. For JDBC BINARY the raw
     * big-endian bytes are returned; otherwise the value is scaled by 10^-4 via BigDecimal.
     */
    final Object readMoney(int valueLength, JDBCType jdbcType, StreamType streamType) throws SQLServerException {
        BigInteger bi;
        switch (valueLength) {
            case 8: // money
            {
                int intBitsHi = readInt();
                int intBitsLo = readInt();

                if (JDBCType.BINARY == jdbcType) {
                    byte value[] = new byte[8];
                    Util.writeIntBigEndian(intBitsHi, value, 0);
                    Util.writeIntBigEndian(intBitsLo, value, 4);
                    return value;
                }

                // Recombine the two halves into the 64-bit fixed-point value.
                bi = BigInteger.valueOf(((long) intBitsHi << 32) | (intBitsLo & 0xFFFFFFFFL));
                break;
            }

            case 4: // smallmoney
                if (JDBCType.BINARY == jdbcType) {
                    byte value[] = new byte[4];
                    Util.writeIntBigEndian(readInt(), value, 0);
                    return value;
                }
bi = BigInteger.valueOf(readInt()); break; default: throwInvalidTDS(); return null; } return DDC.convertBigDecimalToObject(new BigDecimal(bi, 4), jdbcType, streamType); } final Object readReal(int valueLength, JDBCType jdbcType, StreamType streamType) throws SQLServerException { if (4 != valueLength) throwInvalidTDS(); return DDC.convertFloatToObject(Float.intBitsToFloat(readInt()), jdbcType, streamType); } final Object readFloat(int valueLength, JDBCType jdbcType, StreamType streamType) throws SQLServerException { if (8 != valueLength) throwInvalidTDS(); return DDC.convertDoubleToObject(Double.longBitsToDouble(readLong()), jdbcType, streamType); } final Object readDateTime(int valueLength, Calendar appTimeZoneCalendar, JDBCType jdbcType, StreamType streamType) throws SQLServerException { // Build and return the right kind of temporal object. int daysSinceSQLBaseDate; int ticksSinceMidnight; int msecSinceMidnight; switch (valueLength) { case 8: // SQL datetime is 4 bytes for days since SQL Base Date // (January 1, 1900 00:00:00 GMT) and 4 bytes for // the number of three hundredths (1/300) of a second // since midnight. daysSinceSQLBaseDate = readInt(); ticksSinceMidnight = readInt(); if (JDBCType.BINARY == jdbcType) { byte value[] = new byte[8]; Util.writeIntBigEndian(daysSinceSQLBaseDate, value, 0); Util.writeIntBigEndian(ticksSinceMidnight, value, 4); return value; } msecSinceMidnight = (ticksSinceMidnight * 10 + 1) / 3; // Convert to msec (1 tick = 1 300th of a sec = 3 msec) break; case 4: // SQL smalldatetime has less precision. It stores 2 bytes // for the days since SQL Base Date and 2 bytes for minutes // after midnight. 
daysSinceSQLBaseDate = readUnsignedShort(); ticksSinceMidnight = readUnsignedShort(); if (JDBCType.BINARY == jdbcType) { byte value[] = new byte[4]; Util.writeShortBigEndian((short) daysSinceSQLBaseDate, value, 0); Util.writeShortBigEndian((short) ticksSinceMidnight, value, 2); return value; } msecSinceMidnight = ticksSinceMidnight * 60 * 1000; // Convert to msec (1 tick = 1 min = 60,000 msec) break; default: throwInvalidTDS(); return null; } // Convert the DATETIME/SMALLDATETIME value to the desired Java type. return DDC.convertTemporalToObject(jdbcType, SSType.DATETIME, appTimeZoneCalendar, daysSinceSQLBaseDate, msecSinceMidnight, 0); // scale // (ignored // for // fixed-scale // DATETIME/SMALLDATETIME // types) } final Object readDate(int valueLength, Calendar appTimeZoneCalendar, JDBCType jdbcType) throws SQLServerException { if (TDS.DAYS_INTO_CE_LENGTH != valueLength) throwInvalidTDS(); // Initialize the date fields to their appropriate values. int localDaysIntoCE = readDaysIntoCE(); // Convert the DATE value to the desired Java type. return DDC.convertTemporalToObject(jdbcType, SSType.DATE, appTimeZoneCalendar, localDaysIntoCE, 0, // midnight local to app time zone 0); // scale (ignored for DATE) } final Object readTime(int valueLength, TypeInfo typeInfo, Calendar appTimeZoneCalendar, JDBCType jdbcType) throws SQLServerException { if (TDS.timeValueLength(typeInfo.getScale()) != valueLength) throwInvalidTDS(); // Read the value from the server long localNanosSinceMidnight = readNanosSinceMidnight(typeInfo.getScale()); // Convert the TIME value to the desired Java type. 
return DDC.convertTemporalToObject(jdbcType, SSType.TIME, appTimeZoneCalendar, 0, localNanosSinceMidnight, typeInfo.getScale()); } final Object readDateTime2(int valueLength, TypeInfo typeInfo, Calendar appTimeZoneCalendar, JDBCType jdbcType) throws SQLServerException { if (TDS.datetime2ValueLength(typeInfo.getScale()) != valueLength) throwInvalidTDS(); // Read the value's constituent components long localNanosSinceMidnight = readNanosSinceMidnight(typeInfo.getScale()); int localDaysIntoCE = readDaysIntoCE(); // Convert the DATETIME2 value to the desired Java type. return DDC.convertTemporalToObject(jdbcType, SSType.DATETIME2, appTimeZoneCalendar, localDaysIntoCE, localNanosSinceMidnight, typeInfo.getScale()); } final Object readDateTimeOffset(int valueLength, TypeInfo typeInfo, JDBCType jdbcType) throws SQLServerException { if (TDS.datetimeoffsetValueLength(typeInfo.getScale()) != valueLength) throwInvalidTDS(); // The nanos since midnight and days into Common Era parts of DATETIMEOFFSET values // are in UTC. Use the minutes offset part to convert to local. long utcNanosSinceMidnight = readNanosSinceMidnight(typeInfo.getScale()); int utcDaysIntoCE = readDaysIntoCE(); int localMinutesOffset = readShort(); // Convert the DATETIMEOFFSET value to the desired Java type. return DDC.convertTemporalToObject(jdbcType, SSType.DATETIMEOFFSET, new GregorianCalendar(new SimpleTimeZone(localMinutesOffset * 60 * 1000, ""), Locale.US), utcDaysIntoCE, utcNanosSinceMidnight, typeInfo.getScale()); } private int readDaysIntoCE() throws SQLServerException { byte value[] = new byte[TDS.DAYS_INTO_CE_LENGTH]; readBytes(value, 0, value.length); int daysIntoCE = 0; for (int i = 0; i < value.length; i++) daysIntoCE |= ((value[i] & 0xFF) << (8 * i)); // Theoretically should never encounter a value that is outside of the valid date range if (daysIntoCE < 0) throwInvalidTDS(); return daysIntoCE; } // Scale multipliers used to convert variable-scaled temporal values to a fixed 100ns scale. 
// Using this array is measurably faster than using Math.pow(10, ...) private final static int[] SCALED_MULTIPLIERS = {10000000, 1000000, 100000, 10000, 1000, 100, 10, 1}; private long readNanosSinceMidnight(int scale) throws SQLServerException { assert 0 <= scale && scale <= TDS.MAX_FRACTIONAL_SECONDS_SCALE; byte value[] = new byte[TDS.nanosSinceMidnightLength(scale)]; readBytes(value, 0, value.length); long hundredNanosSinceMidnight = 0; for (int i = 0; i < value.length; i++) hundredNanosSinceMidnight |= (value[i] & 0xFFL) << (8 * i); hundredNanosSinceMidnight *= SCALED_MULTIPLIERS[scale]; if (!(0 <= hundredNanosSinceMidnight && hundredNanosSinceMidnight < Nanos.PER_DAY / 100)) throwInvalidTDS(); return 100 * hundredNanosSinceMidnight; } final static String guidTemplate = "NNNNNNNN-NNNN-NNNN-NNNN-NNNNNNNNNNNN"; final Object readGUID(int valueLength, JDBCType jdbcType, StreamType streamType) throws SQLServerException { // GUIDs must be exactly 16 bytes if (16 != valueLength) throwInvalidTDS(); // Read in the GUID's binary value byte guid[] = new byte[16]; readBytes(guid, 0, 16); switch (jdbcType) { case CHAR: case VARCHAR: case LONGVARCHAR: case GUID: { StringBuilder sb = new StringBuilder(guidTemplate.length()); for (int i = 0; i < 4; i++) { sb.append(Util.hexChars[(guid[3 - i] & 0xF0) >> 4]); sb.append(Util.hexChars[guid[3 - i] & 0x0F]); } sb.append('-'); for (int i = 0; i < 2; i++) { sb.append(Util.hexChars[(guid[5 - i] & 0xF0) >> 4]); sb.append(Util.hexChars[guid[5 - i] & 0x0F]); } sb.append('-'); for (int i = 0; i < 2; i++) { sb.append(Util.hexChars[(guid[7 - i] & 0xF0) >> 4]); sb.append(Util.hexChars[guid[7 - i] & 0x0F]); } sb.append('-'); for (int i = 0; i < 2; i++) { sb.append(Util.hexChars[(guid[8 + i] & 0xF0) >> 4]); sb.append(Util.hexChars[guid[8 + i] & 0x0F]); } sb.append('-'); for (int i = 0; i < 6; i++) { sb.append(Util.hexChars[(guid[10 + i] & 0xF0) >> 4]); sb.append(Util.hexChars[guid[10 + i] & 0x0F]); } try { return 
DDC.convertStringToObject(sb.toString(), Encoding.UNICODE.charset(), jdbcType, streamType); } catch (UnsupportedEncodingException e) { MessageFormat form = new MessageFormat(SQLServerException.getErrString("R_errorConvertingValue")); throw new SQLServerException(form.format(new Object[] {"UNIQUEIDENTIFIER", jdbcType}), null, 0, e); } } default: { if (StreamType.BINARY == streamType || StreamType.ASCII == streamType) return new ByteArrayInputStream(guid); return guid; } } } /** * Reads a multi-part table name from TDS and returns it as an array of Strings. */ final SQLIdentifier readSQLIdentifier() throws SQLServerException { // Multi-part names should have between 1 and 4 parts int numParts = readUnsignedByte(); if (!(1 <= numParts && numParts <= 4)) throwInvalidTDS(); // Each part is a length-prefixed Unicode string String[] nameParts = new String[numParts]; for (int i = 0; i < numParts; i++) nameParts[i] = readUnicodeString(readUnsignedShort()); // Build the identifier from the name parts SQLIdentifier identifier = new SQLIdentifier(); identifier.setObjectName(nameParts[numParts - 1]); if (numParts >= 2) identifier.setSchemaName(nameParts[numParts - 2]); if (numParts >= 3) identifier.setDatabaseName(nameParts[numParts - 3]); if (4 == numParts) identifier.setServerName(nameParts[numParts - 4]); return identifier; } final SQLCollation readCollation() throws SQLServerException { SQLCollation collation = null; try { collation = new SQLCollation(this); } catch (UnsupportedEncodingException e) { con.terminate(SQLServerException.DRIVER_ERROR_INVALID_TDS, e.getMessage(), e); // not reached } return collation; } final void skip(int bytesToSkip) throws SQLServerException { assert bytesToSkip >= 0; while (bytesToSkip > 0) { // Ensure that we have a packet to read from. 
if (!ensurePayload())
                throwInvalidTDS();

            // Skip the lesser of the remaining request and what's left in the current packet.
            int bytesSkipped = bytesToSkip;
            if (bytesSkipped > currentPacket.payloadLength - payloadOffset)
                bytesSkipped = currentPacket.payloadLength - payloadOffset;

            bytesToSkip -= bytesSkipped;
            payloadOffset += bytesSkipped;
        }
    }

    /**
     * Verifies that the server acknowledged the column-encryption feature extension
     * when that setting is enabled. Skipped entirely when the login is being redirected.
     */
    final void TryProcessFeatureExtAck(boolean featureExtAckReceived) throws SQLServerException {
        // in case of redirection, do not check if TDS_FEATURE_EXTENSION_ACK is received or not.
        if (null != this.con.getRoutingInfo()) {
            return;
        }

        if (isColumnEncryptionSettingEnabled() && !featureExtAckReceived)
            throw new SQLServerException(this, SQLServerException.getErrString("R_AE_NotSupportedByServer"), null, 0, false);
    }
}

/**
 * Timer for use with Commands that support a timeout.
 *
 * Once started, the timer runs for the prescribed number of seconds unless stopped. If the timer runs out, it interrupts its associated Command with
 * a reason like "timed out".
 */
final class TimeoutTimer implements Runnable {
    private static final String threadGroupName = "mssql-jdbc-TimeoutTimer";
    private final int timeoutSeconds;
    private final TDSCommand command;
    // Handle to the submitted timer task; volatile so stop() (from another thread) sees it.
    private volatile Future<?> task;

    // Shared daemon-thread pool for all timeout timers; threads are named
    // "mssql-jdbc-TimeoutTimer-<n>" for diagnosability.
    private static final ExecutorService executor = Executors.newCachedThreadPool(new ThreadFactory() {
        private final ThreadGroup tg = new ThreadGroup(threadGroupName);
        private final String threadNamePrefix = tg.getName() + "-";
        private final AtomicInteger threadNumber = new AtomicInteger(0);

        @Override
        public Thread newThread(Runnable r) {
            Thread t = new Thread(tg, r, threadNamePrefix + threadNumber.incrementAndGet());
            t.setDaemon(true);
            return t;
        }
    });

    private volatile boolean canceled = false;

    TimeoutTimer(int timeoutSeconds, TDSCommand command) {
        assert timeoutSeconds > 0;
        assert null != command;

        this.timeoutSeconds = timeoutSeconds;
        this.command = command;
    }

    final void start() {
        task = executor.submit(this);
    }

    final void stop() {
        task.cancel(true);
        canceled = true;
    }

    public void run() {
        int secondsRemaining = timeoutSeconds;
        try {
            // Poll every second while time is left on the timer.
            // Return if/when the timer is canceled.
            do {
                if (canceled)
                    return;

                Thread.sleep(1000);
            }
            while (--secondsRemaining > 0);
        }
        catch (InterruptedException e) {
            // re-interrupt the current thread, in order to restore the thread's interrupt status.
            Thread.currentThread().interrupt();
            return;
        }

        // If the timer wasn't canceled before it ran out of
        // time then interrupt the registered command.
        try {
            command.interrupt(SQLServerException.getErrString("R_queryTimedOut"));
        }
        catch (SQLServerException e) {
            // Unfortunately, there's nothing we can do if we
            // fail to time out the request. There is no way
            // to report back what happened.
            command.log(Level.FINE, "Command could not be timed out. Reason: " + e.getMessage());
        }
    }
}

/**
 * TDSCommand encapsulates an interruptable TDS conversation.
 *
 * A conversation may consist of one or more TDS request and response messages. A command may be interrupted at any point, from any thread, and for
 * any reason. Acknowledgement and handling of an interrupt is fully encapsulated by this class.
 *
 * Commands may be created with an optional timeout (in seconds). Timeouts are implemented as a form of interrupt, where the interrupt event occurs
 * when the timeout period expires. Currently, only the time to receive the response from the channel counts against the timeout period.
 */
abstract class TDSCommand {
    // Subclasses implement the actual request/response exchange here.
    abstract boolean doExecute() throws SQLServerException;

    final static Logger logger = Logger.getLogger("com.microsoft.sqlserver.jdbc.internals.TDS.Command");
    private final String logContext;

    final String getLogContext() {
        return logContext;
    }

    private String traceID;

    final public String toString() {
        if (traceID == null)
            traceID = "TDSCommand@" + Integer.toHexString(hashCode()) + " (" + logContext + ")";
        return traceID;
    }

    final void log(Level level, String message) {
        logger.log(level, toString() + ": " + message);
    }

    // Optional timer that is set if the command was created with a non-zero timeout period.
// When the timer expires, the command is interrupted.
    private final TimeoutTimer timeoutTimer;

    // TDS channel accessors
    // These are set/reset at command execution time.
    // Volatile ensures visibility to execution thread and interrupt thread
    private volatile TDSWriter tdsWriter;
    private volatile TDSReader tdsReader;

    protected TDSWriter getTDSWriter(){
        return tdsWriter;
    }

    // Lock to ensure atomicity when manipulating more than one of the following
    // shared interrupt state variables below.
    private final Object interruptLock = new Object();

    // Flag set when this command starts execution, indicating that it is
    // ready to respond to interrupts; and cleared when its last response packet is
    // received, indicating that it is no longer able to respond to interrupts.
    // If the command is interrupted after interrupts have been disabled, then the
    // interrupt is ignored.
    private volatile boolean interruptsEnabled = false;

    protected boolean getInterruptsEnabled() {
        return interruptsEnabled;
    }

    protected void setInterruptsEnabled(boolean interruptsEnabled) {
        synchronized (interruptLock) {
            this.interruptsEnabled = interruptsEnabled;
        }
    }

    // Flag set to indicate that an interrupt has happened.
    private volatile boolean wasInterrupted = false;

    private boolean wasInterrupted() {
        return wasInterrupted;
    }

    // The reason for the interrupt.
    private volatile String interruptReason = null;

    // Flag set when this command's request to the server is complete.
    // If a command is interrupted before its request is complete, it is the executing
    // thread's responsibility to send the attention signal to the server if necessary.
    // After the request is complete, the interrupting thread must send the attention signal.
    private volatile boolean requestComplete;

    protected boolean getRequestComplete() {
        return requestComplete;
    }

    protected void setRequestComplete(boolean requestComplete) {
        synchronized (interruptLock) {
            this.requestComplete = requestComplete;
        }
    }

    // Flag set when an attention signal has been sent to the server, indicating that a
    // TDS packet containing the attention ack message is to be expected in the response.
    // This flag is cleared after the attention ack message has been received and processed.
    private volatile boolean attentionPending = false;

    boolean attentionPending() {
        return attentionPending;
    }

    // Flag set when this command's response has been processed. Until this flag is set,
    // there may be unprocessed information left in the response, such as transaction
    // ENVCHANGE notifications.
    private volatile boolean processedResponse;

    protected boolean getProcessedResponse() {
        return processedResponse;
    }

    protected void setProcessedResponse(boolean processedResponse) {
        synchronized (interruptLock) {
            this.processedResponse = processedResponse;
        }
    }

    // Flag set when this command's response is ready to be read from the server and cleared
    // after its response has been received, but not necessarily processed, up to and including
    // any attention ack. The command's response is read either on demand as it is processed,
    // or by detaching.
    private volatile boolean readingResponse;

    final boolean readingResponse() {
        return readingResponse;
    }

    /**
     * Creates this command with an optional timeout.
     *
     * @param logContext
     *            the string describing the context for this command.
     * @param timeoutSeconds
     *            (optional) the time before which the command must complete before it is interrupted. A value of 0 means no timeout.
     */
    TDSCommand(String logContext, int timeoutSeconds) {
        this.logContext = logContext;
        this.timeoutTimer = (timeoutSeconds > 0) ? (new TimeoutTimer(timeoutSeconds, this)) : null;
    }

    /**
     * Executes this command.
* * @param tdsWriter * @param tdsReader * @throws SQLServerException * on any error executing the command, including cancel or timeout. */ boolean execute(TDSWriter tdsWriter, TDSReader tdsReader) throws SQLServerException { this.tdsWriter = tdsWriter; this.tdsReader = tdsReader; assert null != tdsReader; try { return doExecute(); // Derived classes implement the execution details } catch (SQLServerException e) { try { // If command execution threw an exception for any reason before the request // was complete then interrupt the command (it may already be interrupted) // and close it out to ensure that any response to the error/interrupt // is processed. // no point in trying to cancel on a closed connection. if (!requestComplete && !tdsReader.getConnection().isClosed()) { interrupt(e.getMessage()); onRequestComplete(); close(); } } catch (SQLServerException interruptException) { if (logger.isLoggable(Level.FINE)) logger.fine(this.toString() + ": Ignoring error in sending attention: " + interruptException.getMessage()); } // throw the original exception even if trying to interrupt fails even in the case // of trying to send a cancel to the server. throw e; } } /** * Provides sane default response handling. * * This default implementation just consumes everything in the response message. */ void processResponse(TDSReader tdsReader) throws SQLServerException { if (logger.isLoggable(Level.FINEST)) logger.finest(this.toString() + ": Processing response"); try { TDSParser.parse(tdsReader, getLogContext()); } catch (SQLServerException e) { if (SQLServerException.DRIVER_ERROR_FROM_DATABASE != e.getDriverErrorCode()) throw e; if (logger.isLoggable(Level.FINEST)) logger.finest(this.toString() + ": Ignoring error from database: " + e.getMessage()); } } /** * Clears this command from the TDS channel so that another command can execute. * * This method does not process the response. It just buffers it in memory, including any attention ack that may be present. 
*/
    final void detach() throws SQLServerException {
        if (logger.isLoggable(Level.FINEST))
            logger.finest(this + ": detaching...");

        // Read any remaining response packets from the server.
        // This operation may be timed out or cancelled from another thread.
        while (tdsReader.readPacket())
            ;

        // Postcondition: the entire response has been read
        assert !readingResponse;
    }

    /**
     * Closes out this command: drives response processing to completion and, if an
     * attention signal was sent, consumes the attention ack. If the expected attention
     * ack never arrives, the connection is deliberately terminated so no other command
     * can execute on a channel in an unknown state.
     */
    final void close() {
        if (logger.isLoggable(Level.FINEST))
            logger.finest(this + ": closing...");

        if (logger.isLoggable(Level.FINEST))
            logger.finest(this + ": processing response...");

        while (!processedResponse) {
            try {
                processResponse(tdsReader);
            }
            catch (SQLServerException e) {
                if (logger.isLoggable(Level.FINEST))
                    logger.finest(this + ": close ignoring error processing response: " + e.getMessage());

                // If the session died mid-processing there is nothing more to read;
                // force the loop (and any attention expectation) to terminate.
                if (tdsReader.getConnection().isSessionUnAvailable()) {
                    processedResponse = true;
                    attentionPending = false;
                }
            }
        }

        if (attentionPending) {
            if (logger.isLoggable(Level.FINEST))
                logger.finest(this + ": processing attention ack...");

            try {
                TDSParser.parse(tdsReader, "attention ack");
            }
            catch (SQLServerException e) {
                if (tdsReader.getConnection().isSessionUnAvailable()) {
                    if (logger.isLoggable(Level.FINEST))
                        logger.finest(this + ": giving up on attention ack after connection closed by exception: " + e);
                    attentionPending = false;
                }
                else {
                    if (logger.isLoggable(Level.FINEST))
                        logger.finest(this + ": ignored exception: " + e);
                }
            }

            // If the parser returns to us without processing the expected attention ack,
            // then assume that no attention ack is forthcoming from the server and
            // terminate the connection to prevent any other command from executing.
            if (attentionPending) {
                if (logger.isLoggable(Level.SEVERE)) {
                    logger.severe(this.toString() + ": expected attn ack missing or not processed; terminating connection...");
                }

                try {
                    tdsReader.throwInvalidTDS();
                }
                catch (SQLServerException e) {
                    if (logger.isLoggable(Level.FINEST))
                        logger.finest(this + ": ignored expected invalid TDS exception: " + e);

                    assert tdsReader.getConnection().isSessionUnAvailable();
                    attentionPending = false;
                }
            }
        }

        // Postcondition:
        // Response has been processed and there is no attention pending -- the command is closed.
        // Of course the connection may be closed too, but the command is done regardless...
        assert processedResponse && !attentionPending;
    }

    /**
     * Interrupts execution of this command, typically from another thread.
     *
     * Only the first interrupt has any effect. Subsequent interrupts are ignored. Interrupts are also ignored until enabled. If interrupting the
     * command requires an attention signal to be sent to the server, then this method sends that signal if the command's request is already complete.
     *
     * Signalling mechanism is "fire and forget". It is up to either the execution thread or, possibly, a detaching thread, to ensure that any pending
     * attention ack later will be received and processed.
     *
     * @param reason
     *            the reason for the interrupt, typically cancel or timeout.
     * @throws SQLServerException
     *             if interrupting fails for some reason. This call does not throw the reason for the interrupt.
     */
    void interrupt(String reason) throws SQLServerException {
        // Multiple, possibly simultaneous, interrupts may occur.
        // Only the first one should be recognized and acted upon.
synchronized (interruptLock) {
            if (interruptsEnabled && !wasInterrupted()) {
                if (logger.isLoggable(Level.FINEST))
                    logger.finest(this + ": Raising interrupt for reason:" + reason);

                wasInterrupted = true;
                interruptReason = reason;
                // Only the interrupting thread sends the attention signal once the request
                // is fully on the wire; before that, the executing thread owns it
                // (see onRequestComplete).
                if (requestComplete)
                    attentionPending = tdsWriter.sendAttention();
            }
        }
    }

    private boolean interruptChecked = false;

    /**
     * Checks once whether an interrupt has occurred, and, if it has, throws an exception indicating that fact.
     *
     * Any calls after the first to check for interrupts are no-ops. This method is called periodically from this command's execution thread to notify
     * the app when an interrupt has happened.
     *
     * It should only be called from places where consistent behavior can be ensured after the exception is thrown. For example, it should not be
     * called at arbitrary times while processing the response, as doing so could leave the response token stream in an inconsistent state. Currently,
     * response processing only checks for interrupts after every result or OUT parameter.
     *
     * Request processing checks for interrupts before writing each packet.
     *
     * @throws SQLServerException
     *             if this command was interrupted, throws the reason for the interrupt.
     */
    final void checkForInterrupt() throws SQLServerException {
        // Throw an exception with the interrupt reason if this command was interrupted.
        // Note that the interrupt reason may be null. Checking whether the
        // command was interrupted does not require the interrupt lock since only one
        // of the shared state variables is being manipulated; interruptChecked is not
        // shared with the interrupt thread.
        if (wasInterrupted() && !interruptChecked) {
            interruptChecked = true;

            if (logger.isLoggable(Level.FINEST))
                logger.finest(this + ": throwing interrupt exception, reason: " + interruptReason);

            throw new SQLServerException(interruptReason, SQLState.STATEMENT_CANCELED, DriverError.NOT_SET, null);
        }
    }

    /**
     * Notifies this command when no more request packets are to be sent to the server.
     *
     * After the last packet has been sent, the only way to interrupt the request is to send an attention signal from the interrupt() method.
     *
     * Note that this method is called when the request completes normally (last packet sent with EOM bit) or when it completes after being
     * interrupted (0 or more packets sent with no EOM bit).
     */
    final void onRequestComplete() throws SQLServerException {
        assert !requestComplete;

        if (logger.isLoggable(Level.FINEST))
            logger.finest(this + ": request complete");

        synchronized (interruptLock) {
            requestComplete = true;

            // If this command was interrupted before its request was complete then
            // we need to send the attention signal if necessary. Note that if no
            // attention signal is sent (i.e. no packets were sent to the server before
            // the interrupt happened), then don't expect an attention ack or any
            // other response.
            if (!interruptsEnabled) {
                assert !attentionPending;
                assert !processedResponse;
                assert !readingResponse;
                processedResponse = true;
            }
            else if (wasInterrupted()) {

                if (tdsWriter.isEOMSent()) {
                    attentionPending = tdsWriter.sendAttention();
                    readingResponse = attentionPending;
                }
                else {
                    assert !attentionPending;
                    readingResponse = tdsWriter.ignoreMessage();
                }

                processedResponse = !readingResponse;
            }
            else {
                assert !attentionPending;
                assert !processedResponse;
                readingResponse = true;
            }
        }
    }

    /**
     * Notifies this command when the last packet of the response has been read.
     *
     * When the last packet is read, interrupts are disabled. If an interrupt occurred prior to disabling that caused an attention signal to be sent
     * to the server, then an extra packet containing the attention ack is read.
     *
     * This ensures that on return from this method, the TDS channel is clear of all response packets for this command.
     *
     * Note that this method is called for the attention ack message itself as well, so we need to be sure not to expect more than one attention
     * ack...
*/ final void onResponseEOM() throws SQLServerException { boolean readAttentionAck = false; // Atomically disable interrupts and check for a previous interrupt requiring // an attention ack to be read. synchronized (interruptLock) { if (interruptsEnabled) { if (logger.isLoggable(Level.FINEST)) logger.finest(this + ": disabling interrupts"); // Determine whether we still need to read the attention ack packet. // When a command is interrupted, Yukon (and later) always sends a response // containing at least a DONE(ERROR) token before it sends the attention ack, // even if the command's request was not complete. readAttentionAck = attentionPending; interruptsEnabled = false; } } // If an attention packet needs to be read then read it. This should // be done outside of the interrupt lock to avoid unnecessarily blocking // interrupting threads. Note that it is remotely possible that the call // to readPacket won't actually read anything if the attention ack was // already read by TDSCommand.detach(), in which case this method could // be called from multiple threads, leading to a benign race to clear the // readingResponse flag. if (readAttentionAck) tdsReader.readPacket(); readingResponse = false; } /** * Notifies this command when the end of its response token stream has been reached. * * After this call, we are guaranteed that tokens in the response have been processed. */ final void onTokenEOF() { processedResponse = true; } /** * Notifies this command when the attention ack (a DONE token with a special flag) has been processed. * * After this call, the attention ack should no longer be expected. */ final void onAttentionAck() { assert attentionPending; attentionPending = false; } /** * Starts sending this command's TDS request to the server. * * @param tdsMessageType * the type of the TDS message (RPC, QUERY, etc.) * @return the TDS writer used to write the request. * @throws SQLServerException * on any error, including acknowledgement of an interrupt. 
*/ final TDSWriter startRequest(byte tdsMessageType) throws SQLServerException { if (logger.isLoggable(Level.FINEST)) logger.finest(this + ": starting request..."); // Start this command's request message try { tdsWriter.startMessage(this, tdsMessageType); } catch (SQLServerException e) { if (logger.isLoggable(Level.FINEST)) logger.finest(this + ": starting request: exception: " + e.getMessage()); throw e; } // (Re)initialize this command's interrupt state for its current execution. // To ensure atomically consistent behavior, do not leave the interrupt lock // until interrupts have been (re)enabled. synchronized (interruptLock) { requestComplete = false; readingResponse = false; processedResponse = false; attentionPending = false; wasInterrupted = false; interruptReason = null; interruptsEnabled = true; } return tdsWriter; } /** * Finishes the TDS request and then starts reading the TDS response from the server. * * @return the TDS reader used to read the response. * @throws SQLServerException * if there is any kind of error. */ final TDSReader startResponse() throws SQLServerException { return startResponse(false); } final TDSReader startResponse(boolean isAdaptive) throws SQLServerException { // Finish sending the request message. If this command was interrupted // at any point before endMessage() returns, then endMessage() throws an // exception with the reason for the interrupt. Request interrupts // are disabled by the time endMessage() returns. if (logger.isLoggable(Level.FINEST)) logger.finest(this + ": finishing request"); try { tdsWriter.endMessage(); } catch (SQLServerException e) { if (logger.isLoggable(Level.FINEST)) logger.finest(this + ": finishing request: endMessage threw exception: " + e.getMessage()); throw e; } // If command execution is subject to timeout then start timing until // the server returns the first response packet. 
if (null != timeoutTimer) { if (logger.isLoggable(Level.FINEST)) logger.finest(this.toString() + ": Starting timer..."); timeoutTimer.start(); } if (logger.isLoggable(Level.FINEST)) logger.finest(this.toString() + ": Reading response..."); try { // Wait for the server to execute the request and read the first packet // (responseBuffering=adaptive) or all packets (responseBuffering=full) // of the response. if (isAdaptive) { tdsReader.readPacket(); } else { while (tdsReader.readPacket()) ; } } catch (SQLServerException e) { if (logger.isLoggable(Level.FINEST)) logger.finest(this.toString() + ": Exception reading response: " + e.getMessage()); throw e; } finally { // If command execution was subject to timeout then stop timing as soon // as the server returns the first response packet or errors out. if (null != timeoutTimer) { if (logger.isLoggable(Level.FINEST)) logger.finest(this.toString() + ": Stopping timer..."); timeoutTimer.stop(); } } return tdsReader; } } /** * UninterruptableTDSCommand encapsulates an uninterruptable TDS conversation. * * TDSCommands have interruptability built in. However, some TDSCommands such as DTC commands, connection commands, cursor close and prepared * statement handle close shouldn't be interruptable. This class provides a base implementation for such commands. */ abstract class UninterruptableTDSCommand extends TDSCommand { UninterruptableTDSCommand(String logContext) { super(logContext, 0); } final void interrupt(String reason) throws SQLServerException { // Interrupting an uninterruptable command is a no-op. That is, // it can happen, but it should have no effect. if (logger.isLoggable(Level.FINEST)) { logger.finest(toString() + " Ignoring interrupt of uninterruptable TDS command; Reason:" + reason); } } }
// RMG - Reaction Mechanism Generator // RMG Team (rmg_dev@mit.edu) // copy of this software and associated documentation files (the "Software"), // to deal in the Software without restriction, including without limitation // and/or sell copies of the Software, and to permit persons to whom the // Software is furnished to do so, subject to the following conditions: // all copies or substantial portions of the Software. // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. package jing.chem; import java.util.*; import jing.chemUtil.*; import java.io.File; import java.io.FileReader; import java.io.FileWriter; import java.io.IOException; import java.io.BufferedReader; import java.io.InputStream; import java.io.InputStreamReader; //quantum mechanics thermo property estimator; analog of GATP public class QMTP implements GeneralGAPP { private static QMTP INSTANCE = new QMTP(); //## attribute INSTANCE protected static PrimaryThermoLibrary primaryLibrary;//Note: may be able to separate this out into GeneralGAPP, as this is common to both GATP and QMTP protected static HashMap library; //as above, may be able to move this and associated functions to GeneralGAPP (and possibly change from "x implements y" to "x extends y"), as it is common to both GATP and QMTP // Constructors //## operation QMTP() private QMTP() { initializeLibrary(); initializePrimaryThermoLibrary(); } //## operation generateThermoData(ChemGraph) public ThermoData generateThermoData(ChemGraph p_chemGraph) { //#[ operation generateThermoData(ChemGraph) //first, check for thermo data in the primary thermo library and library (?); if it is there, use it ThermoData result = primaryLibrary.getThermoData(p_chemGraph.getGraph()); 
//System.out.println(result); if (result != null) { p_chemGraph.fromprimarythermolibrary = true; return result; } result = getFromLibrary(p_chemGraph.getChemicalFormula()); if (result != null) return result; //if there is no data in the libraries, calculate the result based on QM or MM calculations; the below steps will be generalized later to allow for other quantum mechanics packages, etc. //String qmProgram="gaussian03"; String qmProgram="mopac"; String qmMethod="pm3"; //may eventually want to pass this to various functions to choose which "sub-function" to call result = new ThermoData(); String [] InChInames = getQMFileName(p_chemGraph);//determine the filename (InChIKey) and InChI with appended info for triplets, etc. String name = InChInames[0]; String InChIaug = InChInames[1]; String directory = "QMfiles/"; File dir=new File(directory); directory = dir.getAbsolutePath();//this and previous three lines get the absolute path for the directory //first, check to see if the result already exists and the job terminated successfully boolean gaussianResultExists = successfulGaussianResultExistsQ(name,directory,InChIaug); boolean mopacResultExists = successfulMopacResultExistsQ(name,directory,InChIaug); if(!gaussianResultExists && !mopacResultExists){//if a successful result doesn't exist from previous run (or from this run), run the calculation; if a successful result exists, we will skip directly to parsing the file //1. create a 2D file //use the absolute path for directory, so we can easily reference from other directories in command-line paths //can't use RMG.workingDirectory, since this basically holds the RMG environment variable, not the workingDirectory directory = "2Dmolfiles/"; dir=new File(directory); directory = dir.getAbsolutePath(); molFile p_2dfile = new molFile(name, directory, p_chemGraph); molFile p_3dfile = new molFile();//it seems this must be initialized, so we initialize to empty object //2. 
convert from 2D to 3D using RDKit if the 2D molfile is for a molecule with 2 or more atoms if(p_chemGraph.getAtomNumber() > 1){ p_3dfile = embed3D(p_2dfile); } //3. create the Gaussian input file directory = "QMfiles/"; dir=new File(directory); directory = dir.getAbsolutePath();//this and previous three lines get the absolute path for the directory int attemptNumber=1;//counter for attempts using different keywords int successFlag=0;//flag for success of Gaussian run; 0 means it failed, 1 means it succeeded int maxAttemptNumber=1; while(successFlag==0 && attemptNumber <= maxAttemptNumber){ //IF block to check which program to use if (qmProgram.equals("gaussian03")){ if(p_chemGraph.getAtomNumber() > 1){ maxAttemptNumber = createGaussianPM3Input(name, directory, p_3dfile, attemptNumber, InChIaug); } else{ maxAttemptNumber = createGaussianPM3Input(name, directory, p_2dfile, -1, InChIaug);//use -1 for attemptNumber for monoatomic case } //4. run Gaussian successFlag = runGaussian(name, directory); } else if (qmProgram.equals("mopac")){ int multiplicity = p_chemGraph.getRadicalNumber()+1; //multiplicity = radical number + 1 maxAttemptNumber = createMopacPM3Input(name, directory, p_3dfile, attemptNumber, InChIaug, multiplicity); successFlag = runMOPAC(name, directory); } else{ System.out.println("Unsupported quantum chemistry program"); System.exit(0); } //new IF block to check success if(successFlag==1){ System.out.println("Attempt #"+attemptNumber + " on species " + name + " ("+InChIaug+") succeeded."); } else if(successFlag==0){ if(attemptNumber==maxAttemptNumber){//if this is the last possible attempt, and the calculation fails, exit with an error message if(qmProgram.equals("mopac")){ //if we are running with MOPAC and all keywords fail, try with Gaussian qmProgram = "gaussian03"; System.out.println("*****Final MOPAC attempt (#" + maxAttemptNumber + ") on species " + name + " ("+InChIaug+") failed. 
Trying to use Gaussian."); attemptNumber=0;//this needs to be 0 so that when we increment attemptNumber below, it becomes 1 when returning to the beginning of the for loop maxAttemptNumber=1; } else{ System.out.println("*****Final attempt (#" + maxAttemptNumber + ") on species " + name + " ("+InChIaug+") failed."); System.exit(0); } } System.out.println("*****Attempt #"+attemptNumber + " on species " + name + " ("+InChIaug+") failed. Will attempt a new keyword."); attemptNumber++;//try again with new keyword } } } //5. parse QM output and record as thermo data (function includes symmetry/point group calcs, etc.); if both Gaussian and MOPAC results exist, Gaussian result is used if (gaussianResultExists || (qmProgram.equals("gaussian03") && !mopacResultExists)){ result = parseGaussianPM3(name, directory, p_chemGraph); } else if (mopacResultExists || qmProgram.equals("mopac")){ result = parseMopacPM3(name, directory, p_chemGraph); } else{ System.out.println("Unexpected situation in QMTP thermo estimation"); System.exit(0); } return result; // } protected static QMTP getINSTANCE() { return INSTANCE; } public void initializePrimaryThermoLibrary(){//svp primaryLibrary = PrimaryThermoLibrary.getINSTANCE(); } //## operation getFromLibrary(String) public ThermoData getFromLibrary(String p_chemicalFormula) { //#[ operation getFromLibrary(String) return (ThermoData)library.get(p_chemicalFormula); // } public static HashMap getLibrary() { return library; } //6/2/09 gmagoon: doesn't seem like this function is ever used for GATP public static void setLibrary(HashMap p_library) { library = p_library; } public void initializeLibrary() { library = new HashMap(); // put in H2 ThermoData td_H2 = new ThermoData(0.000,31.233,6.895,6.975,6.994,7.009,7.081,7.219,7.720,0,0,0,"library value for H2"); library.put("H2", td_H2); // put in H ThermoData td_H = new ThermoData(52.103,27.419,4.968,4.968,4.968,4.968,4.968,4.968,4.968, 0,0,0,"library value for H radical"); library.put("H.",td_H); } 
//embed a molecule in 3D, using RDKit public molFile embed3D(molFile twoDmolFile){ //convert to 3D MOL file using RDKit script int flag=0; String directory = "3Dmolfiles/"; File dir=new File(directory); directory = dir.getAbsolutePath();//this uses the absolute path for the directory String name = twoDmolFile.getName(); try{ File runningdir=new File(directory); String command = "c:/Python25/python.exe c:/Python25/distGeomScriptMol.py ";//this should eventually be modified for added generality String twoDmolpath=twoDmolFile.getPath(); command=command.concat(twoDmolpath); command=command.concat(" "); command=command.concat(name+".mol");//this is the target file name; use the same name as the twoDmolFile (but it will be in he 3Dmolfiles folder Process pythonProc = Runtime.getRuntime().exec(command, null, runningdir); String killmsg= "Python process for "+twoDmolFile.getName()+" did not complete within 10 seconds, and the process was killed. File was probably not written.";//message to print if the process times out Thread timeoutThread = new TimeoutKill(pythonProc, killmsg, 10000L); //create a timeout thread to handle cases where the UFF optimization get's locked up (cf. Ch. 16 of "Ivor Horton's Beginning Java 2: JDK 5 Edition"); once we use the updated version of RDKit, we should be able to get rid of this timeoutThread.start();//start the thread //check for errors and display the error if there is one InputStream is = pythonProc.getErrorStream(); InputStreamReader isr = new InputStreamReader(is); BufferedReader br = new BufferedReader(isr); String line=null; while ( (line = br.readLine()) != null) { line = line.trim(); System.err.println(line); flag=1; } //if there was an error, indicate the file and InChI if(flag==1){ System.out.println("RDKit received error (see above) on " + twoDmolFile.getName()+". 
File was probably not written."); } int exitValue = pythonProc.waitFor(); if(timeoutThread.isAlive())//if the timeout thread is still alive (indicating that the process has completed in a timely manner), stop the timeout thread timeoutThread.interrupt(); } catch (Exception e) { String err = "Error in running RDKit Python process \n"; err += e.toString(); e.printStackTrace(); System.exit(0); } // gmagoon 6/3/09 comment out InChI checking for now; in any case, the code will need to be updated, as it is copied from my testing code // //check whether the original InChI is reproduced // if(flag==0){ // try{ // File f=new File("c:/Python25/"+molfilename); // File newFile= new File("c:/Python25/mol3d.mol"); // if(newFile.exists()){ // newFile.delete();//apparently renaming will not work unless target file does not exist (at least on Vista) // f.renameTo(newFile); // String command = "c:/Users/User1/Documents/InChI-1/cInChI-1.exe c:/Python25/mol3d.mol inchi3d.inchi /AuxNone /DoNotAddH";//DoNotAddH used to prevent adding Hs to radicals (this would be a problem for current RDKit output which doesn't use M RAD notation) // Process inchiProc = Runtime.getRuntime().exec(command); // // int exitValue = inchiProc.waitFor();
package com.newsifier.watson.reader;

import com.google.gson.JsonObject;
import com.ibm.watson.developer_cloud.natural_language_understanding.v1.NaturalLanguageUnderstanding;
import com.ibm.watson.developer_cloud.natural_language_understanding.v1.model.*;
import com.newsifier.Credentials;
import com.newsifier.watson.bean.NewsNLU;

import java.util.ArrayList;
import java.util.List;

import static com.newsifier.dao.impl.Utils.getCredentials;

/**
 * Extracts NLU features (taxonomy categories and keywords) from a news
 * article URL using the IBM Watson Natural Language Understanding service.
 */
public class Extractor {

    /** Configured NLU client; credentials are resolved at construction time. */
    private final NaturalLanguageUnderstanding service;

    /** Feature set requested on every analyze call (categories + keywords). */
    private final Features features;

    /**
     * Builds the NLU client and the analysis feature configuration.
     *
     * @param limit maximum number of keywords to request per document
     */
    public Extractor(int limit) {
        JsonObject credentials = getCredentials("natural-language-understanding", Credentials.getUsernameNlu(), Credentials.getPasswordNlu());

        String username = credentials.get("username").getAsString();
        String password = credentials.get("password").getAsString();

        service = new NaturalLanguageUnderstanding(
                NaturalLanguageUnderstanding.VERSION_DATE_2017_02_27,
                username,
                password
        );

        CategoriesOptions categories = new CategoriesOptions();

        KeywordsOptions keywords = new KeywordsOptions.Builder()
                .emotion(false) // Set this to true to enable emotion analysis for detected keywords
                .sentiment(false) // Set this to true to enable sentiment analysis for detected keywords
                .limit(limit) // Maximum number of keywords to return
                .build();

        features = new Features.Builder()
                .categories(categories)
                .keywords(keywords)
                .build();
    }

    /**
     * Analyzes the article at the given URL and keeps only the categories and
     * keywords whose confidence exceeds the supplied thresholds.
     *
     * @param urlNews   URL of the news article to analyze
     * @param score     minimum category score (exclusive) required to retain a category
     * @param relevance minimum keyword relevance (exclusive) required to retain a keyword
     * @return a NewsNLU bean holding the URL plus the filtered category and keyword labels
     */
    public NewsNLU extractInfo(String urlNews, double score, double relevance) {
        AnalyzeOptions parameters = new AnalyzeOptions.Builder()
                .url(urlNews)
                .features(features)
                .build();

        AnalysisResults results = service
                .analyze(parameters)
                .execute();

        // Typed, diamond-inferred lists; the originals were raw ArrayList,
        // which defeated generic type checking.
        List<String> categoriesLabel = new ArrayList<>();
        List<String> keywordsLabel = new ArrayList<>();

        // Guard against an absent categories section in the response instead of
        // risking a NullPointerException while iterating.
        // NOTE(review): assumes the SDK may return null for missing sections — confirm.
        List<CategoriesResult> cats = results.getCategories();
        if (cats != null) {
            for (CategoriesResult cat : cats) {
                if (cat.getScore() > score) {
                    // Normalize labels: whitespace runs -> underscores, commas removed.
                    categoriesLabel.add(cat.getLabel().replaceAll("\\s+", "_").replaceAll(",", ""));
                }
            }
        }

        List<KeywordsResult> keys = results.getKeywords();
        if (keys != null) {
            for (KeywordsResult key : keys) {
                if (key.getRelevance() > relevance) {
                    keywordsLabel.add(key.getText());
                }
            }
        }

        return new NewsNLU(urlNews, categoriesLabel, keywordsLabel);
    }
}
// Originally declared in package com.qiudao.dataStructure.tree.

/**
 * A self-balancing AVL binary search tree storing unique elements in sorted order.
 * Null elements are not supported.
 *
 * @param <T> the element type; must be comparable to itself or a supertype
 */
public class AvlTree<T extends Comparable<? super T>> {

    /** Internal tree node: element payload, child links and cached subtree height. */
    private class AvlNode {
        private T element;    // payload stored at this node
        private AvlNode left; // left subtree (strictly smaller elements)
        private AvlNode right;// right subtree (strictly larger elements)
        private int height;   // height of the subtree rooted here (leaf = 0)

        AvlNode(T element) {
            this(element, null, null);
        }

        AvlNode(T element, AvlNode left, AvlNode right) {
            this.element = element;
            this.left = left;
            this.right = right;
        }
    }

    /** Root of the tree; null when the tree is empty. Public for backward compatibility. */
    public AvlNode root;

    /** Creates an empty tree. */
    public AvlTree() {
        root = null;
    }

    /**
     * Returns the height of a (possibly null) subtree.
     * An empty (null) subtree has height -1 by convention.
     */
    public int height(AvlNode t) {
        return t == null ? -1 : t.height;
    }

    /** Returns the larger of two ints (delegates to the standard library). */
    public int max(int a, int b) {
        return Math.max(a, b);
    }

    /**
     * Inserts x into the tree.
     *
     * @param x the element to insert (must not be null)
     * @return true if x was inserted; false if an equal element was already
     *         present. Duplicates are detected explicitly instead of being
     *         signalled by a thrown-and-swallowed Exception as before.
     */
    public boolean insert(T x) {
        if (contains(x)) {
            return false; // duplicate: leave the tree unchanged
        }
        root = insert(x, root);
        return true;
    }

    /** Recursive insert helper; rebalances each node on the way back up. */
    private AvlNode insert(T x, AvlNode t) {
        if (t == null) {
            return new AvlNode(x);
        }
        int cmp = x.compareTo(t.element);
        if (cmp < 0) {
            t.left = insert(x, t.left);
        } else if (cmp > 0) {
            t.right = insert(x, t.right);
        }
        // cmp == 0 cannot occur here: the public insert() rejects duplicates first.
        return balance(t);
    }

    /** Single rotation with the left child (fixes a left-left imbalance). */
    private AvlNode rotateWithLeftChild(AvlNode t) {
        AvlNode tLeft = t.left;
        t.left = tLeft.right;
        tLeft.right = t;
        t.height = max(height(t.left), height(t.right)) + 1;
        tLeft.height = max(height(tLeft.left), t.height) + 1;
        return tLeft;
    }

    /** Single rotation with the right child (fixes a right-right imbalance). */
    private AvlNode rotateWithRightChild(AvlNode t) {
        AvlNode tRight = t.right;
        t.right = tRight.left;
        tRight.left = t;
        t.height = max(height(t.left), height(t.right)) + 1;
        tRight.height = max(height(tRight.right), t.height) + 1;
        return tRight;
    }

    /** Double rotation: left child rotated right, then this node rotated left (left-right case). */
    private AvlNode doubleWithLeftChild(AvlNode t) {
        t.left = rotateWithRightChild(t.left);
        return rotateWithLeftChild(t);
    }

    /** Double rotation: right child rotated left, then this node rotated right (right-left case). */
    private AvlNode doubleWithRightChild(AvlNode t) {
        t.right = rotateWithLeftChild(t.right);
        return rotateWithRightChild(t);
    }

    /** Removes all elements from the tree. */
    public void makeEmpty() {
        root = null;
    }

    /** Returns true if the tree contains no elements. */
    public boolean isEmpty() {
        return (root == null);
    }

    /** Returns the smallest element, or null if the tree is empty. */
    public T findMin() {
        if (isEmpty()) {
            return null;
        }
        return findMin(root).element;
    }

    /** Returns the leftmost (smallest) node of the given subtree, or null. */
    private AvlNode findMin(AvlNode t) {
        if (t == null) {
            return t;
        }
        while (t.left != null) {
            t = t.left;
        }
        return t;
    }

    /** Returns the largest element, or null if the tree is empty. */
    public T findMax() {
        if (root == null) {
            return null;
        }
        return findMax(root).element;
    }

    /** Returns the rightmost (largest) node of the given subtree, or null. */
    private AvlNode findMax(AvlNode t) {
        if (t == null) {
            return null;
        }
        while (t.right != null) {
            t = t.right;
        }
        return t;
    }

    /** Returns true if x is present in the tree. */
    public boolean contains(T x) {
        return contains(x, root);
    }

    /** Recursive membership test on the subtree rooted at t. */
    private boolean contains(T x, AvlNode t) {
        if (t == null) {
            return false;
        } else if (x.compareTo(t.element) < 0) {
            return contains(x, t.left);
        } else if (x.compareTo(t.element) > 0) {
            return contains(x, t.right);
        }
        return true; // equal element found
    }

    /**
     * Serializes the tree by in-order (infix) traversal: elements appear in
     * ascending order, each followed by a comma.
     */
    public String serializeInfix() {
        StringBuilder str = new StringBuilder();
        serializeInfix(root, str, ",");
        return str.toString();
    }

    /** In-order traversal helper appending "element + sep" for each node. */
    public void serializeInfix(AvlNode t, StringBuilder str, String sep) {
        if (t != null) {
            serializeInfix(t.left, str, sep);
            str.append(t.element.toString());
            str.append(sep);
            serializeInfix(t.right, str, sep);
        }
    }

    /** Removes x from the tree; does nothing if x is absent. */
    public void remove(T x) {
        root = remove(x, root);
    }

    /** Recursive removal; replaces a two-child node with its in-order successor. */
    private AvlNode remove(T x, AvlNode t) {
        if (t == null) {
            return t; // not found
        }
        if (x.compareTo(t.element) < 0) {
            t.left = remove(x, t.left);
        } else if (x.compareTo(t.element) > 0) {
            t.right = remove(x, t.right);
        } else if (t.left != null && t.right != null) {
            // Two children: copy the successor's element, then delete the successor.
            t.element = findMin(t.right).element;
            t.right = remove(t.element, t.right);
        } else {
            // Zero or one child: splice the child (or null) into this position.
            t = (t.left != null) ? t.left : t.right;
        }
        return balance(t);
    }

    /**
     * Restores the AVL invariant at node t (sibling heights differ by at most 1)
     * and refreshes t's cached height. Safe to call with a null node.
     */
    private AvlNode balance(AvlNode t) {
        if (t == null) {
            return t;
        }
        if (height(t.left) - height(t.right) > 1) {
            if (height(t.left.left) >= height(t.left.right))
                t = rotateWithLeftChild(t);
            else
                t = doubleWithLeftChild(t);
        } else if (height(t.right) - height(t.left) > 1) {
            if (height(t.right.right) >= height(t.right.left))
                t = rotateWithRightChild(t);
            else
                t = doubleWithRightChild(t);
        }
        t.height = max(height(t.left), height(t.right)) + 1;
        return t;
    }

    /**
     * Serializes the tree by pre-order (prefix) traversal, each element
     * followed by a comma.
     */
    public String serializePrefix() {
        StringBuilder str = new StringBuilder();
        serializePrefix(root, str, ",");
        return str.toString();
    }

    /** Pre-order traversal helper appending "element + sep" for each node. */
    public void serializePrefix(AvlNode t, StringBuilder str, String sep) {
        if (t != null) {
            str.append(t.element.toString());
            str.append(sep);
            serializePrefix(t.left, str, sep);
            serializePrefix(t.right, str, sep);
        }
    }

    /** Small demonstration driver. */
    public static void main(String[] args) {
        AvlTree<Integer> t = new AvlTree<Integer>();
        t.insert(2);
        t.insert(1);
        t.insert(4);
        t.insert(5);
        t.insert(9);
        t.insert(3);
        t.insert(6);
        t.insert(7);
        System.out.println();
        System.out.println("Prefix Traversal:");
        System.out.println(t.serializePrefix());
        System.out.println("Infix Traversal");
        System.out.println(t.serializeInfix());
        System.out.println("contains:" + 9);
        System.out.println(t.contains(9));
        System.out.println("contains:" + 10);
        System.out.println(t.contains(10));
        System.out.println("max:" + t.findMax());
        System.out.println("min:" + t.findMin());
        t.remove(6);
        System.out.println("Prefix Traversal:");
        System.out.println(t.serializePrefix());
        System.out.println("Infix Traversal");
        System.out.println(t.serializeInfix());
    }
}
package com.relayrides.pushy; import io.netty.bootstrap.Bootstrap; import io.netty.channel.Channel; import io.netty.channel.ChannelFuture; import io.netty.channel.ChannelInitializer; import io.netty.channel.ChannelOption; import io.netty.channel.ChannelPipeline; import io.netty.channel.nio.NioEventLoopGroup; import io.netty.channel.socket.SocketChannel; import io.netty.channel.socket.nio.NioSocketChannel; import io.netty.handler.ssl.SslHandler; import io.netty.util.concurrent.Future; import io.netty.util.concurrent.GenericFutureListener; public class ApnsClientThread<T extends ApnsPushNotification> extends Thread { private enum ClientState { CONNECT, READY, RECONNECT, SHUTDOWN, EXIT }; private final PushManager<T> pushManager; private ClientState state = null; private final Bootstrap bootstrap; private Channel channel = null; private int sequenceNumber = 0; private final SentNotificationBuffer<T> sentNotificationBuffer; private static final int SENT_NOTIFICATION_BUFFER_SIZE = 2048; public ApnsClientThread(final PushManager<T> pushManager) { super("ApnsClientThread"); this.pushManager = pushManager; this.sentNotificationBuffer = new SentNotificationBuffer<T>(SENT_NOTIFICATION_BUFFER_SIZE); this.bootstrap = new Bootstrap(); this.bootstrap.group(new NioEventLoopGroup()); this.bootstrap.channel(NioSocketChannel.class); this.bootstrap.option(ChannelOption.SO_KEEPALIVE, true); final ApnsClientThread<T> clientThread = this; this.bootstrap.handler(new ChannelInitializer<SocketChannel>() { @Override protected void initChannel(final SocketChannel channel) throws Exception { final ChannelPipeline pipeline = channel.pipeline(); if (pushManager.getEnvironment().isTlsRequired()) { pipeline.addLast("ssl", SslHandlerFactory.getSslHandler(pushManager.getKeyStore(), pushManager.getKeyStorePassword())); } pipeline.addLast("decoder", new ApnsErrorDecoder()); pipeline.addLast("encoder", new PushNotificationEncoder<T>()); pipeline.addLast("handler", new ApnsErrorHandler<T>(pushManager, 
clientThread)); } }); } @Override public void start() { this.state = ClientState.CONNECT; super.start(); } @Override public void run() { while (this.getClientState() != ClientState.EXIT) { switch (this.getClientState()) { case CONNECT: { try { final ChannelFuture connectFuture = this.bootstrap.connect(this.pushManager.getEnvironment().getApnsHost(), this.pushManager.getEnvironment().getApnsPort()).sync(); if (connectFuture.isSuccess()) { this.channel = connectFuture.channel(); if (this.pushManager.getEnvironment().isTlsRequired()) { final Future<Channel> handshakeFuture = this.channel.pipeline().get(SslHandler.class).handshakeFuture().sync(); if (handshakeFuture.isSuccess()) { this.advanceToStateFromOriginStates(ClientState.READY, ClientState.CONNECT); } } else { this.advanceToStateFromOriginStates(ClientState.READY, ClientState.CONNECT); } } } catch (InterruptedException e) { continue; } break; } case READY: { try { final SendableApnsPushNotification<T> sendableNotification = new SendableApnsPushNotification<T>(this.pushManager.getQueue().take(), this.sequenceNumber++); this.sentNotificationBuffer.addSentNotification(sendableNotification); // TODO Don't flush on every notification if we can avoid it this.channel.writeAndFlush(sendableNotification).addListener(new GenericFutureListener<ChannelFuture>() { public void operationComplete(final ChannelFuture future) { if (future.cause() != null) { pushManager.notifyListenersOfFailedDelivery(sendableNotification.getPushNotification(), future.cause()); } }}); } catch (InterruptedException e) { continue; } break; } case RECONNECT: { if (this.channel != null && this.channel.isOpen()) { this.channel.close(); } try { this.channel.closeFuture().sync(); this.advanceToStateFromOriginStates(ClientState.CONNECT, ClientState.RECONNECT); } catch (InterruptedException e) { continue; } break; } case SHUTDOWN: { try { if (this.channel != null && this.channel.isOpen()) { this.channel.close().sync(); } 
this.bootstrap.group().shutdownGracefully().sync(); } catch (InterruptedException e) { continue; } this.advanceToStateFromOriginStates(ClientState.EXIT, ClientState.SHUTDOWN); break; } case EXIT: { // Do nothing break; } default: { throw new IllegalArgumentException(String.format("Unexpected state: %S", this.getState())); } } } } protected void reconnect() { // We don't want to try to reconnect if we're already connecting or on our way out this.advanceToStateFromOriginStates(ClientState.RECONNECT, ClientState.READY); this.interrupt(); } public void shutdown() { // Don't re-shut-down if we're already on our way out this.advanceToStateFromOriginStates(ClientState.SHUTDOWN, ClientState.CONNECT, ClientState.READY, ClientState.RECONNECT); this.interrupt(); } private ClientState getClientState() { synchronized (this.state) { return this.state; } } /** * Sets the current state if and only if the current state is in one of the allowed origin states. */ private void advanceToStateFromOriginStates(final ClientState destinationState, final ClientState... allowableOriginStates) { synchronized (this.state) { for (final ClientState originState : allowableOriginStates) { if (this.state == originState) { this.state = destinationState; break; } } } } protected SentNotificationBuffer<T> getSentNotificationBuffer() { return this.sentNotificationBuffer; } }
package com.yahoo.sketches.memory; import static com.yahoo.sketches.memory.UnsafeUtil.unsafe; /** * The AllocMemory class is a subclass of NativeMemory and is used to allocate direct, off-heap * native memory, which is then accessed by the NativeMemory methods. * It is the responsibility of the calling class to free this memory using freeMemory() when done. * * <p>The task of direct allocation was moved to this sub-class for performance reasons. * * @author Lee Rhodes */ //@SuppressWarnings("restriction") public class AllocMemory extends MemoryMappedFile { /** * Constructor for allocate native memory. * * <p>Allocates and provides access to capacityBytes directly in native (off-heap) memory * leveraging the Memory interface. The MemoryRequest callback is set to null. * @param capacityBytes the size in bytes of the native memory */ public AllocMemory(long capacityBytes) { super(0L, null, null); super.nativeRawStartAddress_ = unsafe.allocateMemory(capacityBytes); super.capacityBytes_ = capacityBytes; super.memReq_ = null; } /** * Constructor for allocate native memory with MemoryRequest. * * <p>Allocates and provides access to capacityBytes directly in native (off-heap) memory leveraging * the Memory interface. * @param capacityBytes the size in bytes of the native memory * @param memReq The MemoryRequest callback */ public AllocMemory(long capacityBytes, MemoryRequest memReq) { super(0L, null, null); super.nativeRawStartAddress_ = unsafe.allocateMemory(capacityBytes); super.capacityBytes_ = capacityBytes; super.memReq_ = memReq; } /** * Constructor for reallocate native memory. * * <p>Reallocates the given off-heap NativeMemory to a new a new native (off-heap) memory * location and copies the contents of the original given NativeMemory to the new location. * Any memory beyond the capacity of the original given NativeMemory will be uninitialized. * Dispose of this new memory by calling {@link NativeMemory#freeMemory()}. 
* @param origMem The original NativeMemory that needs to be reallocated and must not be null. * The OS is free to just expand the capacity of the current allocation at the same native * address, or reassign a completely different native address in which case the origMem will be * freed by the OS. * The origMem capacity will be set to zero and must not be used again. * * @param newCapacityBytes the desired new capacity of the newly allocated memory in bytes * @param memReq The MemoryRequest callback, which may be null. */ public AllocMemory(NativeMemory origMem, long newCapacityBytes, MemoryRequest memReq) { super(0L, null, null); super.nativeRawStartAddress_ = unsafe.reallocateMemory(origMem.nativeRawStartAddress_, newCapacityBytes); super.capacityBytes_ = newCapacityBytes; this.memReq_ = memReq; origMem.nativeRawStartAddress_ = 0; //does not require freeMem origMem.capacityBytes_ = 0; //Cannot be used again } /** * Constructor for allocate native memory, copy and clear. * * <p>Allocate a new native (off-heap) memory with capacityBytes; copy the contents of origMem * from zero to copyToBytes; clear the new memory from copyToBytes to capacityBytes. * @param origMem The original NativeMemory, a portion of which will be copied to the * newly allocated Memory. * The reference must not be null. * This origMem is not modified in any way, may be reused and must be freed appropriately. * @param copyToBytes the upper limit of the region to be copied from origMem to the newly * allocated memory. * @param capacityBytes the desired new capacity of the newly allocated memory in bytes and the * upper limit of the region to be cleared. * @param memReq The MemoryRequest callback, which may be null. 
*/ public AllocMemory(NativeMemory origMem, long copyToBytes, long capacityBytes, MemoryRequest memReq) { super(0L, null, null); super.nativeRawStartAddress_ = unsafe.allocateMemory(capacityBytes); super.capacityBytes_ = capacityBytes; this.memReq_ = memReq; NativeMemory.copy(origMem, 0, this, 0, copyToBytes); this.clear(copyToBytes, capacityBytes - copyToBytes); } @Override public void freeMemory() { super.freeMemory(); } /** * If the JVM calls this method and a "freeMemory() has not been called" a <i>System.err</i> * message will be logged. */ @Override protected void finalize() { super.finalize(); } }
package cpw.mods.fml.common.registry;

import java.io.File;
import java.io.IOException;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;

import org.apache.logging.log4j.Level;

import net.minecraft.block.Block;
import net.minecraft.item.Item;
import net.minecraft.item.ItemBlock;
import net.minecraft.item.ItemStack;

import com.google.common.base.Charsets;
import com.google.common.base.Joiner;
import com.google.common.base.Joiner.MapJoiner;
import com.google.common.collect.ArrayListMultimap;
import com.google.common.collect.HashBasedTable;
import com.google.common.collect.ImmutableListMultimap;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Table;
import com.google.common.io.Files;

import cpw.mods.fml.common.FMLLog;
import cpw.mods.fml.common.Loader;
import cpw.mods.fml.common.ModContainer;
import cpw.mods.fml.common.event.FMLMissingMappingsEvent;
import cpw.mods.fml.common.event.FMLMissingMappingsEvent.MissingMapping;
import cpw.mods.fml.common.registry.GameRegistry.UniqueIdentifier;

/**
 * Central block/item id bookkeeping for FML: maintains the two controlled
 * namespaced registries, custom-ItemStack lookups, mod ownership records,
 * and the id-remapping machinery used when loading a world saved with
 * different ids.
 */
public class GameData
{
    // (modId, name) -> custom ItemStack registered by a mod.
    private static Table<String, String, ItemStack> customItemStacks = HashBasedTable.create();

    // Block ids occupy [0, 4095]; '\u0001' is the discriminator prefix used when
    // serializing block names (see buildItemDataList / injectWorldIDMap).
    public static final FMLControlledNamespacedRegistry<Block> blockRegistry = new FMLControlledNamespacedRegistry<Block>("air", 4095, 0, Block.class, '\u0001');
    // Item ids occupy [4096, 32000]; '\u0002' is the item discriminator prefix.
    public static final FMLControlledNamespacedRegistry<Item> itemRegistry = new FMLControlledNamespacedRegistry<Item>(null, 32000, 4096, Item.class, '\u0002');

    /**
     * Serializes both registries into a single name->id map (names carry the
     * discriminator prefix so blocks and items do not collide).
     */
    public static Map<String, Integer> buildItemDataList()
    {
        Map<String, Integer> idMapping = Maps.newHashMap();
        blockRegistry.serializeInto(idMapping);
        itemRegistry.serializeInto(idMapping);
        return idMapping;
    }

    /** Looks up an item by "modId:name"; behavior on a miss is registry-defined. */
    static Item findItem(String modId, String name)
    {
        return (Item) itemRegistry.func_82594_a(modId + ":" + name);
    }

    /** Looks up a block by "modId:name", or null if the key is not registered. */
    static Block findBlock(String modId, String name)
    {
        String key = modId + ":" + name;
        return blockRegistry.contains(key) ? blockRegistry.func_82594_a(key) : null;
    }

    /**
     * Resolves an ItemStack for (modId, name): first a registered custom stack,
     * then a plain item, then a block. Returns null if nothing matches.
     */
    static ItemStack findItemStack(String modId, String name)
    {
        ItemStack is = customItemStacks.get(modId, name);
        if (is == null)
        {
            Item i = findItem(modId, name);
            if (i != null)
            {
                is = new ItemStack(i, 0, 0);
            }
        }
        if (is == null)
        {
            Block b = findBlock(modId, name);
            if (b != null)
            {
                // Short.MAX_VALUE damage acts as a wildcard metadata value.
                is = new ItemStack(b, 0, Short.MAX_VALUE);
            }
        }
        return is;
    }

    /** Registers a custom ItemStack under the currently active mod's id. */
    static void registerCustomItemStack(String name, ItemStack itemStack)
    {
        customItemStacks.put(Loader.instance().activeModContainer().getModId(), name, itemStack);
    }

    /**
     * Writes the custom ItemStack registry to itemStackRegistry.csv when the
     * system property fml.dumpRegistry is "true". Failures are logged, not thrown.
     */
    public static void dumpRegistry(File minecraftDir)
    {
        if (customItemStacks == null)
        {
            return;
        }
        if (Boolean.valueOf(System.getProperty("fml.dumpRegistry", "false")).booleanValue())
        {
            ImmutableListMultimap.Builder<String, String> builder = ImmutableListMultimap.builder();
            for (String modId : customItemStacks.rowKeySet())
            {
                builder.putAll(modId, customItemStacks.row(modId).keySet());
            }

            File f = new File(minecraftDir, "itemStackRegistry.csv");
            MapJoiner mapJoiner = Joiner.on("\n").withKeyValueSeparator(",");
            try
            {
                Files.write(mapJoiner.join(builder.build().entries()), f, Charsets.UTF_8);
                FMLLog.log(Level.INFO, "Dumped item registry data to %s", f.getAbsolutePath());
            }
            catch (IOException e)
            {
                FMLLog.log(Level.ERROR, e, "Failed to write registry data to %s", f.getAbsolutePath());
            }
        }
    }

    /**
     * Returns the unique name of a block, or null when the block is null or
     * shadowed by a registered custom ItemStack with the same (modId, name).
     */
    static UniqueIdentifier getUniqueName(Block block)
    {
        if (block == null) return null;
        String name = blockRegistry.func_148750_c(block);
        UniqueIdentifier ui = new UniqueIdentifier(name);
        if (customItemStacks.contains(ui.modId, ui.name))
        {
            return null;
        }
        return ui;
    }

    /** Item counterpart of {@link #getUniqueName(Block)} with identical shadowing rule. */
    static UniqueIdentifier getUniqueName(Item item)
    {
        if (item == null) return null;
        String name = itemRegistry.func_148750_c(item);
        UniqueIdentifier ui = new UniqueIdentifier(name);
        if (customItemStacks.contains(ui.modId, ui.name))
        {
            return null;
        }
        return ui;
    }

    // Records which mod registered each (modId, name) pair.
    private static Map<UniqueIdentifier, ModContainer> customOwners = Maps.newHashMap();

    /**
     * Registers a block together with its ItemBlock; both must land on the same
     * id slot, otherwise registration is inconsistent and we fail hard.
     */
    static void registerBlockAndItem(ItemBlock item, Block block, String name, String modId)
    {
        ModContainer mc = Loader.instance().activeModContainer();
        if (modId != null)
        {
            customOwners.put(new UniqueIdentifier(modId, name), mc);
        }
        int blockId = blockRegistry.add(0, name, block);
        int itemId = itemRegistry.add(blockId, name, item);
        if (itemId != blockId)
        {
            // Block id and ItemBlock id diverged -- the registries are out of sync.
            throw new RuntimeException();
        }
    }

    /**
     * Registers a standalone item and reserves its id slot in the block
     * registry so blocks can never reuse it. ItemBlocks must go through
     * {@link #registerBlockAndItem} instead.
     */
    static void registerItem(Item item, String name, String modId)
    {
        ModContainer mc = Loader.instance().activeModContainer();
        if (modId != null)
        {
            customOwners.put(new UniqueIdentifier(modId, name), mc);
        }
        if (item instanceof ItemBlock)
        {
            throw new RuntimeException("Cannot register an itemblock separately from it's block");
        }
        int itemId = itemRegistry.add(0, name, item);
        blockRegistry.useSlot(itemId);
    }

    /** Registers a block and reserves its slot on the item side (mirror of registerItem). */
    static void registerBlock(Block block, String name, String modId)
    {
        ModContainer mc = Loader.instance().activeModContainer();
        if (modId != null)
        {
            customOwners.put(new UniqueIdentifier(modId, name), mc);
        }
        int blockId = blockRegistry.add(0, name, block);
        itemRegistry.useSlot(blockId);
    }

    /**
     * Finds the mod owning a "modId:name" string: explicit custom-owner records
     * first, then the loader's indexed mod list by modId.
     */
    public static ModContainer findModOwner(String string)
    {
        UniqueIdentifier ui = new UniqueIdentifier(string);
        if (customOwners.containsKey(ui))
        {
            return customOwners.get(ui);
        }
        return Loader.instance().getIndexedModList().get(ui.modId);
    }

    /** Cross-reserves every used id in each registry so the two id spaces never overlap. */
    public static void fixupRegistries()
    {
        for (Integer id : blockRegistry.usedIds())
        {
            itemRegistry.useSlot(id);
        }

        for (Integer id : itemRegistry.usedIds())
        {
            blockRegistry.useSlot(id);
        }
    }

    /**
     * Applies a saved world's name->id map onto the live registries.
     *
     * Each key starts with a one-char discriminator ('\u0001' block, '\u0002'
     * item). Ids that moved are collected into a remap table; names missing
     * from the live registries are offered to mods via the missing-mappings
     * event. If any mod vetoes, the entire swap is reverted and false is
     * returned. With injectFrozenData, entries only present in the frozen data
     * are injected as new blocks/items. Fires the remap event on success.
     *
     * @param dataList saved name->id mapping (discriminator-prefixed names)
     * @param injectFrozenData whether to inject frozen-map-only entries
     * @return true if the map was applied, false if it was rejected and reverted
     */
    public static boolean injectWorldIDMap(Map<String, Integer> dataList, boolean injectFrozenData)
    {
        Map<String, Integer[]> remaps = Maps.newHashMap();
        ArrayListMultimap<String, String> missing = ArrayListMultimap.create();
        blockRegistry.dump();
        itemRegistry.dump();
        // Both swaps must complete or revert together.
        blockRegistry.beginIdSwap();
        itemRegistry.beginIdSwap();
        for (Entry<String, Integer> entry : dataList.entrySet())
        {
            String itemName = entry.getKey();
            char discriminator = itemName.charAt(0);
            itemName = itemName.substring(1);
            Integer newId = entry.getValue();
            int currId;
            boolean isBlock = discriminator == '\u0001';
            if (isBlock)
            {
                currId = blockRegistry.getId(itemName);
            }
            else
            {
                currId = itemRegistry.getId(itemName);
            }
            if (currId == -1)
            {
                // Name not present in the live registry: group by modId for the event.
                FMLLog.info("Found a missing id from the world %s", itemName);
                missing.put(itemName.substring(0, itemName.indexOf(':')), itemName);
            }
            else if (currId != newId)
            {
                FMLLog.info("Found %s id mismatch %s : %d %d", isBlock ? "block" : "item", itemName, currId, newId);
                remaps.put(itemName, new Integer[] { currId, newId });
            }
            if (isBlock)
            {
                blockRegistry.reassignMapping(itemName, newId);
            }
            else
            {
                itemRegistry.reassignMapping(itemName, newId);
            }
        }
        boolean successfullyLoaded = Loader.instance().fireMissingMappingEvent(missing);
        if (!successfullyLoaded)
        {
            // A mod rejected the mapping -- undo everything reassigned above.
            blockRegistry.revertSwap();
            itemRegistry.revertSwap();
            return false;
        }
        if (injectFrozenData)
        {
            FMLLog.info("Injecting new block and item data into this server instance");
            Map<String, Integer> missingBlocks = Maps.newHashMap(blockRegistry.getMissingMappings());
            Map<String, Integer> missingItems = Maps.newHashMap(itemRegistry.getMissingMappings());
            for (Entry<String, Integer> item : missingItems.entrySet())
            {
                String itemName = item.getKey();
                if (missingBlocks.containsKey(itemName))
                {
                    // Block + ItemBlock pair: the block decides the final id.
                    int blockId = blockRegistry.swap(item.getValue(), itemName, blockRegistry.get(itemName));
                    itemRegistry.swap(blockId, itemName, itemRegistry.get(itemName));
                    FMLLog.info("Injecting new block/item %s : %d", itemName, blockId);
                    missingBlocks.remove(itemName);
                    // NOTE(review): Integer.valueOf(blockId) != item.getValue() compares
                    // boxed references, which is only reliable for ids in the Integer
                    // cache range (-128..127) -- presumably intended as a value
                    // comparison; confirm before relying on it for ids >= 128.
                    if (Integer.valueOf(blockId) != item.getValue())
                    {
                        remaps.put(itemName, new Integer[] { item.getValue(), blockId });
                    }
                }
                else
                {
                    FMLLog.info("Injecting new item %s", itemName);
                    int itemId = itemRegistry.swap(item.getValue(), itemName, itemRegistry.get(itemName));
                    // NOTE(review): same boxed-reference comparison caveat as above.
                    if (Integer.valueOf(itemId) != item.getValue())
                    {
                        remaps.put(itemName, new Integer[] { item.getValue(), itemId });
                    }
                }
            }
            for (Entry<String, Integer> block : missingBlocks.entrySet())
            {
                FMLLog.info("Injecting new block %s", block.getKey());
                int blockId = blockRegistry.swap(block.getValue(), block.getKey(), blockRegistry.get(block.getKey()));
                // NOTE(review): same boxed-reference comparison caveat as above.
                if (Integer.valueOf(blockId) != block.getValue())
                {
                    remaps.put(block.getKey(), new Integer[] { block.getValue(), blockId });
                }
            }
        }
        blockRegistry.completeIdSwap();
        itemRegistry.completeIdSwap();
        blockRegistry.dump();
        itemRegistry.dump();
        Loader.instance().fireRemapEvent(remaps);
        return true;
    }

    /**
     * Evaluates mods' decisions on missing mappings: any non-IGNORE action
     * aborts the load (returns false); pure-IGNORE outcomes are logged.
     */
    public static boolean processIdRematches(List<MissingMapping> remaps)
    {
        List<String> ignored = Lists.newArrayList();
        List<String> warned = Lists.newArrayList();
        for (MissingMapping remap : remaps)
        {
            FMLMissingMappingsEvent.Action action = remap.getAction();
            if (action == FMLMissingMappingsEvent.Action.IGNORE)
            {
                ignored.add(remap.name);
            }
            else
            {
                warned.add(remap.name);
            }
        }
        if (!warned.isEmpty())
        {
            FMLLog.severe("This world contains block and item mappings that may cause world breakage");
            return false;
        }
        else if (!ignored.isEmpty())
        {
            FMLLog.fine("There were %d missing mappings that have been ignored", ignored.size());
        }
        return true;
    }

    /** Snapshots the current id maps as the frozen (reference) state. */
    public static void freezeData()
    {
        FMLLog.fine("Freezing block and item id maps");
        blockRegistry.freezeMap();
        itemRegistry.freezeMap();
    }

    /** Restores both registries to the last frozen snapshot. */
    public static void revertToFrozen()
    {
        FMLLog.fine("Reverting to frozen data state");
        blockRegistry.revertToFrozen();
        itemRegistry.revertToFrozen();
    }
}
package cz.esc.iot.cloudservice.oauth2; import java.io.BufferedReader; import java.io.File; import java.io.FileReader; import java.io.IOException; import java.util.Random; import org.apache.commons.codec.binary.Hex; import org.json.JSONException; import org.restlet.data.Reference; import org.restlet.ext.oauth.AccessTokenClientResource; import org.restlet.ext.oauth.GrantType; import org.restlet.ext.oauth.OAuthException; import org.restlet.ext.oauth.OAuthParameters; import org.restlet.ext.oauth.internal.Token; import org.restlet.representation.Representation; import org.restlet.resource.ClientResource; import com.google.gson.Gson; import cz.esc.iot.cloudservice.persistance.dao.MorfiaSetUp; import cz.esc.iot.cloudservice.persistance.model.AccessToken; import cz.esc.iot.cloudservice.persistance.model.UserEntity; /** * Class for communication with authorisation and token servers. */ public class OAuth2 { public static String clientID; public static String clientSecret; /** * Sets Google's clientId and clientSecret. */ public static void setClientCredentials() { try { BufferedReader br = new BufferedReader(new FileReader(new File("/home/z3tt0r/google_client_credentials2"))); clientID = br.readLine(); clientSecret = br.readLine(); br.close(); } catch (IOException e) { e.printStackTrace(); } } /** * Find, whether user obtained from Google is registered in * Zettor's database. * @return Returns verified user. */ public static UserEntity findUserInDatabase(String access_token) { AccessToken token = MorfiaSetUp.getDatastore().createQuery(AccessToken.class).field("access_token").equal(access_token).get(); return token.getUser(); } /** * Ask for information about user. Uses received access token for it. * @return Returns information from Google. 
*/ public static GoogleUserInfo getGoogleUserFromAccessToken(String accessToken) throws IOException { String uri = "https: ClientResource getter = new ClientResource(uri); Representation response = getter.get(); Gson gson = new Gson(); GoogleUserInfo user = null; user = gson.fromJson(response.getText(), GoogleUserInfo.class); return user; } /** * Asks Google for access token. Uses code, received as parameter, for it. * @return Returns valid access token. * @throws JSONException * @throws OAuthException */ public static Token exchangeCodeForAccessToken(String code) throws IOException, OAuthException, JSONException { AccessTokenClientResource client = new AccessTokenClientResource(new Reference("https://accounts.google.com/o/oauth2/token")); client.setClientCredentials(OAuth2.clientID, OAuth2.clientSecret); OAuthParameters params = new OAuthParameters(); params.code(code); params.grantType(GrantType.authorization_code); Token token = client.requestToken(params); return token; } public static GoogleUserInfo getGoogleUserInfoFromCode(String code) throws IOException, OAuthException, JSONException { // exchange code for access token Token token = OAuth2.exchangeCodeForAccessToken(code); String accessToken = token.getAccessToken(); // get info about user from IDP GoogleUserInfo googleUser = OAuth2.getGoogleUserFromAccessToken(accessToken); return googleUser; } public static CloudToken generateToken() { Random random = new Random(); byte[] accessToken = new byte[40]; byte[] refreshToken = new byte[40]; random.nextBytes(accessToken); random.nextBytes(refreshToken); return new CloudToken(String.valueOf(Hex.encodeHex(accessToken)), String.valueOf(Hex.encodeHex(refreshToken))); } }
package eu.amidst.core.header;

import eu.amidst.core.database.statics.readers.Attribute;

/**
 * Builder that derives a variable's properties (name, state count, state
 * space, distribution type) from a data-source {@link Attribute}.
 *
 * NOTE(review): every field and getter is static, yet the constructors assign
 * through {@code this} -- so constructing a new VariableBuilder silently
 * overwrites the state observed through the getters of all previously built
 * instances. These were presumably meant to be instance members; TODO confirm
 * before changing, since callers depend on the static getters.
 */
public final class VariableBuilder {
    // All static: shared across every instance (see class NOTE above).
    private static String name;
    private static boolean observable;
    private static int numberOfStates;
    private static StateSpaceType stateSpaceType;
    private static DistType distributionType;

    /**
     * Builds from an attribute, inferring the distribution type from its
     * state space: REAL -> GAUSSIAN, MULTINOMIAL -> MULTINOMIAL.
     *
     * @throws IllegalArgumentException for any other state space type
     */
    public VariableBuilder(Attribute att) {
        this.name = att.getName();
        this.observable = true; // attributes come from observed data
        this.numberOfStates = att.getNumberOfStates();
        this.stateSpaceType = att.getStateSpaceType();
        switch (att.getStateSpaceType()) {
            case REAL:
                this.distributionType = DistType.GAUSSIAN;
                break;
            case MULTINOMIAL:
                this.distributionType = DistType.MULTINOMIAL;
                break;
            default:
                throw new IllegalArgumentException(" The string \"" + att.getStateSpaceType()
                        + "\" does not map to any Type.");
        }
    }

    /**
     * Builds from an attribute with an explicitly chosen distribution type
     * (no inference, no validation against the state space).
     */
    public VariableBuilder(Attribute att, DistType typeDist) {
        this.name = att.getName();
        this.observable = true;
        this.numberOfStates = att.getNumberOfStates();
        this.stateSpaceType = att.getStateSpaceType();
        this.distributionType = typeDist;
    }

    public static String getName() {
        return name;
    }

    public static boolean isObservable() {
        return observable;
    }

    public static int getNumberOfStates() {
        return numberOfStates;
    }

    public static StateSpaceType getStateSpaceType() {
        return stateSpaceType;
    }

    public static DistType getDistributionType() {
        return distributionType;
    }
}
package function.genotype.base; import global.Data; import utils.CommandValue; /** * * @author nick */ public class QualityManager { public static boolean isMafValid(double value) { if (CommandValue.isFlipMaf) { if (value >= CommandValue.maf) { return true; } } else { if (value <= CommandValue.maf) { return true; } } return false; } public static boolean isMaf4RecessiveValid(double value) { if (CommandValue.maf4Recessive == Data.NO_FILTER) { return true; } if (CommandValue.isFlipMaf) { if (value >= CommandValue.maf) { return true; } } else if (value <= CommandValue.maf4Recessive) { return true; } return false; } public static boolean isEvsMafValid(double value) { if (CommandValue.evsMaf == Data.NO_FILTER) { return true; } if (value <= CommandValue.evsMaf) { return true; } return false; } public static boolean isEvsMhgf4RecessiveValid(double value) { if (CommandValue.evsMhgf4Recessive == Data.NO_FILTER) { return true; } if (value <= CommandValue.evsMhgf4Recessive) { return true; } return false; } public static boolean isEvsStatusValid(String status) { if (CommandValue.isExcludeEvsQcFailed) { if (status.equalsIgnoreCase("NA") || status.equalsIgnoreCase("pass")) { return true; } else { return false; } } else { return true; } } public static boolean isExacMafValid(float value) { if (CommandValue.exacMaf == Data.NO_FILTER) { return true; } if (value <= CommandValue.exacMaf) { return true; } return false; } public static boolean isExacVqslodValid(float value, boolean isSnv) { if (isSnv) { return isExacVqslodSnvValid(value); } else { return isExacVqslodIndelValid(value); } } private static boolean isExacVqslodSnvValid(float value) { if (CommandValue.exacVqslodSnv == Data.NO_FILTER) { return true; } if (value >= CommandValue.exacVqslodSnv || value == Data.NA) { return true; } return false; } private static boolean isExacVqslodIndelValid(float value) { if (CommandValue.exacVqslodIndel == Data.NO_FILTER) { return true; } if (value >= CommandValue.exacVqslodIndel || value == 
Data.NA) { return true; } return false; } public static boolean isMhgf4RecessiveValid(double value) { if (CommandValue.mhgf4Recessive == Data.NO_FILTER) { return true; } if (value <= CommandValue.mhgf4Recessive) { return true; } return false; } public static boolean isCombFreqValid(double value) { if (CommandValue.combFreq == Data.NO_FILTER) { return true; } if (value <= CommandValue.combFreq) { return true; } return false; } public static boolean isLooCombFreqValid(double value) { if (CommandValue.looCombFreq == Data.NO_FILTER) { return true; } if (value <= CommandValue.looCombFreq) { return true; } return false; } public static boolean isMinCoverageValid(int value, int minCov) { if (minCov == Data.NO_FILTER) { return true; } if (value >= minCov) { return true; } return false; } public static boolean isMinVarPresentValid(int value) { if (CommandValue.minVarPresent == Data.NO_FILTER) { return true; } if (value >= CommandValue.minVarPresent) { return true; } return false; } public static boolean isMinCaseCarrierValid(int value) { if (CommandValue.minCaseCarrier == Data.NO_FILTER) { return true; } if (value >= CommandValue.minCaseCarrier) { return true; } return false; } public static boolean isMinHomCaseRecValid(int value) { if (CommandValue.minHomCaseRec == Data.NO_FILTER) { return true; } if (value >= CommandValue.minHomCaseRec) { return true; } return false; } public static boolean isVarStatusValid(String value) { if (CommandValue.varStatus == null) { // no filter or all return true; } if (value == null) { if (CommandValue.isQcMissingIncluded) { return true; } } else { for (String str : CommandValue.varStatus) { if (value.equals(str)) { return true; } } } return false; } public static boolean isGqValid(float value) { if (CommandValue.genotypeQualGQ == Data.NO_FILTER) { return true; } if (value == Data.NA) { if (CommandValue.isQcMissingIncluded) { return true; } } else { if (value >= CommandValue.genotypeQualGQ) { return true; } } return false; } public static 
boolean isFsValid(float value) { if (CommandValue.strandBiasFS == Data.NO_FILTER) { return true; } if (value == Data.NA) { if (CommandValue.isQcMissingIncluded) { return true; } } else { if (value <= CommandValue.strandBiasFS) { return true; } } return false; } public static boolean isHapScoreValid(float value) { if (CommandValue.haplotypeScore == Data.NO_FILTER) { return true; } if (value == Data.NA) { if (CommandValue.isQcMissingIncluded) { return true; } } else { if (value <= CommandValue.haplotypeScore) { return true; } } return false; } public static boolean isMqValid(float value) { if (CommandValue.rmsMapQualMQ == Data.NO_FILTER) { return true; } if (value == Data.NA) { if (CommandValue.isQcMissingIncluded) { return true; } } else { if (value >= CommandValue.rmsMapQualMQ) { return true; } } return false; } public static boolean isQdValid(float value) { if (CommandValue.qualByDepthQD == Data.NO_FILTER) { return true; } if (value == Data.NA) { if (CommandValue.isQcMissingIncluded) { return true; } } else { if (value >= CommandValue.qualByDepthQD) { return true; } } return false; } public static boolean isQualValid(float value) { if (CommandValue.qual == Data.NO_FILTER) { return true; } if (value == Data.NA) { if (CommandValue.isQcMissingIncluded) { return true; } } else { if (value >= CommandValue.qual) { return true; } } return false; } public static boolean isRprsValid(float value) { if (CommandValue.readPosRankSum == Data.NO_FILTER) { return true; } if (value == Data.NA) { if (CommandValue.isQcMissingIncluded) { return true; } } else { if (value >= CommandValue.readPosRankSum) { return true; } } return false; } public static boolean isMqrsValid(float value) { if (CommandValue.mapQualRankSum == Data.NO_FILTER) { return true; } if (value == Data.NA) { if (CommandValue.isQcMissingIncluded) { return true; } } else { if (value >= CommandValue.mapQualRankSum) { return true; } } return false; } public static boolean isHetPercentAltReadValid(double value) { if 
(CommandValue.hetPercentAltRead == null) { return true; } if (value != Data.NA) { if (value >= CommandValue.hetPercentAltRead[0] && value <= CommandValue.hetPercentAltRead[1]) { return true; } } return false; } public static boolean isHomPercentAltReadValid(double value) { if (CommandValue.homPercentAltRead == null) { return true; } if (value != Data.NA) { if (value >= CommandValue.homPercentAltRead[0] && value <= CommandValue.homPercentAltRead[1]) { return true; } } return false; } public static boolean isCscoreValid(float value) { if (value == Data.NA || CommandValue.minCscore == Data.NO_FILTER) { return true; } if (value >= CommandValue.minCscore) { return true; } return false; } public static boolean isChildQdValid(float value) { if (CommandValue.childQD == Data.NO_FILTER) { return true; } if (value == Data.NA) { if (CommandValue.isQcMissingIncluded) { return true; } } else { if (value >= CommandValue.childQD) { return true; } } return false; } public static boolean isChildHetPercentAltReadValid(double value) { if (CommandValue.childHetPercentAltRead == null) { return true; } if (value != Data.NA) { if (value >= CommandValue.childHetPercentAltRead[0] && value <= CommandValue.childHetPercentAltRead[1]) { return true; } } return false; } public static boolean isChildBinomialValid(double value) { if (CommandValue.minChildBinomial == Data.NO_FILTER) { return true; } if (value != Data.NA && value >= CommandValue.minChildBinomial) { return true; } return false; } public static boolean isParentBinomialValid(double value) { if (CommandValue.maxParentBinomial == Data.NO_FILTER) { return true; } if (value != Data.NA && value < CommandValue.maxParentBinomial) { return true; } return false; } public static boolean isMaxQcFailSampleValid(int value) { if (CommandValue.maxQcFailSample == Data.NO_FILTER) { return true; } if (value <= CommandValue.maxQcFailSample) { return true; } return false; } }
package ge.edu.freeuni.sdp.xo.rooms.data;

import javax.xml.bind.annotation.XmlElement;
import javax.xml.bind.annotation.XmlRootElement;

/**
 * JAXB-serializable game room: a room id plus the ids of the X and O players.
 * A player slot is null (marshalled as nil, see {@code nillable = true}) while
 * unoccupied.
 */
@XmlRootElement
public class Room {

	@XmlElement
	private int id;

	@XmlElement(nillable = true)
	private Integer x_user;

	@XmlElement(nillable = true)
	private Integer o_user;

	/**
	 * No-argument constructor.
	 *
	 * FIX: JAXB requires a no-arg constructor to unmarshal this type; with
	 * only the three-argument constructor below, deserialization of a Room
	 * failed at runtime.
	 */
	public Room() {
	}

	/**
	 * @param id room identifier
	 * @param x_user id of the X player, or null if the slot is empty
	 * @param o_user id of the O player, or null if the slot is empty
	 */
	public Room(int id, Integer x_user, Integer o_user) {
		this.id = id;
		this.x_user = x_user;
		this.o_user = o_user;
	}

	/**
	 * @return the id
	 */
	public int getId() {
		return id;
	}

	/**
	 * @param id the id to set
	 */
	public void setId(int id) {
		this.id = id;
	}

	/**
	 * @return the xUserId
	 *
	 * NOTE(review): returns primitive int while the field is a nullable
	 * Integer, so this throws NullPointerException when the X slot is empty.
	 * Kept as-is for caller compatibility -- check for null via the field's
	 * setter contract, or confirm callers never invoke this on an empty slot.
	 */
	public int getxUserId() {
		return x_user;
	}

	/**
	 * @param x_user the xUserId to set (null to empty the slot)
	 */
	public void setxUserId(Integer x_user) {
		this.x_user = x_user;
	}

	/**
	 * @return the oUserId
	 *
	 * NOTE(review): same NullPointerException hazard as {@link #getxUserId()}.
	 */
	public int getoUserId() {
		return o_user;
	}

	/**
	 * @param o_user the oUserId to set (null to empty the slot)
	 */
	public void setoUserId(Integer o_user) {
		this.o_user = o_user;
	}
}
package io.sigpipe.sing.adapters;

import java.io.BufferedInputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.util.HashSet;
import java.util.Scanner;
import java.util.Set;
import java.util.zip.GZIPOutputStream;

import io.sigpipe.sing.dataset.Metadata;
import io.sigpipe.sing.dataset.analysis.Quantizer;
import io.sigpipe.sing.dataset.feature.Feature;
import io.sigpipe.sing.dataset.feature.FeatureType;
import io.sigpipe.sing.graph.FeatureHierarchy;
import io.sigpipe.sing.graph.Path;
import io.sigpipe.sing.graph.Sketch;
import io.sigpipe.sing.query.Expression;
import io.sigpipe.sing.query.Operator;
import io.sigpipe.sing.query.PartitionQuery;
import io.sigpipe.sing.serialization.SerializationInputStream;
import io.sigpipe.sing.serialization.SerializationOutputStream;
import io.sigpipe.sing.serialization.Serializer;
import io.sigpipe.sing.util.Geohash;
import io.sigpipe.sing.util.PerformanceTimer;
import io.sigpipe.sing.util.TestConfiguration;

/**
 * Manual benchmark driver: loads a metadata blob into a Sketch, reports
 * memory/graph statistics, runs a partition query that extracts and removes
 * one geohash subtree, then loads a second blob and reports again.
 *
 * Not a production entry point -- it blocks on stdin (Scanner.nextInt) to let
 * an operator attach profiling tools, and calls System.gc() to steady the
 * memory readings.
 */
public class ReadMetaBlob {

    // Feature names retained during ingest; everything else in a record is dropped.
    public static Set<String> activeFeatures = new HashSet<>();

    /**
     * args[0]: first metadata blob to load; args[1]: second blob loaded after
     * the partition/remove step.
     */
    public static void main(String[] args) throws Exception {
        for (String featureName : TestConfiguration.FEATURE_NAMES) {
            activeFeatures.add(featureName);
        }

        // Pause until the operator types an int (time to attach a profiler).
        Scanner scan = new Scanner(System.in);
        scan.nextInt();

        // Build the hierarchy: all configured float features plus a string
        // "location" (geohash) level at the bottom.
        FeatureHierarchy fh = new FeatureHierarchy();
        for (String featureName : TestConfiguration.FEATURE_NAMES) {
            System.out.println(
                    TestConfiguration.quantizers.get(featureName).numTicks()
                    + " " + featureName);
            fh.addFeature(featureName, FeatureType.FLOAT);
        }
        fh.addFeature("location", FeatureType.STRING);
        Sketch s = new Sketch(fh);

        loadData(args[0], s);

        // Double gc() to encourage a settled heap before the readings below.
        System.gc();
        System.gc();

        System.out.println();
        Runtime runtime = Runtime.getRuntime();
        System.out.println("max=" + runtime.maxMemory());
        System.out.println("total=" + runtime.totalMemory());
        System.out.println("free=" + runtime.freeMemory());
        System.out.println("used=" + (runtime.totalMemory() - runtime.freeMemory()));
        System.out.println("estimate=" + estimateMemoryUsage(
                    s, s.getMetrics().getVertexCount(), s.getMetrics().getLeafCount()));

        //scan.nextInt();

        PerformanceTimer info = new PerformanceTimer("info");
        info.start();
        System.out.println(s.getRoot().numLeaves());
        System.out.println(s.getRoot().numDescendants());
        System.out.println(s.getRoot().numDescendantEdges());
        info.stopAndPrint();

        System.out.println(s.getMetrics());

        // Query results are serialized gzip-compressed to d.bin.
        SerializationOutputStream out = new SerializationOutputStream(
                new GZIPOutputStream(
                    new FileOutputStream(new File("d.bin"))));
        //s.getRoot().find(new Feature("location", 319920l));
        PartitionQuery pq = new PartitionQuery();
//        rq.addExpression(
//                new Expression(
//                    Operator.RANGE_INC, new Feature("temperature_surface", 260.0f), new Feature(300.0f)));
        // Partition out every path whose geohash starts with this prefix.
        String removePrefix = "d";
        pq.addExpression(
                new Expression(
                    Operator.STR_PREFIX, new Feature("location", removePrefix)));
        PerformanceTimer exec = new PerformanceTimer("exec");
        exec.start();
        pq.execute(s.getRoot());
        pq.serializeResults(s.getRoot(), out);
        out.close();
        exec.stopAndPrint();
        // Drop the partitioned subtree from the in-memory geo index as well.
        s.geoTrie.remove(removePrefix);

        System.gc();
        System.gc();

        // Post-removal memory and graph statistics (same block as above).
        System.out.println();
        System.out.println("max=" + runtime.maxMemory());
        System.out.println("total=" + runtime.totalMemory());
        System.out.println("free=" + runtime.freeMemory());
        System.out.println("used=" + (runtime.totalMemory() - runtime.freeMemory()));
        System.out.println("estimate=" + estimateMemoryUsage(
                    s, s.getMetrics().getVertexCount(), s.getMetrics().getLeafCount()));

        info.start();
        System.out.println(s.getRoot().numLeaves());
        System.out.println(s.getRoot().numDescendants());
        System.out.println(s.getRoot().numDescendantEdges());
        info.stopAndPrint();

        //scan.nextInt();
        System.out.println(s.getMetrics());
        System.out.println(s.geoTrie.query("dj").b);

        // Second ingest on the pruned sketch, followed by a final report.
        loadData(args[1], s);

        System.gc();
        System.gc();

        System.out.println();
        System.out.println("max=" + runtime.maxMemory());
        System.out.println("total=" + runtime.totalMemory());
        System.out.println("free=" + runtime.freeMemory());
        System.out.println("used=" + (runtime.totalMemory() - runtime.freeMemory()));
        System.out.println("estimate=" + estimateMemoryUsage(
                    s, s.getMetrics().getVertexCount(), s.getMetrics().getLeafCount()));
    }

    /**
     * Streams a metadata blob into the sketch.
     *
     * Blob layout (as read here): an int record count, then per record a
     * float lat, a float lon, and a length-prefixed serialized Metadata
     * payload. Each record becomes a Path of quantized active features
     * terminated by a 4-character geohash "location" feature.
     */
    public static void loadData(String fileName, Sketch s) throws Exception {
        System.out.println("Reading metadata blob: " + fileName);
        FileInputStream fIn = new FileInputStream(fileName);
        BufferedInputStream bIn = new BufferedInputStream(fIn);
        SerializationInputStream in = new SerializationInputStream(bIn);

        int num = in.readInt();
        System.out.println("Records: " + num);
        PerformanceTimer addAllPaths = new PerformanceTimer("addAllPaths");
        addAllPaths.start();
        for (int i = 0; i < num; ++i) {
            float lat = in.readFloat();
            float lon = in.readFloat();
            byte[] payload = in.readField();
            Metadata m = Serializer.deserialize(Metadata.class, payload);

            // +1 leaves room for the trailing "location" feature.
            Path p = new Path(activeFeatures.size() + 1);
            for (Feature f : m.getAttributes()) {
                String featureName = f.getName();
                if (activeFeatures.contains(featureName) == false) {
                    continue;
                }
                Quantizer q = TestConfiguration.quantizers.get(featureName);
                if (q == null) {
                    continue;
                }

                Feature quantizedFeature = q.quantize(f);
                // intern(): feature names repeat across millions of records.
                p.add(new Feature(f.getName().intern(), quantizedFeature));
            }
            String location = Geohash.encode(lat, lon, 4);
            p.add(new Feature("location", location));
            s.addPath(p);

            // Progress dot every 5000 records.
            if (i % 5000 == 0) {
                System.out.print('.');
            }
        }
        System.out.println();
        addAllPaths.stopAndPrint();
        in.close();
    }

    /**
     * Rough heap-usage estimate from vertex/leaf counts: fixed per-vertex
     * cost plus per-leaf storage for feature statistics and the pairwise
     * feature matrix; the 1.7 factor is an empirical fudge for JVM overhead.
     */
    private static double estimateMemoryUsage(
            Sketch sketch, long vertices, long leaves) {
        int bytesPerVertex = 16;
        int numFeatures = sketch.getFeatureHierarchy().size();
        int bytesPerLeaf = 8 + (8 * numFeatures * 4)
            + (8 * ((numFeatures * (numFeatures - 1)) / 2));
        return ((bytesPerVertex * vertices) + (bytesPerLeaf * leaves)) * 1.7;
    }
}
package ir.angellandros.kollektion; import java.util.Collection; import java.util.Iterator; /** * * @author Muhammad-Ali A'rabi <A HREF="me@angellandros.ir"> me@angellandros.ir * </A> * * @param <E> * * @version June 04, 2015 */ public class LinkedList<E> implements Iterator<E> { private Node<E> head; private Node<E> current; public LinkedList() { head = null; current = head; } public LinkedList(Collection<E> c) { Node<E> last = null; for (E e : c) { if (last == null) { head = new Node<E>(e, null, null); last = head; } else { Node<E> current = new Node<E>(e, null, last); last.setNext(current); last = current; } } restart(); } public LinkedList(Iterator<E> c) { Node<E> last = null; while (c.hasNext()) { E e = c.next(); if (last == null) { head = new Node<E>(e, null, null); last = head; } else { Node<E> current = new Node<E>(e, null, last); last.setNext(current); last = current; } } restart(); } public LinkedList<E> clone() { return new LinkedList<E>(this); } @Override public boolean hasNext() { return (current != null && current.hasNext()); } @Override public E next() { current = current.getNext(); return current.getData(); } public boolean hasPrevious() { return (current != null && !current.isFirst()); } public E previous() { current = current.getPrevious(); return current.getData(); } /** * check whether there is a valid current node * * @return true if current node exists and is valid, false otherwise */ public boolean hasCurrent() { return (current != null && current.getData() != null); } /** * @return the data of current node */ public E current() { return current.getData(); } /** * remove current node */ @Override public void remove() { try { current.getPrevious().setNext(current.getNext()); } catch (NullPointerException e) { // no previous, eh? so, you are the head head = head.getNext(); } try { current.getNext().setPrevious(current.getPrevious()); } catch (NullPointerException e) { // no next, eh? 
} current = current.getPrevious(); } /** * replace the current node with another new one * @param data */ public void replace(E data) { current.setData(data); } /** * insert a node after the current node, and move cursor to it * * @param oth2 * data of new node */ public void insert(E data) { Node<E> node; try { node = new Node<E>(data, current.getNext(), current); current.setNext(node); } catch(NullPointerException e) { // no current? hence an empty list node = new Node<E>(data, null, null); head = node; } try { current.getNext().setPrevious(node); } catch (NullPointerException e) { // dear boy is the last node } current = node; } /** * move the cursor the very beginning of linked list, before the head. use * next() to get data of head after this method is called. */ public void restart() { current = new Node<E>(null, head, null); } public String toString() { StringBuffer toReturn = new StringBuffer(); Node<E> it = new Node<E>(null, head, null); while (it.hasNext()) { if (it.getData() != null) { toReturn.append(", "); } it = it.getNext(); toReturn.append(it.getData().toString()); } return toReturn.toString(); } }
package jp.kotmw.splatoon.mainweapons;

import java.util.Random;

import org.bukkit.Bukkit;
import org.bukkit.ChatColor;
import org.bukkit.entity.EntityType;
import org.bukkit.entity.Player;
import org.bukkit.entity.Snowball;
import org.bukkit.event.EventHandler;
import org.bukkit.event.Listener;
import org.bukkit.event.block.Action;
import org.bukkit.event.entity.EntityDamageByEntityEvent;
import org.bukkit.event.entity.ProjectileHitEvent;
import org.bukkit.event.player.PlayerInteractEvent;
import org.bukkit.inventory.ItemStack;
import org.bukkit.scheduler.BukkitRunnable;
import org.bukkit.util.Vector;

import jp.kotmw.splatoon.Main;
import jp.kotmw.splatoon.gamedatas.DataStore;
import jp.kotmw.splatoon.gamedatas.DataStore.WeaponType;
import jp.kotmw.splatoon.gamedatas.PlayerData;
import jp.kotmw.splatoon.gamedatas.WeaponData;
import jp.kotmw.splatoon.maingame.MainGame;
import jp.kotmw.splatoon.mainweapons.threads.ShooterRunnable;
import jp.kotmw.splatoon.manager.Paint;

/**
 * Event handlers and firing logic for the Shooter weapon class: starts the
 * repeating fire task on right-click, paints on projectile impact, applies
 * player damage, and protects armor stands from shooter snowballs.
 */
public class Shooter implements Listener {

	/**
	 * Right-click handler: validates the held weapon item and, if the player
	 * has enough ink (exp), schedules/refreshes the repeating fire task.
	 */
	@EventHandler
	public void onInteract(PlayerInteractEvent e) {
		if(!DataStore.hasPlayerData(e.getPlayer().getName()))
			return;
		if(DataStore.getPlayerData(e.getPlayer().getName()).getArena() == null)
			return;
		Action action = e.getAction();
		// only right clicks fire the weapon
		if(action == Action.LEFT_CLICK_AIR || action == Action.LEFT_CLICK_BLOCK || action == Action.PHYSICAL)
			return;
		Player player = e.getPlayer();
		ItemStack item = player.getInventory().getItemInMainHand();
		PlayerData data = DataStore.getPlayerData(player.getName());
		if(DataStore.getWeapondata(data.getWeapon()).getType() != WeaponType.Shooter)
			return;
		// held item must be the player's registered weapon item (type, meta, lore, name)
		if(data.isAllCancel() || item == null
				|| item.getType() != DataStore.getWeapondata(data.getWeapon()).getItemtype()
				|| !item.hasItemMeta()
				|| item.getItemMeta().getLore().size() < 5
				|| !item.getItemMeta().getDisplayName().equalsIgnoreCase(data.getWeapon()))
			return;
		WeaponData weapondata = DataStore.getWeapondata(data.getWeapon());
		if(player.getExp() < weapondata.getCost()) {
			// not enough ink: flash a warning title instead of firing
			MainGame.sendTitle(data, 0, 5, 0, " ", ChatColor.RED+"!");
			return;
		}
		// slower weapons keep the task alive longer per click
		int tick = 1;
		if(weapondata.getFirespeed() < 5)
			tick = tick+(5-weapondata.getFirespeed());
		if(data.getTask() == null) {
			BukkitRunnable task = new ShooterRunnable(player.getName());
			task.runTaskTimer(Main.main, 0, weapondata.getFirespeed());
			data.setTask(task);
		}
		data.setTick(tick);
	}

	/** Paints a sphere of the weapon's radius where the snowball lands. */
	@EventHandler
	public void onHit(ProjectileHitEvent e) {
		if(!(e.getEntity() instanceof Snowball) || !(e.getEntity().getShooter() instanceof Player))
			return;
		Player player = (Player) e.getEntity().getShooter();
		if(!DataStore.hasPlayerData(player.getName()))
			return;
		if(DataStore.getPlayerData(player.getName()).getArena() == null)
			return;
		PlayerData data = DataStore.getPlayerData(player.getName());
		if(DataStore.getWeapondata(data.getWeapon()).getType() != WeaponType.Shooter)
			return;
		Paint.SpherePaint(e.getEntity().getLocation(), DataStore.getWeapondata(data.getWeapon()).getRadius(), data);
	}

	/**
	 * Applies the weapon's configured damage when a shooter snowball hits an
	 * enemy player. Self-hits and same-team hits deal no damage.
	 *
	 * BUG FIX: the self-hit check compared player names with {@code ==}
	 * (reference identity); {@link String#equals(Object)} is required.
	 */
	@EventHandler
	public void onDamage(EntityDamageByEntityEvent e) {
		if(e.getDamager() instanceof Snowball && DataStore.hasPlayerData(e.getEntity().getName())) {
			Snowball ball = (Snowball) e.getDamager();
			if(!(ball.getShooter() instanceof Player))
				return;
			if(!(e.getEntity() instanceof Player))
				return;
			Player player = (Player) e.getEntity(), shooter = (Player) ball.getShooter();
			if(!DataStore.hasPlayerData(shooter.getName())
					|| player.getName().equals(shooter.getName())
					|| DataStore.getPlayerData(player.getName()).getTeamid() == DataStore.getPlayerData(shooter.getName()).getTeamid())
				return;
			WeaponData data = DataStore.getWeapondata(DataStore.getPlayerData(shooter.getName()).getWeapon());
			if(data.getType() != WeaponType.Shooter)
				return;
			e.setDamage(data.getDamage());
		}
	}

	/** Cancels snowball damage to armor stands fired by tracked players. */
	@EventHandler
	public void onArmorstanddamage(EntityDamageByEntityEvent e) {
		if(e.getEntity().getType() != EntityType.ARMOR_STAND || !(e.getDamager() instanceof Snowball))
			return;
		Snowball ball = (Snowball) e.getDamager();
		if(!(ball.getShooter() instanceof Player)
				|| !DataStore.hasPlayerData(((Player)ball.getShooter()).getName()))
			return;
		e.setCancelled(true);
	}

	/**
	 * Fires one snowball for the given player: deducts ink cost, paints at the
	 * player's feet, and launches the projectile with a random spread.
	 *
	 * @param data the firing player's data (must resolve to an online player)
	 */
	public static void shoot(PlayerData data) {
		Player player = Bukkit.getPlayer(data.getName());
		WeaponData weapon = DataStore.getWeapondata(data.getWeapon());
		player.setExp((float) (player.getExp()-weapon.getCost()));
		Paint.SpherePaint(player.getLocation(), DataStore.getWeapondata(data.getWeapon()).getRadius(), data);
		Random random = new Random();
		int angle = weapon.getAngle()*100;
		// NOTE(review): nextInt(angle)/100 and (getAngle()-1)/2 are INTEGER
		// divisions, so the spread is quantised to whole degrees and the *100
		// scaling is lost. If sub-degree spread was intended, divide by 100.0
		// and 2.0 instead — confirm intended behavior before changing.
		double x = Math.toRadians((random.nextInt(angle)/100)-((weapon.getAngle()-1)/2));
		double z = Math.toRadians((random.nextInt(angle)/100)-((weapon.getAngle()-1)/2));
		Vector direction = player.getLocation().getDirection().clone();
		// projectile launch must run on the main server thread
		MainGame.sync(() -> {
			Snowball snowball = player.launchProjectile(Snowball.class);
			Vector vec = new Vector(x,0,z),
					vec2 = new Vector(direction.getX()*0.75, direction.getY()*0.75, direction.getZ()*0.75);
			vec2.add(vec);
			snowball.setVelocity(vec2);
		});
	}
}
package me.jan.invtest; import org.bukkit.Server; import org.bukkit.event.EventHandler; import org.bukkit.event.HandlerList; import org.bukkit.event.Listener; import org.bukkit.event.inventory.InventoryClickEvent; import org.bukkit.event.inventory.InventoryCloseEvent; import org.bukkit.event.inventory.InventoryType; import org.bukkit.inventory.Inventory; import org.bukkit.inventory.InventoryHolder; import org.bukkit.plugin.Plugin; public class OpenInvInventoryHolder implements InventoryHolder, Listener { private final Server server; private final Inventory inventory; public OpenInvInventoryHolder(Plugin plugin) { this.server = plugin.getServer(); this.inventory = server.createInventory(this, InventoryType.DISPENSER, "foo bar"); server.getPluginManager().registerEvents(this, plugin); } @Override public Inventory getInventory() { return inventory; } @EventHandler public void onInventoryClick(InventoryClickEvent event) { server.getLogger().info("Click event!"); server.getLogger().info("click type = " + event.getClick()); server.getLogger().info("clicked inventory = " + event.getClickedInventory()); InventoryHolder holder = event.getView().getTopInventory().getHolder(); if (holder == this) { server.getLogger().info("(holder == this) holds true"); } //from this point server seems to deadlock upon shiftclicking an item into the dispenser inventory. //cannot reproduce with a chest inventory. } @EventHandler public void onInventoryClose(InventoryCloseEvent event) { server.getLogger().info("Close event!"); InventoryHolder holder = event.getView().getTopInventory().getHolder(); if (holder == this) { server.getLogger().info("(holder == this) holds true"); HandlerList.unregisterAll(this); server.getLogger().info("unregistered this listener"); } } }
package mx.nhtzr.osgiee.web;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.codehaus.jackson.map.ObjectMapper;
import org.codehaus.jackson.map.ObjectWriter;
import org.springframework.stereotype.Controller;

import javax.ws.rs.Consumes;
import javax.ws.rs.GET;
import javax.ws.rs.POST;
import javax.ws.rs.Path;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.MultivaluedMap;
import java.io.IOException;

/**
 * Root JAX-RS resource: echoes form posts (logging the parameters as pretty
 * JSON) and serves a trivial greeting on GET.
 */
@Controller
@Path("/")
public class WelcomeController {

    private static final Log logger = LogFactory.getLog(WelcomeController.class);

    // ObjectMapper/ObjectWriter are thread-safe and expensive to build;
    // cache a single writer per class instead of one per controller instance.
    private static final ObjectWriter WRITER =
            new ObjectMapper().writerWithDefaultPrettyPrinter();

    /**
     * Handles URL-encoded form posts: logs all parameters as pretty-printed
     * JSON and echoes back the first value of the "body" parameter.
     *
     * @param params decoded form parameters
     * @return the first "body" value, or null if absent
     * @throws IOException if JSON serialization of the parameters fails
     */
    @POST
    @Consumes(value = MediaType.APPLICATION_FORM_URLENCODED)
    public String postHandler(MultivaluedMap<String, String> params) throws IOException {
        logger.info("params = " + WRITER.writeValueAsString(params));
        return params.getFirst("body");
    }

    /** @return a fixed greeting used as a liveness check */
    @GET
    public String main() {
        return "Holi";
    }
}
package net.krazyweb.cataclysm.mapeditor;

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import net.krazyweb.util.FileUtils;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;

import java.io.IOException;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.List;

/**
 * Registry of terrain/furniture tiles loaded from the game's JSON data files.
 * Tiles are looked up by id; {@code connectsToWalls} marks tiles with the
 * CONNECT_TO_WALL or WALL flag.
 */
public class Tile {

	private static final Logger log = LogManager.getLogger(Tile.class);

	// All loaded tiles; populated by loadTiles(), sorted terrain-first.
	private static List<Tile> tiles = new ArrayList<>();

	public String id;
	public boolean connectsToWalls = false;

	private Tile(final String id, final boolean connectsToWalls) {
		this.id = id;
		this.connectsToWalls = connectsToWalls;
	}

	/**
	 * Looks up a tile by id.
	 *
	 * @param tileId the tile id (e.g. "t_wall")
	 * @return the matching tile, or null if not loaded
	 */
	public static Tile get(final String tileId) {
		for (Tile tile : tiles) {
			if (tile.id.equals(tileId)) {
				return tile;
			}
		}
		return null;
	}

	/** @return the live list of all loaded tiles (callers must not mutate it) */
	public static List<Tile> getAll() {
		return tiles;
	}

	/**
	 * Loads all tile definitions from the configured game folder's
	 * data/json directory, logging (not throwing) on failure.
	 */
	public static void loadTiles() {
		Path gameFolder = ApplicationSettings.getInstance().getPath(ApplicationSettings.Preference.GAME_FOLDER);
		try {
			load(gameFolder.resolve(Paths.get("data", "json")));
		} catch (IOException e) {
			log.error("Error while loading terrain and furniture definitions:", e);
		}
	}

	/**
	 * Parses every *.json file under the given path and registers all
	 * "terrain" and "furniture" entries, then sorts terrain (t_) before
	 * furniture (f_).
	 *
	 * Fixes: uses null-safe JsonNode.path() so entries without a "type"
	 * field are skipped instead of throwing NPE, and logs parse failures via
	 * the class logger instead of printStackTrace().
	 *
	 * @param path directory to scan for JSON files
	 * @throws IOException if the directory listing fails
	 */
	private static void load(final Path path) throws IOException {

		FileUtils.listFiles(path).stream().filter(file -> file.getFileName().toString().endsWith(".json")).forEach(file -> {

			log.info("Loading tiles from: '" + file + "'");

			try {

				JsonNode root = new ObjectMapper().readTree(file.toFile());

				root.forEach(node -> {
					// path() returns a missing node (asText() == "") instead of
					// null when the field is absent, so malformed entries are skipped.
					String type = node.path("type").asText();
					if (type.equals("terrain") || type.equals("furniture")) {

						boolean connectsToWalls = false;

						if (node.has("flags")) {
							for (JsonNode flag : node.get("flags")) {
								String parsedFlag = flag.asText().replaceAll("\"", "");
								if (parsedFlag.equals("CONNECT_TO_WALL") || parsedFlag.equals("WALL")) {
									log.trace("Connects to Walls: " + node.path("id").asText());
									connectsToWalls = true;
									break;
								}
							}
						}

						tiles.add(new Tile(node.path("id").asText(), connectsToWalls));
						log.trace("Loaded tile: '" + node.path("id").asText() + "'");

					}
				});

			} catch (IOException e) {
				log.error("Error while reading tiles from: '" + file + "'", e);
			}

		});

		// Terrain ids (t_*) sort before furniture ids (f_*); order is
		// otherwise preserved (comparator returns 0).
		tiles.sort((tile1, tile2) -> {
			if (tile1.id.startsWith("t_") && tile2.id.startsWith("f_")) {
				return -1;
			} else if (tile1.id.startsWith("f_") && tile2.id.startsWith("t_")) {
				return 1;
			}
			return 0;
		});

	}

}
package net.snowflake.client.core; import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.ObjectMapper; import net.snowflake.client.jdbc.ErrorCode; import net.snowflake.client.jdbc.SnowflakeDriver; import net.snowflake.client.jdbc.SnowflakeSQLException; import net.snowflake.client.jdbc.SnowflakeType; import net.snowflake.client.jdbc.SnowflakeUtil; import net.snowflake.client.log.SFLogger; import net.snowflake.client.log.SFLoggerFactory; import net.snowflake.common.core.ClientAuthnDTO; import net.snowflake.common.core.ClientAuthnParameter; import net.snowflake.common.core.SqlState; import org.apache.http.HttpHeaders; import org.apache.http.client.HttpClient; import org.apache.http.client.methods.HttpGet; import org.apache.http.client.methods.HttpPost; import org.apache.http.client.utils.URIBuilder; import org.apache.http.entity.StringEntity; import org.apache.http.impl.client.SystemDefaultHttpClient; import org.apache.http.message.BasicHeader; import org.apache.http.message.HeaderGroup; import org.apache.http.params.BasicHttpParams; import org.apache.http.params.HttpConnectionParams; import org.apache.http.params.HttpParams; import org.jsoup.Jsoup; import org.jsoup.nodes.Document; import org.jsoup.select.Elements; import java.io.IOException; import java.net.MalformedURLException; import java.net.URI; import java.net.URISyntaxException; import java.net.URL; import java.nio.charset.Charset; import java.security.PrivateKey; import java.util.Arrays; import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; import java.util.Map; import java.util.Properties; import java.util.Set; import java.util.UUID; public class SessionUtil { public static final String SF_QUERY_DATABASE = "databaseName"; public static final String SF_QUERY_SCHEMA = "schemaName"; public static final String SF_QUERY_WAREHOUSE = "warehouse"; public static final String SF_QUERY_ROLE = "roleName"; public static final String SF_QUERY_REQUEST_ID = 
"requestId"; public static final String SF_PATH_AUTHENTICATOR_REQUEST = "/session/authenticator-request"; private static final String SF_PATH_LOGIN_REQUEST = "/session/v1/login-request"; private static final String SF_PATH_TOKEN_REQUEST = "/session/token-request"; public static final String SF_QUERY_SESSION_DELETE = "delete"; private static final String SF_PATH_SESSION = "/session"; private static ObjectMapper mapper = new ObjectMapper(); public static final String SF_HEADER_AUTHORIZATION = HttpHeaders.AUTHORIZATION; public static final String SF_HEADER_BASIC_AUTHTYPE = "Basic"; public static final String SF_HEADER_SNOWFLAKE_AUTHTYPE = "Snowflake"; public static final String SF_HEADER_TOKEN_TAG = "Token"; private static int DEFAULT_HTTP_CLIENT_CONNECTION_TIMEOUT = 60000; // millisec private static int DEFAULT_HTTP_CLIENT_SOCKET_TIMEOUT = 300000; // millisec private static int DEFAULT_HEALTH_CHECK_INTERVAL = 45; // sec static final SFLogger logger = SFLoggerFactory.getLogger(SessionUtil.class); private static Set<String> STRING_PARAMS = new HashSet<>(Arrays.asList( "TIMEZONE", "TIMESTAMP_OUTPUT_FORMAT", "TIMESTAMP_NTZ_OUTPUT_FORMAT", "TIMESTAMP_LTZ_OUTPUT_FORMAT", "TIMESTAMP_TZ_OUTPUT_FORMAT", "DATE_OUTPUT_FORMAT", "TIME_OUTPUT_FORMAT", "BINARY_OUTPUT_FORMAT", "CLIENT_TIMESTAMP_TYPE_MAPPING")); private static Set<String> INT_PARAMS = new HashSet<>(Arrays.asList( "CLIENT_RESULT_PREFETCH_SLOTS", "CLIENT_RESULT_PREFETCH_THREADS", "CLIENT_PREFETCH_THREADS", "CLIENT_MEMORY_LIMIT")); private static Set<String> BOOLEAN_PARAMS = new HashSet<>(Arrays.asList( "CLIENT_HONOR_CLIENT_TZ_FOR_TIMESTAMP_NTZ", "JDBC_EXECUTE_RETURN_COUNT_FOR_DML", "CLIENT_DISABLE_INCIDENTS", "CLIENT_SESSION_KEEP_ALIVE", "JDBC_USE_JSON_PARSER", "AUTOCOMMIT", "JDBC_EFFICIENT_CHUNK_STORAGE", "JDBC_RS_COLUMN_CASE_INSENSITIVE", "CLIENT_METADATA_REQUEST_USE_CONNECTION_CTX", "JDBC_TREAT_DECIMAL_AS_INT", "JDBC_ENABLE_COMBINED_DESCRIBE")); /** * A class for holding all information required for login */ public 
static class LoginInput { private String serverUrl; private String databaseName; private String schemaName; private String warehouse; private String role; private String authenticator; private HttpClient httpClient; private String accountName; private int loginTimeout = -1; // default is invalid private String userName; private String password; private Properties clientInfo; private boolean passcodeInPassword; private String passcode; private String token; private int connectionTimeout = DEFAULT_HTTP_CLIENT_CONNECTION_TIMEOUT; private int socketTimeout = DEFAULT_HTTP_CLIENT_SOCKET_TIMEOUT; private String appId; private String appVersion; private String sessionToken; private String masterToken; private Map<String, Object> sessionParameters; private PrivateKey privateKey; public LoginInput() { } public LoginInput setServerUrl(String serverUrl) { this.serverUrl = serverUrl; return this; } public LoginInput setDatabaseName(String databaseName) { this.databaseName = databaseName; return this; } public LoginInput setSchemaName(String schemaName) { this.schemaName = schemaName; return this; } public LoginInput setWarehouse(String warehouse) { this.warehouse = warehouse; return this; } public LoginInput setRole(String role) { this.role = role; return this; } public LoginInput setAuthenticator(String authenticator) { this.authenticator = authenticator; return this; } public LoginInput setHttpClient(HttpClient httpClient) { this.httpClient = httpClient; return this; } public LoginInput setAccountName(String accountName) { this.accountName = accountName; return this; } public LoginInput setLoginTimeout(int loginTimeout) { this.loginTimeout = loginTimeout; return this; } public LoginInput setUserName(String userName) { this.userName = userName; return this; } public LoginInput setPassword(String password) { this.password = password; return this; } public LoginInput setToken(String token) { this.token = token; return this; } public LoginInput setClientInfo(Properties 
clientInfo) { this.clientInfo = clientInfo; return this; } public LoginInput setPasscodeInPassword(boolean passcodeInPassword) { this.passcodeInPassword = passcodeInPassword; return this; } public LoginInput setPasscode(String passcode) { this.passcode = passcode; return this; } public LoginInput setConnectionTimeout(int connectionTimeout) { this.connectionTimeout = connectionTimeout; return this; } public LoginInput setSocketTimeout(int socketTimeout) { this.socketTimeout = socketTimeout; return this; } public LoginInput setAppId(String appId) { this.appId = appId; return this; } public LoginInput setAppVersion(String appVersion) { this.appVersion = appVersion; return this; } public LoginInput setSessionToken(String sessionToken) { this.sessionToken = sessionToken; return this; } public LoginInput setMasterToken(String masterToken) { this.masterToken = masterToken; return this; } public LoginInput setSessionParameters(Map<String, Object> sessionParameters) { this.sessionParameters = sessionParameters; return this; } public LoginInput setPrivateKey(PrivateKey privateKey) { this.privateKey = privateKey; return this; } public HttpClient getHttpClient() { return httpClient; } public String getServerUrl() { return serverUrl; } public String getDatabaseName() { return databaseName; } public String getSchemaName() { return schemaName; } public String getWarehouse() { return warehouse; } public String getRole() { return role; } public String getAuthenticator() { return authenticator; } public String getAccountName() { return accountName; } public int getLoginTimeout() { return loginTimeout; } public String getUserName() { return userName; } public String getPassword() { return password; } public Properties getClientInfo() { return clientInfo; } public String getPasscode() { return passcode; } public String getToken() { return token; } public int getConnectionTimeout() { return connectionTimeout; } public int getSocketTimeout() { return socketTimeout; } public boolean 
isPasscodeInPassword() { return passcodeInPassword; } public String getAppId() { return appId; } public String getAppVersion() { return appVersion; } public String getSessionToken() { return sessionToken; } public String getMasterToken() { return masterToken; } public Map<String, Object> getSessionParameters() { return sessionParameters; } public PrivateKey getPrivateKey() { return privateKey; } } /** * Login output information including session tokens, database versions */ public static class LoginOutput { String sessionToken; String masterToken; long masterTokenValidityInSeconds; String remMeToken; String databaseVersion; int databaseMajorVersion; int databaseMinorVersion; String newClientForUpgrade; int healthCheckInterval; int httpClientSocketTimeout; String sessionDatabase; String sessionSchema; String sessionRole; Map<String, Object> commonParams; public LoginOutput() { } ; public LoginOutput(String sessionToken, String masterToken, long masterTokenValidityInSeconds, String remMeToken, String databaseVersion, int databaseMajorVersion, int databaseMinorVersion, String newClientForUpgrade, int healthCheckInterval, int httpClientSocketTimeout, String sessionDatabase, String sessionSchema, String sessionRole, Map<String, Object> commonParams) { this.sessionToken = sessionToken; this.masterToken = masterToken; this.remMeToken = remMeToken; this.databaseVersion = databaseVersion; this.databaseMajorVersion = databaseMajorVersion; this.databaseMinorVersion = databaseMinorVersion; this.newClientForUpgrade = newClientForUpgrade; this.healthCheckInterval = healthCheckInterval; this.httpClientSocketTimeout = httpClientSocketTimeout; this.sessionDatabase = sessionDatabase; this.sessionSchema = sessionSchema; this.sessionRole = sessionRole; this.commonParams = commonParams; this.masterTokenValidityInSeconds = masterTokenValidityInSeconds; } public LoginOutput setSessionToken(String sessionToken) { this.sessionToken = sessionToken; return this; } public LoginOutput 
setMasterToken(String masterToken) { this.masterToken = masterToken; return this; } public LoginOutput setRemMeToken(String remMeToken) { this.remMeToken = remMeToken; return this; } public LoginOutput setDatabaseVersion(String databaseVersion) { this.databaseVersion = databaseVersion; return this; } public LoginOutput setDatabaseMajorVersion(int databaseMajorVersion) { this.databaseMajorVersion = databaseMajorVersion; return this; } public LoginOutput setDatabaseMinorVersion(int databaseMinorVersion) { this.databaseMinorVersion = databaseMinorVersion; return this; } public LoginOutput setNewClientForUpgrade(String newClientForUpgrade) { this.newClientForUpgrade = newClientForUpgrade; return this; } public LoginOutput setHealthCheckInterval(int healthCheckInterval) { this.healthCheckInterval = healthCheckInterval; return this; } public LoginOutput setHttpClientSocketTimeout(int httpClientSocketTimeout) { this.httpClientSocketTimeout = httpClientSocketTimeout; return this; } public LoginOutput setCommonParams(Map<String, Object> commonParams) { this.commonParams = commonParams; return this; } public String getSessionToken() { return sessionToken; } public String getMasterToken() { return masterToken; } public String getRemMeToken() { return remMeToken; } public String getDatabaseVersion() { return databaseVersion; } public int getDatabaseMajorVersion() { return databaseMajorVersion; } public int getDatabaseMinorVersion() { return databaseMinorVersion; } public String getNewClientForUpgrade() { return newClientForUpgrade; } public int getHealthCheckInterval() { return healthCheckInterval; } public int getHttpClientSocketTimeout() { return httpClientSocketTimeout; } public Map<String, Object> getCommonParams() { return commonParams; } public String getSessionDatabase() { return sessionDatabase; } public void setSessionDatabase(String sessionDatabase) { this.sessionDatabase = sessionDatabase; } public String getSessionSchema() { return sessionSchema; } public void 
setSessionSchema(String sessionSchema) { this.sessionSchema = sessionSchema; } public String getSessionRole() { return sessionRole; } public long getMasterTokenValidityInSeconds() { return masterTokenValidityInSeconds; } } /** * Returns Authenticator type * * @param loginInput login information * @return Authenticator type */ static private ClientAuthnDTO.AuthenticatorType getAuthenticator( LoginInput loginInput) { if (loginInput.getAuthenticator() != null) { if (loginInput.getAuthenticator().equalsIgnoreCase( ClientAuthnDTO.AuthenticatorType.EXTERNALBROWSER.name())) { // SAML 2.0 compliant service/application return ClientAuthnDTO.AuthenticatorType.EXTERNALBROWSER; } else if (loginInput.getAuthenticator().equalsIgnoreCase( ClientAuthnDTO.AuthenticatorType.OAUTH.name())) { // OAuth Authentication return ClientAuthnDTO.AuthenticatorType.OAUTH; } else if (loginInput.getAuthenticator().equalsIgnoreCase( ClientAuthnDTO.AuthenticatorType.SNOWFLAKE_JWT.name())) { return ClientAuthnDTO.AuthenticatorType.SNOWFLAKE_JWT; } else if (!loginInput.getAuthenticator().equalsIgnoreCase( ClientAuthnDTO.AuthenticatorType.SNOWFLAKE.name())) { // OKTA authenticator v1. This will be deprecated once externalbrowser // is in production. return ClientAuthnDTO.AuthenticatorType.OKTA; } } // authenticator is null, then jdbc will decide authenticator depends on // if privateKey is specified or not. If yes, authenticator type will be // SNOWFLAKE_JWT, otherwise it will use SNOWFLAKE. return loginInput.getPrivateKey() != null ? 
ClientAuthnDTO.AuthenticatorType.SNOWFLAKE_JWT : ClientAuthnDTO.AuthenticatorType.SNOWFLAKE; } /** * Open a new session * * @param loginInput login information * @return information get after login such as token information * @throws SFException if unexpected uri syntax * @throws SnowflakeSQLException if failed to establish connection with snowflake */ static public LoginOutput openSession(LoginInput loginInput) throws SFException, SnowflakeSQLException { AssertUtil.assertTrue(loginInput.getServerUrl() != null, "missing server URL for opening session"); AssertUtil.assertTrue(loginInput.getUserName() != null, "missing user name for opening session"); AssertUtil.assertTrue(loginInput.getAppId() != null, "missing app id for opening session"); AssertUtil.assertTrue(loginInput.getHttpClient() != null, "missing http client for opening session"); AssertUtil.assertTrue(loginInput.getLoginTimeout() >= 0, "negative login timeout for opening session"); // build URL for login request URIBuilder uriBuilder; URI loginURI; String tokenOrSamlResponse = null; String samlProofKey = null; HttpClient httpClient; String sessionToken; String masterToken; String sessionDatabase; String sessionSchema; String sessionRole; long masterTokenValidityInSeconds; String remMeToken; String databaseVersion = null; int databaseMajorVersion = 0; int databaseMinorVersion = 0; String newClientForUpgrade = null; int healthCheckInterval = DEFAULT_HEALTH_CHECK_INTERVAL; int httpClientSocketTimeout = loginInput.getSocketTimeout(); Map<String, Object> commonParams; final ClientAuthnDTO.AuthenticatorType authenticator = getAuthenticator( loginInput); try { uriBuilder = new URIBuilder(loginInput.getServerUrl()); // add database name and schema name as query parameters if (loginInput.getDatabaseName() != null) { uriBuilder.addParameter(SF_QUERY_DATABASE, loginInput.getDatabaseName()); } if (loginInput.getSchemaName() != null) { uriBuilder.addParameter(SF_QUERY_SCHEMA, loginInput.getSchemaName()); } if 
(loginInput.getWarehouse() != null) { uriBuilder.addParameter(SF_QUERY_WAREHOUSE, loginInput.getWarehouse()); } if (loginInput.getRole() != null) { uriBuilder.addParameter(SF_QUERY_ROLE, loginInput.getRole()); } if (authenticator == ClientAuthnDTO.AuthenticatorType.EXTERNALBROWSER) { // SAML 2.0 compliant service/application SessionUtilExternalBrowser s = new SessionUtilExternalBrowser( loginInput); s.authenticate(); tokenOrSamlResponse = s.getToken(); samlProofKey = s.getProofKey(); } else if (authenticator == ClientAuthnDTO.AuthenticatorType.OKTA) { // okta authenticator v1 tokenOrSamlResponse = getSamlResponseUsingOkta(loginInput); } else if (authenticator == ClientAuthnDTO.AuthenticatorType.SNOWFLAKE_JWT) { SessionUtilKeyPair s = new SessionUtilKeyPair(loginInput.getPrivateKey(), loginInput.getAccountName(), loginInput.getUserName()); loginInput.setToken(s.issueJwtToken()); } uriBuilder.addParameter(SF_QUERY_REQUEST_ID, UUID.randomUUID().toString()); uriBuilder.setPath(SF_PATH_LOGIN_REQUEST); loginURI = uriBuilder.build(); } catch (URISyntaxException ex) { logger.error("Exception when building URL", ex); throw new SFException(ex, ErrorCode.INTERNAL_ERROR, "unexpected URI syntax exception:1"); } httpClient = loginInput.getHttpClient(); HttpPost postRequest = null; try { ClientAuthnDTO authnData = new ClientAuthnDTO(); Map<String, Object> data = new HashMap<String, Object>(); data.put(ClientAuthnParameter.CLIENT_APP_ID.name(), loginInput.getAppId()); /* * username is always included regardless of authenticator to identify * the user. */ data.put(ClientAuthnParameter.LOGIN_NAME.name(), loginInput.getUserName()); /* * only include password information in the request to GS if federated * authentication method is not specified. * When specified, this password information is really to be used to * authenticate with the IDP provider only, and GS should not have any * trace for this information. 
*/ if (authenticator == ClientAuthnDTO.AuthenticatorType.SNOWFLAKE) { data.put(ClientAuthnParameter.PASSWORD.name(), loginInput.getPassword()); } else if (authenticator == ClientAuthnDTO.AuthenticatorType.EXTERNALBROWSER) { data.put(ClientAuthnParameter.AUTHENTICATOR.name(), ClientAuthnDTO.AuthenticatorType.EXTERNALBROWSER.name()); data.put(ClientAuthnParameter.PROOF_KEY.name(), samlProofKey); data.put(ClientAuthnParameter.TOKEN.name(), tokenOrSamlResponse); } else if (authenticator == ClientAuthnDTO.AuthenticatorType.OKTA) { data.put(ClientAuthnParameter.RAW_SAML_RESPONSE.name(), tokenOrSamlResponse); } else if (authenticator == ClientAuthnDTO.AuthenticatorType.OAUTH || authenticator == ClientAuthnDTO.AuthenticatorType.SNOWFLAKE_JWT) { data.put(ClientAuthnParameter.AUTHENTICATOR.name(), authenticator.name()); data.put(ClientAuthnParameter.TOKEN.name(), loginInput.getToken()); } Map<String, Object> clientEnv = new HashMap<String, Object>(); clientEnv.put("OS", System.getProperty("os.name")); clientEnv.put("OS_VERSION", System.getProperty("os.version")); clientEnv.put("JAVA_VERSION", System.getProperty("java.version")); clientEnv.put("JAVA_RUNTIME", System.getProperty("java.runtime.name")); clientEnv.put("JAVA_VM", System.getProperty("java.vm.name")); // SNOW-15780: find out if application has set // -Dcom.sun.security.enableCRLDP=true and // -Dcom.sun.net.ssl.checkRevocation=true boolean CRLEnabled = SessionUtil.checkCRLSystemProperty(); clientEnv.put("CRL_ENABLED", CRLEnabled); // When you add new client environment info, please add new keys to // messages_en_US.src.json so that they can be displayed properly in UI // detect app name String appName = System.getProperty("sun.java.command"); // remove the arguments if (appName != null) { if (appName.indexOf(" ") > 0) appName = appName.substring(0, appName.indexOf(" ")); clientEnv.put("APPLICATION", appName); } // add properties from client info Properties clientInfo = loginInput.getClientInfo(); if (clientInfo != 
null) for (Map.Entry property : clientInfo.entrySet()) { if (property != null && property.getKey() != null && property.getValue() != null) clientEnv.put(property.getKey().toString(), property.getValue().toString()); } // SNOW-20103: track additional client info in session String clientInfoJSONStr = System.getProperty("snowflake.client.info"); if (clientInfoJSONStr != null) { JsonNode clientInfoJSON = null; try { clientInfoJSON = mapper.readTree(clientInfoJSONStr); } catch (Throwable ex) { logger.warn( "failed to process snowflake.client.info property as JSON: " + clientInfoJSONStr, ex); } if (clientInfoJSON != null) { Iterator<Map.Entry<String, JsonNode>> fields = clientInfoJSON.fields(); while (fields.hasNext()) { Map.Entry<String, JsonNode> field = fields.next(); clientEnv.put(field.getKey(), field.getValue().asText()); } } } data.put(ClientAuthnParameter.CLIENT_ENVIRONMENT.name(), clientEnv); // Initialize the session parameters Map<String, Object> sessionParameter = loginInput.getSessionParameters(); if (sessionParameter != null) { data.put(ClientAuthnParameter.SESSION_PARAMETERS.name(), loginInput .getSessionParameters()); } if (loginInput.getAccountName() != null) { data.put(ClientAuthnParameter.ACCOUNT_NAME.name(), loginInput.getAccountName()); } // Second Factor Authentication if (loginInput.isPasscodeInPassword()) { data.put(ClientAuthnParameter.EXT_AUTHN_DUO_METHOD.name(), "passcode"); } else if (loginInput.getPasscode() != null) { data.put(ClientAuthnParameter.EXT_AUTHN_DUO_METHOD.name(), "passcode"); data.put(ClientAuthnParameter.PASSCODE.name(), loginInput.getPasscode()); } else { data.put(ClientAuthnParameter.EXT_AUTHN_DUO_METHOD.name(), "push"); } logger.debug( "implementation version = {}", SnowflakeDriver.implementVersion); data.put(ClientAuthnParameter.CLIENT_APP_VERSION.name(), loginInput.getAppVersion()); authnData.setData(data); String json = mapper.writeValueAsString(authnData); postRequest = new HttpPost(loginURI); // attach the login info 
json body to the post request StringEntity input = new StringEntity(json, Charset.forName("UTF-8")); input.setContentType("application/json"); postRequest.setEntity(input); postRequest.addHeader("accept", "application/json"); /* * HttpClient should take authorization header from char[] instead of * String. */ postRequest.setHeader(SF_HEADER_AUTHORIZATION, SF_HEADER_BASIC_AUTHTYPE); String theString = HttpUtil.executeRequest(postRequest, loginInput.getHttpClient(), loginInput.getLoginTimeout(), 0, null); logger.debug("login response: {}", theString); // general method, same as with data binding JsonNode jsonNode = mapper.readTree(theString); // check the success field first if (!jsonNode.path("success").asBoolean()) { logger.debug("response = {}", theString); String errorCode = jsonNode.path("code").asText(); throw new SnowflakeSQLException( SqlState.SQLCLIENT_UNABLE_TO_ESTABLISH_SQLCONNECTION, ErrorCode.CONNECTION_ERROR.getMessageCode(), errorCode, jsonNode.path("message").asText()); } // session token is in the data field of the returned json response sessionToken = jsonNode.path("data").path("token").asText(); masterToken = jsonNode.path("data").path("masterToken").asText(); remMeToken = jsonNode.path("data").path("remMeToken").asText(); masterTokenValidityInSeconds = jsonNode.path("data"). path("masterValidityInSeconds").asLong(); String serverVersion = jsonNode.path("data").path("serverVersion").asText(); JsonNode dbNode = jsonNode.path("data").path("sessionInfo").path("databaseName"); sessionDatabase = dbNode.isNull() ? null : dbNode.asText(); JsonNode schemaNode = jsonNode.path("data").path("sessionInfo").path("schemaName"); sessionSchema = schemaNode.isNull() ? null : schemaNode.asText(); JsonNode roleNode = jsonNode.path("data").path("sessionInfo").path("roleName"); sessionRole = roleNode.isNull() ? 
null : roleNode.asText(); commonParams = SessionUtil.getCommonParams(jsonNode.path("data").path("parameters")); if (serverVersion != null) { logger.debug("server version = {}", serverVersion); if (serverVersion.indexOf(" ") > 0) databaseVersion = serverVersion.substring(0, serverVersion.indexOf(" ")); else databaseVersion = serverVersion; } else { logger.warn("server version is null"); } if (databaseVersion != null) { String[] components = databaseVersion.split("\\."); if (components != null && components.length >= 2) { try { databaseMajorVersion = Integer.parseInt(components[0]); databaseMinorVersion = Integer.parseInt(components[1]); } catch (Exception ex) { logger.error("Exception encountered when parsing server " + "version: {} Exception: {}", databaseVersion, ex.getMessage()); } } } else logger.warn("database version is null"); if (!jsonNode.path("data").path("newClientForUpgrade").isNull()) { newClientForUpgrade = jsonNode.path("data").path("newClientForUpgrade").asText(); logger.debug("new client: {}", newClientForUpgrade); } // get health check interval and adjust network timeouts if different int healthCheckIntervalFromGS = jsonNode.path("data").path("healthCheckInterval").asInt(); logger.debug( "health check interval = {}", healthCheckIntervalFromGS); if (healthCheckIntervalFromGS > 0 && healthCheckIntervalFromGS != healthCheckInterval) { healthCheckInterval = healthCheckIntervalFromGS; // add health check interval to socket timeout httpClientSocketTimeout = loginInput.getSocketTimeout() + (healthCheckIntervalFromGS * 1000); final HttpParams httpParams = new BasicHttpParams(); // set timeout so that we don't wait forever HttpConnectionParams.setConnectionTimeout(httpParams, loginInput.getConnectionTimeout()); HttpConnectionParams.setSoTimeout(httpParams, httpClientSocketTimeout); ((SystemDefaultHttpClient) httpClient).setParams(httpParams); logger.debug( "adjusted connection timeout to = {}", loginInput.getConnectionTimeout()); logger.debug( "adjusted 
socket timeout to = {}", httpClientSocketTimeout); } } catch (SnowflakeSQLException ex) { throw ex; // must catch here to avoid Throwable to get the exception } catch (IOException ex) { logger.error("IOException when creating session: " + postRequest, ex); throw new SnowflakeSQLException(ex, SqlState.IO_ERROR, ErrorCode.NETWORK_ERROR.getMessageCode(), "Exception encountered when opening connection: " + ex.getMessage()); } catch (Throwable ex) { logger.error("Exception when creating session: " + postRequest, ex); throw new SnowflakeSQLException(ex, SqlState.SQLCLIENT_UNABLE_TO_ESTABLISH_SQLCONNECTION, ErrorCode.CONNECTION_ERROR.getMessageCode(), ErrorCode.CONNECTION_ERROR.getMessageCode(), ex.getMessage()); } return new LoginOutput(sessionToken, masterToken, masterTokenValidityInSeconds, remMeToken, databaseVersion, databaseMajorVersion, databaseMinorVersion, newClientForUpgrade, healthCheckInterval, httpClientSocketTimeout, sessionDatabase, sessionSchema, sessionRole, commonParams); } /** * Renew a session * * @param loginInput login information * @return login output * @throws SFException if unexpected uri information * @throws SnowflakeSQLException if failed to renew the session */ static public LoginOutput renewSession(LoginInput loginInput) throws SFException, SnowflakeSQLException { AssertUtil.assertTrue(loginInput.getServerUrl() != null, "missing server URL for renewing session"); AssertUtil.assertTrue(loginInput.getSessionToken() != null, "missing session token for renewing session"); AssertUtil.assertTrue(loginInput.getMasterToken() != null, "missing master token for renewing session"); AssertUtil.assertTrue(loginInput.getHttpClient() != null, "missing http client for renewing session"); AssertUtil.assertTrue(loginInput.getLoginTimeout() >= 0, "negative login timeout for renewing session"); // build URL for login request URIBuilder uriBuilder; HttpPost postRequest = null; String sessionToken; String masterToken; try { uriBuilder = new 
URIBuilder(loginInput.getServerUrl()); uriBuilder.setPath(SF_PATH_TOKEN_REQUEST); uriBuilder.addParameter(SFSession.SF_QUERY_REQUEST_ID, UUID.randomUUID().toString()); postRequest = new HttpPost(uriBuilder.build()); } catch (URISyntaxException ex) { logger.error("Exception when creating http request", ex); throw new SFException(ex, ErrorCode.INTERNAL_ERROR, "unexpected URI syntax exception:3"); } try { // input json with old session token and request type, notice the // session token needs to be quoted. String json = "{\"oldSessionToken\":\"" + loginInput.getSessionToken() + "\", \"requestType\":" + 0 + "}"; // attach the login info json body to the post request StringEntity input = new StringEntity(json, Charset.forName("UTF-8")); input.setContentType("application/json"); postRequest.setEntity(input); postRequest.addHeader("accept", "application/json"); postRequest.setHeader(SF_HEADER_AUTHORIZATION, SF_HEADER_SNOWFLAKE_AUTHTYPE + " " + SF_HEADER_TOKEN_TAG + "=\"" + loginInput.getMasterToken() + "\""); logger.debug("old session token: {}, request type: 0, master token: {}", loginInput.getSessionToken(), loginInput.getMasterToken()); String theString = HttpUtil.executeRequest(postRequest, loginInput.getHttpClient(), loginInput.getLoginTimeout(), 0, null); // general method, same as with data binding JsonNode jsonNode = mapper.readTree(theString); // check the success field first if (!jsonNode.path("success").asBoolean()) { logger.debug("response = {}", theString); String errorCode = jsonNode.path("code").asText(); String message = jsonNode.path("message").asText(); EventUtil.triggerBasicEvent( Event.EventType.NETWORK_ERROR, "SessionUtil:renewSession failure, error code=" + errorCode + ", message=" + message, true); SnowflakeUtil.checkErrorAndThrowException(jsonNode); } // session token is in the data field of the returned json response sessionToken = jsonNode.path("data").path("sessionToken").asText(); masterToken = 
jsonNode.path("data").path("masterToken").asText(); } catch (IOException ex) { logger.error("IOException when renewing session: " + postRequest, ex); // Any EventType.NETWORK_ERRORs should have been triggered before // exception was thrown. throw new SFException(ex, ErrorCode.NETWORK_ERROR, ex.getMessage()); } LoginOutput loginOutput = new LoginOutput(); loginOutput.setSessionToken(sessionToken) .setMasterToken(masterToken); return loginOutput; } /** * Close a session * * @param loginInput login information * @throws SnowflakeSQLException if failed to close session * @throws SFException if failed to close session */ static public void closeSession(LoginInput loginInput) throws SFException, SnowflakeSQLException { logger.debug(" public void close() throws SFException"); // assert the following inputs are valid AssertUtil.assertTrue(loginInput.getServerUrl() != null, "missing server URL for closing session"); AssertUtil.assertTrue(loginInput.getSessionToken() != null, "missing session token for closing session"); AssertUtil.assertTrue(loginInput.getHttpClient() != null, "missing http client for closing session"); AssertUtil.assertTrue(loginInput.getLoginTimeout() >= 0, "missing login timeout for closing session"); HttpPost postRequest = null; try { URIBuilder uriBuilder; uriBuilder = new URIBuilder(loginInput.getServerUrl()); uriBuilder.addParameter(SF_QUERY_SESSION_DELETE, "true"); uriBuilder.addParameter(SFSession.SF_QUERY_REQUEST_ID, UUID.randomUUID().toString()); uriBuilder.setPath(SF_PATH_SESSION); postRequest = new HttpPost(uriBuilder.build()); postRequest.setHeader(SF_HEADER_AUTHORIZATION, SF_HEADER_SNOWFLAKE_AUTHTYPE + " " + SF_HEADER_TOKEN_TAG + "=\"" + loginInput.getSessionToken() + "\""); String theString = HttpUtil.executeRequest(postRequest, loginInput.getHttpClient(), loginInput.getLoginTimeout(), 0, null); JsonNode rootNode; logger.debug( "connection close response: {}", theString); rootNode = mapper.readTree(theString); 
SnowflakeUtil.checkErrorAndThrowException(rootNode); } catch (URISyntaxException ex) { throw new RuntimeException("unexpected URI syntax exception", ex); } catch (IOException ex) { logger.error("unexpected IO exception for: " + postRequest, ex); } catch (SnowflakeSQLException ex) { // ignore session expiration exception if (ex.getErrorCode() != Constants.SESSION_EXPIRED_GS_CODE) throw ex; } } /** * Given access token, query IDP URL snowflake app to get SAML response * We also need to perform important client side validation: * validate the post back url come back with the SAML response * contains the same prefix as the Snowflake's server url, which is the * intended destination url to Snowflake. * Explanation: * This emulates the behavior of IDP initiated login flow in the user * browser where the IDP instructs the browser to POST the SAML * assertion to the specific SP endpoint. This is critical in * preventing a SAML assertion issued to one SP from being sent to * another SP. * * @param loginInput * @param ssoUrl * @param oneTimeToken * @return * @throws SnowflakeSQLException */ private static String federatedFlowStep4( LoginInput loginInput, String ssoUrl, String oneTimeToken) throws SnowflakeSQLException { String responseHtml = ""; try { final URL url = new URL(ssoUrl); URI oktaGetUri = new URIBuilder() .setScheme(url.getProtocol()) .setHost(url.getHost()) .setPath(url.getPath()) .setParameter("RelayState", "%2Fsome%2Fdeep%2Flink") .setParameter("onetimetoken", oneTimeToken).build(); HttpGet httpGet = new HttpGet(oktaGetUri); HeaderGroup headers = new HeaderGroup(); headers.addHeader(new BasicHeader(HttpHeaders.ACCEPT, "*/*")); httpGet.setHeaders(headers.getAllHeaders()); responseHtml = HttpUtil.executeRequest(httpGet, loginInput.getHttpClient(), loginInput.getLoginTimeout(), 0, null); // step 5 String postBackUrl = getPostBackUrlFromHTML(responseHtml); if (!isPrefixEqual(postBackUrl, loginInput.getServerUrl())) { logger.debug("The specified authenticator {} 
and the destination URL " + "in the SAML assertion {} do not match.", loginInput.getAuthenticator(), postBackUrl); throw new SnowflakeSQLException( SqlState.SQLCLIENT_UNABLE_TO_ESTABLISH_SQLCONNECTION, ErrorCode.IDP_INCORRECT_DESTINATION.getMessageCode()); } } catch (IOException | URISyntaxException ex) { handleFederatedFlowError(loginInput, ex); } return responseHtml; } /** * Query IDP token url to authenticate and retrieve access token * * @param loginInput * @param tokenUrl * @return * @throws SnowflakeSQLException */ private static String federatedFlowStep3(LoginInput loginInput, String tokenUrl) throws SnowflakeSQLException { String oneTimeToken = ""; try { URL url = new URL(tokenUrl); URI tokenUri = url.toURI(); final HttpPost postRequest = new HttpPost(tokenUri); StringEntity params = new StringEntity("{\"username\":\"" + loginInput.getUserName() + "\",\"password\":\"" + loginInput.getPassword() + "\"}"); postRequest.setEntity(params); HeaderGroup headers = new HeaderGroup(); headers.addHeader(new BasicHeader(HttpHeaders.ACCEPT, "application/json")); headers.addHeader(new BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json")); postRequest.setHeaders(headers.getAllHeaders()); final String idpResponse = HttpUtil.executeRequestWithoutCookies(postRequest, loginInput.getHttpClient(), loginInput.getLoginTimeout(), 0, null); logger.debug("user is authenticated against {}.", loginInput.getAuthenticator()); // session token is in the data field of the returned json response final JsonNode jsonNode = mapper.readTree(idpResponse); oneTimeToken = jsonNode.get("cookieToken").asText(); } catch (IOException | URISyntaxException ex) { handleFederatedFlowError(loginInput, ex); } return oneTimeToken; } /** * Perform important client side validation: * validate both token url and sso url contains same prefix * (protocol + host + port) as the given authenticator url. 
* Explanation: * This provides a way for the user to 'authenticate' the IDP it is * sending his/her credentials to. Without such a check, the user could * be coerced to provide credentials to an IDP impersonator. * * @param loginInput * @param tokenUrl * @param ssoUrl * @throws SnowflakeSQLException */ private static void federatedFlowStep2( LoginInput loginInput, String tokenUrl, String ssoUrl) throws SnowflakeSQLException { try { if (!isPrefixEqual(loginInput.getAuthenticator(), tokenUrl) || !isPrefixEqual(loginInput.getAuthenticator(), ssoUrl)) { logger.debug("The specified authenticator {} is not supported.", loginInput.getAuthenticator()); throw new SnowflakeSQLException( SqlState.SQLCLIENT_UNABLE_TO_ESTABLISH_SQLCONNECTION, ErrorCode.IDP_CONNECTION_ERROR.getMessageCode()); } } catch (MalformedURLException ex) { handleFederatedFlowError(loginInput, ex); } } /** * Query Snowflake to obtain IDP token url and IDP SSO url * * @param loginInput * @throws SnowflakeSQLException */ private static JsonNode federatedFlowStep1(LoginInput loginInput) throws SnowflakeSQLException { JsonNode dataNode = null; try { URIBuilder fedUriBuilder = new URIBuilder(loginInput.getServerUrl()); fedUriBuilder.setPath(SF_PATH_AUTHENTICATOR_REQUEST); URI fedUrlUri = fedUriBuilder.build(); Map<String, Object> data = new HashMap<>(); data.put(ClientAuthnParameter.ACCOUNT_NAME.name(), loginInput.getAccountName()); data.put(ClientAuthnParameter.AUTHENTICATOR.name(), loginInput.getAuthenticator()); data.put(ClientAuthnParameter.CLIENT_APP_ID.name(), loginInput.getAppId()); data.put(ClientAuthnParameter.CLIENT_APP_VERSION.name(), loginInput.getAppVersion()); ClientAuthnDTO authnData = new ClientAuthnDTO(); authnData.setData(data); String json = mapper.writeValueAsString(authnData); // attach the login info json body to the post request StringEntity input = new StringEntity(json, Charset.forName("UTF-8")); input.setContentType("application/json"); HttpPost postRequest = new HttpPost(fedUrlUri); 
postRequest.setEntity(input); postRequest.addHeader("accept", "application/json"); final String gsResponse = HttpUtil.executeRequest(postRequest, loginInput.getHttpClient(), loginInput.getLoginTimeout(), 0, null); logger.debug("authenticator-request response: {}", gsResponse); JsonNode jsonNode = mapper.readTree(gsResponse); // check the success field first if (!jsonNode.path("success").asBoolean()) { logger.debug("response = {}", gsResponse); String errorCode = jsonNode.path("code").asText(); throw new SnowflakeSQLException( SqlState.SQLCLIENT_UNABLE_TO_ESTABLISH_SQLCONNECTION, ErrorCode.CONNECTION_ERROR.getMessageCode(), errorCode, jsonNode.path("message").asText()); } // session token is in the data field of the returned json response dataNode = jsonNode.path("data"); } catch (IOException | URISyntaxException ex) { handleFederatedFlowError(loginInput, ex); } return dataNode; } /** * Logs an error generated during the federated authentication flow and * re-throws it as a SnowflakeSQLException. * Note that we seperate IOExceptions since those tend to be network related. * * @param loginInput * @param ex * @throws SnowflakeSQLException */ private static void handleFederatedFlowError(LoginInput loginInput, Exception ex) throws SnowflakeSQLException { if (ex instanceof IOException) { logger.error("IOException when authenticating with " + loginInput.getAuthenticator(), ex); throw new SnowflakeSQLException(ex, SqlState.IO_ERROR, ErrorCode.NETWORK_ERROR.getMessageCode(), "Exception encountered when opening connection: " + ex.getMessage()); } logger.error("Exception when authenticating with " + loginInput.getAuthenticator(), ex); throw new SnowflakeSQLException(ex, SqlState.SQLCLIENT_UNABLE_TO_ESTABLISH_SQLCONNECTION, ErrorCode.CONNECTION_ERROR.getMessageCode(), ErrorCode.CONNECTION_ERROR.getMessageCode(), ex.getMessage()); } /** * FEDERATED FLOW * See SNOW-27798 for additional details. 
* * @return saml response * @throws SnowflakeSQLException */ static private String getSamlResponseUsingOkta(LoginInput loginInput) throws SnowflakeSQLException { JsonNode dataNode = federatedFlowStep1(loginInput); String tokenUrl = dataNode.path("tokenUrl").asText(); String ssoUrl = dataNode.path("ssoUrl").asText(); federatedFlowStep2(loginInput, tokenUrl, ssoUrl); final String oneTimeToken = federatedFlowStep3(loginInput, tokenUrl); final String responseHtml = federatedFlowStep4( loginInput, ssoUrl, oneTimeToken); return responseHtml; } /** * Verify if two input urls have the same protocol, host, and port. * * @param aUrlStr a source URL string * @param bUrlStr a target URL string * @return true if matched otherwise false * @throws MalformedURLException raises if a URL string is not valid. */ static boolean isPrefixEqual(String aUrlStr, String bUrlStr) throws MalformedURLException { URL aUrl = new URL(aUrlStr); URL bUrl = new URL(bUrlStr); int aPort = aUrl.getPort(); int bPort = bUrl.getPort(); if (aPort == -1 && "https".equals(aUrl.getProtocol())) { // default port number for HTTPS aPort = 443; } if (bPort == -1 && "https".equals(bUrl.getProtocol())) { // default port number for HTTPS bPort = 443; } // no default port number for HTTP is supported. return aUrl.getHost().equalsIgnoreCase(bUrl.getHost()) && aUrl.getProtocol().equalsIgnoreCase(bUrl.getProtocol()) && aPort == bPort; } /** * Extracts post back url from the HTML returned by the IDP * * @param html * @return */ static private String getPostBackUrlFromHTML(String html) { Document doc = Jsoup.parse(html); Elements e1 = doc.getElementsByTag("body"); Elements e2 = e1.get(0).getElementsByTag("form"); String postBackUrl = e2.first().attr("action"); return postBackUrl; } /** * Check if com.sun.security.enableCRLDP and com.sun.net.ssl.checkRevocation * are set to true * * @return true if both system properties set to true, false otherwise. 
*/ static public boolean checkCRLSystemProperty() { String enableCRLDP = System.getProperty("com.sun.security.enableCRLDP"); String checkRevocation = System.getProperty("com.sun.net.ssl.checkRevocation"); boolean CRLEnabled = false; if ((enableCRLDP != null && "true".equalsIgnoreCase(enableCRLDP)) && (checkRevocation != null && "true".equalsIgnoreCase(checkRevocation))) CRLEnabled = true; return CRLEnabled; } /** * Helper function to parse a JsonNode from a GS response * containing CommonParameters, emitting an EnumMap of parameters * * @param paramsNode parameters in JSON form * @return map object including key and value pairs */ public static Map<String, Object> getCommonParams(JsonNode paramsNode) { Map<String, Object> parameters = new HashMap<>(); for (JsonNode child : paramsNode) { // If there isn't a name then the response from GS must be erroneous. if (!child.hasNonNull("name")) { logger.error("Common Parameter JsonNode encountered with " + "no parameter name!"); continue; } // Look up the parameter based on the "name" attribute of the node. String paramName = child.path("name").asText(); // What type of value is it and what's the value? 
if (!child.hasNonNull("value")) { logger.debug("No value found for Common Parameter {}", child.path("name").asText()); continue; } if (STRING_PARAMS.contains(paramName.toUpperCase())) { parameters.put(paramName, child.path("value").asText()); } else if (INT_PARAMS.contains(paramName.toUpperCase())) { parameters.put(paramName, child.path("value").asInt()); } else if (BOOLEAN_PARAMS.contains(paramName.toUpperCase())) { parameters.put(paramName, child.path("value").asBoolean()); } else { logger.debug("Unknown Common Parameter: {}", paramName); } logger.debug("Parameter {}: {}", new Object[]{paramName, child.path("value").asText()}); } return parameters; } public static void updateSfDriverParamValues( Map<String, Object> parameters, SFSession session) { for (Map.Entry<String, Object> entry : parameters.entrySet()) { logger.debug("processing parameter {}", entry.getKey()); if ("CLIENT_DISABLE_INCIDENTS".equalsIgnoreCase(entry.getKey())) { SnowflakeDriver.setDisableIncidents((Boolean) entry.getValue()); } else if ( "JDBC_EXECUTE_RETURN_COUNT_FOR_DML".equalsIgnoreCase(entry.getKey())) { if (session != null) { session.setExecuteReturnCountForDML((Boolean) entry.getValue()); } } else if ( "CLIENT_SESSION_KEEP_ALIVE".equalsIgnoreCase(entry.getKey())) { if (session != null) { session.setEnableHeartbeat((Boolean) entry.getValue()); } } else if ( "AUTOCOMMIT".equalsIgnoreCase(entry.getKey())) { boolean autoCommit = (Boolean) entry.getValue(); if (session != null && session.getAutoCommit() != autoCommit) { session.setAutoCommit(autoCommit); } } else if ("JDBC_RS_COLUMN_CASE_INSENSITIVE".equalsIgnoreCase(entry.getKey())) { if (session != null) { session.setRsColumnCaseInsensitive((boolean) entry.getValue()); } } else if ("CLIENT_METADATA_REQUEST_USE_CONNECTION_CTX".equalsIgnoreCase(entry.getKey())) { if (session != null) { session.setMetadataRequestUseConnectionCtx((boolean) entry.getValue()); } } else if ("CLIENT_TIMESTAMP_TYPE_MAPPING".equalsIgnoreCase(entry.getKey())) { if 
(session != null) { session.setTimestampMappedType(SnowflakeType.valueOf( ((String) entry.getValue()).toUpperCase())); } } else if ("JDBC_TREAT_DECIMAL_AS_INT".equalsIgnoreCase(entry.getKey())) { if (session != null) { session.setJdbcTreatDecimalAsInt((boolean) entry.getValue()); } } else if ("JDBC_ENABLE_COMBINED_DESCRIBE".equalsIgnoreCase(entry.getKey())) { if (session != null) { session.setEnableCombineDescribe((boolean) entry.getValue()); } } } } }
package nl.bitwalker.useragentutils; import java.io.Serializable; /** * @author harald * */ public class UserAgent implements Serializable { private static final long serialVersionUID = 7025462762784240212L; private OperatingSystem operatingSystem = OperatingSystem.UNKNOWN; private Browser browser = Browser.UNKNOWN; private int id; private String userAgentString; public UserAgent(OperatingSystem operatingSystem, Browser browser) { this.operatingSystem = operatingSystem; this.browser = browser; this.id = (( operatingSystem.getId() << 16) + browser.getId()); } public UserAgent(String userAgentString) { Browser browser = Browser.parseUserAgentString(userAgentString); OperatingSystem operatingSystem = OperatingSystem.UNKNOWN; // BOTs don't have an interesting OS for us if (browser != Browser.BOT) operatingSystem = OperatingSystem.parseUserAgentString(userAgentString); this.operatingSystem = operatingSystem; this.browser = browser; this.id = (( operatingSystem.getId() << 16) + browser.getId()); this.userAgentString = userAgentString; } /** * @param userAgentString * @return UserAgent */ public static UserAgent parseUserAgentString(String userAgentString) { return new UserAgent(userAgentString); } /** * Detects the detailed version information of the browser. Depends on the userAgent to be available. * Use it only after using UserAgent(String) or UserAgent.parseUserAgent(String). * Returns null if it can not detect the version information. 
* @return Version */ public Version getBrowserVersion() { return this.browser.getVersion(this.userAgentString); } /** * @return the system */ public OperatingSystem getOperatingSystem() { return operatingSystem; } /** * @return the browser */ public Browser getBrowser() { return browser; } /** * Returns an unique integer value of the operating system & browser combination * @return the id */ public int getId() { return id; } /** * Combined string representation of both enums */ public String toString() { return this.operatingSystem.toString() + "-" + this.browser.toString(); } /** * Returns UserAgent based on specified unique id * @param id * @return */ public static UserAgent valueOf(int id) { OperatingSystem operatingSystem = OperatingSystem.valueOf((short) (id >> 16)); Browser browser = Browser.valueOf( (short) (id & 0x0FFFF)); return new UserAgent(operatingSystem,browser); } /** * Returns UserAgent based on combined string representation * @param name * @return */ public static UserAgent valueOf(String name) { if (name == null) throw new NullPointerException("Name is null"); String[] elements = name.split("-"); if (elements.length == 2) { OperatingSystem operatingSystem = OperatingSystem.valueOf(elements[0]); Browser browser = Browser.valueOf(elements[1]); return new UserAgent(operatingSystem,browser); } throw new IllegalArgumentException( "Invalid string for userAgent " + name); } /* (non-Javadoc) * @see java.lang.Object#hashCode() */ @Override public int hashCode() { final int prime = 31; int result = 1; result = prime * result + ((browser == null) ? 0 : browser.hashCode()); result = prime * result + id; result = prime * result + ((operatingSystem == null) ? 
0 : operatingSystem.hashCode()); return result; } /* (non-Javadoc) * @see java.lang.Object#equals(java.lang.Object) */ @Override public boolean equals(Object obj) { if (this == obj) return true; if (obj == null) return false; if (getClass() != obj.getClass()) return false; final UserAgent other = (UserAgent) obj; if (browser == null) { if (other.browser != null) return false; } else if (!browser.equals(other.browser)) return false; if (id != other.id) return false; if (operatingSystem == null) { if (other.operatingSystem != null) return false; } else if (!operatingSystem.equals(other.operatingSystem)) return false; return true; } }
package nom.bdezonia.zorbage.algorithm;

import nom.bdezonia.zorbage.type.algebra.Addition;
import nom.bdezonia.zorbage.type.algebra.Algebra;
import nom.bdezonia.zorbage.type.algebra.BitOperations;
import nom.bdezonia.zorbage.type.algebra.Bounded;
import nom.bdezonia.zorbage.type.algebra.Ordered;
import nom.bdezonia.zorbage.type.algebra.Unity;

//Adapted from an algorithm as published by Stepanov and Rose 2015.

/**
 * Generic integer division with remainder, implemented only in terms of
 * comparison, addition, negation, and bit shifting supplied by an algebra.
 * Uses a repeated-doubling ("Egyptian division") strategy: first find the
 * largest doubling of the divisor not exceeding the dividend, then halve it
 * back down while accumulating the quotient.
 *
 * @author Barry DeZonia
 */
public class DivMod {

	/**
	 * Computes quotient and remainder of a / b, writing results through the
	 * algebra's assign operation.
	 *
	 * @param alg algebra providing the arithmetic/comparison operations
	 * @param a dividend
	 * @param b divisor; must not be zero
	 * @param d receives the quotient
	 * @param m receives the remainder
	 * @throws IllegalArgumentException if b is zero, or if an internal
	 *         invariant of the signed branch is violated (sanity checks)
	 */
	public static <T extends Algebra<T,U> & Ordered<U> & Unity<U> & Addition<U> & BitOperations<U> & Bounded<U>,U>
		void compute(T alg, U a, U b, U d, U m)
	{
		if (alg.isZero().call(b))
			throw new IllegalArgumentException("divide by zero");
		U zero = alg.construct();
		U one = alg.construct();
		U min = alg.construct();
		alg.unity().call(one);
		alg.minBound().call(min);
		// minBound < 0 distinguishes signed types from unsigned ones.
		if (alg.isLess().call(min, zero)) {
			// signed numbers: work in negative numbers to accommodate -minint asymmetry
			// (in two's complement, -MIN_VALUE overflows, but every positive
			// value can be safely negated, so the whole computation runs in
			// the non-positive domain).
			boolean aPos;
			boolean bPos;
			U aNeg = alg.construct();
			U bNeg = alg.construct();
			// Map a to a non-positive value, remembering its original sign.
			if (alg.isGreater().call(a, zero)) {
				aPos = true;
				alg.negate().call(a, aNeg);
			}
			else {
				aPos = false;
				alg.assign().call(a, aNeg);
			}
			// Same for b.
			if (alg.isGreater().call(b, zero)) {
				bPos = true;
				alg.negate().call(b, bNeg);
			}
			else {
				bPos = false;
				alg.assign().call(b, bNeg);
			}
			// In the negative domain, aNeg > bNeg means |a| < |b|:
			// quotient is 0 and the remainder is a itself.
			if (alg.isGreater().call(aNeg, bNeg)) {
				if (aPos) alg.negate().call(aNeg, aNeg);
				alg.assign().call(zero, d);
				alg.assign().call(aNeg, m);
				return;
			}
			U c = alg.construct();
			// c = largest doubling of bNeg still reachable from aNeg.
			largestDoublingN(alg, aNeg, bNeg, c);
			// Sanity checks: all working values must stay non-positive.
			if (alg.isGreater().call(aNeg, zero)) throw new IllegalArgumentException("bad aNeg");
			if (alg.isGreater().call(bNeg, zero)) throw new IllegalArgumentException("bad bNeg");
			if (alg.isGreater().call(c, zero)) throw new IllegalArgumentException("bad c");
			// n accumulates the quotient magnitude, starting at 1 for the
			// initial subtraction of the largest doubling.
			U n = alg.construct(one);
			alg.subtract().call(aNeg, c, aNeg);
			// Halve c back down to bNeg, doubling n each step and subtracting
			// c whenever it still "fits" (c >= aNeg in the negative domain).
			while (!alg.isEqual().call(c, bNeg)) {
				alg.bitShiftRight().call(1, c, c);
				alg.add().call(n, n, n);
				if (alg.isGreaterEqual().call(c, aNeg)) {
					alg.subtract().call(aNeg, c, aNeg);
					alg.add().call(n, one, n);
				}
				if (alg.isGreater().call(aNeg, zero)) throw new IllegalArgumentException("bad aNeg");
				if (alg.isGreater().call(bNeg, zero)) throw new IllegalArgumentException("bad bNeg");
				if (alg.isGreater().call(c, zero)) throw new IllegalArgumentException("bad c");
			}
			// Fix up signs: quotient is negative when operand signs differ;
			// remainder takes the sign of the dividend.
			if (aPos != bPos)
				alg.negate().call(n, n);
			if (aPos)
				alg.negate().call(aNeg, aNeg);
			alg.assign().call(n, d);
			alg.assign().call(aNeg, m);
		}
		else {
			// unsigned numbers: work with positive number algorithm
			U aPos = alg.construct(a);
			U bPos = alg.construct(b);
			// a < b: quotient 0, remainder a.
			if (alg.isLess().call(aPos, bPos)) {
				alg.assign().call(zero, d);
				alg.assign().call(aPos, m);
				return;
			}
			U c = alg.construct();
			// c = largest doubling of bPos that is <= aPos.
			largestDoublingP(alg, aPos, bPos, c);
			U n = alg.construct(one);
			alg.subtract().call(aPos, c, aPos);
			// Halve c back down to bPos, accumulating quotient bits in n.
			while (!alg.isEqual().call(c, bPos)) {
				alg.bitShiftRight().call(1, c, c);
				alg.add().call(n, n, n);
				if (alg.isLessEqual().call(c, aPos)) {
					alg.subtract().call(aPos, c, aPos);
					alg.add().call(n, one, n);
				}
			}
			alg.assign().call(n, d);
			alg.assign().call(aPos, m);
		}
	}

	/**
	 * Positive-domain helper: sets c to the largest value of the form
	 * b * 2^k (obtained by repeated self-addition) such that a - c >= c
	 * fails only on the final step, i.e. the largest doubling of b not
	 * exceeding a. Assumes a >= b >= 0 per the caller's checks.
	 */
	private static <T extends Algebra<T,U> & Ordered<U> & Addition<U>,U>
		void largestDoublingP(T alg, U a, U b, U c)
	{
		U tmpB = alg.construct(b);
		U diff = alg.construct();
		alg.subtract().call(a, tmpB, diff);
		// Keep doubling while the remainder after subtracting would still
		// allow another subtraction of the doubled value.
		while (alg.isGreaterEqual().call(diff, tmpB)) {
			alg.add().call(tmpB, tmpB, tmpB);
			alg.subtract().call(a, tmpB, diff);
		}
		alg.assign().call(tmpB, c);
	}

	/**
	 * Negative-domain mirror of {@link #largestDoublingP}: comparisons are
	 * inverted because all values are non-positive (a <= b <= 0), which is
	 * what lets the signed path avoid the -minint negation overflow.
	 */
	private static <T extends Algebra<T,U> & Ordered<U> & Addition<U>,U>
		void largestDoublingN(T alg, U a, U b, U c)
	{
		U tmpB = alg.construct(b);
		U diff = alg.construct();
		alg.subtract().call(a, tmpB, diff);
		while (alg.isLessEqual().call(diff, tmpB)) {
			alg.add().call(tmpB, tmpB, tmpB);
			alg.subtract().call(a, tmpB, diff);
		}
		alg.assign().call(tmpB, c);
	}
}
package org.cojen.tupl.rows;

import java.lang.invoke.CallSite;
import java.lang.invoke.ConstantCallSite;
import java.lang.invoke.MethodHandle;
import java.lang.invoke.MethodType;
import java.lang.invoke.MutableCallSite;

import java.util.function.Supplier;

import org.cojen.maker.MethodMaker;

/**
 * Retries generating code after an exception, over and over until it succeeds.
 *
 * @author Brian S O'Neill
 */
public class ExceptionCallSite extends MutableCallSite {
    /**
     * Returns a ConstantCallSite when the generator doesn't fail, which means that it's just a
     * plain wrapper around a MethodHandle. Otherwise, returns an ExceptionCallSite which calls
     * the generator when invoked, and when the generator succeeds, the target is replaced.
     * Assuming that code inlining magic works, the extra level of indirection will eventually
     * be removed.
     *
     * @param generator returns a MethodHandle or a Failed object
     * @throws IllegalStateException if the generator returns anything else
     */
    static CallSite make(Supplier<Object> generator) {
        Object result = generator.get();
        if (result instanceof MethodHandle) {
            // Success on first try: no retry machinery needed.
            return new ConstantCallSite((MethodHandle) result);
        } else if (result instanceof Failed) {
            return new ExceptionCallSite(generator, (Failed) result);
        } else {
            throw new IllegalStateException(String.valueOf(result));
        }
    }

    /**
     * Captures a failed generation attempt: the intended method type, the
     * in-progress MethodMaker for the retry stub, and the original cause.
     */
    static class Failed {
        final MethodType mt;
        final MethodMaker mm;
        final Throwable ex;

        Failed(MethodType mt, MethodMaker mm, Throwable ex) {
            this.mt = mt;
            this.mm = mm;
            this.ex = ex;
        }
    }

    // Re-invoked on each call until it produces a MethodHandle.
    private final Supplier<Object> mGenerator;
    // Initial failure; thrown once, only to the thread that triggered it.
    // NOTE(review): accessed without synchronization — presumably safe under
    // the call-site bootstrapping rules, but confirm the threading contract.
    private Throwable mException;
    private Thread mOrigin;

    private ExceptionCallSite(Supplier<Object> generator, Failed f) {
        super(f.mt);
        mGenerator = generator;
        mException = f.ex;
        mOrigin = Thread.currentThread();

        // Generate a stub (via cojen.maker) that: calls this.call() to retry
        // generation, installs the produced handle as the new target, and then
        // invokes that handle for the current invocation.
        var ecs = f.mm.var(ExceptionCallSite.class).setExact(this);
        var mh = ecs.invoke("call");
        ecs.invoke("setTarget", mh);

        // Invoke the MethodHandle directly after the retry succeeds, but subsequent calls will
        // use the new target directly.
        Class<?>[] paramTypes = f.mt.parameterArray();
        var params = new Object[paramTypes.length];
        for (int i=0; i<params.length; i++) {
            params[i] = f.mm.param(i);
        }
        var result = mh.invoke(f.mt.returnType(), "invokeExact", paramTypes, params);

        // Forward the handle's result, respecting a void return type.
        if (f.mt.returnType() == void.class) {
            f.mm.return_();
        } else {
            f.mm.return_(result);
        }

        setTarget(f.mm.finish());
    }

    /**
     * Retry hook invoked by the generated stub. Re-runs the generator and
     * returns the resulting MethodHandle on success.
     *
     * @throws Throwable the stored initial exception (once, on the originating
     *         thread), the cause of a repeated failure, or IllegalStateException
     *         for an unexpected generator result
     */
    public MethodHandle call() throws Throwable {
        Throwable e = mException;
        if (e != null && mOrigin == Thread.currentThread()) {
            // Throw the initial exception, and then retry later.
            mException = null;
            mOrigin = null;
            throw e;
        }
        Object result = mGenerator.get();
        if (result instanceof MethodHandle) {
            return (MethodHandle) result;
        } else if (result instanceof Failed) {
            throw ((Failed) result).ex;
        } else {
            throw new IllegalStateException(String.valueOf(result));
        }
    }
}
package org.cojen.tupl.rows;

import java.lang.invoke.CallSite;
import java.lang.invoke.MethodHandle;
import java.lang.invoke.MethodHandles;
import java.lang.invoke.MethodType;
import java.lang.invoke.VarHandle;

import java.lang.ref.WeakReference;

import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.TreeMap;

import java.util.function.IntFunction;

import org.cojen.maker.ClassMaker;
import org.cojen.maker.Label;
import org.cojen.maker.MethodMaker;
import org.cojen.maker.Variable;

import org.cojen.tupl.Cursor;
import org.cojen.tupl.DatabaseException;
import org.cojen.tupl.Transaction;
import org.cojen.tupl.View;

import org.cojen.tupl.filter.AndFilter;
import org.cojen.tupl.filter.ColumnToArgFilter;
import org.cojen.tupl.filter.ColumnToColumnFilter;
import org.cojen.tupl.filter.OrFilter;
import org.cojen.tupl.filter.RowFilter;
import org.cojen.tupl.filter.Visitor;

import org.cojen.tupl.io.Utils;

/**
 * Makes ScanControllerFactory classes which perform basic filtering.
 *
 * @author Brian S O'Neill
 */
public class FilteredScanMaker<R> {
    // Monotonic counter used to name generated sub-packages; accessed via VarHandle
    // for atomic increment.
    private static long cFilterNum;
    private static final VarHandle cFilterNumHandle;

    static {
        try {
            cFilterNumHandle = MethodHandles.lookup().findStaticVarHandle
                (FilteredScanMaker.class, "cFilterNum", long.class);
        } catch (Throwable e) {
            throw Utils.rethrow(e);
        }
    }

    private final WeakReference<RowStore> mStoreRef;
    private final Class<?> mTableClass;
    private final Class<R> mRowType;
    private final RowGen mRowGen;
    private final boolean mIsPrimaryTable;
    private final long mIndexId;
    private final RowFilter mFilter, mLowBound, mHighBound;
    private final String mFilterStr;
    private final ClassMaker mFilterMaker;
    private final MethodMaker mFilterCtorMaker;

    // Bound to mFilterCtorMaker.
    private final ColumnCodec[] mKeyCodecs, mValueCodecs;

    /**
     * Three RowFilter objects are passed in, which correspond to results of the
     * RowFilter.rangeExtract method. If all filters are null, then nothing is filtered at all,
     * and this class shouldn't be used.
     *
     * <p>The low/high bound filters operate only against the key column codecs, and so they
     * don't need to handle schema versions. The remainder filter must handle schema versions,
     * and it maintains a weak reference to the RowFilter object, as a minor optimization. If
     * the RowFilter goes away, the filterStr is needed to create it again.
     *
     * @param storeRef is passed along to the generated code
     * @param unfiltered defines the encode methods; the decode method will be overridden
     * @param filter the filter to apply to all rows which are in bounds, or null if none
     * @param filterStr the canonical string for the filter param, or null if none
     * @param lowBound pass null for open bound
     * @param highBound pass null for open bound
     */
    public FilteredScanMaker(WeakReference<RowStore> storeRef, Class<?> tableClass,
                             Class<? extends SingleScanController<R>> unfiltered,
                             Class<R> rowType, RowInfo rowInfo, long indexId,
                             RowFilter filter, String filterStr,
                             RowFilter lowBound, RowFilter highBound)
    {
        mStoreRef = storeRef;
        mTableClass = tableClass;
        mRowType = rowType;
        mRowGen = rowInfo.rowGen();
        mIsPrimaryTable = RowInfo.find(rowType) == rowInfo;
        mIndexId = indexId;
        mFilter = filter;
        mLowBound = lowBound;
        mHighBound = highBound;
        mFilterStr = filterStr;

        // Generate a sub-package with an increasing number to facilitate unloading.
        long filterNum = (long) cFilterNumHandle.getAndAdd(1L);

        mFilterMaker = mRowGen.beginClassMaker(getClass(), rowType, "f" + filterNum, "Filter")
            .final_().extend(unfiltered).implement(ScanControllerFactory.class);

        mFilterCtorMaker = mFilterMaker.addConstructor(Object[].class).varargs().private_();

        mKeyCodecs = ColumnCodec.bind(mRowGen.keyCodecs(), mFilterCtorMaker);
        mValueCodecs = ColumnCodec.bind(mRowGen.valueCodecs(), mFilterCtorMaker);

        // Need a constructor for the factory singleton instance.
        mFilterMaker.addConstructor().private_().invokeSuperConstructor(null, false, null, false);
    }

    /**
     * Completes the generated filter class and returns its singleton factory instance.
     */
    public ScanControllerFactory<R> finish() {
        // Finish the filter class...

        // Define the fields to hold the filter arguments.
        if (mFilter != null) {
            mFilter.accept(new Visitor() {
                // Tracks argument numbers already prepared, so each arg field is created once.
                private final HashSet<Integer> mAdded = new HashSet<>();

                @Override
                public void visit(ColumnToArgFilter filter) {
                    int argNum = filter.argument();
                    if (mAdded.add(argNum)) {
                        int colNum = columnNumberFor(filter.column().name);
                        boolean in = filter.isIn(filter.operator());
                        Variable argVar = mFilterCtorMaker.param(0).aget(argNum);
                        codecFor(colNum).filterPrepare(in, argVar, argNum);
                    }
                }
            });
        }

        // Super constructor signature: (lowKey, lowInclusive, highKey, highInclusive);
        // encodeBound fills in slots 0/1 (low) and 2/3 (high).
        var ctorParams = new Object[] {null, false, null, false};

        if (mLowBound != null || mHighBound != null) {
            var argVarMap = new HashMap<ColumnArg, Variable>();
            if (mLowBound != null) {
                encodeBound(argVarMap, ctorParams, mLowBound, true);
            }
            if (mHighBound != null) {
                encodeBound(argVarMap, ctorParams, mHighBound, false);
            }
        }

        mFilterCtorMaker.invokeSuperConstructor(ctorParams);

        addDecodeRowMethod();

        // Provide access to the inherited markAllClean method.
        {
            Class<?> rowClass = RowMaker.find(mRowType);
            MethodMaker mm = mFilterMaker.addMethod(null, "markAllClean", rowClass).static_();
            mm.super_().invoke("markAllClean", mm.param(0));
        }

        // Define a singleton instance which serves as the factory.
        {
            mFilterMaker.addField(mFilterMaker, "factory").private_().static_().final_();
            MethodMaker mm = mFilterMaker.addClinit();
            mm.field("factory").set(mm.new_(mFilterMaker));
        }

        // Define the factory method.
        {
            MethodMaker mm = mFilterMaker.addMethod
                (ScanController.class, "newScanController", Object[].class).public_().varargs();
            mm.return_(mm.new_(mFilterMaker, mm.param(0)));
        }

        MethodHandles.Lookup filterLookup = mFilterMaker.finishLookup();
        Class<?> filterClass = filterLookup.lookupClass();

        try {
            var vh = filterLookup.findStaticVarHandle(filterClass, "factory", filterClass);
            return (ScanControllerFactory<R>) vh.get();
        } catch (Throwable e) {
            throw Utils.rethrow(e);
        }
    }

    // Maps a column name to its number within the row definition, or null if absent.
    private Integer columnNumberFor(String colName) {
        return mRowGen.columnNumbers().get(colName);
    }

    // Key codecs come first; value codecs follow with numbers offset by the key count.
    private ColumnCodec codecFor(int colNum) {
        ColumnCodec[] codecs = mKeyCodecs;
        return colNum < codecs.length ? codecs[colNum] : mValueCodecs[colNum - codecs.length];
    }

    /**
     * Simple hashtable key.
     */
    private static class ColumnArg {
        ColumnInfo column;
        int argument;

        @Override
        public int hashCode() {
            return column.hashCode() * 31 + argument;
        }

        @Override
        public boolean equals(Object obj) {
            if (this == obj) {
                return true;
            }
            var other = (ColumnArg) obj;
            return argument == other.argument && column.equals(other.column);
        }
    }

    /**
     * Adds code to the constructor.
     *
     * <p>Encodes one range bound (low or high) into a byte array at construction time and
     * stores the array plus its inclusiveness flag into ctorParams (slots 0/1 for low,
     * 2/3 for high), which finish() passes to the super constructor.
     *
     * @param argVarMap caches converted argument variables, shared between both bounds
     * @param low true to encode the low bound, false for the high bound
     */
    private void encodeBound(Map<ColumnArg, Variable> argVarMap, Object[] ctorParams,
                             RowFilter bound, boolean low)
    {
        ColumnCodec[] codecs = mRowGen.keyCodecs();
        var argVars = new Variable[codecs.length];

        var visitor = new Visitor() {
            int lastOp = -1;
            int pos = 0;

            @Override
            public void visit(ColumnToArgFilter filter) {
                lastOp = filter.operator();
                ColumnInfo column = filter.column();
                int argument = filter.argument();

                var key = new ColumnArg();
                key.column = column;
                key.argument = argument;

                Variable argVar = argVarMap.get(key);
                if (argVar == null) {
                    argVar = mFilterCtorMaker.param(0).aget(filter.argument());
                    argVar = ConvertCallSite.make(mFilterCtorMaker, column.type, argVar);
                    argVarMap.put(key, argVar);
                }

                argVars[pos++] = argVar;
            }
        };

        bound.accept(visitor);

        int lastOp = visitor.lastOp;
        int numArgs = visitor.pos;

        boolean inclusive;
        boolean increment = false;

        // When the bound is a key prefix (numArgs < codecs.length), a strict operator is
        // realized by incrementing the encoded prefix and flipping inclusiveness.
        if (low) {
            switch (lastOp) {
            case ColumnToArgFilter.OP_GE:
                inclusive = true;
                break;
            case ColumnToArgFilter.OP_GT:
                if (numArgs == codecs.length) {
                    inclusive = false;
                } else {
                    inclusive = true;
                    increment = true;
                }
                break;
            default:
                throw new AssertionError();
            }
        } else {
            switch (lastOp) {
            case ColumnToArgFilter.OP_LT:
                inclusive = false;
                break;
            case ColumnToArgFilter.OP_LE:
                if (numArgs == codecs.length) {
                    inclusive = true;
                } else {
                    inclusive = false;
                    increment = true;
                }
                break;
            default:
                throw new AssertionError();
            }
        }

        var boundCodecs = new ColumnCodec[numArgs];

        // Determine the minimum byte array size and prepare the encoders.
        int minSize = 0;
        for (int i=0; i<numArgs; i++) {
            ColumnCodec codec = codecs[i].bind(mFilterCtorMaker);
            boundCodecs[i] = codec;
            minSize += codec.minSize();
            codec.encodePrepare();
        }
        codecs = boundCodecs;

        // Generate code which determines the additional runtime length.
        Variable totalVar = null;
        for (int i=0; i<codecs.length; i++) {
            totalVar = codecs[i].encodeSize(argVars[i], totalVar);
        }

        // Generate code which allocates the destination byte array.
        Variable dstVar;
        if (totalVar == null) {
            dstVar = mFilterCtorMaker.new_(byte[].class, minSize);
        } else {
            if (minSize != 0) {
                totalVar = totalVar.add(minSize);
            }
            dstVar = mFilterCtorMaker.new_(byte[].class, totalVar);
        }

        // Generate code which fills in the byte array.
        var offsetVar = mFilterCtorMaker.var(int.class).set(0);
        for (int i=0; i<codecs.length; i++) {
            codecs[i].encode(argVars[i], dstVar, offsetVar);
        }

        if (increment) {
            var overflowedVar = mFilterCtorMaker.var(RowUtils.class)
                .invoke("increment", dstVar, 0, dstVar.alength());
            Label noOverflow = mFilterCtorMaker.label();
            overflowedVar.ifTrue(noOverflow);
            // Overflow: the prefix was all-max. An overflowed low bound scans nothing
            // (EMPTY), and an overflowed high bound is open-ended (null).
            if (low) {
                dstVar.set(mFilterCtorMaker.var(ScanController.class).field("EMPTY"));
            } else {
                dstVar.set(null);
            }
            noOverflow.here();
        }

        int ctorParamOffset = low ? 0 : 2;
        ctorParams[ctorParamOffset++] = dstVar;
        ctorParams[ctorParamOffset] = inclusive;
    }

    /**
     * Overrides the inherited decodeRow method with one which applies the remainder filter.
     * Does nothing if there is no remainder filter.
     */
    private void addDecodeRowMethod() {
        if (mFilter == null) {
            // No remainder filter, so rely on inherited method.
            return;
        }

        // Specified by RowDecoderEncoder.
        MethodMaker mm = mFilterMaker.addMethod
            (Object.class, "decodeRow", byte[].class, byte[].class, Object.class).public_();

        if (mIsPrimaryTable) {
            // The decode method is implemented using indy, to support multiple schema versions.
            var indy = mm.var(FilteredScanMaker.class).indy
                ("indyDecodeRow", mStoreRef, mTableClass, mRowType, mIndexId, mFilter, mFilterStr);

            var valueVar = mm.param(1);
            var schemaVersion = TableMaker.decodeSchemaVersion(mm, valueVar);

            mm.return_(indy.invoke(Object.class, "decodeRow", null, schemaVersion,
                                   mm.param(0), valueVar, mm.param(2), mm.this_()));
        } else {
            // Decoding secondary index row is simpler because it has no schema version.
            Class<?> rowClass = RowMaker.find(mRowType);
            var visitor = new DecodeVisitor
                (mm, 0, mTableClass, rowClass, mRowGen, null, mm.this_());
            mFilter.accept(visitor);
            visitor.done();
        }
    }

    /**
     * Bootstrap method for the per-schema-version decodeRow implementation. Returns a
     * SwitchCallSite which dispatches on the schema version, lazily generating a case
     * via DecodeMaker for each version encountered.
     */
    public static CallSite indyDecodeRow(MethodHandles.Lookup lookup, String name, MethodType mt,
                                         WeakReference<RowStore> storeRef,
                                         Class<?> tableClass, Class<?> rowType, long indexId,
                                         RowFilter filter, String filterStr)
    {
        var dm = new DecodeMaker
            (lookup, mt, storeRef, tableClass, rowType, indexId, filter, filterStr);
        return new SwitchCallSite(lookup, mt, dm);
    }

    private static class DecodeMaker implements IntFunction<Object> {
        private final MethodHandles.Lookup mLookup;
        private final MethodType mMethodType;
        private final WeakReference<RowStore> mStoreRef;
        private final Class<?> mTableClass;
        private final Class<?> mRowType;
        private final long mIndexId;
        private final String mFilterStr;

        // The DecodeMaker isn't defined as a lambda function because this field cannot be final.
        private WeakReference<RowFilter> mFilterRef;

        DecodeMaker(MethodHandles.Lookup lookup, MethodType mt,
                    WeakReference<RowStore> storeRef, Class<?> tableClass,
                    Class<?> rowType, long indexId, RowFilter filter, String filterStr)
        {
            mLookup = lookup;
            mMethodType = mt.dropParameterTypes(0, 1);
            mStoreRef = storeRef;
            mTableClass = tableClass;
            mRowType = rowType;
            mIndexId = indexId;
            mFilterStr = filterStr;
            mFilterRef = new WeakReference<>(filter);
        }

        /**
         * Defined in IntFunction, needed by SwitchCallSite.
         *
         * @return MethodHandle or ExceptionCallSite.Failed
         */
        @Override
        public Object apply(int schemaVersion) {
            MethodMaker mm = MethodMaker.begin(mLookup, "case", mMethodType);

            // Re-parse the filter from its canonical string if the weak reference cleared.
            RowFilter filter = mFilterRef.get();
            if (filter == null) {
                filter = AbstractTable.parse(mRowType, mFilterStr);
                mFilterRef = new WeakReference<>(filter);
            }

            RowStore store = mStoreRef.get();
            if (store == null) {
                mm.new_(DatabaseException.class, "Closed").throw_();
                return mm.finish();
            }

            RowInfo rowInfo;
            MethodHandle decoder;

            try {
                if (schemaVersion != 0) {
                    rowInfo = store.rowInfo(mRowType, mIndexId, schemaVersion);
                } else {
                    // No value columns to decode, and the primary key cannot change.
                    RowInfo dstRowInfo = RowInfo.find(mRowType);
                    rowInfo = new RowInfo(dstRowInfo.name);
                    rowInfo.keyColumns = dstRowInfo.keyColumns;
                    rowInfo.valueColumns = Collections.emptyNavigableMap();
                    rowInfo.allColumns = new TreeMap<>(rowInfo.keyColumns);
                }

                // Obtain the MethodHandle which fully decodes the value columns.
                decoder = (MethodHandle) mLookup.findStatic
                    (mLookup.lookupClass(), "decodeValueHandle",
                     MethodType.methodType(MethodHandle.class, int.class))
                    .invokeExact(schemaVersion);
            } catch (Throwable e) {
                // Let ExceptionCallSite retry the generation later.
                return new ExceptionCallSite.Failed(mMethodType, mm, e);
            }

            Class<?> rowClass = RowMaker.find(mRowType);
            RowGen rowGen = rowInfo.rowGen();
            int valueOffset = RowUtils.lengthPrefixPF(schemaVersion);

            var visitor = new DecodeVisitor
                (mm, valueOffset, mTableClass, rowClass, rowGen, decoder, null);
            filter.accept(visitor);
            visitor.done();

            return mm.finish();
        }
    }

    /**
     * Generates code to filter and decode rows for a specific schema version.
     */
    private static class DecodeVisitor extends Visitor {
        private final MethodMaker mMaker;
        private final int mValueOffset;
        private final Class<?> mTableClass;
        private final Class<?> mRowClass;
        private final RowGen mRowGen;
        private final MethodHandle mDecoder;
        private final Variable mDecoderVar;
        private final ColumnCodec[] mKeyCodecs, mValueCodecs;

        // Branch targets for the filter expression currently being generated.
        private Label mPass, mFail;

        // Caches of columns already located/decoded, to avoid redundant generated code.
        private LocatedColumn[] mLocatedKeys;
        private int mHighestLocatedKey;

        private LocatedColumn[] mLocatedValues;
        private int mHighestLocatedValue;

        /**
         * Supports two forms of methods:
         *
         *     R decodeRow(byte[] key, byte[] value, R row, decoder/filter)
         *     R decodeRow(byte[] key, byte[] value, R row)
         *
         * When using the first form, a decoder MethodHandle must be provided. When using the
         * second form, a decoderVar must be provided.
         *
         * @param mm signature: R decodeRow(byte[] key, byte[] value, R row [, decoder/filter])
         * @param valueOffset offset to skip past the schema version
         * @param tableClass current table implementation class
         * @param rowClass current row implementation
         * @param rowGen actual row definition to be decoded
         * @param decoder performs full decoding of the value columns
         * @param decoderVar the actual decoder/filter instance
         */
        DecodeVisitor(MethodMaker mm, int valueOffset, Class<?> tableClass,
                      Class<?> rowClass, RowGen rowGen,
                      MethodHandle decoder, Variable decoderVar)
        {
            mMaker = mm;
            mValueOffset = valueOffset;
            mTableClass = tableClass;
            mRowClass = rowClass;
            mRowGen = rowGen;
            mDecoder = decoder;

            // Exactly one of decoder or decoderVar must be provided.
            if (decoderVar == null) {
                if (decoder == null) {
                    throw new IllegalArgumentException();
                }
                mDecoderVar = mm.param(3);
            } else {
                if (decoder != null) {
                    throw new IllegalArgumentException();
                }
                mDecoderVar = decoderVar;
            }

            mKeyCodecs = ColumnCodec.bind(rowGen.keyCodecs(), mm);
            mValueCodecs = ColumnCodec.bind(rowGen.valueCodecs(), mm);

            mPass = mm.label();
            mFail = mm.label();
        }

        /**
         * Emits the terminal pass/fail code: fail returns null; pass decodes the full row
         * and returns it.
         */
        void done() {
            mFail.here();
            mMaker.return_(null);

            mPass.here();

            // FIXME: Some columns may have already been decoded, so don't double decode them.

            var tableVar = mMaker.var(mTableClass);
            var rowVar = mMaker.param(2).cast(mRowClass);
            Label hasRow = mMaker.label();
            rowVar.ifNe(null, hasRow);
            rowVar.set(mMaker.new_(mRowClass));
            hasRow.here();
            tableVar.invoke("decodePrimaryKey", rowVar, mMaker.param(0));

            // Invoke the schema-specific decoder directly, instead of calling the decodeValue
            // method which redundantly examines the schema version and switches on it.
            var valueVar = mMaker.param(1); // param(1) is the value byte array
            if (mDecoder != null) {
                mMaker.invoke(mDecoder, rowVar, valueVar);
            } else {
                mMaker.var(mTableClass).invoke("decodeValue", rowVar, valueVar);
            }

            // Call the generated filter class, which has access to the inherited markAllClean
            // method.
            mDecoderVar.invoke("markAllClean", rowVar);

            mMaker.return_(rowVar);
        }

        @Override
        public void visit(OrFilter filter) {
            final Label originalFail = mFail;

            RowFilter[] subFilters = filter.subFilters();

            if (subFilters.length == 0) {
                mMaker.goto_(originalFail);
                return;
            }

            // Each sub-filter falls through to the next one on failure.
            mFail = mMaker.label();
            subFilters[0].accept(this);
            mFail.here();

            // Only the state observed on the left tree path can be preserved, because it's
            // guaranteed to have executed.
            final int hk = mHighestLocatedKey;
            final int hv = mHighestLocatedValue;

            for (int i=1; i<subFilters.length; i++) {
                mFail = mMaker.label();
                subFilters[i].accept(this);
                mFail.here();
            }

            resetHighestLocatedKey(hk);
            resetHighestLocatedValue(hv);

            mMaker.goto_(originalFail);

            mFail = originalFail;
        }

        @Override
        public void visit(AndFilter filter) {
            final Label originalPass = mPass;

            RowFilter[] subFilters = filter.subFilters();

            if (subFilters.length == 0) {
                mMaker.goto_(originalPass);
                return;
            }

            // Each sub-filter falls through to the next one on success.
            mPass = mMaker.label();
            subFilters[0].accept(this);
            mPass.here();

            // Only the state observed on the left tree path can be preserved, because it's
            // guaranteed to have executed.
            final int hk = mHighestLocatedKey;
            final int hv = mHighestLocatedValue;

            for (int i=1; i<subFilters.length; i++) {
                mPass = mMaker.label();
                subFilters[i].accept(this);
                mPass.here();
            }

            resetHighestLocatedKey(hk);
            resetHighestLocatedValue(hv);

            mMaker.goto_(originalPass);

            mPass = originalPass;
        }

        @Override
        public void visit(ColumnToArgFilter filter) {
            ColumnInfo colInfo = filter.column();
            int op = filter.operator();
            Variable argObjVar = mDecoderVar; // contains the arg fields prepared earlier
            int argNum = filter.argument();

            Integer colNum = columnNumberFor(colInfo.name);

            if (colNum != null) {
                ColumnCodec codec = codecFor(colNum);
                LocatedColumn located = decodeColumn(colNum, colInfo, true);
                Object decoded = located.mDecodedQuick;
                if (decoded != null) {
                    // Quick form: compare against the encoded bytes without full decoding.
                    codec.filterQuickCompare(colInfo, located.mSrcVar, located.mOffsetVar,
                                             op, decoded, argObjVar, argNum, mPass, mFail);
                } else {
                    var argField = argObjVar.field(ColumnCodec.argFieldName(colInfo, argNum));
                    CompareUtils.compare(mMaker, colInfo, located.mDecodedVar,
                                         colInfo, argField, op, mPass, mFail);
                }
            } else {
                // Column doesn't exist in the row, so compare against a default. This code
                // assumes that value codecs always define an arg field which preserves the
                // original argument, possibly converted to the correct type.
                var argField = argObjVar.field(ColumnCodec.argFieldName(colInfo, argNum));
                var columnVar = mMaker.var(colInfo.type);
                Converter.setDefault(mMaker, colInfo, columnVar);
                CompareUtils.compare(mMaker, colInfo, columnVar,
                                     colInfo, argField, op, mPass, mFail);
            }
        }

        @Override
        public void visit(ColumnToColumnFilter filter) {
            ColumnInfo aColInfo = filter.column();
            int op = filter.operator();
            ColumnInfo bColInfo = filter.otherColumn();

            Integer aColNum = columnNumberFor(aColInfo.name);
            Integer bColNum = columnNumberFor(bColInfo.name);

            if (aColNum == null && bColNum == null) {
                // Comparing two columns that don't exist. If this filter is part of a chain,
                // the rest will be dead code.
                mMaker.goto_(CompareUtils.selectNullColumnToNullArg(op, mPass, mFail));
                return;
            }

            Variable aVar = decodeColumnOrDefault(aColNum, aColInfo);
            Variable bVar = decodeColumnOrDefault(bColNum, bColInfo);

            if (aVar.classType() != bVar.classType()) {
                // Mismatched types: convert both sides to the common comparison type.
                ColumnInfo cColInfo = filter.common();

                var aConvertedVar = mMaker.var(cColInfo.type);
                Converter.convertLossy(mMaker, aColInfo, aVar, cColInfo, aConvertedVar);
                aColInfo = cColInfo;
                aVar = aConvertedVar;

                var bConvertedVar = mMaker.var(cColInfo.type);
                Converter.convertLossy(mMaker, bColInfo, bVar, cColInfo, bConvertedVar);
                bColInfo = cColInfo;
                bVar = bConvertedVar;
            }

            CompareUtils.compare(mMaker, aColInfo, aVar, bColInfo, bVar, op, mPass, mFail);
        }

        // Decodes the column when present in the row, else yields its default value.
        private Variable decodeColumnOrDefault(Integer colNum, ColumnInfo colInfo) {
            if (colNum != null) {
                return decodeColumn(colNum, colInfo, false).mDecodedVar;
            } else {
                var colVar = mMaker.var(colInfo.type);
                Converter.setDefault(mMaker, colInfo, colVar);
                return colVar;
            }
        }

        private Integer columnNumberFor(String colName) {
            return mRowGen.columnNumbers().get(colName);
        }

        private ColumnCodec codecFor(int colNum) {
            ColumnCodec[] codecs = mKeyCodecs;
            return colNum < codecs.length ? codecs[colNum]
                : mValueCodecs[colNum - codecs.length];
        }

        /**
         * Decodes a column and remembers it if requested again later.
         *
         * @param colInfo current definition for column
         * @param quick allow quick decode
         */
        private LocatedColumn decodeColumn(int colNum, ColumnInfo colInfo, boolean quick) {
            Variable srcVar;
            LocatedColumn[] located;
            ColumnCodec[] codecs = mKeyCodecs;
            int highestNum;

            init: {
                int startOffset;
                if (colNum < codecs.length) {
                    // Key column.
                    highestNum = mHighestLocatedKey;
                    srcVar = mMaker.param(0);
                    if ((located = mLocatedKeys) != null) {
                        break init;
                    }
                    mLocatedKeys = located = new LocatedColumn[mRowGen.info.keyColumns.size()];
                    startOffset = 0;
                } else {
                    // Value column.
                    colNum -= codecs.length;
                    highestNum = mHighestLocatedValue;
                    srcVar = mMaker.param(1);
                    codecs = mValueCodecs;
                    if ((located = mLocatedValues) != null) {
                        break init;
                    }
                    mLocatedValues = located
                        = new LocatedColumn[mRowGen.info.valueColumns.size()];
                    startOffset = mValueOffset;
                }
                // First access: the first column is located at the start offset.
                located[0] = new LocatedColumn();
                located[0].located(srcVar, mMaker.var(int.class).set(startOffset));
            }

            if (colNum < highestNum) {
                LocatedColumn col = located[colNum];
                if (col.isDecoded(quick)) {
                    return col;
                }
                // Regress the highest to force the column to be decoded. The highest field
                // won't regress, since the field assignment (at the end) checks this.
                highestNum = colNum;
            }

            if (!located[highestNum].isLocated()) {
                throw new AssertionError();
            }

            // Skip over columns before the target, then decode the target itself.
            for (; highestNum <= colNum; highestNum++) {
                // Offset will be mutated, and so a copy must be made before calling decode.
                Variable offsetVar = located[highestNum].mOffsetVar;

                LocatedColumn next;
                copyOffsetVar: {
                    if (highestNum + 1 >= located.length) {
                        next = null;
                    } else {
                        next = located[highestNum + 1];
                        if (next == null) {
                            next = new LocatedColumn();
                            located[highestNum + 1] = next;
                        } else if (!next.isLocated()) {
                            // Can recycle the offset variable because it's not used.
                            Variable freeVar = next.mOffsetVar;
                            if (freeVar != null) {
                                freeVar.set(offsetVar);
                                offsetVar = freeVar;
                                break copyOffsetVar;
                            }
                        }
                    }
                    offsetVar = offsetVar.get();
                }

                ColumnCodec codec = codecs[highestNum];
                Variable endVar = null;

                if (highestNum < colNum) {
                    codec.decodeSkip(srcVar, offsetVar, endVar);
                } else if (quick && codec.canFilterQuick(colInfo)) {
                    Object decoded = codec.filterQuickDecode(colInfo, srcVar, offsetVar, endVar);
                    located[highestNum].decodedQuick(decoded);
                } else {
                    Variable dstVar = mMaker.var(colInfo.type);
                    Converter.decode(mMaker, srcVar, offsetVar, endVar, codec, colInfo, dstVar);
                    located[highestNum].decodedVar(dstVar);
                }

                if (next != null && !next.isLocated()) {
                    // The decode call incremented offsetVar as a side-effect. Note that if the
                    // column is already located, then newly discovered offset will match. It
                    // can simply be replaced, but by discarding it, the compiler can discard
                    // some of the redundant steps which computed the offset again.
                    next.located(srcVar, offsetVar);
                }
            }

            highestNum = Math.min(highestNum, located.length - 1);

            if (located == mLocatedKeys) {
                if (highestNum > mHighestLocatedKey) {
                    mHighestLocatedKey = highestNum;
                }
            } else {
                if (highestNum > mHighestLocatedValue) {
                    mHighestLocatedValue = highestNum;
                }
            }

            return located[colNum];
        }

        /**
         * Reset the highest located key column. The trailing LocatedColumn instances can be
         * re-used, reducing the number of Variables created.
         */
        private void resetHighestLocatedKey(int colNum) {
            if (colNum < mHighestLocatedKey) {
                mHighestLocatedKey = colNum;
                finishReset(mLocatedKeys, colNum);
            }
        }

        /**
         * Reset the highest located value column. The trailing LocatedColumn instances can be
         * re-used, reducing the number of Variables created.
         *
         * @param colNum column number among all value columns
         */
        private void resetHighestLocatedValue(int colNum) {
            if (colNum < mHighestLocatedValue) {
                mHighestLocatedValue = colNum;
                finishReset(mLocatedValues, colNum);
            }
        }

        // Marks every LocatedColumn above colNum as unlocated, stopping at the first gap.
        private static void finishReset(LocatedColumn[] columns, int colNum) {
            while (++colNum < columns.length) {
                var col = columns[colNum];
                if (col == null) {
                    break;
                }
                col.unlocated();
            }
        }
    }

    /**
     * Tracks the generated-code state of one column: unlocated, located (offset known),
     * or decoded (value known, in quick or full form).
     */
    private static class LocatedColumn {
        // Used by mState field.
        private static final int UNLOCATED = 0, LOCATED = 1, DECODED = 2;

        private int mState;

        // Source byte array. Is valid when mState is LOCATED or DECODED.
        Variable mSrcVar;

        // Offset into the byte array. Is valid when mState is LOCATED or DECODED.
        Variable mOffsetVar;

        // Is only valid when mState is DECODED and canFilterQuick returned true.
        Object mDecodedQuick;

        // Is only valid when mState is DECODED.
        Variable mDecodedVar;

        LocatedColumn() {
        }

        boolean isLocated() {
            return mState >= LOCATED;
        }

        /**
         * @param quick when true, accepts quick or fully decoded forms; when false, only
         * accepts the fully decoded form
         */
        boolean isDecoded(boolean quick) {
            return mState == DECODED && (quick || mDecodedVar != null);
        }

        /**
         * @param srcVar source byte array
         * @param offsetVar start offset into the byte array
         */
        void located(Variable srcVar, Variable offsetVar) {
            mSrcVar = srcVar;
            mOffsetVar = offsetVar;
            mState = LOCATED;
        }

        /**
         * @param decoded object returned from ColumnCodec.filterQuickDecode
         */
        void decodedQuick(Object decoded) {
            if (mState == UNLOCATED) {
                throw new IllegalStateException();
            }
            mDecodedQuick = decoded;
            mState = DECODED;
        }

        void decodedVar(Variable decodedVar) {
            if (mState == UNLOCATED) {
                throw new IllegalStateException();
            }
            mDecodedVar = decodedVar;
            mState = DECODED;
        }

        // Returns this instance to the unlocated state so its Variables can be recycled.
        void unlocated() {
            mDecodedQuick = null;
            mDecodedVar = null;
            mState = UNLOCATED;
        }
    }
}
package org.cyclops.fluidconverters; import org.cyclops.cyclopscore.config.configurable.ConfigurableBlock; import org.cyclops.cyclopscore.config.configurable.ConfigurableBlockContainer; import org.cyclops.cyclopscore.config.extendedconfig.ExtendedConfig; /** * Class that can hold basic static things that are better not hard-coded * like mod details, texture paths, ID's... * @author immortaleeb * */ @SuppressWarnings("javadoc") public class Reference { // Mod info public static final String MOD_ID = "fluidConverters"; public static final String MOD_NAME = "FluidConverters"; public static final String MOD_VERSION = "@VERSION@"; public static final String MOD_BUILD_NUMBER = "@BUILD_NUMBER@"; public static final String MOD_CHANNEL = MOD_ID; public static final String MOD_MC_VERSION = "@MC_VERSION@"; public static final String GA_TRACKING_ID = "UA-65307010-6"; public static final String VERSION_URL = "https://raw.githubusercontent.com/CyclopsMC/Versions/master/1.8/Todo.txt"; // Paths public static final String TEXTURE_PATH_GUI = "textures/gui/"; public static final String TEXTURE_PATH_SKINS = "textures/skins/"; public static final String TEXTURE_PATH_MODELS = "textures/models/"; public static final String TEXTURE_PATH_ENTITIES = "textures/entities/"; public static final String TEXTURE_PATH_GUIBACKGROUNDS = "textures/gui/title/background/"; public static final String TEXTURE_PATH_ITEMS = "textures/items/"; public static final String TEXTURE_PATH_PARTICLES = "textures/particles/"; public static final String MODEL_PATH = "models/"; public static final String ASSETS_PATH = "/assets/" + MOD_ID + "/"; // MOD ID's public static final String MOD_FORGE = "Forge"; public static final String MOD_FORGE_VERSION = "@FORGE_VERSION@"; public static final String MOD_FORGE_VERSION_MIN = "11.14.3.1494"; public static final String MOD_CYCLOPSCORE = "cyclopscore"; public static final String MOD_CYCLOPSCORE_VERSION = "@CYCLOPSCORE_VERSION@"; public static final String 
MOD_CYCLOPSCORE_VERSION_MIN = "0.3.0"; public static final String MOD_WAILA = "Waila"; // Dependencies public static final String MOD_DEPENDENCIES = "required-after:" + MOD_FORGE + "@[" + MOD_FORGE_VERSION_MIN + ",);" + "required-after:" + MOD_CYCLOPSCORE + "@[" + MOD_CYCLOPSCORE_VERSION_MIN + ",);"; /** * Adds "modid:" as a prefix to the given string. * @return The given string prefixed with "modid:" */ public static final String prefixModId(String s) { return MOD_ID + ":" + s; } /** * Prepends "modid:" to the named id of the given config. * @param extendedConfig config which provides a namedid * @return named id prepended with "modid:" */ public static final String prefixModId(ExtendedConfig<?> extendedConfig) { return prefixModId(extendedConfig.getNamedId()); } /** * Prepends "modid:" to the named id of the given block * @param block configurable block which provides a namedid * @return named id of the block prepended with "modid:" */ public static final String prefixModId(ConfigurableBlock block) { return prefixModId(block.getConfig()); } /** * Prepends "modid:" to the named id of the given block container * @param block configurable block container which provides a namedid * @return named id of the block container prepended with "modid:" */ public static final String prefixModId(ConfigurableBlockContainer block) { return prefixModId(block.getConfig()); } }
package org.deeplearning4j;

import org.apache.commons.io.FileUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.deeplearning4j.datasets.iterator.impl.MnistDataSetIterator;
import org.deeplearning4j.eval.Evaluation;
import org.deeplearning4j.models.featuredetectors.rbm.RBM;
import org.deeplearning4j.nn.api.OptimizationAlgorithm;
import org.deeplearning4j.nn.conf.MultiLayerConfiguration;
import org.deeplearning4j.nn.conf.NeuralNetConfiguration;
import org.deeplearning4j.nn.conf.OutputPreProcessor;
import org.deeplearning4j.nn.conf.preprocessor.BinomialSamplingPreProcessor;
import org.deeplearning4j.nn.layers.OutputLayer;
import org.deeplearning4j.nn.layers.factory.LayerFactories;
import org.deeplearning4j.nn.multilayer.MultiLayerNetwork;
import org.deeplearning4j.spark.impl.multilayer.SparkDl4jMultiLayer;
import org.kohsuke.args4j.CmdLineException;
import org.kohsuke.args4j.CmdLineParser;
import org.kohsuke.args4j.Option;
import org.nd4j.linalg.api.activation.Activations;
import org.nd4j.linalg.dataset.DataSet;
import org.nd4j.linalg.lossfunctions.LossFunctions;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.File;
import java.util.*;

/**
 * Example which trains a multi-layer network on MNIST using Spark, evaluates it against the
 * same data, and writes the configuration (JSON) and parameters to the output path.
 * Command-line options are bound to fields via args4j.
 */
public class DistributedExample {

    // Mini-batch size used by the network configuration.
    @Option(name="--batchSize")
    private int batchSize = 10000;
    // Value for spark.akka.frameSize (message size limit).
    @Option(name = "--frameSize")
    private int frameSize = 600000;
    // Spark master URL; required.
    @Option(name = "--masterUrl",required = true)
    private String masterUrl;
    @Option(name = "--iterations")
    private int iterations = 5;
    // Base path for the ".json" and ".params" output files.
    @Option(name = "--output")
    private String outputPath = "mnist.ser";
    @Option(name = "--avgiteration")
    private boolean averageEachIteration = false;

    private static Logger log = LoggerFactory.getLogger(DistributedExample.class);

    /**
     * Parses command-line args into this instance's fields.
     *
     * NOTE(review): on a parse failure this only prints usage and logs — execution
     * continues with masterUrl still null, which presumably fails later inside
     * SparkConf.setMaster. Consider exiting here instead; confirm intended behavior.
     */
    public DistributedExample(String[] args) {
        CmdLineParser parser = new CmdLineParser(this);
        try {
            parser.parseArgument(args);
        } catch (CmdLineException e) {
            parser.printUsage(System.err);
            log.error("Unable to parse args",e);
        }
    }

    public static void main(String[] args) throws Exception {
        DistributedExample app = new DistributedExample(args);

        // set to test mode
        SparkConf sparkConf = new SparkConf().set("spark.executor.extraJavaOptions","-Ddtype=float")
                .setMaster(app.masterUrl).set("spark.akka.frameSize", String.valueOf(app.frameSize))
                .set(SparkDl4jMultiLayer.AVERAGE_EACH_ITERATION, String.valueOf(app.averageEachIteration))
                .setAppName("mnist");

        System.out.println("Setting up Spark Context...");
        JavaSparkContext sc = new JavaSparkContext(sparkConf);

        // Binomial sampling pre-processors for the first three layers (layers 0-2).
        Map<Integer,OutputPreProcessor> preProcessorMap = new HashMap<>();
        preProcessorMap.put(0,new BinomialSamplingPreProcessor());
        preProcessorMap.put(1,new BinomialSamplingPreProcessor());
        preProcessorMap.put(2,new BinomialSamplingPreProcessor());

        // 4-layer network: 784 inputs (28x28 MNIST pixels), 10 outputs (digit classes).
        // The override turns the last layer (index 3) into a softmax output layer.
        MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().iterations(app.iterations).momentum(0.5)
                .l2(2e-4).regularization(true).optimizationAlgo(OptimizationAlgorithm.ITERATION_GRADIENT_DESCENT)
                .nIn(784).nOut(10).layerFactory(LayerFactories.getFactory(RBM.class)).batchSize(app.batchSize).momentumAfter(Collections.singletonMap(20,0.9))
                .list(4).hiddenLayerSizes(600, 500, 400).preProcessors(preProcessorMap)
                .override(new NeuralNetConfiguration.ConfOverride() {
                    @Override
                    public void override(int i, NeuralNetConfiguration.Builder builder) {
                        if (i == 3) {
                            builder.activationFunction(Activations.softMaxRows());
                            builder.layerFactory(LayerFactories.getFactory(OutputLayer.class));
                            builder.lossFunction(LossFunctions.LossFunction.MCXENT);
                        }
                    }
                }).build();

        System.out.println("Initializing network");
        SparkDl4jMultiLayer master = new SparkDl4jMultiLayer(sc,conf);

        // Load the full 60000-example MNIST set in a single batch, copy each example,
        // and shuffle before parallelizing.
        DataSet d = new MnistDataSetIterator(60000,60000).next();
        List<DataSet> next = new ArrayList<>();
        for(int i = 0; i < d.numExamples(); i++)
            next.add(d.get(i).copy());
        Collections.shuffle(next);

        JavaRDD<DataSet> data = sc.parallelize(next);

        MultiLayerNetwork network = master.fitDataSet(data);

        // NOTE(review): evaluation runs against the training data, so the reported
        // accuracy is not a generalization estimate.
        Evaluation evaluation = new Evaluation();
        evaluation.eval(d.getLabels(),network.output(d.getFeatureMatrix()));
        System.out.println("Averaged once " + evaluation.stats());

        // Persist the configuration as JSON and the parameters as comma-joined doubles.
        String json = conf.toJson();
        FileUtils.writeStringToFile(new File(app.outputPath + ".json"),json);
        FileUtils.writeStringToFile(new File(app.outputPath + ".params"), StringUtils.join(network.params().data().asDouble(),','));
    }
}
package org.dita.dost.reader;

import static org.dita.dost.util.Constants.*;
import static org.dita.dost.util.FileUtils.*;
import static org.dita.dost.util.URLUtils.*;

import java.io.File;
import java.net.URI;
import java.util.Collections;
import java.util.Hashtable;
import java.util.Map;
import java.util.Properties;

import org.dita.dost.log.MessageUtils;
import org.dita.dost.util.FileUtils;
import org.dita.dost.util.StringUtils;
import org.xml.sax.Attributes;
import org.xml.sax.SAXException;
import org.xml.sax.XMLReader;

/**
 * Class for reading conref push content.
 *
 * <p>Parses a DITA file, collects the content of elements marked with
 * {@code @conaction} (pushbefore / pushafter / pushreplace / mark) into a
 * serialized-XML string, and records it in a per-file push table keyed by the
 * resolved target file path so later stages can apply the pushed content.</p>
 */
public final class ConrefPushReader extends AbstractXMLReader {

    /** Conaction mark value */
    private static final String ATTR_CONACTION_VALUE_MARK = "mark";
    /** Conaction push after value */
    private static final String ATTR_CONACTION_VALUE_PUSHAFTER = "pushafter";
    /** Conaction push before value */
    private static final String ATTR_CONACTION_VALUE_PUSHBEFORE = "pushbefore";
    /** Conaction push replace value */
    private static final String ATTR_CONACTION_VALUE_PUSHREPLACE = "pushreplace";

    /** Push table: target file path -> (target element location + push type -> pushed content). */
    private final Hashtable<String, Hashtable<String, String>> pushtable;
    /** SAX parser used to read the input files. */
    private final XMLReader reader;
    /** Directory of the file currently being parsed; used to resolve relative target paths. */
    private File fileDir = null;
    /** File name (no directory) of the file currently being parsed. */
    private File parsefilename = null;
    /**
     * Buffer for the content being copied to the target. Relative href values
     * are rewritten while copying; if @conref occurs inside pushed content the
     * target name should be recorded so that it could be added to conreflist
     * for conref resolution.
     */
    private StringBuffer pushcontent = null;
    /** Whether the SAX handler is currently recording push content into pushcontent. */
    private boolean start = false;
    /**
     * Element nesting depth relative to the root element of the pushed
     * fragment. In endElement(...) start is turned off when level reaches
     * zero, i.e. at the end tag of the starting element.
     */
    private int level = 0;
    /**
     * Target of the conref push. If a pushafter action is seen with no target
     * recorded beforehand, an error is reported.
     */
    private URI target = null;
    /** Current push type; consulted in endElement(...) to distinguish pushafter from replace. */
    private String pushType = null;

    /**
     * Get push table
     *
     * @return unmodifiable push table
     */
    public Map<String, Hashtable<String, String>> getPushMap() {
        return Collections.unmodifiableMap(pushtable);
    }

    /**
     * Parse one file, resetting the per-file recording state first.
     *
     * @param filename filename
     */
    @Override
    public void read(final File filename) {
        fileDir = filename.getParentFile().getAbsoluteFile();
        parsefilename = new File(filename.getName());
        start = false;
        pushcontent = new StringBuffer(INT_256);
        pushType = null;
        try {
            reader.parse(filename.toURI().toString());
        } catch (final Exception e) {
            logger.logError(e.getMessage(), e);
        }
    }

    /**
     * Constructor. Builds the push table and configures the SAX reader with
     * namespace support and this object as content/lexical handler.
     */
    public ConrefPushReader() {
        pushtable = new Hashtable<String, Hashtable<String, String>>();
        try {
            reader = StringUtils.getXMLReader();
            reader.setFeature(FEATURE_NAMESPACE_PREFIX, true);
            reader.setFeature(FEATURE_NAMESPACE, true);
            reader.setProperty(LEXICAL_HANDLER_PROPERTY, this);
            reader.setContentHandler(this);
        } catch (final Exception e) {
            throw new RuntimeException("Failed to initialize XML parser: " + e.getMessage(), e);
        }
    }

    @Override
    public void startElement(final String uri, final String localName, final String name,
            final Attributes atts) throws SAXException {
        if (start) {
            //if start is true, we need to record content in pushcontent
            //also we need to add level to make sure start is turn off
            //at the corresponding end element
            level++;
            putElement(pushcontent, name, atts, false);
        }

        final String conactValue = atts.getValue(ATTRIBUTE_NAME_CONACTION);
        if (!start && conactValue != null) {
            if (ATTR_CONACTION_VALUE_PUSHBEFORE.equalsIgnoreCase(conactValue)) {
                if (pushcontent.length() != 0) {
                    // there are redundant "pushbefore", create a new pushcontent and emit a warning message.
                    pushcontent = new StringBuffer();
                    logger.logWarn(MessageUtils.getInstance().getMessage("DOTJ044W",
                            atts.getValue(ATTRIBUTE_NAME_XTRF), atts.getValue(ATTRIBUTE_NAME_XTRC)).toString());
                }
                start = true;
                level = 0;
                level++;
                putElement(pushcontent, name, atts, true);
                pushType = ATTR_CONACTION_VALUE_PUSHBEFORE;
            } else if (ATTR_CONACTION_VALUE_PUSHAFTER.equalsIgnoreCase(conactValue)) {
                start = true;
                level = 0;
                level++;
                if (target == null) {
                    // pushafter requires a preceding conaction="mark" that recorded the target
                    logger.logError(MessageUtils.getInstance().getMessage("DOTJ039E",
                            atts.getValue(ATTRIBUTE_NAME_XTRF), atts.getValue(ATTRIBUTE_NAME_XTRC)).toString());
                } else {
                    putElement(pushcontent, name, atts, true);
                    pushType = ATTR_CONACTION_VALUE_PUSHAFTER;
                }
            } else if (ATTR_CONACTION_VALUE_PUSHREPLACE.equalsIgnoreCase(conactValue)) {
                start = true;
                level = 0;
                level++;
                target = toURI(atts.getValue(ATTRIBUTE_NAME_CONREF));
                if (target == null) {
                    // pushreplace carries its own target in @conref
                    logger.logError(MessageUtils.getInstance().getMessage("DOTJ040E",
                            atts.getValue(ATTRIBUTE_NAME_XTRF), atts.getValue(ATTRIBUTE_NAME_XTRC)).toString());
                } else {
                    pushType = ATTR_CONACTION_VALUE_PUSHREPLACE;
                    putElement(pushcontent, name, atts, true);
                }
            } else if (ATTR_CONACTION_VALUE_MARK.equalsIgnoreCase(conactValue)) {
                target = toURI(atts.getValue(ATTRIBUTE_NAME_CONREF));
                if (target != null && pushcontent != null && pushcontent.length() > 0
                        && ATTR_CONACTION_VALUE_PUSHBEFORE.equals(pushType)) {
                    //pushcontent != null means it is pushbefore action
                    //we need to add target and content to pushtable
                    replaceContent();
                    addtoPushTable(target, pushcontent.toString(), pushType);
                    pushcontent = new StringBuffer(INT_256);
                    target = null;
                    pushType = null;
                }
            }
        }
        //else if (pushcontent != null && pushcontent.length() > 0 && level == 0){
        //if there is no element with conaction="mark" after
        //one with conaction="pushbefore", report syntax error
    }

    /**
     * Rewrite all href and conref values inside pushcontent relative to the
     * recorded target. This is needed for "pushbefore" actions because the
     * target is not yet known while the content is being recorded.
     */
    private void replaceContent() {
        int index = 0;
        int nextindex = 0;
        int hrefindex = pushcontent.indexOf("href=\"", index);
        int conrefindex = pushcontent.indexOf("conref=\"", index);
        final StringBuffer resultBuffer = new StringBuffer(INT_256);
        if (hrefindex < 0 && conrefindex < 0) {
            // nothing to rewrite
            return;
        }
        while (hrefindex >= 0 || conrefindex >= 0) {
            // advance to whichever attribute occurs first
            if (hrefindex > 0 && conrefindex > 0) {
                nextindex = hrefindex < conrefindex ? hrefindex : conrefindex;
            } else if (hrefindex > 0) {
                nextindex = hrefindex;
            } else if (conrefindex > 0) {
                nextindex = conrefindex;
            }
            final int valueindex = pushcontent.indexOf(QUOTATION, nextindex) + 1;
            resultBuffer.append(pushcontent.substring(index, valueindex));
            resultBuffer.append(replaceURL(pushcontent.substring(valueindex,
                    pushcontent.indexOf(QUOTATION, valueindex))));
            index = pushcontent.indexOf(QUOTATION, valueindex);
            if (hrefindex > 0) {
                hrefindex = pushcontent.indexOf("href=\"", index);
            }
            if (conrefindex > 0) {
                conrefindex = pushcontent.indexOf("conref=\"", index);
            }
        }
        resultBuffer.append(pushcontent.substring(index));
        pushcontent = resultBuffer;
    }

    /**
     * Serialize a start tag (element name plus attributes) into a buffer.
     *
     * @param buf buffer
     * @param elemName element name
     * @param atts attributes
     * @param removeConref whether to strip conref information (@conref and
     *        @conaction) from the element while copying it to pushcontent;
     *        true means strip, false means keep
     */
    private void putElement(final StringBuffer buf, final String elemName,
            final Attributes atts, final boolean removeConref) {
        int index = 0;
        buf.append(LESS_THAN).append(elemName);
        for (index = 0; index < atts.getLength(); index++) {
            if (!removeConref || !ATTRIBUTE_NAME_CONREF.equals(atts.getQName(index))
                    && !ATTRIBUTE_NAME_CONACTION.equals(atts.getQName(index))) {
                buf.append(STRING_BLANK);
                buf.append(atts.getQName(index)).append(EQUAL).append(QUOTATION);
                String value = atts.getValue(index);
                value = StringUtils.escapeXML(value);
                if (ATTRIBUTE_NAME_HREF.equals(atts.getQName(index))
                        || ATTRIBUTE_NAME_CONREF.equals(atts.getQName(index))) {
                    // adjust href for pushbefore and replace
                    value = replaceURL(value);
                }
                buf.append(value).append(QUOTATION);
            }
        }
        //id attribute should only be added to the starting element
        //which dosen't have id attribute set
        if (ATTR_CONACTION_VALUE_PUSHREPLACE.equals(pushType)
                && atts.getValue(ATTRIBUTE_NAME_ID) == null && level == 1) {
            final String fragment = target.getFragment();
            if (fragment == null) {
                //if there is no '#' in target string, report error
                logger.logError(MessageUtils.getInstance().getMessage("DOTJ041E", target.toString()).toString());
            } else {
                final String targetLoc = fragment;
                String id = "";
                //has element id
                if (targetLoc.contains(SLASH)) {
                    id = targetLoc.substring(targetLoc.lastIndexOf(SLASH) + 1);
                } else {
                    id = targetLoc;
                }
                //add id attribute
                buf.append(STRING_BLANK);
                buf.append(ATTRIBUTE_NAME_ID).append(EQUAL).append(QUOTATION);
                buf.append(id).append(QUOTATION);
            }
        }
        buf.append(GREATER_THAN);
    }

    /**
     * Rewrite a URL relative to the push target; absolute paths, full URLs
     * and same-file fragments are returned unchanged.
     *
     * @param value string
     * @return URL
     */
    private String replaceURL(final String value) {
        if (value == null) {
            return null;
        } else if (target == null || FileUtils.isAbsolutePath(value)
                || value.contains(COLON_DOUBLE_SLASH) || value.startsWith(SHARP)) {
            return value;
        } else {
            final String source = FileUtils.resolveFile(fileDir, target).getPath();
            final String urltarget = FileUtils.resolveTopic(fileDir, value);
            return FileUtils.getRelativeUnixPath(source, urltarget);
        }
    }

    /**
     * Record pushed content in the push table.
     *
     * @param target target
     * @param pushcontent content
     * @param type push type
     */
    private void addtoPushTable(URI target, final String pushcontent, final String type) {
        if (target.getFragment() == null) {
            //if there is no '#' in target string, report error
            logger.logError(MessageUtils.getInstance().getMessage("DOTJ041E", target.toString()).toString());
            return;
        }

        if (target.getPath().isEmpty()) {
            //means conref the file itself
            target = toURI(parsefilename.getPath() + target);
        }
        final String key = FileUtils.resolveFile(fileDir, target).getPath();
        Hashtable<String, String> table = null;
        if (pushtable.containsKey(key)) {
            //if there is something else push to the same file
            table = pushtable.get(key);
        } else {
            //if there is nothing else push to the same file
            table = new Hashtable<String, String>();
            pushtable.put(key, table);
        }

        final String targetLoc = SHARP + target.getFragment();
        final String addon = STICK + type;

        if (table.containsKey(targetLoc + addon)) {
            //if there is something else push to the same target
            //append content if type is 'pushbefore' or 'pushafter'
            //report error if type is 'replace'
            if (ATTR_CONACTION_VALUE_PUSHREPLACE.equalsIgnoreCase(type)) {
                logger.logError(MessageUtils.getInstance().getMessage("DOTJ042E", target.toString()).toString());
                return;
            } else {
                table.put(targetLoc + addon, table.get(targetLoc + addon) + pushcontent);
            }
        } else {
            //if there is nothing else push to the same target
            table.put(targetLoc + addon, pushcontent);
        }
    }

    @Override
    public void characters(final char[] ch, final int start, final int length) throws SAXException {
        if (this.start) {
            pushcontent.append(StringUtils.escapeXML(ch, start, length));
        }
    }

    @Override
    public void endElement(final String uri, final String localName, final String name)
            throws SAXException {
        if (start) {
            // FIX: decrement the nesting level here; the original source had a
            // garbled statement ("level" with no operator/semicolon), which does
            // not compile and would never let the recorder terminate.
            level--;
            pushcontent.append(LESS_THAN).append(SLASH).append(name).append(GREATER_THAN);
        }
        if (level == 0) {
            //turn off start if we reach the end tag of staring element
            start = false;
            if (ATTR_CONACTION_VALUE_PUSHAFTER.equals(pushType)
                    || ATTR_CONACTION_VALUE_PUSHREPLACE.equals(pushType)) {
                //if it is pushafter or replace, we need to record content in pushtable
                //if target == null we have already reported error in startElement;
                if (target != null) {
                    addtoPushTable(target, pushcontent.toString(), pushType);
                    pushcontent = new StringBuffer(INT_256);
                    target = null;
                    pushType = null;
                }
            }
        }
    }
}
package org.jboss.virtual.spi;

import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.net.URL;
import java.security.AccessController;
import java.security.PrivilegedAction;

import org.jboss.logging.Logger;
import org.jboss.virtual.VFS;
import org.jboss.virtual.VFSUtils;
import org.jboss.virtual.VirtualFile;

/**
 * Simple vfs cache factory.
 *
 * <p>Lazily creates a single {@link VFSCache}, choosing the implementation
 * class from the {@code VFSUtils.VFS_CACHE_KEY} system property and falling
 * back to a no-op cache when the property is unset or instantiation fails.</p>
 *
 * @author <a href="mailto:ales.justin@jboss.com">Ales Justin</a>
 */
public class VFSCacheFactory
{
   private static final Object lock = new Object();
   private static final Logger log = Logger.getLogger(VFSCacheFactory.class);

   // volatile is required for the double-checked locking in getInstance():
   // without it, a thread could observe a partially constructed cache.
   private static volatile VFSCache instance;

   private VFSCacheFactory()
   {
   }

   /**
    * Get VFS cache instance.
    *
    * <p>Thread-safe lazy initialization. The null re-check under the lock is
    * essential: without it two racing threads would each create (and start)
    * a cache, the second silently overwriting the first.</p>
    *
    * @return the vfs cache instance
    */
   public static VFSCache getInstance()
   {
      if (instance == null)
      {
         synchronized (lock)
         {
            // FIX: re-check under the lock; the original assigned
            // unconditionally, so concurrent callers could create and start
            // multiple caches and overwrite one another.
            if (instance == null)
               instance = AccessController.doPrivileged(new VFSCacheCreatorAction());
         }
      }
      return instance;
   }

   /**
    * Set instance.
    *
    * This should be used with care.
    * Better to leave it to getInstance method creation.
    *
    * @param cache cache instance to set
    * @throws IllegalArgumentException if a real (non-noop) instance is already set
    */
   public static void setInstance(VFSCache cache)
   {
      // Only a null instance or the no-op fallback may be replaced.
      if (cache != null && instance != null && instance instanceof NoopVFSCache == false)
         throw new IllegalArgumentException("Instance already set!");

      instance = cache;
   }

   /**
    * Privileged action that instantiates the cache class named by the
    * VFS_CACHE_KEY system property; falls back to NoopVFSCache on any failure.
    */
   private static class VFSCacheCreatorAction implements PrivilegedAction<VFSCache>
   {
      public VFSCache run()
      {
         try
         {
            String className = System.getProperty(VFSUtils.VFS_CACHE_KEY);
            if (className != null)
            {
               log.info("Initializing VFSCache [" + className + "] ...");
               ClassLoader cl = VFSCacheFactory.class.getClassLoader();
               Class<?> clazz = cl.loadClass(className);
               VFSCache cache = VFSCache.class.cast(clazz.newInstance());
               cache.start(); // start here, so we fall back to default no-op in case start fails
               return cache;
            }
         }
         catch (Throwable t)
         {
            log.warn("Exception instantiating VFS cache: " + t);
         }
         return new NoopVFSCache();
      }
   }

   /**
    * Noop cache.
    * Doesn't do any caching: every lookup goes straight to the VFS.
    */
   private static class NoopVFSCache implements VFSCache
   {
      public VirtualFile getFile(URI uri) throws IOException
      {
         return VFS.getRoot(uri);
      }

      public VirtualFile getFile(URL url) throws IOException
      {
         try
         {
            return getFile(VFSUtils.toURI(url));
         }
         catch (URISyntaxException e)
         {
            // Wrap as IOException, preserving the cause (pre-Java-6 style ctor).
            IOException ioe = new IOException();
            ioe.initCause(e);
            throw ioe;
         }
      }

      public void putContext(VFSContext context)
      {
      }

      public void removeContext(VFSContext context)
      {
      }

      public void start() throws Exception
      {
      }

      public void stop()
      {
      }
   }
}
package org.jenkins.tools.test.model;

import java.io.File;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import javax.xml.transform.Result;
import javax.xml.transform.Source;
import javax.xml.transform.Transformer;
import javax.xml.transform.TransformerFactory;
import javax.xml.transform.stream.StreamResult;
import javax.xml.transform.stream.StreamSource;

import hudson.maven.MavenEmbedder;
import hudson.maven.MavenEmbedderException;
import hudson.maven.MavenRequest;
import org.apache.commons.io.FileUtils;
import org.apache.maven.execution.AbstractExecutionListener;
import org.apache.maven.execution.ExecutionEvent;
import org.apache.maven.execution.MavenExecutionResult;
import org.codehaus.plexus.component.repository.exception.ComponentLookupException;
import org.jenkins.tools.test.exception.PomExecutionException;
import org.jenkins.tools.test.exception.PomTransformationException;
import org.springframework.core.io.ClassPathResource;

/**
 * Handle to the Maven POM of a checked-out project: can rewrite the POM's
 * parent coordinates via an XSL transformation and run Maven goals against it
 * through an embedded Maven instance.
 */
public class MavenPom {

    private File rootDir;
    private String pomFileName;

    /** Uses the conventional "pom.xml" inside the given project directory. */
    public MavenPom(File rootDir) {
        this(rootDir, "pom.xml");
    }

    /** Uses an explicitly named POM file inside the given project directory. */
    public MavenPom(File rootDir, String pomFileName) {
        this.rootDir = rootDir;
        this.pomFileName = pomFileName;
    }

    /**
     * Rewrite the POM's {@code <parent>} coordinates in place.
     *
     * <p>The current POM is first moved aside to {@code <pom>.backup}, then the
     * bundled {@code mavenParentReplacer.xsl} stylesheet is applied to the
     * backup, writing the result back to the original POM path.</p>
     *
     * @param newParentGroupId    groupId to set on the parent element
     * @param newParentArtifactId artifactId to set on the parent element
     * @param newParentVersion    version to set on the parent element
     * @throws PomTransformationException if the move or the XSL transform fails
     */
    public void transformPom(String newParentGroupId, String newParentArtifactId,
            String newParentVersion) throws PomTransformationException {
        File pomFile = new File(rootDir.getAbsolutePath() + "/" + pomFileName);
        File backupFile = new File(rootDir.getAbsolutePath() + "/" + pomFileName + ".backup");
        try {
            // Keep the original as a backup; the transform reads from it and
            // regenerates the POM at its original location.
            FileUtils.moveFile(pomFile, backupFile);

            Source xml = new StreamSource(backupFile);
            Source xslt = new StreamSource(new ClassPathResource("mavenParentReplacer.xsl").getFile());
            Result output = new StreamResult(pomFile);

            Transformer transformer = TransformerFactory.newInstance().newTransformer(xslt);
            transformer.setParameter("parentArtifactId", newParentArtifactId);
            transformer.setParameter("parentGroupId", newParentGroupId);
            transformer.setParameter("parentVersion", newParentVersion);
            transformer.transform(xml, output);
        } catch (Exception e) {
            throw new PomTransformationException(
                    "Error while transforming pom : " + pomFile.getAbsolutePath(), e);
        }
    }

    /**
     * Execute the given Maven goals against this POM with an embedded Maven.
     *
     * @param goals goals to run (e.g. "clean", "test")
     * @return the embedder's execution result when every goal succeeded
     * @throws PomExecutionException if any goal failed; carries the underlying
     *         exceptions plus the artifactIds of the mojos that did succeed
     */
    public MavenExecutionResult executeGoals(List goals) throws PomExecutionException {
        final List<String> succeededPlugins = new ArrayList<String>();

        MavenRequest request = new MavenRequest();
        //request.setPom(pluginCheckoutDir.getAbsolutePath()+"/pom.xml");
        request.setBaseDirectory(rootDir.getAbsolutePath());
        request.setGoals(goals);
        // Track every mojo that completes, so a failure report can say how far we got.
        request.setExecutionListener(new AbstractExecutionListener() {
            public void mojoSucceeded(ExecutionEvent event) {
                succeededPlugins.add(event.getMojoExecution().getArtifactId());
            }
        });
        request.getUserProperties().put("failIfNoTests", "false");
        request.setPom(rootDir.getAbsolutePath() + "/pom.xml");

        MavenExecutionResult executionResult;
        try {
            MavenEmbedder embedder =
                    new MavenEmbedder(Thread.currentThread().getContextClassLoader(), request);
            executionResult = embedder.execute(request);
        } catch (MavenEmbedderException e) {
            // TODO: better manage this exception
            throw new RuntimeException("Error during maven embedder execution", e);
        } catch (ComponentLookupException e) {
            // TODO: better manage this exception
            throw new RuntimeException("Error during maven embedder execution", e);
        }

        if (!executionResult.getExceptions().isEmpty()) {
            throw new PomExecutionException(
                    "Error while executing pom goals : " + Arrays.toString(goals.toArray()),
                    executionResult.getExceptions(), succeededPlugins);
        }
        return executionResult;
    }
}
package org.jenkinsci.plugins.p4;

import hudson.AbortException;
import hudson.FilePath.FileCallable;
import hudson.model.TaskListener;
import hudson.remoting.VirtualChannel;

import java.io.File;
import java.io.IOException;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.List;
import java.util.logging.Logger;

import org.jenkinsci.plugins.p4.client.ClientHelper;
import org.jenkinsci.plugins.p4.client.ConnectionHelper;
import org.jenkinsci.plugins.p4.credentials.P4StandardCredentials;
import org.jenkinsci.plugins.p4.populate.Populate;
import org.jenkinsci.plugins.p4.workspace.Workspace;

/**
 * Serializable task that syncs a Perforce client workspace on a build node
 * (master or remote) to a computed change/label, optionally unshelving a
 * review. Options are resolved on the master in setBuildOpts/setPopulateOpts;
 * the actual sync happens in invoke() on the node.
 */
public class CheckoutTask implements FileCallable<Boolean>, Serializable {

	private static final long serialVersionUID = 1L;

	private static Logger logger = Logger.getLogger(CheckoutTask.class.getName());

	// Immutable connection parameters, captured at construction time.
	private final P4StandardCredentials credential;
	private final TaskListener listener;
	private final String client;

	// Mutable sync options, filled in by setBuildOpts()/setPopulateOpts()
	// before the task is shipped to the node.
	private CheckoutStatus status;
	private int head;
	private Object buildChange; // Integer change number or String label name
	private int review;
	private Populate populate;

	/**
	 * Constructor.
	 *
	 * @param credentialID
	 *            - id used to look up the Perforce server credential
	 * @param config
	 *            - workspace configuration; its full name becomes the client
	 * @param listener
	 *            - build listener for user-facing log/error output
	 */
	public CheckoutTask(String credentialID, Workspace config,
			TaskListener listener) {
		this.credential = ConnectionHelper.findCredential(credentialID);
		this.listener = listener;
		this.client = config.getFullName();
	}

	/**
	 * Resolves the sync options for this build: registers the client spec on
	 * the server, then captures head change, review number and the
	 * change/label to build. Aborts the build on any failure.
	 *
	 * @param workspace workspace (client) definition plus build parameters
	 * @throws AbortException if the client is undefined or setup fails
	 */
	public void setBuildOpts(Workspace workspace) throws AbortException {
		ClientHelper p4 = new ClientHelper(credential, listener, client);

		try {
			// setup the client workspace to use for the build.
			if (!p4.setClient(workspace)) {
				String err = "Undefined workspace: " + workspace.getFullName();
				logger.severe(err);
				listener.error(err);
				throw new AbortException(err);
			}

			// fetch and calculate change to sync to or review to unshelve.
			status = getStatus(workspace);
			head = p4.getClientHead();
			review = getReview(workspace);
			buildChange = getBuildChange(workspace);

		} catch (Exception e) {
			String err = "Unable to setup workspace: " + e;
			logger.severe(err);
			listener.error(err);
			throw new AbortException(err);
		} finally {
			// always release the Perforce connection
			p4.disconnect();
		}
	}

	/** Sets the populate (sync behaviour) options used by invoke(). */
	public void setPopulateOpts(Populate populate) {
		this.populate = populate;
	}

	/**
	 * Invoke sync on build node (master or remote node).
	 *
	 * @param workspace build workspace root on the node (unused directly;
	 *                  the sync is driven through the Perforce client spec)
	 * @param channel   remoting channel (unused)
	 * @return true if updated, false if no change.
	 * @throws IOException wrapped as AbortException when the sync fails
	 */
	public Boolean invoke(File workspace, VirtualChannel channel)
			throws IOException {

		ClientHelper p4 = new ClientHelper(credential, listener, client);
		try {
			// test server connection
			if (!p4.isConnected()) {
				p4.log("P4: Server connection error:" + credential.getP4port());
				return false;
			}
			p4.log("Connected to server: " + credential.getP4port());

			// test client connection
			if (p4.getClient() == null) {
				p4.log("P4: Client unknown: " + client);
				return false;
			}
			p4.log("Connected to client: " + client);

			// Tidy the workspace before sync/build
			p4.tidyWorkspace(populate);

			// Sync workspace to label, head or specified change
			p4.syncFiles(buildChange, populate);

			// Unshelve review if specified
			if (status == CheckoutStatus.SHELVED) {
				p4.unshelveFiles(review);
			}

		} catch (Exception e) {
			String msg = "Unable to update workspace: " + e;
			logger.warning(msg);
			throw new AbortException(msg);
		} finally {
			p4.disconnect();
		}
		return true;
	}

	/**
	 * Get the build status for the parameter map.
	 *
	 * @param workspace workspace whose "status" parameter is inspected
	 * @return parsed status, defaulting to HEAD when unset/empty
	 */
	private CheckoutStatus getStatus(Workspace workspace) {
		CheckoutStatus status = CheckoutStatus.HEAD;
		String value = workspace.get("status");
		if (value != null && !value.isEmpty()) {
			status = CheckoutStatus.parse(value);
		}
		return status;
	}

	/**
	 * Get the sync point from the parameter map. Returns the head if no change
	 * found in the map.
	 *
	 * @param workspace workspace whose "change"/"label" parameters are inspected
	 * @return an Integer change number, a String label, or the head change
	 */
	private Object getBuildChange(Workspace workspace) {
		// Use head as the default
		Object build = this.head;

		// if change is specified then update
		String change = workspace.get("change");
		if (change != null && !change.isEmpty()) {
			try {
				build = Integer.parseInt(change);
			} catch (NumberFormatException e) {
				// best-effort: non-numeric change is ignored, head is kept
			}
		}

		// if label is specified then update
		String label = workspace.get("label");
		if (label != null && !label.isEmpty()) {
			try {
				// if build is a change-number passed as a label
				build = Integer.parseInt(label);
			} catch (NumberFormatException e) {
				build = label;
			}
		}

		return build;
	}

	/**
	 * Get the unshelve point from the parameter map.
	 *
	 * @param workspace workspace whose "review" parameter is inspected
	 * @return review number, or 0 when unset or non-numeric
	 */
	private int getReview(Workspace workspace) {
		int review = 0;
		String value = workspace.get("review");
		if (value != null && !value.isEmpty()) {
			try {
				review = Integer.parseInt(value);
			} catch (NumberFormatException e) {
				// best-effort: non-numeric review is ignored
			}
		}
		return review;
	}

	/**
	 * Lists the changes between the previous build point and this build's
	 * change; appends the review number when building from a shelf. Errors
	 * are logged and an empty/partial list is returned.
	 *
	 * @param last the change point of the previous build
	 * @return change identifiers included in this build
	 */
	public List<Object> getChanges(Object last) {

		List<Object> changes = new ArrayList<Object>();

		// Add changes to this build.
		ClientHelper p4 = new ClientHelper(credential, listener, client);
		try {
			changes = p4.listChanges(last, buildChange);
		} catch (Exception e) {
			String err = "Unable to get changes: " + e;
			logger.severe(err);
			listener.getLogger().println(err);
			e.printStackTrace();
		} finally {
			p4.disconnect();
		}

		// Include shelf if a review
		if (status == CheckoutStatus.SHELVED) {
			changes.add(review);
		}

		return changes;
	}

	/** Returns the checkout status computed by setBuildOpts(). */
	public CheckoutStatus getStatus() {
		return status;
	}

	/**
	 * Returns the point this build synced to: the review number when building
	 * a shelved change, otherwise the resolved change number or label.
	 */
	public Object getBuildChange() {
		if (status == CheckoutStatus.SHELVED) {
			return review;
		}
		return buildChange;
	}
}
package org.json.simple.parser; /** * ParseException explains why and where the error occurs in source JSON text. * * @author FangYidong<fangyidong@yahoo.com.cn> * */ public class ParseException extends Exception { private static final long serialVersionUID = -7880698968187728547L; public static final int ERROR_UNEXPECTED_CHAR = 0; public static final int ERROR_UNEXPECTED_TOKEN = 1; public static final int ERROR_UNEXPECTED_EXCEPTION = 2; private int errorType; private Object unexpectedObject; private int position; public ParseException(int errorType){ this(-1, errorType, null); } public ParseException(int errorType, Object unexpectedObject){ this(-1, errorType, unexpectedObject); } public ParseException(int position, int errorType, Object unexpectedObject){ this.position = position; this.errorType = errorType; this.unexpectedObject = unexpectedObject; } public int getErrorType() { return errorType; } public void setErrorType(int errorType) { this.errorType = errorType; } /** * @see org.json.simple.parser.JSONParser#getPosition() * * @return The character position (starting with 0) of the input where the error occurs. 
*/ public int getPosition() { return position; } public void setPosition(int position) { this.position = position; } /** * @see org.json.simple.parser.Yytoken * * @return One of the following base on the value of errorType: * ERROR_UNEXPECTED_CHAR java.lang.Character * ERROR_UNEXPECTED_TOKEN org.json.simple.parser.Yytoken * ERROR_UNEXPECTED_EXCEPTION java.lang.Exception */ public Object getUnexpectedObject() { return unexpectedObject; } public void setUnexpectedObject(Object unexpectedObject) { this.unexpectedObject = unexpectedObject; } public String getMessage() { StringBuffer sb = new StringBuffer(); switch(errorType){ case ERROR_UNEXPECTED_CHAR: sb.append("Unexpected character (").append(unexpectedObject).append(") at position ").append(position).append("."); break; case ERROR_UNEXPECTED_TOKEN: sb.append("Unexpected token ").append(unexpectedObject).append(" at position ").append(position).append("."); break; case ERROR_UNEXPECTED_EXCEPTION: sb.append("Unexpected exception at position ").append(position).append(": ").append(unexpectedObject); break; default: sb.append("Unkown error at position ").append(position).append("."); break; } return sb.toString(); } }
package org.mafagafogigante.dungeon.game;

import org.mafagafogigante.dungeon.commands.IssuedCommand;
import org.mafagafogigante.dungeon.commands.IssuedCommandEvaluation;
import org.mafagafogigante.dungeon.commands.IssuedCommandProcessor;
import org.mafagafogigante.dungeon.gui.GameWindow;
import org.mafagafogigante.dungeon.io.Loader;
import org.mafagafogigante.dungeon.io.Writer;
import org.mafagafogigante.dungeon.logging.DungeonLogger;
import org.mafagafogigante.dungeon.util.StopWatch;
import org.mafagafogigante.dungeon.util.Utils;

import org.apache.commons.lang3.StringUtils;

import java.awt.Color;
import java.lang.reflect.InvocationTargetException;
import java.util.ArrayList;
import java.util.List;
import javax.swing.JOptionPane;
import javax.swing.SwingUtilities;

/**
 * Static entry point and top-level game loop: owns the single GameWindow and
 * the current GameState, and drives command evaluation, turn rendering,
 * death/reload handling, and orderly shutdown.
 */
public class Game {

  // Per-process bookkeeping (uptime, accepted command count) used for the
  // closing log message.
  private static final InstanceInformation instanceInformation = new InstanceInformation();

  // The single Swing window; created on the EDT in main().
  private static GameWindow gameWindow;
  // The current game state; null between unsetGameState() and setGameState().
  private static GameState gameState;

  /**
   * The main method.
   */
  public static void main(String[] args) {
    final StopWatch stopWatch = new StopWatch();
    // Swing components must be created on the Event Dispatch Thread.
    invokeOnEventDispatchThreadAndWait(new Runnable() {
      @Override
      public void run() {
        gameWindow = new GameWindow();
      }
    });
    DungeonLogger.info("Finished making the window. Took " + stopWatch.toString() + ".");
    setGameState(getInitialGameState());
    // Only start accepting input after the initial state is fully in place.
    invokeOnEventDispatchThreadAndWait(new Runnable() {
      @Override
      public void run() {
        getGameWindow().startAcceptingCommands();
        DungeonLogger.info("Signaled the window to start accepting commands.");
      }
    });
  }

  /**
   * Invokes a runnable on the EDT and waits for it to finish. If an exception is thrown, this method logs it and
   * finishes the application.
   */
  private static void invokeOnEventDispatchThreadAndWait(Runnable runnable) {
    try {
      SwingUtilities.invokeAndWait(runnable);
    } catch (InterruptedException | InvocationTargetException fatal) {
      DungeonLogger.logSevere(fatal);
    }
  }

  /**
   * Loads a saved GameState or creates a new one. Should be invoked to get the first GameState of the instance.
   *
   * <p>If a new GameState is created and the saves folder is empty, the tutorial is suggested.
   */
  private static GameState getInitialGameState() {
    GameState gameState = Loader.loadGame(true);
    if (gameState == null) {
      gameState = Loader.newGame();
      // Note that loadedGameState may be null even if a save exists (if the player declined to load it).
      // So check for any save in the folder.
      if (!Loader.checkForSave()) { // Suggest the tutorial only if no saved game exists.
        suggestTutorial();
      }
    }
    return gameState;
  }

  private static void suggestTutorial() {
    Writer.write(new DungeonString("\nYou may want to issue 'tutorial' to learn the basics.\n"));
  }

  /**
   * Gets a GameState object. Should be invoked to get a GameState after the Hero dies.
   */
  private static GameState getAfterDeathGameState() {
    GameState gameState = Loader.loadGame(false);
    if (gameState != null) {
      JOptionPane.showMessageDialog(getGameWindow(), "Loaded the most recent saved game.");
    } else {
      gameState = Loader.newGame();
      JOptionPane.showMessageDialog(getGameWindow(), "Could not load a saved game. Created a new game.");
    }
    return gameState;
  }

  public static GameWindow getGameWindow() {
    return gameWindow;
  }

  public static GameState getGameState() {
    return gameState;
  }

  /**
   * Sets a new GameState to the static field. Can be used to nullify the GameState, something that should be done while
   * another GameState is being created. If the provided GameState is not null, this setter also invokes Hero.look().
   *
   * @param state another GameState object, or null
   */
  public static void setGameState(GameState state) {
    // Warn (but do not fail) when the previous state was not unset first.
    if (getGameState() != null) {
      DungeonLogger.warning("Called setGameState without unsetting the old game state.");
    }
    if (state == null) {
      throw new IllegalArgumentException("passed null to setGameState.");
    }
    gameState = state;
    DungeonLogger.info("Set the GameState field in Game to a GameState.");
    // This is a new GameState that must be refreshed in order to have spawned creatures at the beginning.
    Engine.refresh();
    Writer.write(new DungeonString("\n")); // Improves readability.
    gameState.getHero().look();
  }

  public static void unsetGameState() {
    DungeonLogger.info("Set the GameState field in Game to null.");
    gameState = null;
  }

  /**
   * Renders a turn based on the last IssuedCommand.
   *
   * @param issuedCommand the last IssuedCommand.
   */
  public static void renderTurn(IssuedCommand issuedCommand) {
    // Clears the text pane.
    getGameWindow().clearTextPane();
    if (processInput(issuedCommand)) {
      if (getGameState().getHero().getHealth().isDead()) {
        // Death: discard the current state and load (or create) a fresh one.
        getGameWindow().clearTextPane();
        Writer.write("You died.");
        unsetGameState();
        setGameState(getAfterDeathGameState());
      } else {
        Engine.endTurn();
      }
    }
  }

  /**
   * Processes the player's input. Adds the IssuedCommand to the CommandHistory and to the CommandStatistics. Finally,
   * this method finds and executes the corresponding Command object or prints a message if there is not such Command.
   *
   * @param issuedCommand the last IssuedCommand.
   * @return a boolean indicating whether or not the command executed successfully
   */
  private static boolean processInput(IssuedCommand issuedCommand) {
    IssuedCommandEvaluation evaluation = IssuedCommandProcessor.evaluateIssuedCommand(issuedCommand);
    if (evaluation.isValid()) {
      instanceInformation.incrementAcceptedCommandCount();
      getGameState().getCommandHistory().addCommand(issuedCommand);
      getGameState().getStatistics().addCommand(issuedCommand);
      IssuedCommandProcessor.prepareIssuedCommand(issuedCommand).execute();
      return true;
    } else {
      // Invalid command: show suggestions in red, a hint in orange.
      DungeonString string = new DungeonString();
      string.setColor(Color.RED);
      string.append("That is not a valid command.\n");
      string.append("But it is similar to ");
      List<String> suggestionsBetweenCommas = new ArrayList<>();
      for (String suggestion : evaluation.getSuggestions()) {
        suggestionsBetweenCommas.add(StringUtils.wrap(suggestion, '"'));
      }
      string.append(Utils.enumerate(suggestionsBetweenCommas));
      string.append(".\n");
      string.setColor(Color.ORANGE);
      string.append("See 'commands' for a complete list of commands.");
      Writer.write(string);
      return false;
    }
  }

  /**
   * Exits the game, prompting the user if the current state should be saved if it is not already saved.
   */
  public static void exit() {
    if (getGameState() != null && !getGameState().isSaved()) {
      Loader.saveGame(getGameState());
    }
    logInstanceClosing();
    System.exit(0);
  }

  // Logs a summary line: how long the instance ran and how many commands it parsed.
  private static void logInstanceClosing() {
    StringBuilder builder = new StringBuilder();
    builder.append("Closing instance. Ran for ");
    builder.append(instanceInformation.getDurationString());
    builder.append(". ");
    if (instanceInformation.getAcceptedCommandCount() == 0) {
      builder.append("Parsed no commands.");
    } else if (instanceInformation.getAcceptedCommandCount() == 1) {
      builder.append("Parsed one command.");
    } else {
      builder.append("Parsed ");
      builder.append(instanceInformation.getAcceptedCommandCount());
      builder.append(" commands.");
    }
    DungeonLogger.info(builder.toString());
  }
}
package org.ndexbio.rest.services; import java.util.List; import java.util.UUID; import javax.annotation.security.PermitAll; import javax.servlet.http.HttpServletRequest; import javax.ws.rs.DELETE; import javax.ws.rs.GET; import javax.ws.rs.POST; import javax.ws.rs.Path; import javax.ws.rs.PathParam; import javax.ws.rs.Produces; import javax.ws.rs.core.Context; import javax.ws.rs.core.Response; import org.ndexbio.model.object.User; import org.ndexbio.common.models.dao.orientdb.UserDAO; import org.ndexbio.common.access.NdexDatabase; import com.orientechnologies.orient.core.db.document.ODatabaseDocumentTx; import org.ndexbio.common.exceptions.*; import org.ndexbio.model.object.SimpleUserQuery; import org.ndexbio.rest.annotations.ApiDoc; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import com.google.common.base.Preconditions; import com.google.common.base.Strings; @Path("/user") public class UserService extends NdexService { private static final Logger _logger = LoggerFactory .getLogger(UserService.class); private static UserDAO dao; private static NdexDatabase database; private static ODatabaseDocumentTx localConnection; //all DML will be in this connection, in one transaction. public UserService(@Context HttpServletRequest httpRequest) { super(httpRequest); } /* * refactored to accommodate non-transactional database operations */ @POST @PermitAll @Produces("application/json") @ApiDoc("Create a new user based on a JSON object specifying username, password, and emailAddress, returns the new user - including its internal id. 
Username and emailAddress must be unique in the database.") public User createUser(final User newUser) throws IllegalArgumentException, DuplicateObjectException, NdexException { final User user; database = new NdexDatabase(); localConnection = database.getAConnection(); localConnection.begin(); dao = new UserDAO(localConnection); try { user = dao.createNewUser(newUser); localConnection.commit(); } catch (IllegalArgumentException e) { throw e; } catch (DuplicateObjectException e) { throw e; } catch (Exception e) { throw new NdexException(e.getMessage()); } finally { localConnection.close(); database.close(); } return user; } @GET @PermitAll @Path("/{userId}") @Produces("application/json") @ApiDoc("Return the user corresponding to userId, whether userId is actually a database id or a accountName. Error if neither is found.") public User getUser(@PathParam("userId") final String userId) throws IllegalArgumentException, NdexException { database = new NdexDatabase(); localConnection = database.getAConnection(); localConnection.begin(); dao = new UserDAO(localConnection); try { final User user = dao.getUserByAccountName(userId); return user; } catch (ObjectNotFoundException e) { try { final User user = dao.getUserById(UUID.fromString(userId)); return user; } catch (ObjectNotFoundException ee) { throw ee; } catch (Exception ee) { throw new NdexException(ee.getMessage()); } } catch (Exception e) { throw new NdexException(e.getMessage()); } finally { localConnection.close(); database.close(); } } @GET @PermitAll @Path("/authenticate/{accountName}/{password}") @Produces("application/json") @ApiDoc("Authenticates the combination of accountName and password supplied in the route parameters, returns the authenticated user if successful.") public User authenticateUser(@PathParam("accountName") final String accountName, @PathParam("password") final String password) throws SecurityException, NdexException { database = new NdexDatabase(); localConnection = 
database.getAConnection(); localConnection.begin(); dao = new UserDAO(localConnection); try { return dao.authenticateUser(accountName, password); } catch (SecurityException se) { throw se; } catch (Exception e) { _logger.error("Can't authenticate users.", e); throw new NdexException("There's a problem with the authentication server. Please try again later."); } finally { localConnection.close(); database.close(); } } @POST @Path("/password") @Produces("application/json") @ApiDoc("Changes the authenticated user's password to the new password in the POST data.") public void changePassword(String password) throws IllegalArgumentException, NdexException { Preconditions.checkArgument(!Strings.isNullOrEmpty(password), "A password is required"); database = new NdexDatabase(); localConnection = database.getAConnection(); localConnection.begin(); dao = new UserDAO(localConnection); try { dao.changePassword(password, getLoggedInUser().getExternalId()); localConnection.commit(); } catch (IllegalArgumentException e) { throw e; } catch (Exception e) { throw new NdexException(e.getMessage()); } finally { localConnection.close(); database.close(); } } @DELETE @Produces("application/json") @ApiDoc("Deletes the authenticated user. Errors if the user administrates any group or network. 
Should remove any other objects depending on the user.") public void deleteUser() throws NdexException, ObjectNotFoundException { database = new NdexDatabase(); localConnection = database.getAConnection(); localConnection.begin(); dao = new UserDAO(localConnection); try { dao.deleteUserById(getLoggedInUser().getExternalId()); localConnection.commit(); } catch (ObjectNotFoundException e) { throw e; } catch (Exception e) { throw new NdexException(e.getMessage()); } finally { localConnection.close(); database.close(); } } @GET @PermitAll @Path("/{username}/forgot-password") @Produces("application/json") @ApiDoc("Causes a new password to be generated for the authenticated user and then emailed to the users emailAddress") public Response emailNewPassword( @PathParam("username") final String username) throws IllegalArgumentException, NdexException { Preconditions.checkArgument(!Strings.isNullOrEmpty(username), "A username is required"); // TODO: In the future security questions should be implemented - right // now anyone can change anyone else's password to a randomly generated // password database = new NdexDatabase(); localConnection = database.getAConnection(); localConnection.begin(); dao = new UserDAO(localConnection); try { final Response res = dao.emailNewPassword(username); localConnection.commit(); return res; } catch (IllegalArgumentException e) { throw e; } catch (Exception e) { throw new NdexException(e.getMessage()); } finally { localConnection.close(); database.close(); } } @POST @PermitAll @Path("/search/{skipBlocks}/{blockSize}") @Produces("application/json") @ApiDoc("Returns a list of users based on the range [skipBlocks, blockSize] and the POST data searchParameters. " + "The searchParameters must contain a 'searchString' parameter. 
") public List<User> findUsers(SimpleUserQuery simpleUserQuery, @PathParam("skipBlocks") final int skipBlocks, @PathParam("blockSize") final int blockSize) throws IllegalArgumentException, NdexException { database = new NdexDatabase(); localConnection = database.getAConnection(); localConnection.begin(); dao = new UserDAO(localConnection); try { final List<User> users = dao.findUsers(simpleUserQuery, skipBlocks, blockSize); return users; } catch (IllegalArgumentException e) { throw e; } catch (Exception e) { throw new NdexException(e.getMessage()); } finally { localConnection.close(); database.close(); } } @POST @Produces("application/json") @ApiDoc("Updates the authenticated user based on the serialized user object in the POST data. Errors if the user object references a different user.") public User updateUser(final User updatedUser) throws IllegalArgumentException, ObjectNotFoundException, NdexException { Preconditions.checkArgument(null != updatedUser, "Updated user data are required"); database = new NdexDatabase(); localConnection = database.getAConnection(); localConnection.begin(); dao = new UserDAO(localConnection); try { final User user = dao.updateUser(updatedUser, getLoggedInUser().getExternalId()); localConnection.commit(); return user; } catch (IllegalArgumentException e) { throw e; } catch (ObjectNotFoundException e) { throw e; } catch (Exception e) { throw new NdexException(e.getMessage()); } finally { localConnection.close(); database.close(); } } /* private BufferedImage resizeImage(final BufferedImage sourceImage, final int width, final int height) { final Image resizeImage = sourceImage.getScaledInstance(width, height, Image.SCALE_SMOOTH); final BufferedImage resizedImage = new BufferedImage(width, height, Image.SCALE_SMOOTH); resizedImage.getGraphics().drawImage(resizeImage, 0, 0, null); return resizedImage; } */ }
package org.openlmis.fulfillment.util; import com.fasterxml.jackson.annotation.JsonProperty; import org.apache.commons.lang3.StringUtils; import org.apache.commons.lang3.Validate; import org.springframework.context.MessageSource; import lombok.Getter; import java.util.Locale; /** * Immutable value object for a message that is localizable. */ public class Message { private String key; private Object[] params; public Message(String messageKey) { this(messageKey, (Object[]) null); } /** * Creates a new Message with parameters that optionally may be used when the message is * localized. * * @param messageKey the key of the message * @param messageParameters the ordered parameters for substitution in a localized message. */ public Message(String messageKey, Object... messageParameters) { Validate.notBlank(messageKey); this.key = messageKey.trim(); this.params = messageParameters; } @Override public String toString() { return key + ": " + StringUtils.join(params, ", "); } /** * Gets the localized version of this message as it's intended for a human. * * @param messageSource the source of localized text. * @param locale the locale to determine which localized text to use. * @return this message localized in a format suitable for serialization. * @throws org.springframework.context.NoSuchMessageException if the message doesn't exist in the * messageSource. */ public LocalizedMessage localMessage(MessageSource messageSource, Locale locale) { return new LocalizedMessage(messageSource.getMessage(key, params, locale)); } @Override public boolean equals(Object other) { if (this == other) { return true; } if (!(other instanceof Message)) { return false; } Message otherMessage = (Message) other; return this.key.equals(otherMessage.key); } @Override public int hashCode() { return key.hashCode(); } /** * Value class of a localized message. Useful for JSON serialization, logging, etc... 
*/ @Getter public final class LocalizedMessage { @JsonProperty(access = JsonProperty.Access.READ_ONLY) private String messageKey; @JsonProperty(access = JsonProperty.Access.READ_ONLY) private Object[] params; @JsonProperty(access = JsonProperty.Access.READ_ONLY) private String message; /** * Creates new LocalizedMessage based on given String. * @param message message. */ public LocalizedMessage(String message) { this.messageKey = Message.this.key; this.params = Message.this.params; Validate.notBlank(message); this.message = message; } @Override public String toString() { return messageKey + ": " + message; } } }
package org.realityforge.tarrabah; import com.google.gson.JsonElement; import com.google.gson.JsonObject; import com.google.gson.JsonParser; import java.io.ByteArrayInputStream; import java.io.InputStream; import java.io.InputStreamReader; import java.util.logging.Level; import java.util.logging.Logger; import java.util.zip.DeflaterInputStream; import java.util.zip.GZIPInputStream; import javax.enterprise.context.Dependent; import javax.inject.Inject; import org.jboss.netty.buffer.ChannelBuffer; import org.jboss.netty.channel.ChannelHandlerContext; import org.jboss.netty.channel.ExceptionEvent; import org.jboss.netty.channel.MessageEvent; import org.jboss.netty.channel.SimpleChannelHandler; @Dependent public class GelfHandler extends SimpleChannelHandler { static final short[] CHUNKED_BYTE_PREFIX = new short[]{ 0x1E, 0x0F }; private static final short[] ZLIP_BYTE_PREFIX = new short[]{ 0x78, 0x9C }; private static final short[] GZIP_BYTE_PREFIX = new short[]{ 0x1F, 0x8B }; @Inject private Logger _logger; @Override public void messageReceived( final ChannelHandlerContext context, final MessageEvent e ) throws Exception { final ChannelBuffer buffer = (ChannelBuffer) e.getMessage(); final byte[] readable = new byte[ buffer.readableBytes() ]; buffer.toByteBuffer().get( readable, buffer.readerIndex(), buffer.readableBytes() ); if ( readable.length > 2 && ZLIP_BYTE_PREFIX[ 0 ] == readable[ 0 ] && ZLIP_BYTE_PREFIX[ 1 ] == readable[ 1 ] ) { // ZLIB'd processJsonMessage( new DeflaterInputStream( new ByteArrayInputStream( readable ) ) ); } else if ( readable.length > 2 && GZIP_BYTE_PREFIX[ 0 ] == readable[ 0 ] && GZIP_BYTE_PREFIX[ 1 ] == readable[ 1 ] ) { //GZIP'd processJsonMessage( new GZIPInputStream( new ByteArrayInputStream( readable ) ) ); } else if ( readable.length > 2 && CHUNKED_BYTE_PREFIX[ 0 ] == readable[ 0 ] && CHUNKED_BYTE_PREFIX[ 1 ] == readable[ 1 ] ) { //Chunked } else { //plain processJsonMessage( new ByteArrayInputStream( readable ) ); } } private void 
processJsonMessage( final InputStream input ) { final JsonParser parser = new JsonParser(); final JsonElement element = parser.parse( new InputStreamReader( input ) ); if ( !( element instanceof JsonObject ) ) { //Error } final JsonObject object = (JsonObject) element; } @Override public void exceptionCaught( final ChannelHandlerContext context, final ExceptionEvent e ) throws Exception { _logger.log( Level.WARNING, "Problem handling gelf packet.", e.getCause() ); } }
package org.sipfoundry.log4j; import java.text.SimpleDateFormat; import java.util.TimeZone; import org.apache.log4j.Layout; import org.apache.log4j.Level; import org.apache.log4j.Priority; import org.apache.log4j.spi.LoggingEvent; import org.sipfoundry.util.Hostname; /** * A log4j Layout class that matches the SipFoundry C++ OsSyslog format (within * reason) * * @author Woof! */ public class SipFoundryLayout extends Layout { static Long lineNumber = 0L; SimpleDateFormat dateFormat; String hostName; String facility; /** * Sorry for this ugly routine. It is here because SIPX needs six digits * and I cannot find a way to make SimpleDateFormat do that. */ private String munge(String input) { String[] pieces = input.split("\\."); if ( pieces.length == 0 ) return input; StringBuffer newStringBuilder = new StringBuffer().append(input); if ( pieces[pieces.length -1 ].length() == 4 ) { newStringBuilder.deleteCharAt(input.length() -1); newStringBuilder.append("000Z"); return newStringBuilder.toString(); } else { return input; } } public SipFoundryLayout() { super(); dateFormat = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSS'Z'"); dateFormat.setTimeZone(TimeZone.getTimeZone("UTC")); hostName = Hostname.get(); facility = "JAVA"; // Can be set from the property // log4j.appender.xxx.layout.facility } /** * Map the log4j levels to the SipFoundry text (ERROR is ERR) * * @param l * The level to map * @return The SipFoundry level text if available, otherwise the log4j text */ private String mapLevel2SipFoundry(Level l) { switch (l.toInt()) { case Priority.DEBUG_INT: return "DEBUG"; case Priority.INFO_INT: return "INFO"; case Priority.WARN_INT: return "WARNING"; case Priority.ERROR_INT: return "ERR"; default: return l.toString(); } } /** * Map the SipFoundry text to the log4j Priority number * * @param level * @return */ public static Level mapSipFoundry2log4j(String level) { if (level == null) return Level.DEBUG; if (level.equalsIgnoreCase("DEBUG")) return Level.DEBUG; if 
(level.equalsIgnoreCase("INFO")) return Level.INFO; if (level.equalsIgnoreCase("NOTICE")) return Level.INFO; if (level.equalsIgnoreCase("WARNING")) return Level.WARN; if (level.equalsIgnoreCase("ERR")) return Level.ERROR; if (level.equalsIgnoreCase("ERROR")) return Level.ERROR; Level l = Level.toLevel(level); return l == null ? Level.DEBUG : l; } /** * Escape any CR or LF in the message with the \r \n escapes. SipFoundry * logging logs multiline messages (like a SIP PDU) on a single log entry by * escaping the CRs and LFs * * @param msg * The message to escape * @return The escaped message */ String escapeCrlf(String msg) { if (msg == null) { return null; } int n = msg.length(); // Ignore trailing CR LFs (why?) /* * for(int i=n-1; i>0; i--) { char c = msg.charAt(i) ; if (c == '\r' || * c == '\n') { n-- ; continue ; } break ; } */ // escape CR LFs StringBuffer sb = new StringBuffer(n + 2); for (int i = 0; i < n; i++) { char c = msg.charAt(i); if (c == '\r') { sb.append("\\r"); } else if (c == '\n') { sb.append("\\n"); } else { sb.append(c); } } return sb.toString(); } @Override public String format(LoggingEvent arg0) { String msg = escapeCrlf(arg0.getRenderedMessage()); String loggerNames[] = arg0.getLoggerName().split("[.]"); String loggerName = loggerNames[loggerNames.length - 1]; // syslog2siptrace needs these facilities, and this // is a cheap hack to get them! // Actually, syslog2siptrace needs to know ip addrs and ports from // the messages, and that info ain't there at the moment. So just // ignore this for now... String localFacility = facility; String newMessage = msg; if (msg.contains(SipFoundryLogRecordFactory.OUTGOING)) { localFacility = "OUTGOING"; newMessage = msg.replaceFirst(SipFoundryLogRecordFactory.OUTGOING, ""); } else if (msg.contains(SipFoundryLogRecordFactory.INCOMING)) { localFacility = "INCOMING"; newMessage = msg.replaceFirst(SipFoundryLogRecordFactory.INCOMING, ""); } // lineNumber is static across all loggers, so must be mutex protected. 
// time should also increase monotonically, so hold the lock synchronized (lineNumber) { lineNumber++; String out1 = String.format("\"%s\":%d:%s:%s:%s:%s:%s:%s:\"%s\"%n", munge(dateFormat.format(System.currentTimeMillis())), lineNumber, // line // number localFacility, // Facility mapLevel2SipFoundry(arg0.getLevel()), // msg priority // (DEBUG, WARN, // etc.) hostName, // Name of this machine arg0.getThreadName(), // Thread that called log "00000000", // Thread Id (not useful in Java) loggerName, // Name of the logger newMessage); // The message itself (w CRLF escaped) return out1; } } @Override public boolean ignoresThrowable() { return true; } @Override public void activateOptions() { // No options return; } public void setFacility(String facility) { this.facility = facility; } public String getFacility() { return facility; } }
package org.takes.facets.auth.social; import com.jcabi.http.request.JdkRequest; import com.jcabi.http.response.JsonResponse; import com.jcabi.http.response.RestResponse; import com.jcabi.http.response.XmlResponse; import java.io.IOException; import java.net.HttpURLConnection; import java.util.HashMap; import java.util.Iterator; import java.util.List; import java.util.Map; import javax.json.JsonObject; import lombok.EqualsAndHashCode; import org.takes.HttpException; import org.takes.Request; import org.takes.Response; import org.takes.facets.auth.Identity; import org.takes.facets.auth.Pass; import org.takes.misc.Href; import org.takes.misc.Opt; import org.takes.rq.RqHref; /** * Github OAuth landing/callback page. * * <p>The class is immutable and thread-safe. * * @author Yegor Bugayenko (yegor@teamed.io) * @version $Id$ * @since 0.1 * @checkstyle MultipleStringLiteralsCheck (500 lines) */ @EqualsAndHashCode(of = { "app", "key" }) public final class PsGithub implements Pass { /** * Access token. */ private static final String ACCESS_TOKEN = "access_token"; /** * Code. */ private static final String CODE = "code"; /** * Login. */ private static final String LOGIN = "login"; /** * App name. */ private final transient String app; /** * Key. */ private final transient String key; /** * GitHub OAuth url. */ private final transient String github; /** * GitHub API url. */ private final transient String api; /** * Ctor. * @param gapp Github app * @param gkey Github key */ public PsGithub(final String gapp, final String gkey) { this(gapp, gkey, "https: } /** * Ctor. 
* @param gapp Github app * @param gkey Github key * @param gurl Github OAuth server * @param aurl Github API server * @checkstyle ParameterNumberCheck (2 lines) */ PsGithub(final String gapp, final String gkey, final String gurl, final String aurl) { this.app = gapp; this.key = gkey; this.github = gurl; this.api = aurl; } @Override public Opt<Identity> enter(final Request request) throws IOException { final Href href = new RqHref.Base(request).href(); final Iterator<String> code = href.param(PsGithub.CODE).iterator(); if (!code.hasNext()) { throw new HttpException( HttpURLConnection.HTTP_BAD_REQUEST, "code is not provided by Github" ); } return new Opt.Single<Identity>( this.fetch(this.token(href.toString(), code.next())) ); } @Override public Response exit(final Response response, final Identity identity) { return response; } /** * Get user name from Github, with the token provided. * @param token Github access token * @return The user found in Github * @throws IOException If fails */ private Identity fetch(final String token) throws IOException { final String uri = new Href(this.api).path("user") .with(PsGithub.ACCESS_TOKEN, token).toString(); return PsGithub.parse( new JdkRequest(uri) .header("accept", "application/json") .fetch().as(RestResponse.class) .assertStatus(HttpURLConnection.HTTP_OK) .as(JsonResponse.class).json().readObject() ); } /** * Retrieve Github access token. 
* @param home Home of this page * @param code Github "authorization code" * @return The token * @throws IOException If failed */ private String token(final String home, final String code) throws IOException { final String uri = new Href(this.github) .path(PsGithub.LOGIN).path("oauth").path(PsGithub.ACCESS_TOKEN) .toString(); final List<String> tokens = new JdkRequest(uri) .method("POST") .header("Accept", "application/xml") .body() .formParam("client_id", this.app) .formParam("redirect_uri", home) .formParam("client_secret", this.key) .formParam(PsGithub.CODE, code) .back() .fetch().as(RestResponse.class) .assertStatus(HttpURLConnection.HTTP_OK) .as(XmlResponse.class) .xml().xpath("/OAuth/access_token/text()"); if (tokens.isEmpty()) { throw new HttpException( HttpURLConnection.HTTP_BAD_REQUEST, "No access token" ); } return tokens.get(0); } /** * Make identity from JSON object. * @param json JSON received from Github * @return Identity found */ private static Identity parse(final JsonObject json) { final Map<String, String> props = new HashMap<String, String>(json.size()); // @checkstyle MultipleStringLiteralsCheck (1 line) props.put(PsGithub.LOGIN, json.getString(PsGithub.LOGIN, "unknown")); props.put("avatar", json.getString("avatar_url", " return new Identity.Simple( String.format("urn:github:%d", json.getInt("id")), props ); } }
package org.teamstbf.yats.model.item; import java.text.ParseException; import java.text.SimpleDateFormat; import java.util.Date; public class Schedule { public static final String MESSAGE_TIME_ERROR = "Invalid or empty date/time entry"; public static final String STRING_EMPTY = ""; public static final SimpleDateFormat FORMATTER_TIME = new SimpleDateFormat("hh:mma "); private static final SimpleDateFormat FORMATTER_DATE = new SimpleDateFormat("hh:mma dd/MM/yyyy"); public static final String MESSAGE_TIME_CONSTRAINTS = "non valid time"; public static final String TIME_VALIDATION_REGEX = "\\b((1[0-2]|0?[1-9]):([0-5][0-9])([AP][M]))"; public static final String MONTH_VALIDATION_REGEX = ".*(01|02|03|04|05|06|07|08|09|10|11|12).*"; private Date scheduleDate; // @@author A0116219L /* * Creates a Schedule object from the Date object given. Date can be null. */ public Schedule(Date dateObject) { this.scheduleDate = dateObject; } public String getTimeOnlyString() { return FORMATTER_TIME.format(scheduleDate); } public Schedule(String timeString) { validateDate(timeString); try { this.scheduleDate = STRING_EMPTY.equals(timeString) ? 
null : FORMATTER_DATE.parse(timeString); } catch (ParseException e) { // TODO Auto-generated catch block e.printStackTrace(); } } @Override public boolean equals(Object other) { return other == this // short circuit if same object || (other instanceof Schedule // instanceof handles nulls && this.toString().equals(((Schedule) other).toString())); // state // check } /* * public SimpleDate getDate() { return this.date; } public Timing getTime() * { return this.time; } public String getValue() { return this.value; } */ @Override public int hashCode() { return this.scheduleDate.hashCode(); } /* * public void setDate(SimpleDate endTime) { this.date = endTime; } public * void setTime(Timing startTime) { this.time = startTime; } */ @Override public String toString() { if (this.scheduleDate == null) { return STRING_EMPTY; } else if (this.scheduleDate.equals("")) { return STRING_EMPTY; } String dateString = FORMATTER_DATE.format(this.scheduleDate); return dateString; } public Date getDate() { return this.scheduleDate; } public static boolean isValidSchedule(String timeDate) { String[] date = timeDate.split("\\s+"); /*if (date.length != 2 ) { return false; }*/ return (date[0].trim().matches(TIME_VALIDATION_REGEX) && validateDate(date[1])); } public static boolean validateDate(String date) { String[] splitDate = date.split("/"); if (splitDate.length != 3 || splitDate[0].trim().length() != 2 || splitDate[1].trim().length() != 2 || splitDate[2].trim().length() != 4) { return false; } int day = Integer.parseInt(splitDate[0]); String month = splitDate[1]; int year = Integer.parseInt(splitDate[2]); if (!month.matches(MONTH_VALIDATION_REGEX)) { return false; } if (day > 31) { return false; } if (day > 30 && (month.equals("11") || month.equals("04") || month .equals("06") || month.equals("09"))) { return false; // only 1, 3, 5, 7, 8, 10, 12 have 31 days } else if (month.equals("02")) { if ((year % 4 == 0 && year % 100 != 0) || (year % 4 == 0 && year % 1000 == 0)) { // leap year if 
(day > 29) { return false; } else { return true; } } else { if (day > 28) { return false; } else { return true; } } } else { return true; } } }
package ru.ifmo.ctddev.gmwcs.solver; import ilog.concert.IloException; import ilog.concert.IloLinearNumExpr; import ilog.concert.IloNumExpr; import ilog.concert.IloNumVar; import ilog.cplex.IloCplex; import org.jgrapht.Graphs; import org.jgrapht.UndirectedGraph; import ru.ifmo.ctddev.gmwcs.LDSU; import ru.ifmo.ctddev.gmwcs.Pair; import ru.ifmo.ctddev.gmwcs.TimeLimit; import ru.ifmo.ctddev.gmwcs.graph.Blocks; import ru.ifmo.ctddev.gmwcs.graph.Edge; import ru.ifmo.ctddev.gmwcs.graph.Node; import ru.ifmo.ctddev.gmwcs.graph.Unit; import java.io.IOException; import java.io.OutputStream; import java.util.*; public class RLTSolver implements RootedSolver { public static final double EPS = 0.01; private IloCplex cplex; private Map<Node, IloNumVar> y; private Map<Edge, IloNumVar> w; private Map<Edge, Pair<IloNumVar, IloNumVar>> x; private Map<Node, IloNumVar> d; private Map<Node, IloNumVar> x0; private TimeLimit tl; private int threads; private boolean suppressOutput; private UndirectedGraph<Node, Edge> graph; private double minimum; private Node root; private SolutionCallback solutionCallback; public RLTSolver() { tl = new TimeLimit(Double.POSITIVE_INFINITY); threads = 1; this.minimum = -Double.MAX_VALUE; } public void setTimeLimit(TimeLimit tl) { this.tl = tl; } public void setThreadsNum(int threads) { if (threads < 1) { throw new IllegalArgumentException(); } this.threads = threads; } public void setRoot(Node root) { this.root = root; } @Override public List<Unit> solve(UndirectedGraph<Node, Edge> graph, LDSU<Unit> synonyms) throws SolverException { try { cplex = new IloCplex(); this.graph = graph; initVariables(); addConstraints(graph); addObjective(graph, synonyms); long timeBefore = System.currentTimeMillis(); if (root == null) { breakSymmetry(cplex, graph); } else { tighten(); } tuning(cplex); boolean solFound = cplex.solve(); tl.spend(Math.min(tl.getRemainingTime(), (System.currentTimeMillis() - timeBefore) / 1000.0)); if (solFound) { return getResult(); } return 
Collections.emptyList(); } catch (IloException e) { throw new SolverException(); } finally { cplex.end(); } } private void tighten() throws IloException { Blocks blocks = new Blocks(graph); if (!blocks.cutpoints().contains(root)) { throw new IllegalArgumentException(); } for (Set<Node> component : blocks.incidentBlocks(root)) { dfs(root, component, true, blocks); } } private void dfs(Node root, Set<Node> component, boolean fake, Blocks blocks) throws IloException { if (!fake) { for (Node node : component) { cplex.addLe(cplex.diff(y.get(node), y.get(root)), 0); } } for (Edge e : graph.edgesOf(root)) { if (!component.contains(Graphs.getOppositeVertex(graph, e, root))) { continue; } if (root == graph.getEdgeSource(e)) { cplex.addEq(x.get(e).first, 0); } else { cplex.addEq(x.get(e).second, 0); } } for (Node cp : blocks.cutpointsOf(component)) { if (root != cp) { for (Set<Node> comp : blocks.incidentBlocks(cp)) { if (comp != component) { dfs(cp, comp, false, blocks); } } } } } private List<Unit> getResult() throws IloException { List<Unit> result = new ArrayList<>(); for (Node node : graph.vertexSet()) { if (cplex.getValue(y.get(node)) > EPS) { result.add(node); } } for (Edge edge : graph.edgeSet()) { if (cplex.getValue(w.get(edge)) > EPS) { result.add(edge); } } return result; } private void initVariables() throws IloException { y = new LinkedHashMap<>(); w = new LinkedHashMap<>(); d = new LinkedHashMap<>(); x = new LinkedHashMap<>(); x0 = new LinkedHashMap<>(); for (Node node : graph.vertexSet()) { String nodeName = Integer.toString(node.getNum() + 1); d.put(node, cplex.numVar(0, Double.MAX_VALUE, "d" + nodeName)); y.put(node, cplex.boolVar("y" + nodeName)); x0.put(node, cplex.boolVar("x_0_" + (node.getNum() + 1))); } for (Edge edge : graph.edgeSet()) { Node from = graph.getEdgeSource(edge); Node to = graph.getEdgeTarget(edge); String edgeName = (from.getNum() + 1) + "_" + (to.getNum() + 1); w.put(edge, cplex.boolVar("w_" + edgeName)); IloNumVar in = cplex.boolVar(); 
IloNumVar out = cplex.boolVar(); x.put(edge, new Pair<>(in, out)); } } private void tuning(IloCplex cplex) throws IloException { if (suppressOutput) { OutputStream nos = new OutputStream() { @Override public void write(int b) throws IOException { } }; cplex.setOut(nos); cplex.setWarning(nos); } if (solutionCallback != null) { cplex.use(new MIPCallback()); } cplex.setParam(IloCplex.IntParam.Threads, threads); cplex.setParam(IloCplex.IntParam.ParallelMode, -1); cplex.setParam(IloCplex.IntParam.MIPOrdType, 3); if (tl.getRemainingTime() <= 0) { cplex.setParam(IloCplex.DoubleParam.TiLim, EPS); } else if (tl.getRemainingTime() != Double.POSITIVE_INFINITY) { cplex.setParam(IloCplex.DoubleParam.TiLim, tl.getRemainingTime()); } } private void breakSymmetry(IloCplex cplex, UndirectedGraph<Node, Edge> graph) throws IloException { int n = graph.vertexSet().size(); IloNumVar[] rootMul = new IloNumVar[n]; IloNumVar[] nodeMul = new IloNumVar[n]; PriorityQueue<Node> nodes = new PriorityQueue<>(); nodes.addAll(graph.vertexSet()); int k = nodes.size(); int j = nodes.size(); double last = Double.POSITIVE_INFINITY; while (!nodes.isEmpty()) { Node node = nodes.poll(); if (node.getWeight() == last) { j++; } last = node.getWeight(); nodeMul[k - 1] = cplex.intVar(0, n); rootMul[k - 1] = cplex.intVar(0, n); cplex.addEq(nodeMul[k - 1], cplex.prod(j, y.get(node))); cplex.addEq(rootMul[k - 1], cplex.prod(j, x0.get(node))); k j } IloNumVar rootSum = cplex.intVar(0, n); cplex.addEq(rootSum, cplex.sum(rootMul)); for (int i = 0; i < n; i++) { cplex.addGe(rootSum, nodeMul[i]); } } public void setCallback(SolutionCallback callback) { this.solutionCallback = callback; } private void addObjective(UndirectedGraph<Node, Edge> graph, LDSU<Unit> synonyms) throws IloException { Map<Unit, IloNumVar> summands = new LinkedHashMap<>(); Set<Unit> toConsider = new LinkedHashSet<>(); toConsider.addAll(graph.vertexSet()); toConsider.addAll(graph.edgeSet()); Set<Unit> visited = new LinkedHashSet<>(); for (Unit 
unit : toConsider) { if (visited.contains(unit)) { continue; } visited.addAll(synonyms.listOf(unit)); List<Unit> eq = synonyms.listOf(unit); if (eq.size() == 1) { summands.put(unit, getVar(unit)); continue; } IloNumVar var = cplex.boolVar(); summands.put(unit, var); int num = eq.size(); for (Unit i : eq) { if (getVar(i) == null) { num } } IloNumVar[] args = new IloNumVar[num]; int j = 0; for (Unit anEq : eq) { if (getVar(anEq) == null) { continue; } args[j++] = getVar(anEq); } if (unit.getWeight() > 0) { cplex.addLe(var, cplex.sum(args)); } else { cplex.addGe(cplex.prod(eq.size() + 0.5, var), cplex.sum(args)); } } IloNumExpr sum = unitScalProd(summands.keySet(), summands); cplex.addGe(sum, minimum); cplex.addMaximize(sum); } private IloNumVar getVar(Unit unit) { return unit instanceof Node ? y.get(unit) : w.get(unit); } @Override public void suppressOutput() { suppressOutput = true; } private void addConstraints(UndirectedGraph<Node, Edge> graph) throws IloException { sumConstraints(graph); otherConstraints(graph); distanceConstraints(graph); } private void distanceConstraints(UndirectedGraph<Node, Edge> graph) throws IloException { int n = graph.vertexSet().size(); for(Node v : graph.vertexSet()){ cplex.addLe(d.get(v), cplex.diff(n, cplex.prod(n, x0.get(v)))); } for(Edge e : graph.edgeSet()){ Node from = graph.getEdgeSource(e); Node to = graph.getEdgeTarget(e); addEdgeConstraints(e, from, to); addEdgeConstraints(e, to, from); } } private void addEdgeConstraints(Edge e, Node from, Node to) throws IloException { int n = graph.vertexSet().size(); IloNumVar z = getX(e, to); cplex.addGe(cplex.sum(n, d.get(to)), cplex.sum(d.get(from), cplex.prod(n + 1, z))); cplex.addLe(cplex.sum(d.get(to), cplex.prod(n - 1, z)), cplex.sum(d.get(from), n)); } private void otherConstraints(UndirectedGraph<Node, Edge> graph) throws IloException { // (36), (39) for (Edge edge : graph.edgeSet()) { Pair<IloNumVar, IloNumVar> arcs = x.get(edge); Node from = graph.getEdgeSource(edge); Node to 
= graph.getEdgeTarget(edge); cplex.addLe(cplex.sum(arcs.first, arcs.second), w.get(edge)); cplex.addLe(w.get(edge), y.get(from)); cplex.addLe(w.get(edge), y.get(to)); } } public IloNumVar[] getVars(Set<? extends Unit> units, Map<? extends Unit, IloNumVar> vars) { IloNumVar[] result = new IloNumVar[units.size()]; int i = 0; for (Unit unit : units) { result[i++] = vars.get(unit); } return result; } private void sumConstraints(UndirectedGraph<Node, Edge> graph) throws IloException { cplex.addEq(cplex.sum(getVars(graph.vertexSet(), x0)), 1); if (root != null) { cplex.addEq(x0.get(root), 1); } for (Node node : graph.vertexSet()) { Set<Edge> edges = graph.edgesOf(node); IloNumVar xSum[] = new IloNumVar[edges.size() + 1]; int i = 0; for (Edge edge : edges) { xSum[i++] = getX(edge, node); } xSum[xSum.length - 1] = x0.get(node); cplex.addEq(cplex.sum(xSum), y.get(node)); } } private IloNumVar getX(Edge e, Node to){ if(graph.getEdgeSource(e) == to){ return x.get(e).first; } else { return x.get(e).second; } } private IloLinearNumExpr unitScalProd(Set<? extends Unit> units, Map<? extends Unit, IloNumVar> vars) throws IloException { int n = units.size(); double[] coef = new double[n]; IloNumVar[] variables = new IloNumVar[n]; int i = 0; for (Unit unit : units) { coef[i] = unit.getWeight(); variables[i++] = vars.get(unit); } return cplex.scalProd(coef, variables); } public void setLB(double lb) { this.minimum = lb; } public static abstract class SolutionCallback { public abstract void main(List<Unit> solution); } private class MIPCallback extends IloCplex.IncumbentCallback { @Override protected void main() throws IloException { if (solutionCallback == null) { return; } List<Unit> result = new ArrayList<>(); for (Node node : graph.vertexSet()) { if (getValue(y.get(node)) > EPS) { result.add(node); } } for (Edge edge : graph.edgeSet()) { if (getValue(w.get(edge)) > EPS) { result.add(edge); } } solutionCallback.main(result); } } }
package ru.r2cloud.jradio.fec.ccsds; public class Randomize { public static byte[] shuffle(byte[] data) { int[] sequence = new int[255]; int[] x = new int[] { 1, 1, 1, 1, 1, 1, 1, 1, 1 }; int i; /* * The pseudo random sequence shall be generated using the polynomial * h(x) = x8 + x7 + x5 + x3 + 1 */ for (i = 0; i < sequence.length * 8; i++) { sequence[i / 8] = sequence[i / 8] | x[1] << 7 >> (i % 8); x[0] = (x[8] + x[6] + x[4] + x[1]) % 2; x[1] = x[2]; x[2] = x[3]; x[3] = x[4]; x[4] = x[5]; x[5] = x[6]; x[6] = x[7]; x[7] = x[8]; x[8] = x[0]; } byte[] result = new byte[data.length]; for (i = 0; i < data.length; i++) { result[i] = (byte)(data[i] ^ sequence[i % sequence.length]); } return result; } }
package seedu.taskmanager.logic.parser; import seedu.taskmanager.commons.exceptions.IllegalValueException; import seedu.taskmanager.commons.util.StringUtil; import seedu.taskmanager.logic.commands.AddCommand; import seedu.taskmanager.logic.commands.ClearCommand; import seedu.taskmanager.logic.commands.Command; import seedu.taskmanager.logic.commands.DeleteCommand; import seedu.taskmanager.logic.commands.EditCommand; import seedu.taskmanager.logic.commands.ExitCommand; import seedu.taskmanager.logic.commands.FindCommand; import seedu.taskmanager.logic.commands.HelpCommand; import seedu.taskmanager.logic.commands.IncorrectCommand; import seedu.taskmanager.logic.commands.ListCommand; import seedu.taskmanager.logic.commands.ListDeadlineCommand; import seedu.taskmanager.logic.commands.ListEventCommand; import seedu.taskmanager.logic.commands.ListTaskCommand; import seedu.taskmanager.logic.commands.SelectCommand; import seedu.taskmanager.logic.commands.DoneCommand; import seedu.taskmanager.logic.commands.NotDoneCommand; import seedu.taskmanager.logic.commands.UndoCommand; import seedu.taskmanager.model.item.ItemDate; import seedu.taskmanager.model.item.ItemTime; import seedu.taskmanager.model.item.ItemType; import java.text.SimpleDateFormat; import java.time.LocalDateTime; import java.util.*; import java.util.regex.Matcher; import java.util.regex.Pattern; import org.ocpsoft.prettytime.nlp.PrettyTimeParser; import static seedu.taskmanager.commons.core.Messages.MESSAGE_INVALID_COMMAND_FORMAT; import static seedu.taskmanager.commons.core.Messages.MESSAGE_UNKNOWN_COMMAND; import java.text.ParseException; /** * Parses user input. */ public class Parser { /** * Used for initial separation of command word and args. 
     */
    private static final Pattern BASIC_COMMAND_FORMAT = Pattern.compile("(?<commandWord>\\S+)(?<arguments>.*)");

    // single number of index delete\s+([0-9]+)
    private static final Pattern ITEM_INDEX_ARGS_FORMAT = Pattern.compile("(?<targetIndex>[0-9]+)");

    // variable number of indexes
    private static final Pattern ITEM_INDEXES_ARGS_FORMAT = Pattern.compile("(?<targetIndexes>([0-9]+)\\s*([0-9]+\\s*)+)");

    public static final String MESSAGE_DATETIME_PARSE_FAILURE = "Invalid datetime.";

    // one or more keywords separated by whitespace
    private static final Pattern KEYWORDS_ARGS_FORMAT = Pattern.compile("(?<keywords>\\S+(?:\\s+\\S+)*)");

    // Prefixed-argument extractors: n/ name, sd|st start date/time, ed|et end
    // date/time, sdt|edt natural-language datetimes. '/' is the delimiter, so
    // argument values themselves may not contain '/'.
    private static final Pattern NAME_ARG_FORMAT = Pattern.compile("(n/(?<name>[^/]+))");
    private static final Pattern START_DATE_ARG_FORMAT = Pattern.compile("(sd/(?<startDate>[^/]+))");
    private static final Pattern START_TIME_ARG_FORMAT = Pattern.compile("(st/(?<startTime>[^/]+))");
    private static final Pattern START_DATETIME_ARG_FORMAT = Pattern.compile("sdt/(?<startDateTime>[^/]+)");
    private static final Pattern END_DATE_ARG_FORMAT = Pattern.compile("(ed/(?<endDate>[^/]+))");
    private static final Pattern END_TIME_ARG_FORMAT = Pattern.compile("(et/(?<endTime>[^/]+))");
    private static final Pattern END_DATETIME_ARG_FORMAT = Pattern.compile("edt/(?<endDateTime>[^/]+)");

    // Indexes into the String[2] produced by parseDateTime().
    private static final int PARSEDATETIME_ARRAY_DATE_INDEX = 0;
    private static final int PARSEDATETIME_ARRAY_TIME_INDEX = 1;

    // NOTE: concatenating a Pattern object into these strings relies on
    // Pattern.toString() returning the original regex.
    private static final Pattern TASK_DATA_ARGS_FORMAT = // '/' forward slashes are reserved for delimiter prefixes
            Pattern.compile("(T|t)((A|a)(S|s)(K|k))?\\s*"
                    + "(n/)?(?<name>[^/]+)"
                    + "(?<tagArguments>(?: t/[^/]+)*)"); // variable number of tags

    private static final Pattern DEADLINE_DATA_ARGS_FORMAT = // '/' forward slashes are reserved for delimiter prefixes
            Pattern.compile("(D|d)((E|e)(A|a)(D|d)(L|l)(I|i)(N|n)(E|e))?\\s*"
                    + "(n/)?(?<name>[^/]+)"
                    + END_DATE_ARG_FORMAT
                    + END_TIME_ARG_FORMAT + "?"
                    + "(?<tagArguments>(?: t/[^/]+)*)"); // variable number of tags

    private static final Pattern EVENT_DATA_ARGS_FORMAT = // '/' forward slashes are reserved for delimiter prefixes
            Pattern.compile("(E|e)((V|v)(E|e)(N|n)(T|t))?\\s*"
                    + "(n/)?(?<name>[^/]+)"
                    + START_DATE_ARG_FORMAT
                    + START_TIME_ARG_FORMAT + "?"
                    + END_DATE_ARG_FORMAT
                    + END_TIME_ARG_FORMAT + "?"
                    + "(?<tagArguments>(?: t/[^/]+)*)"); // variable number of tags

    private static final Pattern DEADLINE_NLP_DATA_ARGS_FORMAT = // '/' forward slashes are reserved for delimiter prefixes
            Pattern.compile("(D|d)((E|e)(A|a)(D|d)(L|l)(I|i)(N|n)(E|e))?\\s*"
                    + "(n/)?(?<name>[^/]+)"
                    + END_DATETIME_ARG_FORMAT
                    + "(?<tagArguments>(?: t/[^/]+)*)"); // variable number of tags

    private static final Pattern EVENT_NLP_DATA_ARGS_FORMAT = // '/' forward slashes are reserved for delimiter prefixes
            Pattern.compile("(E|e)((V|v)(E|e)(N|n)(T|t))?\\s*"
                    + "(n/)?(?<name>[^/]+)"
                    + START_DATETIME_ARG_FORMAT
                    + END_DATETIME_ARG_FORMAT
                    + "(?<tagArguments>(?: t/[^/]+)*)"); // variable number of tags

    private static final Pattern EDIT_COMMAND_ARGS_FORMAT =
            Pattern.compile("(?<targetIndex>[\\d]+)"
                    + "(?<editCommandArguments>.+)");

    public Parser() {}

    /**
     * Parses user input into command for execution.
     *
     * @param userInput full user input string
     * @return the command based on the user input
     */
    public Command parseCommand(String userInput) {
        final Matcher matcher = BASIC_COMMAND_FORMAT.matcher(userInput.trim());
        if (!matcher.matches()) {
            return new IncorrectCommand(String.format(MESSAGE_INVALID_COMMAND_FORMAT, HelpCommand.MESSAGE_USAGE));
        }

        final String commandWord = matcher.group("commandWord");
        final String arguments = matcher.group("arguments");
        // Dispatch on the command word; most commands also accept a short alias.
        switch (commandWord) {

        case AddCommand.COMMAND_WORD:
        case AddCommand.SHORT_COMMAND_WORD:
            return prepareAdd(arguments);

        case EditCommand.COMMAND_WORD:
        case EditCommand.SHORT_COMMAND_WORD:
            return prepareEdit(arguments);

        case SelectCommand.COMMAND_WORD:
        case SelectCommand.SHORT_COMMAND_WORD:
            return prepareSelect(arguments);

        case DeleteCommand.COMMAND_WORD:
        case DeleteCommand.SHORT_COMMAND_WORD:
            return prepareDelete(arguments);

        case ClearCommand.COMMAND_WORD:
        case ClearCommand.SHORT_COMMAND_WORD:
            return new ClearCommand();

        case FindCommand.COMMAND_WORD:
        case FindCommand.SHORT_COMMAND_WORD:
            return prepareFind(arguments);

        case ListCommand.COMMAND_WORD:
        case ListCommand.SHORT_COMMAND_WORD:
            return new ListCommand();

        case ListTaskCommand.COMMAND_WORD:
        case ListTaskCommand.SHORT_COMMAND_WORD:
            return new ListTaskCommand();

        case ListDeadlineCommand.COMMAND_WORD:
        case ListDeadlineCommand.SHORT_COMMAND_WORD:
            return new ListDeadlineCommand();

        case ListEventCommand.COMMAND_WORD:
        case ListEventCommand.SHORT_COMMAND_WORD:
            return new ListEventCommand();

        case ExitCommand.COMMAND_WORD:
            return new ExitCommand();

        case HelpCommand.COMMAND_WORD:
        case HelpCommand.SHORT_COMMAND_WORD:
            return new HelpCommand();

        case DoneCommand.COMMAND_WORD:
        case DoneCommand.SHORT_COMMAND_WORD:
        case DoneCommand.ALTERNATE_SHORT_COMMAND_WORD:
            return prepareDone(arguments);

        case NotDoneCommand.COMMAND_WORD:
        case NotDoneCommand.SHORT_COMMAND_WORD:
            return prepareNotDone(arguments);

        case UndoCommand.COMMAND_WORD:
        case UndoCommand.SHORT_COMMAND_WORD:
            return new UndoCommand();

        default:
            return new IncorrectCommand(MESSAGE_UNKNOWN_COMMAND);
        }
    }

    /**
     * Parses arguments in the context of the add item command.
     *
     * @param args full command args string
     * @return the prepared command
     */
    private Command prepareAdd(String args) {
        // Try every supported add syntax; the first matcher that matches wins,
        // checked in order task -> deadline -> event -> NLP deadline -> NLP event.
        final Matcher taskMatcher = TASK_DATA_ARGS_FORMAT.matcher(args.trim());
        final Matcher deadlineMatcher = DEADLINE_DATA_ARGS_FORMAT.matcher(args.trim());
        final Matcher eventMatcher = EVENT_DATA_ARGS_FORMAT.matcher(args.trim());
        final Matcher deadlineNlpMatcher = DEADLINE_NLP_DATA_ARGS_FORMAT.matcher(args.trim());
        final Matcher eventNlpMatcher = EVENT_NLP_DATA_ARGS_FORMAT.matcher(args.trim());
        // Validate arg string format
        if (!taskMatcher.matches() && !deadlineMatcher.matches()
                && !eventMatcher.matches() && !deadlineNlpMatcher.matches()
                && !eventNlpMatcher.matches()) {
            return new IncorrectCommand(String.format(MESSAGE_INVALID_COMMAND_FORMAT, AddCommand.MESSAGE_USAGE));
        }
        try {
            if (taskMatcher.matches()) {
                return new AddCommand(
                        ItemType.TASK_WORD,
                        taskMatcher.group("name"),
                        getTagsFromArgs(taskMatcher.group("tagArguments"))
                );
            } else if (deadlineMatcher.matches()) {
                return addDeadline(deadlineMatcher);
            } else if (eventMatcher.matches()) {
                return addEvent(eventMatcher);
            } else if (deadlineNlpMatcher.matches()) {
                return addNlpDeadline(deadlineNlpMatcher);
            } else if (eventNlpMatcher.matches()) {
                return addNlpEvent(eventNlpMatcher);
            } else {
                return new IncorrectCommand(String.format(MESSAGE_INVALID_COMMAND_FORMAT, AddCommand.MESSAGE_USAGE));
            }
        } catch (IllegalValueException ive) {
            return new IncorrectCommand(ive.getMessage());
        }
    }

    /**
     * Builds an add-event command from natural-language start/end datetimes
     * (parsed with PrettyTime).
     */
    private Command addNlpEvent(final Matcher eventNlpMatcher) throws IllegalValueException {
        String endDateTime = eventNlpMatcher.group("endDateTime");
        String startDateTime = eventNlpMatcher.group("startDateTime");
        List<Date> startDateTimes = new PrettyTimeParser().parse(startDateTime);
        List<Date> endDateTimes = new PrettyTimeParser().parse(endDateTime);
        // Just Take First Value for Start and End
        if (startDateTimes.isEmpty() || endDateTimes.isEmpty()) {
            return new IncorrectCommand(String.format(MESSAGE_INVALID_COMMAND_FORMAT, MESSAGE_DATETIME_PARSE_FAILURE));
        }
        Date processedStartDateTime = startDateTimes.get(0);
        Date processedEndDateTime = endDateTimes.get(0);
        // An event may not end before it starts.
        if (processedEndDateTime.before(processedStartDateTime)) {
            return new IncorrectCommand(String.format(MESSAGE_INVALID_COMMAND_FORMAT, AddCommand.EVENT_MESSAGE_USAGE));
        }
        SimpleDateFormat dateFormat = new SimpleDateFormat(ItemDate.DATE_FORMAT);
        // NOTE(review): time format hardcoded as "HH:mm" rather than ItemTime.TIME_FORMAT — confirm they agree.
        SimpleDateFormat timeFormat = new SimpleDateFormat("HH:mm");
        String startDate = dateFormat.format(processedStartDateTime);
        String startTime = timeFormat.format(processedStartDateTime);
        String endDate = dateFormat.format(processedEndDateTime);
        String endTime = timeFormat.format(processedEndDateTime);
        return new AddCommand(ItemType.EVENT_WORD, eventNlpMatcher.group("name"),
                startDate, startTime, endDate, endTime,
                getTagsFromArgs(eventNlpMatcher.group("tagArguments")));
    }

    /**
     * Builds an add-deadline command from a natural-language end datetime
     * (parsed with PrettyTime).
     */
    private Command addNlpDeadline(final Matcher deadlineNlpMatcher) throws IllegalValueException {
        String endDateTime = deadlineNlpMatcher.group("endDateTime");
        List<Date> endDateTimes = new PrettyTimeParser().parse(endDateTime);
        // Just Take First Value for Start and End
        if (endDateTimes.isEmpty()) {
            return new IncorrectCommand(String.format(MESSAGE_INVALID_COMMAND_FORMAT, MESSAGE_DATETIME_PARSE_FAILURE));
        }
        // NOTE(review): leftover debug output — prints every parsed candidate to stdout; consider removing.
        for (int i = 0; i < endDateTimes.size(); ++i) {
            System.out.println(endDateTimes.get(i));
        }
        Date processedEndDateTime = endDateTimes.get(0);
        SimpleDateFormat dateFormat = new SimpleDateFormat(ItemDate.DATE_FORMAT);
        SimpleDateFormat timeFormat = new SimpleDateFormat("HH:mm");
        String endDate = dateFormat.format(processedEndDateTime);
        String endTime = timeFormat.format(processedEndDateTime);
        return new AddCommand(ItemType.DEADLINE_WORD, deadlineNlpMatcher.group("name"),
                endDate, endTime,
                getTagsFromArgs(deadlineNlpMatcher.group("tagArguments")));
    }

    /**
     * Builds an add-event command from explicit sd/st/ed/et arguments,
     * validating dates and rejecting events that end before they start.
     */
    private Command addEvent(final Matcher
            eventMatcher) throws IllegalValueException {
        // NOTE(review): 'hh' is the 12-hour-clock field; 24-hour values parse only
        // because SimpleDateFormat is lenient by default, and "12:xx" maps to 00:xx,
        // which can invert the start/end comparison below — confirm 'HH' was intended.
        SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd hh:mm");
        SimpleDateFormat df = new SimpleDateFormat(ItemDate.DATE_FORMAT);
        String endTime = eventMatcher.group("endTime");
        String startTime = eventMatcher.group("startTime");
        try {
            df.setLenient(false);
            // If yyyy-MM-dd
            String startDateString;
            String endDateString;
            String[] parts = eventMatcher.group("endDate").split("-");
            if (parts.length == 3) {
                endDateString = eventMatcher.group("endDate");
                df.parse(eventMatcher.group("endDate")); // validation only
            } else {
                // MM-dd: assume the current year.
                LocalDateTime ldt = LocalDateTime.now();
                endDateString = ldt.getYear() + "-" + eventMatcher.group("endDate");
                df.parse(endDateString);
            }
            String[] parts2 = eventMatcher.group("startDate").split("-");
            // If yyyy-MM-dd
            if (parts2.length == 3) {
                startDateString = eventMatcher.group("startDate");
                df.parse(eventMatcher.group("startDate")); // validation only
            } else {
                // MM-dd: assume the current year.
                LocalDateTime ldt = LocalDateTime.now();
                startDateString = ldt.getYear() + "-" + eventMatcher.group("startDate");
                df.parse(startDateString);
            }
            // Fall back to default times when st/ or et/ were omitted.
            if (endTime == null) {
                endTime = AddCommand.DEFAULT_END_TIME;
            }
            if (startTime == null) {
                startTime = AddCommand.DEFAULT_START_TIME;
            }
            // Reject events that end before they start.
            if (sdf.parse(endDateString + " " + endTime).before(sdf.parse(startDateString + " " + startTime))) {
                return new IncorrectCommand(String.format(MESSAGE_INVALID_COMMAND_FORMAT, AddCommand.EVENT_MESSAGE_USAGE));
            }
        } catch (ParseException e) {
            return new IncorrectCommand(String.format(MESSAGE_INVALID_COMMAND_FORMAT, ItemDate.MESSAGE_DATE_CONSTRAINTS));
        }
        // Note: the AddCommand receives the raw (possibly year-less) date strings,
        // not the normalized *DateString values used for validation above.
        return new AddCommand(ItemType.EVENT_WORD, eventMatcher.group("name"),
                eventMatcher.group("startDate"), startTime,
                eventMatcher.group("endDate"), endTime,
                getTagsFromArgs(eventMatcher.group("tagArguments")));
    }

    /**
     * Builds an add-deadline command from explicit ed/et arguments, validating
     * the date as either yyyy-MM-dd or MM-dd.
     */
    private Command addDeadline(final Matcher deadlineMatcher) throws IllegalValueException {
        try {
            SimpleDateFormat df = new SimpleDateFormat(ItemDate.DATE_FORMAT);
            SimpleDateFormat df2 = new SimpleDateFormat(ItemDate.ALTERNATE_DATE_FORMAT);
            df.setLenient(false);
            String[] parts = deadlineMatcher.group("endDate").split("-");
            // If yyyy-MM-dd
            if (parts.length == 3) {
                df.parse(deadlineMatcher.group("endDate"));
            } else {
                // MM-dd
                df2.parse(deadlineMatcher.group("endDate"));
            }
        } catch (ParseException e) {
            return new IncorrectCommand(String.format(MESSAGE_INVALID_COMMAND_FORMAT, ItemDate.MESSAGE_DATE_CONSTRAINTS));
        }
        return new AddCommand(ItemType.DEADLINE_WORD, deadlineMatcher.group("name"),
                deadlineMatcher.group("endDate"), deadlineMatcher.group("endTime"),
                getTagsFromArgs(deadlineMatcher.group("tagArguments")));
    }

    /**
     * Parses arguments in the context of the edit item command.
     *
     * @param args full command args string
     * @return the prepared command
     */
    private Command prepareEdit(String args) {
        assert args != null;
        final Matcher matcher = EDIT_COMMAND_ARGS_FORMAT.matcher(args.trim());
        if (matcher.matches()) {
            Optional<Integer> index = parseIndex(matcher.group("targetIndex"));
            if (index.isPresent()) {
                String editCommandArgs = matcher.group("editCommandArguments");
                // Extract each optional field; null means "leave unchanged".
                String name = parseArgument(NAME_ARG_FORMAT, "name", editCommandArgs);
                String startDate = parseArgument(START_DATE_ARG_FORMAT, "startDate", editCommandArgs);
                String startTime = parseArgument(START_TIME_ARG_FORMAT, "startTime", editCommandArgs);
                String endDate = parseArgument(END_DATE_ARG_FORMAT, "endDate", editCommandArgs);
                String endTime = parseArgument(END_TIME_ARG_FORMAT, "endTime", editCommandArgs);
                String startDateTime = parseArgument(START_DATETIME_ARG_FORMAT, "startDateTime", editCommandArgs);
                String endDateTime = parseArgument(END_DATETIME_ARG_FORMAT, "endDateTime", editCommandArgs);
                try {
                    // Natural-language datetimes override the split date/time fields.
                    if (startDateTime != null) {
                        String[] startDateTimeArr = parseDateTime(startDateTime, ItemDate.DATE_FORMAT, ItemTime.TIME_FORMAT);
                        startDate = startDateTimeArr[PARSEDATETIME_ARRAY_DATE_INDEX];
                        startTime = startDateTimeArr[PARSEDATETIME_ARRAY_TIME_INDEX];
                    }
                    if (endDateTime != null) {
                        String[] endDateTimeArr = parseDateTime(endDateTime,
                                ItemDate.DATE_FORMAT, ItemTime.TIME_FORMAT);
                        endDate = endDateTimeArr[PARSEDATETIME_ARRAY_DATE_INDEX];
                        endTime = endDateTimeArr[PARSEDATETIME_ARRAY_TIME_INDEX];
                    }
                    // At least one field must actually be edited.
                    if (name != null || startDate != null || startTime != null
                            || endDate != null || endTime != null) {
                        return new EditCommand(index.get(), name, startDate, startTime, endDate, endTime);
                    }
                } catch (IllegalValueException ive) {
                    return new IncorrectCommand(ive.getMessage());
                }
            }
        }
        return new IncorrectCommand(String.format(MESSAGE_INVALID_COMMAND_FORMAT, EditCommand.MESSAGE_USAGE));
    }

    /**
     * Parses date and time from argument acquired through NLP input.
     *
     * @param argument natural-language datetime text, may be null
     * @param dateFormat the SimpleDateFormat pattern for the returned date
     * @param timeFormat the SimpleDateFormat pattern for the returned time
     * @return a String[2] holding {date, time} (see PARSEDATETIME_ARRAY_* indexes),
     *         or null if argument was null
     * @throws IllegalValueException if the text could not be parsed as a datetime
     */
    private String[] parseDateTime(String argument, String dateFormat, String timeFormat) throws IllegalValueException {
        assert dateFormat != null && !dateFormat.isEmpty();
        assert timeFormat != null && !timeFormat.isEmpty();
        if (argument != null) {
            List<Date> dateTimes = new PrettyTimeParser().parse(argument);
            if (dateTimes.isEmpty()) {
                throw new IllegalValueException(MESSAGE_DATETIME_PARSE_FAILURE);
            }
            // Take only the first interpretation PrettyTime offers.
            Date prettyParsedDateTime = dateTimes.get(0);
            SimpleDateFormat simpleDateFormat = new SimpleDateFormat(dateFormat);
            SimpleDateFormat simpleTimeFormat = new SimpleDateFormat(timeFormat);
            String parsedDate = simpleDateFormat.format(prettyParsedDateTime);
            String parsedTime = simpleTimeFormat.format(prettyParsedDateTime);
            String[] parsedDateTime = new String[2];
            parsedDateTime[PARSEDATETIME_ARRAY_DATE_INDEX] = parsedDate;
            parsedDateTime[PARSEDATETIME_ARRAY_TIME_INDEX] = parsedTime;
            return parsedDateTime;
        } else {
            return null;
        }
    }

    /**
     * Extracts argument from a string containing command arguments.
     *
     * @param argumentPattern the pattern used to extract the argument from commandArgs
     * @param argumentGroupName the matcher group name of the argument used in argumentPattern
     * @param commandArgs string containing command arguments
     * @return parsed argument as string or null if argument not parsed
     */
    private String parseArgument(Pattern argumentPattern, String argumentGroupName, String commandArgs) {
        String argument = null;
        final Matcher argumentMatcher = argumentPattern.matcher(commandArgs);
        if (argumentMatcher.find()) {
            argument = argumentMatcher.group(argumentGroupName);
            // The greedy [^/]+ groups can swallow the next prefix's leading
            // characters; strip them back off.
            argument = removeTrailingCommandChars(argument, commandArgs);
        }
        return argument;
    }

    /**
     * Removes unwanted trailing command characters from argument.
     *
     * @param argument candidate argument value, possibly ending with the next prefix
     * @param commandArgs the full argument string, used to bound the trimming
     * @return cleaned argument string
     */
    private String removeTrailingCommandChars(String argument, String commandArgs) {
        // maximum size of trailing command characters is 3, including the space before them
        if (argument.length() >= 3 && argument.length() < commandArgs.trim().length() - 3) {
            // size of trailing name command characters is 2, including the space before it
            if (argument.substring(argument.length() - 2, argument.length()).matches(" n")) {
                argument = argument.substring(0, argument.length() - 2);
            } else if (argument.substring(argument.length() - 3, argument.length()).matches(" (sd|st|ed|et)")) {
                // size of trailing command characters is 3, including the space before them
                argument = argument.substring(0, argument.length() - 3);
            }
        }
        return argument;
    }

    /**
     * Extracts the new person's tags from the add command's tag arguments string.
     * Merges duplicate tag strings.
     */
    private static Set<String> getTagsFromArgs(String tagArguments) throws IllegalValueException {
        // no tags
        if (tagArguments.isEmpty()) {
            return Collections.emptySet();
        }
        // replace first delimiter prefix, then split
        final Collection<String> tagStrings = Arrays.asList(tagArguments.replaceFirst(" t/", "").split(" t/"));
        return new HashSet<>(tagStrings);
    }

    /**
     * Parses arguments in the context of the delete person command.
* * @param args full command args string * @return the prepared command */ private Command prepareDelete(String args) { final Matcher itemIndexesMatcher = ITEM_INDEXES_ARGS_FORMAT.matcher(args.trim()); final Matcher itemIndexMatcher = ITEM_INDEX_ARGS_FORMAT.matcher(args.trim()); if(itemIndexMatcher.matches()) { Optional<Integer> index = parseIndex(args); if(!index.isPresent()) { return new IncorrectCommand( String.format(MESSAGE_INVALID_COMMAND_FORMAT, DeleteCommand.MESSAGE_USAGE)); } return new DeleteCommand(index.get()); } else if(itemIndexesMatcher.matches()) { // separate into the different indexes ArrayList<String> indexList = new ArrayList<String>(Arrays.asList(args.trim().split("[^0-9]*"))); for(String indexString : indexList) { if(indexString.equals("")) { indexList.remove(indexString); } } ArrayList<Integer> indexesToDelete = new ArrayList<Integer>(); for(String indexInList: indexList) { Optional<Integer> index = parseIndex(indexInList); if(!index.isPresent()) { return new IncorrectCommand( String.format(MESSAGE_INVALID_COMMAND_FORMAT, "test")); } else { indexesToDelete.add(index.get()); } } return new DeleteCommand(indexesToDelete); } else { return new IncorrectCommand(String.format(MESSAGE_INVALID_COMMAND_FORMAT, DeleteCommand.MESSAGE_USAGE)); } } /** * Parses arguments in the context of the done item command. * * @param args full command args string * @return the prepared command */ private Command prepareDone(String args) { Optional<Integer> index = parseIndex(args); if(!index.isPresent()){ return new IncorrectCommand( String.format(MESSAGE_INVALID_COMMAND_FORMAT, DoneCommand.MESSAGE_USAGE)); } return new DoneCommand(index.get()); } /** * Parses arguments in the context of the not done item command. 
* * @param args full command args string * @return the prepared command */ private Command prepareNotDone(String args) { Optional<Integer> index = parseIndex(args); if(!index.isPresent()){ return new IncorrectCommand( String.format(MESSAGE_INVALID_COMMAND_FORMAT, DoneCommand.MESSAGE_USAGE)); } return new NotDoneCommand(index.get()); } /** * Parses arguments in the context of the select person command. * * @param args full command args string * @return the prepared command */ private Command prepareSelect(String args) { Optional<Integer> index = parseIndex(args); if(!index.isPresent()){ return new IncorrectCommand( String.format(MESSAGE_INVALID_COMMAND_FORMAT, SelectCommand.MESSAGE_USAGE)); } return new SelectCommand(index.get()); } /** * Returns the specified index in the {@code command} IF a positive unsigned integer is given as the index. * Returns an {@code Optional.empty()} otherwise. */ private Optional<Integer> parseIndex(String command) { final Matcher matcher = ITEM_INDEX_ARGS_FORMAT.matcher(command.trim()); if (!matcher.matches()) { return Optional.empty(); } String index = matcher.group("targetIndex"); if(!StringUtil.isUnsignedInteger(index)){ return Optional.empty(); } return Optional.of(Integer.parseInt(index)); } /** * Parses arguments in the context of the find person command. * * @param args full command args string * @return the prepared command */ private Command prepareFind(String args) { final Matcher matcher = KEYWORDS_ARGS_FORMAT.matcher(args.trim()); if (!matcher.matches()) { return new IncorrectCommand(String.format(MESSAGE_INVALID_COMMAND_FORMAT, FindCommand.MESSAGE_USAGE)); } // keywords delimited by whitespace final String[] keywords = matcher.group("keywords").split("\\s+"); final Set<String> keywordSet = new HashSet<>(Arrays.asList(keywords)); return new FindCommand(keywordSet); } }
package seedu.todo.controllers;

import java.util.HashMap;
import java.util.Map;

import seedu.todo.commons.exceptions.UnmatchedQuotesException;
import seedu.todo.commons.util.StringUtil;
import seedu.todo.models.TodoListDB;
import seedu.todo.ui.UiManager;
import seedu.todo.ui.views.IndexView;

/**
 * Controller that rolls back the most recent mutating command(s).
 * Triggered by any input starting with "undo"; an optional numeric token
 * chooses how many commands to revert (defaults to one).
 */
public class UndoController implements Controller {

    private static final String MESSAGE_SUCCESS = "Successfully undid %s %s!\nTo redo, type \"redo\".";
    private static final String MESSAGE_MULTIPLE_FAILURE = "We cannot undo %s %s! At most, you can undo %s %s.";
    private static final String MESSAGE_FAILURE = "There is no command to undo!";

    @Override
    public float inputConfidence(String input) {
        // Claim the command outright when it begins with the keyword.
        if (input.startsWith("undo")) {
            return 1;
        }
        return 0;
    }

    /** Token table for the tokenizer: only the "undo" keyword itself. */
    private static Map<String, String[]> getTokenDefinitions() {
        Map<String, String[]> definitions = new HashMap<String, String[]>();
        definitions.put("default", new String[] { "undo" });
        return definitions;
    }

    @Override
    public void process(String input) {
        Map<String, String[]> tokens;
        try {
            tokens = Tokenizer.tokenize(getTokenDefinitions(), input);
        } catch (UnmatchedQuotesException e) {
            System.out.println("Unmatched quote!");
            return;
        }

        int numUndo = extractUndoCount(tokens);

        // We don't really have a nice way to support SQL transactions, so yeah >_<
        TodoListDB db = TodoListDB.getInstance();
        if (db.undoSize() < numUndo || numUndo < 0) {
            UiManager.updateConsoleMessage(String.format(MESSAGE_MULTIPLE_FAILURE,
                    numUndo, StringUtil.pluralizer(numUndo, "command", "commands"),
                    db.undoSize(), StringUtil.pluralizer(db.undoSize(), "command", "commands")));
            return;
        }

        // Undo one step at a time, bailing out if the DB runs dry.
        int undone = 0;
        while (undone < numUndo) {
            if (!db.undo()) {
                UiManager.updateConsoleMessage(MESSAGE_FAILURE);
                return;
            }
            undone++;
        }

        db = TodoListDB.getInstance();

        // Render
        IndexView view = UiManager.loadView(IndexView.class);
        view.tasks = db.getAllTasks();
        view.events = db.getAllEvents();
        UiManager.renderView(view);

        // Update console message
        UiManager.updateConsoleMessage(String.format(MESSAGE_SUCCESS,
                numUndo, StringUtil.pluralizer(numUndo, "command", "commands")));
    }

    /** Reads the optional count argument following "undo"; defaults to a single undo. */
    private static int extractUndoCount(Map<String, String[]> tokens) {
        String[] defaultToken = tokens.get("default");
        if (defaultToken[1] == null) {
            return 1;
        }
        return Integer.parseInt(defaultToken[1]);
    }
}
package sizebay.catalog.client.model;

import java.util.*;

import com.fasterxml.jackson.annotation.JsonValue;

import lombok.*;

/**
 * Catalog modeling entry: a sizing model (either a product's measurement
 * table or a body profile) used for size recommendation. Getters/setters
 * are generated by Lombok.
 */
@Getter
@Setter
public class Modeling {

    private Long id;
    private String name;
    private Gender gender;

    // Google Merchant feed attributes.
    private String gMerchantBrandName;
    private String gMerchantAgeGroup;
    private String gMerchantSizeType;

    private String observation;
    private TypeEnum type;
    private Long brandId;
    private Long categoryId;

    // Per-size measurement rows; eagerly initialized so callers never see null.
    private List<ModelingSizeMeasures> measures = new ArrayList<>();

    private Integer strongCategoryTypeId;
    private String strongCategoryTypeName;

    // NOTE(review): int used as a flag — presumably 0/1; confirm whether the
    // wire format permits switching to boolean.
    private int isShoe;

    /** Discriminates whether the modeling describes a product or a body. */
    @Getter
    @RequiredArgsConstructor
    public enum TypeEnum {
        PRODUCT("product"),
        BODY("body");

        // JLS-recommended modifier order: "private final", not "final private".
        private final String value;

        @Override
        @JsonValue
        public String toString() {
            return value;
        }
    }

    /** Gender the modeling applies to: male, female or unisex. */
    @Getter
    @RequiredArgsConstructor
    public enum Gender {
        M("M"),
        F("F"),
        U("U");

        private final String value;

        @Override
        @JsonValue
        public String toString() {
            return value;
        }
    }
}
package stsc.general.trading; import java.io.ByteArrayInputStream; import java.io.File; import java.io.FileNotFoundException; import java.io.FileInputStream; import java.io.IOException; import java.io.InputStream; import java.util.ArrayList; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Optional; import java.util.Properties; import java.util.Set; import java.util.regex.Matcher; import java.util.regex.Pattern; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.core.config.XMLConfigurationFactory; import org.apache.logging.log4j.Logger; import stsc.algorithms.AlgorithmSettingsImpl; import stsc.common.FromToPeriod; import stsc.common.algorithms.AlgorithmSettings; import stsc.common.algorithms.BadAlgorithmException; import stsc.common.algorithms.EodAlgorithm; import stsc.common.algorithms.EodExecution; import stsc.common.algorithms.StockAlgorithm; import stsc.common.algorithms.StockExecution; import stsc.storage.AlgorithmsStorage; import stsc.storage.ExecutionsStorage; /** * Executions Loader - load (create set of instances for algorithms) executions * settings from text file / string. 
 */
final class ExecutionsLoader {

	/** Property keys recognized in the .ini configuration. */
	private static final class PropertyNames {
		public static String INCLUDES_LINE = "Includes";
		public static String STOCK_EXECUTIONS_LINE = "StockExecutions";
		public static String EOD_EXECUTIONS_LINE = "EodExecutions";
	}

	/** Patterns for parsing "Algorithm( name=value[iId], SubExecution(...) )" load lines. */
	private static final class Regexps {
		public static final Pattern loadLine = Pattern.compile("^(.?\\w+)\\((.*)\\)$");
		public static final Pattern subAlgoParameter = Pattern.compile("^([^\\(]+)\\((.*)\\)(\\s)*$");
		public static final Pattern integerParameter = Pattern.compile("^(.+)=(.+)[iI]$");
		public static final Pattern doubleParameter = Pattern.compile("^(.+)=(.+)[dD]$");
		public static final Pattern stringParameter = Pattern.compile("^(.+)=(.+)$");
		public static final Pattern subExecutionParameter = Pattern.compile("^(.+)$");
	}

	static {
		// Route log4j2 to the loader-specific configuration before the logger is created.
		System.setProperty(XMLConfigurationFactory.CONFIGURATION_FILE_PROPERTY, "./config/algorithmLoader.log4j2.xml");
	}

	private static Logger logger = LogManager.getLogger("ExecutionsLoader");

	public File configPath = new File("./config/algs.ini");
	private String configFileFolder;
	final private AlgorithmSettingsImpl settings;
	final private AlgorithmsStorage algorithmsStorage;
	final private ExecutionsStorage executionsStorage = new ExecutionsStorage();
	// Guards against recursive / duplicate property-file includes.
	final private Set<String> openedPropertyFileNames = new HashSet<>();
	// registered*: generated load-line -> execution name (dedup);
	// named*: execution name -> generated load-line.
	final private HashMap<String, String> registeredStockExecutions = new HashMap<>();
	final private HashMap<String, String> namedStockExecutions = new HashMap<>();
	final private HashMap<String, String> registeredEodExecutions = new HashMap<>();
	final private HashMap<String, String> namedEodExecutions = new HashMap<>();

	/** Loads executions from an in-memory configuration string. */
	ExecutionsLoader(FromToPeriod period, String config) throws BadAlgorithmException {
		this.settings = new AlgorithmSettingsImpl(period);
		this.algorithmsStorage = AlgorithmsStorage.getInstance();
		loadAlgorithms(config);
	}

	/** Loads executions from the given configuration file. */
	ExecutionsLoader(File configPath, FromToPeriod period) throws BadAlgorithmException {
		this.configPath = configPath;
		this.settings = new AlgorithmSettingsImpl(period);
		this.algorithmsStorage = AlgorithmsStorage.getInstance();
		loadAlgorithms();
	}

	/** Loads executions from a file, resolving algorithms within the given package. */
	ExecutionsLoader(File configPath, FromToPeriod period, String algoPackageName) throws BadAlgorithmException {
		this.configPath = configPath;
		this.settings = new AlgorithmSettingsImpl(period);
		this.algorithmsStorage = AlgorithmsStorage.getInstance(algoPackageName);
		loadAlgorithms();
	}

	private void loadAlgorithms() throws BadAlgorithmException {
		logger.info("start executions loader");
		configFileFolder = new File(configPath.getParent()).toString() + File.separatorChar;
		logger.debug("configuration path: {}", configFileFolder);
		openedPropertyFileNames.add(configPath.getName());
		try (FileInputStream in = new FileInputStream(configPath)) {
			final Properties p = new Properties();
			// NOTE(review): message says "file" but logs the folder — confirm intended.
			logger.debug("main properties file '{}' opened", configFileFolder);
			p.load(in);
			processProperties(p);
		} catch (IOException e) {
			throw new BadAlgorithmException(e.getMessage());
		}
		logger.info("stop executions loader");
	}

	private void loadAlgorithms(String config) throws BadAlgorithmException {
		final Properties p = new Properties();
		final InputStream stream = new ByteArrayInputStream(config.getBytes());
		try {
			p.load(stream);
			processProperties(p);
		} catch (IOException e) {
			throw new BadAlgorithmException(e.getMessage());
		}
	}

	/** Processes one property set: includes first, then stock, then EOD executions. */
	private void processProperties(final Properties p) throws FileNotFoundException, IOException, BadAlgorithmException {
		processIncludes(p);
		processStockLoadLines(p);
		processEodLoadLines(p);
	}

	/** Recursively loads comma-separated include files, skipping ones already opened. */
	private void processIncludes(final Properties p) throws FileNotFoundException, IOException, BadAlgorithmException {
		final String includes = p.getProperty(PropertyNames.INCLUDES_LINE);
		if (includes == null)
			return;
		final String[] includesFileNames = includes.split(",");
		for (String rawFileName : includesFileNames) {
			final String fileName = rawFileName.trim();
			if (openedPropertyFileNames.contains(fileName))
				continue;
			openedPropertyFileNames.add(fileName);
			try (FileInputStream in = new FileInputStream(configFileFolder + fileName)) {
				final Properties includeProperty = new Properties();
				logger.debug("read include property file '{}'", fileName);
				includeProperty.load(in);
				processProperties(includeProperty);
			}
		}
	}

	/** Registers every stock execution listed in the StockExecutions property. */
	private void processStockLoadLines(final Properties p) throws BadAlgorithmException {
		final String stockNames = p.getProperty(PropertyNames.STOCK_EXECUTIONS_LINE);
		if (stockNames == null)
			return;
		for (String rawExecutionName : stockNames.split(",")) {
			final String executionName = rawExecutionName.trim();
			final String loadLine = p.getProperty(executionName + ".loadLine");
			if (loadLine == null)
				throw new BadAlgorithmException("bad stock execution registration, no " + executionName + ".loadLine property");
			checkNewStockExecution(executionName);
			final String generatedName = processStockExecution(executionName, loadLine);
			namedStockExecutions.put(executionName, generatedName);
			registeredStockExecutions.put(generatedName, executionName);
		}
	}

	/** Rejects duplicate stock execution names. */
	private void checkNewStockExecution(final String executionName) throws BadAlgorithmException {
		if (namedStockExecutions.containsKey(executionName))
			throw new BadAlgorithmException("algorithm " + executionName + " already registered");
	}

	private String processStockExecution(String executionName, String loadLine) throws BadAlgorithmException {
		final Matcher loadLineMatch = Regexps.loadLine.matcher(loadLine);
		if (loadLineMatch.matches()) {
			return processStockSubExecution(executionName, loadLineMatch);
		} else
			throw new BadAlgorithmException("bad algorithm load line: " + loadLine);
	}

	// Anonymous variant: the generated load-line itself becomes the execution name.
	private String processStockSubExecution(Matcher match) throws BadAlgorithmException {
		final List<String> params = parseParams(match.group(2).trim());
		return processStockExecution(match.group(1).trim(), params);
	}

	private String processStockSubExecution(String executionName, Matcher match) throws BadAlgorithmException {
		final List<String> params = parseParams(match.group(2).trim());
		return processStockExecution(executionName, match.group(1).trim(), params);
	}

	/**
	 * Creates (or reuses) a stock execution for the named algorithm. The
	 * canonical "algorithm(settings)" string is used as a dedup key: if an
	 * identical execution already exists, its registered name is returned
	 * instead of creating a duplicate.
	 */
	private String processStockExecution(String realExecutionName, String algorithmName, final List<String> params) throws BadAlgorithmException {
		final Class<? extends StockAlgorithm> stockAlgorithm = algorithmsStorage.getStock(algorithmName);
		if (stockAlgorithm == null)
			throw new BadAlgorithmException("there is no such algorithm like " + algorithmName);
		final AlgorithmSettings algorithmSettings = generateStockAlgorithmSettings(params);
		final String executionName = algorithmName + "(" + algorithmSettings.toString() + ")";
		final String oldRealExecutionName = registeredStockExecutions.get(executionName);
		if (oldRealExecutionName != null)
			return oldRealExecutionName;
		final StockExecution execution = new StockExecution(realExecutionName, stockAlgorithm, algorithmSettings);
		executionsStorage.addStockExecution(execution);
		return executionName;
	}

	// Overload for anonymous sub-executions: registers under the generated name.
	private String processStockExecution(String algorithmName, final List<String> params) throws BadAlgorithmException {
		final Class<?
extends StockAlgorithm> stockAlgorithm = algorithmsStorage.getStock(algorithmName); if (stockAlgorithm == null) throw new BadAlgorithmException("there is no such algorithm like " + algorithmName); final AlgorithmSettings algorithmSettings = generateStockAlgorithmSettings(params); final String executionName = algorithmName + "(" + algorithmSettings.toString() + ")"; final String oldRealExecutionName = registeredStockExecutions.get(executionName); if (oldRealExecutionName != null) return oldRealExecutionName; final StockExecution execution = new StockExecution(executionName, stockAlgorithm, algorithmSettings); executionsStorage.addStockExecution(execution); return executionName; } private void processEodLoadLines(final Properties p) throws BadAlgorithmException { final String eodNames = p.getProperty(PropertyNames.EOD_EXECUTIONS_LINE); if (eodNames == null) return; for (String rawExecutionName : eodNames.split(",")) { final String executionName = rawExecutionName.trim(); final String loadLine = p.getProperty(executionName + ".loadLine"); if (loadLine == null) throw new BadAlgorithmException("bad eod algorithm execution registration, no " + executionName + ".loadLine property"); checkNewEodExecution(executionName); final String generatedName = processEodExecution(executionName, loadLine); namedStockExecutions.put(executionName, generatedName); registeredStockExecutions.put(generatedName, executionName); } } private void checkNewEodExecution(final String executionName) throws BadAlgorithmException { if (namedEodExecutions.containsKey(executionName)) throw new BadAlgorithmException("eod algorithm " + executionName + " already registered"); } private String processEodExecution(final String executionName, final String loadLine) throws BadAlgorithmException { final Matcher loadLineMatch = Regexps.loadLine.matcher(loadLine); if (loadLineMatch.matches()) { return processEodSubExecution(executionName, loadLineMatch); } else throw new BadAlgorithmException("bad algorithm load 
line: " + loadLine); } private Optional<String> processEodSubExecution(final Matcher match) throws BadAlgorithmException { final List<String> params = parseParams(match.group(2).trim()); return processEodExecution(match.group(1).trim(), params); } private String processEodSubExecution(final String executionName, final Matcher match) throws BadAlgorithmException { final List<String> params = parseParams(match.group(2).trim()); return processEodExecution(executionName, match.group(1).trim(), params); } private String processEodExecution(String realExecutionName, String algorithmName, final List<String> params) throws BadAlgorithmException { final Class<? extends EodAlgorithm> eodAlgorithm = algorithmsStorage.getEod(algorithmName); if (eodAlgorithm == null) throw new BadAlgorithmException("there is no such algorithm like " + algorithmName); final AlgorithmSettings algorithmSettings = generateEodAlgorithmSettings(params); final String executionName = algorithmName + "(" + algorithmSettings.toString() + ")"; final String oldRealExecutionName = registeredEodExecutions.get(executionName); if (oldRealExecutionName != null) return oldRealExecutionName; final EodExecution execution = new EodExecution(realExecutionName, eodAlgorithm, algorithmSettings); executionsStorage.addEodExecution(execution); return executionName; } private Optional<String> processEodExecution(String algorithmName, final List<String> params) throws BadAlgorithmException { final Class<? 
extends EodAlgorithm> eodAlgorithm = algorithmsStorage.getEod(algorithmName); if (eodAlgorithm == null) return Optional.empty(); final AlgorithmSettings algorithmSettings = generateEodAlgorithmSettings(params); final String executionName = algorithmName + "(" + algorithmSettings.toString() + ")"; final String oldRealExecutionName = registeredEodExecutions.get(executionName); if (oldRealExecutionName != null) return Optional.of(oldRealExecutionName); final EodExecution execution = new EodExecution(executionName, eodAlgorithm, algorithmSettings); executionsStorage.addEodExecution(execution); return Optional.of(executionName); } private AlgorithmSettings generateStockAlgorithmSettings(final List<String> params) throws BadAlgorithmException { final AlgorithmSettingsImpl algorithmSettings = settings.clone(); for (final String parameter : params) { final Matcher subAlgoMatch = Regexps.subAlgoParameter.matcher(parameter); final Matcher integerMatch = Regexps.integerParameter.matcher(parameter); final Matcher doubleMatch = Regexps.doubleParameter.matcher(parameter); final Matcher stringMatch = Regexps.stringParameter.matcher(parameter); final Matcher subExecutionMatch = Regexps.subExecutionParameter.matcher(parameter); if (subAlgoMatch.matches()) { final String subName = processStockSubExecution(subAlgoMatch); if (!namedStockExecutions.containsKey(subName)) { registeredStockExecutions.put(subName, subName); } algorithmSettings.addSubExecutionName(subName); } else if (integerMatch.matches()) { algorithmSettings.setInteger(integerMatch.group(1).trim(), Integer.valueOf(integerMatch.group(2).trim())); } else if (doubleMatch.matches()) { algorithmSettings.setDouble(doubleMatch.group(1).trim(), Double.valueOf(doubleMatch.group(2).trim())); } else if (stringMatch.matches()) { algorithmSettings.setString(stringMatch.group(1).trim(), stringMatch.group(2).trim()); } else if (subExecutionMatch.matches()) { final String subExecutionName = subExecutionMatch.group(1).trim(); final 
String executionCode = namedStockExecutions.get(subExecutionName); if (executionCode != null) algorithmSettings.addSubExecutionName(subExecutionName); else throw new BadAlgorithmException("unknown sub execution name: " + parameter); } else throw new BadAlgorithmException("bad sub execution line: " + parameter); } return algorithmSettings; } private AlgorithmSettings generateEodAlgorithmSettings(final List<String> params) throws BadAlgorithmException { final AlgorithmSettingsImpl algorithmSettings = settings.clone(); for (final String parameter : params) { final Matcher subAlgoMatch = Regexps.subAlgoParameter.matcher(parameter); final Matcher integerMatch = Regexps.integerParameter.matcher(parameter); final Matcher doubleMatch = Regexps.doubleParameter.matcher(parameter); final Matcher stringMatch = Regexps.stringParameter.matcher(parameter); final Matcher subExecutionMatch = Regexps.subExecutionParameter.matcher(parameter); if (subAlgoMatch.matches()) { final Optional<String> subEodName = processEodSubExecution(subAlgoMatch); if (subEodName.isPresent()) { final String subName = subEodName.get(); if (!namedEodExecutions.containsKey(subName)) { registeredStockExecutions.put(subName, subName); } algorithmSettings.addSubExecutionName(subName); } else { final String subStockName = processStockSubExecution(subAlgoMatch); if (!namedStockExecutions.containsKey(subStockName)) { registeredStockExecutions.put(subStockName, subStockName); } algorithmSettings.addSubExecutionName(subStockName); } } else if (integerMatch.matches()) { algorithmSettings.setInteger(integerMatch.group(1).trim(), Integer.valueOf(integerMatch.group(2).trim())); } else if (doubleMatch.matches()) { algorithmSettings.setDouble(doubleMatch.group(1).trim(), Double.valueOf(doubleMatch.group(2).trim())); } else if (stringMatch.matches()) { algorithmSettings.setString(stringMatch.group(1).trim(), stringMatch.group(2).trim()); } else if (subExecutionMatch.matches()) { final String subExecutionName = 
subExecutionMatch.group(1).trim(); final String executionEodCode = namedEodExecutions.get(subExecutionName); if (executionEodCode != null) { algorithmSettings.addSubExecutionName(subExecutionName); } else { final String executionStockCode = namedStockExecutions.get(subExecutionName); if (executionStockCode != null) algorithmSettings.addSubExecutionName(subExecutionName); else throw new BadAlgorithmException("unknown sub execution name: " + parameter); } } else throw new BadAlgorithmException("bad sub execution line: " + parameter); } return algorithmSettings; } private List<String> parseParams(final String paramsString) { int inBracketsStack = 0; int lastParamIndex = 0; final ArrayList<String> params = new ArrayList<>(); for (int i = 0; i < paramsString.length(); ++i) { if (paramsString.charAt(i) == '(') { inBracketsStack += 1; } else if (paramsString.charAt(i) == ')') { inBracketsStack -= 1; } else if (paramsString.charAt(i) == ',' && inBracketsStack == 0) { params.add(paramsString.substring(lastParamIndex, i).trim()); lastParamIndex = i + 1; } } if (lastParamIndex != paramsString.length()) { params.add(paramsString.substring(lastParamIndex, paramsString.length()).trim()); } return params; } public ExecutionsStorage getExecutionsStorage() { return executionsStorage; } }