text
stringlengths
2
99k
meta
dict
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.

// ***************************************************************
// - [#] indicates a test step (e.g. # Go to a page)
// - [*] indicates an assertion (e.g. * Check the title)
// - Use element ID when selecting an element. Create one if none.
// ***************************************************************

// Stage: @prod
// Group: @accessibility

import {getRandomId} from '../../utils';

// E2E spec: verifies screen-reader support (aria-labels, focus order, live regions)
// for posts in the center channel and the right-hand sidebar (RHS).
describe('Verify Accessibility Support in Post', () => {
    let testUser;
    let otherUser;
    let testTeam;
    let testChannel;

    before(() => {
        // # Update Configs
        cy.apiUpdateConfig({
            ServiceSettings: {
                ExperimentalChannelOrganization: false,
            },
        });

        // # Create a team/channel/user, plus a second user in the same channel
        cy.apiInitSetup().then(({team, channel, user}) => {
            testUser = user;
            testTeam = team;
            testChannel = channel;

            cy.apiCreateUser({prefix: 'other'}).then(({user: user1}) => {
                otherUser = user1;

                cy.apiAddUserToTeam(testTeam.id, otherUser.id).then(() => {
                    cy.apiAddUserToChannel(testChannel.id, otherUser.id);
                });
            });
        });
    });

    beforeEach(() => {
        // # Login as test user and visit the test channel
        cy.apiLogin(testUser);
        cy.visit(`/${testTeam.name}/channels/${testChannel.name}`);
        cy.get('#postListContent').should('be.visible');
    });

    it('MM-T1479 Verify Reader reads out the post correctly on Center Channel', () => {
        const {lastMessage} = postMessages(testChannel, otherUser, 1);

        performActionsToLastPost();

        // # Shift focus to the last post
        cy.get('#fileUploadButton').focus().tab({shift: true}).tab({shift: true});
        cy.get('body').type('{uparrow}{downarrow}');

        // * Verify post message in Center Channel
        cy.getLastPostId().then((postId) => {
            // * Verify reader reads out the post correctly
            verifyPostLabel(`#post_${postId}`, otherUser.username, `wrote, ${lastMessage}, 2 reactions, message is saved and pinned`);
        });
    });

    it('MM-T1480 Verify Reader reads out the post correctly on RHS', () => {
        const {lastMessage} = postMessages(testChannel, otherUser, 1);

        performActionsToLastPost();

        // # Post a reply on RHS
        cy.getLastPostId().then((postId) => {
            cy.clickPostCommentIcon(postId);
            cy.get('#rhsContainer').should('be.visible');

            const replyMessage = 'A reply to an older post';
            cy.postMessageReplyInRHS(replyMessage);

            // * Verify post message in RHS
            cy.get('#rhsContainer').within(() => {
                // # Shift the focus to the last post
                cy.get('#fileUploadButton').focus().tab({shift: true}).tab({shift: true}).type('{uparrow}');

                // * Verify reader reads out the post correctly
                verifyPostLabel(`#rhsPost_${postId}`, otherUser.username, `wrote, ${lastMessage}, 2 reactions, message is saved and pinned`);
            });

            // * Verify reply message in RHS
            cy.getLastPostId().then((replyId) => {
                cy.get('#rhsContainer').within(() => {
                    // # Shift the focus to the last reply message
                    cy.get('#fileUploadButton').focus().tab({shift: true}).tab({shift: true}).type('{uparrow}{downarrow}');

                    // * Verify reader reads out the post correctly
                    verifyPostLabel(`#rhsPost_${replyId}`, testUser.username, `replied, ${replyMessage}`);
                });
            });
        });
    });

    it('MM-T1486_1 Verify different Post Focus on Center Channel', () => {
        postMessages(testChannel, otherUser, 5);

        // # Shift focus to the last post
        cy.get('#fileUploadButton').focus().tab({shift: true}).tab({shift: true}).type('{uparrow}');

        // * Verify if focus changes to different posts when we use up arrows
        for (let index = 1; index < 5; index++) {
            cy.getNthPostId(-index - 1).then((postId) => {
                cy.get(`#post_${postId}`).should('have.class', 'a11y--active a11y--focused');
                cy.get('body').type('{uparrow}');
            });
        }

        // * Verify if focus changes to different posts when we use down arrows
        for (let index = 5; index > 0; index--) {
            cy.getNthPostId(-index - 1).then((postId) => {
                cy.get(`#post_${postId}`).should('have.class', 'a11y--active a11y--focused');
                cy.get('body').type('{downarrow}');
            });
        }
    });

    it('MM-T1486_2 Verify different Post Focus on RHS', () => {
        // # Post Message as Current user
        const message = `hello from current user: ${getRandomId()}`;
        cy.postMessage(message);

        // # Post few replies on RHS
        cy.getLastPostId().then((postId) => {
            cy.clickPostCommentIcon(postId);
            cy.get('#rhsContainer').should('be.visible');

            // # Alternate replies from the current user and the other user
            for (let index = 0; index < 3; index++) {
                const replyMessage = `A reply ${getRandomId()}`;
                cy.postMessageReplyInRHS(replyMessage);
                const otherMessage = `reply from ${otherUser.username}: ${getRandomId()}`;
                cy.postMessageAs({sender: otherUser, message: otherMessage, channelId: testChannel.id, rootId: postId});
            }
        });

        cy.get('#rhsContainer').within(() => {
            // # Shift focus to the last post
            cy.get('#fileUploadButton').focus().tab({shift: true}).tab({shift: true}).type('{uparrow}');
        });

        // * Verify if focus changes to different posts when we use up arrows
        for (let index = 1; index < 5; index++) {
            cy.getNthPostId(-index - 1).then((postId) => {
                cy.get(`#rhsPost_${postId}`).should('have.class', 'a11y--active a11y--focused');
                cy.get('body').type('{uparrow}');
            });
        }

        // * Verify if focus changes to different posts when we use down arrows
        for (let index = 5; index > 1; index--) {
            cy.getNthPostId(-index - 1).then((postId) => {
                cy.get(`#rhsPost_${postId}`).should('have.class', 'a11y--active a11y--focused');
                cy.get('body').type('{downarrow}');
            });
        }
    });

    it('MM-T1486_3 Verify Tab support on Post on Center Channel', () => {
        postMessages(testChannel, otherUser, 1);

        // # Shift focus to the last post
        cy.get('#fileUploadButton').focus().tab({shift: true}).tab({shift: true});
        cy.get('body').type('{uparrow}{downarrow}');
        cy.focused().tab();

        // * Walk forward through the post's focusable controls and check each aria-label
        cy.getLastPostId().then((postId) => {
            cy.get(`#post_${postId}`).within(() => {
                // * Verify focus is on the username
                cy.get('button.user-popover').should('have.class', 'a11y--active a11y--focused').and('have.attr', 'aria-label', otherUser.username);
                cy.focused().tab();

                // * Verify focus is on the time
                cy.get(`#CENTER_time_${postId}`).should('have.class', 'a11y--active a11y--focused');
                cy.focused().tab();

                // * Verify focus is on the actions button
                cy.get(`#CENTER_button_${postId}`).should('have.class', 'a11y--active a11y--focused').and('have.attr', 'aria-label', 'more actions');
                cy.focused().tab();

                // * Verify focus is on the reactions button
                cy.get(`#CENTER_reaction_${postId}`).should('have.class', 'a11y--active a11y--focused').and('have.attr', 'aria-label', 'add reaction');
                cy.focused().tab();

                // * Verify focus is on the save post button
                cy.get(`#CENTER_flagIcon_${postId}`).should('have.class', 'a11y--active a11y--focused').and('have.attr', 'aria-label', 'save');
                cy.focused().tab();

                // * Verify focus is on the comment button
                cy.get(`#CENTER_commentIcon_${postId}`).should('have.class', 'a11y--active a11y--focused').and('have.attr', 'aria-label', 'reply');
                cy.focused().tab();

                // * Verify focus is on the post text
                cy.get(`#postMessageText_${postId}`).should('have.class', 'a11y--active a11y--focused').and('have.attr', 'aria-readonly', 'true');
            });
        });
    });

    it('MM-T1486_4 Verify Tab support on Post on RHS', () => {
        // # Post Message as Current user
        const message = `hello from current user: ${getRandomId()}`;
        cy.postMessage(message);

        // # Post few replies on RHS
        cy.getLastPostId().then((postId) => {
            cy.clickPostCommentIcon(postId);
            cy.get('#rhsContainer').should('be.visible');

            const replyMessage = `A reply ${getRandomId()}`;
            cy.postMessageReplyInRHS(replyMessage);
            const otherMessage = `reply from ${otherUser.username}: ${getRandomId()}`;
            cy.postMessageAs({sender: otherUser, message: otherMessage, channelId: testChannel.id, rootId: postId});
        });

        cy.get('#rhsContainer').within(() => {
            // # Shift focus to the last post
            cy.get('#fileUploadButton').focus().tab({shift: true}).tab({shift: true});
        });

        // * Verify reverse tab on RHS (walk backwards through the controls)
        cy.getLastPostId().then((postId) => {
            cy.get(`#rhsPost_${postId}`).within(() => {
                // * Verify focus is on the post text
                cy.get(`#rhsPostMessageText_${postId}`).should('have.class', 'a11y--active a11y--focused').and('have.attr', 'aria-readonly', 'true');
                cy.focused().tab({shift: true});

                // * Verify focus is on the save icon
                cy.get(`#RHS_COMMENT_flagIcon_${postId}`).should('have.class', 'a11y--active a11y--focused').and('have.attr', 'aria-label', 'save');
                cy.focused().tab({shift: true});

                // * Verify focus is on the reactions button
                cy.get(`#RHS_COMMENT_reaction_${postId}`).should('have.class', 'a11y--active a11y--focused').and('have.attr', 'aria-label', 'add reaction');
                cy.focused().tab({shift: true});

                // * Verify focus is on the actions button
                cy.get(`#RHS_COMMENT_button_${postId}`).should('have.class', 'a11y--active a11y--focused').and('have.attr', 'aria-label', 'more actions');
                cy.focused().tab({shift: true});

                // * Verify focus is on the time
                cy.get(`#RHS_COMMENT_time_${postId}`).should('have.class', 'a11y--active a11y--focused');
                cy.focused().tab({shift: true});

                // * Verify focus is on the username
                cy.get('button.user-popover').should('have.class', 'a11y--active a11y--focused').and('have.attr', 'aria-label', otherUser.username);
                cy.focused().tab({shift: true});
            });
        });
    });

    it('MM-T1462 Verify incoming messages are read', () => {
        // # Submit a post as another user
        const message = `verify incoming message from ${otherUser.username}: ${getRandomId()}`;
        cy.postMessageAs({sender: otherUser, message, channelId: testChannel.id});

        // # Get the element which stores the incoming messages
        // (aria-live="polite" region that screen readers announce)
        cy.get('#postListContent').within(() => {
            cy.get('.sr-only').should('have.attr', 'aria-live', 'polite').as('incomingMessage');
        });

        // * Verify incoming message is read
        cy.get('@incomingMessage').invoke('text').then((text) => {
            expect(text).contain(message);
        });
    });
});

/**
 * Posts `count` pairs of messages (one from the current user, one from
 * `otherUser`) into `testChannel`.
 * Returns {lastMessage}: the text of the final message sent by otherUser.
 */
function postMessages(testChannel, otherUser, count) {
    let lastMessage;

    for (let index = 0; index < count; index++) {
        // # Post Message as Current user
        const message = `hello from current user: ${getRandomId()}`;
        cy.postMessage(message);

        lastMessage = `hello from ${otherUser.username}: ${getRandomId()}`;
        cy.postMessageAs({sender: otherUser, message: lastMessage, channelId: testChannel.id});
    }

    return {lastMessage};
}

/**
 * Decorates the last post so its aria-label gains the
 * "2 reactions, message is saved and pinned" suffix:
 * adds :grinning: and :smile: reactions, saves the post, and pins it.
 */
function performActionsToLastPost() {
    // # Take some actions on the last post
    cy.getLastPostId().then((postId) => {
        // # Add grinning reaction
        cy.clickPostReactionIcon(postId);
        cy.findByTestId('grinning').trigger('mouseover');
        cy.get('#emojiPickerSpritePreview').should('be.visible');
        cy.get('#emojiPickerAliasesPreview').should('be.visible').and('have.text', ':grinning:');
        cy.findByTestId('grinning').click();
        cy.get(`#postReaction-${postId}-grinning`).should('be.visible');

        // # Add smile reaction
        cy.clickPostReactionIcon(postId);
        cy.findByTestId('smile').trigger('mouseover');
        cy.get('#emojiPickerSpritePreview').should('be.visible');
        cy.get('#emojiPickerAliasesPreview').should('be.visible').and('have.text', ':smile:');
        cy.findByTestId('smile').click();
        cy.get(`#postReaction-${postId}-smile`).should('be.visible');

        // # Save the post
        cy.clickPostSaveIcon(postId);

        // # Pin the post
        cy.clickPostDotMenu(postId);
        cy.get(`#pin_post_${postId}`).click();
        cy.clickPostDotMenu(postId);
        cy.get('body').type('{esc}');
    });
}

/**
 * Asserts that the element `elementId` is the a11y-focused post and that its
 * aria-label is "At <time> <date>, <username> <labelSuffix>" — the string a
 * screen reader would announce for the post.
 */
function verifyPostLabel(elementId, username, labelSuffix) {
    // # Shift focus to the last post
    cy.get(elementId).as('lastPost').should('have.class', 'a11y--active a11y--focused');

    // * Verify reader reads out the post correctly
    cy.get('@lastPost').then((el) => {
        // # Get the post time
        cy.wrap(el).find('time.post__time').invoke('text').then((time) => {
            const expectedLabel = `At ${time} ${Cypress.moment().format('dddd, MMMM D')}, ${username} ${labelSuffix}`;
            cy.wrap(el).should('have.attr', 'aria-label', expectedLabel);
        });
    });
}
{ "pile_set_name": "Github" }
from .learner_glm import LearnerGLM from .learner_optim import LearnerOptim
{ "pile_set_name": "Github" }
# coding=utf-8
import tensorflow as tf
from modeling import embedding_lookup_factorized,transformer_model
import os

"""
Smoke-tests ALBERT's main changes relative to BERT: factorized word-embedding
parameterization, cross-layer parameter sharing, and inter-sentence coherence
(sentence-order prediction).
test main change of albert from bert
"""

# Model/graph dimensions used by all tests below.
batch_size = 2048
sequence_length = 512
vocab_size = 30000
hidden_size = 1024
num_attention_heads = int(hidden_size / 64)  # standard 64-dim heads


def get_total_parameters():
    """
    get total parameters of a graph
    :return:
    """
    total_parameters = 0
    for variable in tf.trainable_variables():
        # shape is an array of tf.Dimension
        shape = variable.get_shape()
        # print(shape)
        # print(len(shape))
        variable_parameters = 1
        for dim in shape:
            # print(dim)
            variable_parameters *= dim.value
        # print(variable_parameters)
        total_parameters += variable_parameters
    return total_parameters


def test_factorized_embedding():
    """
    test of Factorized embedding parameterization
    :return:
    """
    input_ids=tf.zeros((batch_size, sequence_length),dtype=tf.int32)
    output, embedding_table, embedding_table_2=embedding_lookup_factorized(input_ids,vocab_size,hidden_size)
    print("output:",output)


def test_share_parameters():
    """
    test of share parameters across all layers: how many parameter after share parameter across layers of transformer.
    :return:
    """
    def total_parameters_transformer(share_parameter_across_layers):
        # Build a transformer over a dummy input and count trainable parameters.
        input_tensor=tf.zeros((batch_size, sequence_length, hidden_size),dtype=tf.float32)
        print("transformer_model. input:",input_tensor)
        transformer_result=transformer_model(input_tensor,hidden_size=hidden_size,num_attention_heads=num_attention_heads,share_parameter_across_layers=share_parameter_across_layers)
        print("transformer_result:",transformer_result)
        total_parameters=get_total_parameters()
        print('total_parameters(not share):',total_parameters)

    share_parameter_across_layers=False
    total_parameters_transformer(share_parameter_across_layers) # total parameters, not share: 125,976,576 = 125 million

    tf.reset_default_graph() # Clears the default graph stack and resets the global default graph
    share_parameter_across_layers=True
    total_parameters_transformer(share_parameter_across_layers) # total parameters, share: 10,498,048 = 10.5 million


def test_sentence_order_prediction():
    """
    sentence order prediction. check method of create_instances_from_document_albert from create_pretrining_data.py
    :return:
    """
    # Make the data-generation script executable, then run it.
    os.system("chmod +x create_pretrain_data.sh")
    os.system("./create_pretrain_data.sh")


# 1.test of Factorized embedding parameterization
#test_factorized_embedding()

# 2. test of share parameters across all layers: how many parameter after share parameter across layers of transformer.
# before share parameter: 125,976,576; after share parameter:
#test_share_parameters()

# 3. test of sentence order prediction(SOP)
test_sentence_order_prediction()
{ "pile_set_name": "Github" }
<?xml version="1.0" encoding="iso-8859-1"?> <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "DTD/xhtml1-strict.dtd"> <html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en"> <head> <title>Qt 4.6: addressbook.cpp Example File (tutorials/addressbook/part5/addressbook.cpp)</title> <link href="classic.css" rel="stylesheet" type="text/css" /> </head> <body> <table border="0" cellpadding="0" cellspacing="0" width="100%"> <tr> <td align="left" valign="top" width="32"><a href="http://qt.nokia.com/"><img src="images/qt-logo.png" align="left" border="0" /></a></td> <td width="1">&nbsp;&nbsp;</td><td class="postheader" valign="center"><a href="index.html"><font color="#004faf">Home</font></a>&nbsp;&middot; <a href="classes.html"><font color="#004faf">All&nbsp;Classes</font></a>&nbsp;&middot; <a href="functions.html"><font color="#004faf">All&nbsp;Functions</font></a>&nbsp;&middot; <a href="overviews.html"><font color="#004faf">Overviews</font></a></td></tr></table><h1 class="title">addressbook.cpp Example File<br /><span class="small-subtitle">tutorials/addressbook/part5/addressbook.cpp</span> </h1> <pre><span class="comment"> /**************************************************************************** ** ** Copyright (C) 2009 Nokia Corporation and/or its subsidiary(-ies). ** All rights reserved. ** Contact: Nokia Corporation (qt-info@nokia.com) ** ** This file is part of the examples of the Qt Toolkit. ** ** $QT_BEGIN_LICENSE:LGPL$ ** Commercial Usage ** Licensees holding valid Qt Commercial licenses may use this file in ** accordance with the Qt Commercial License Agreement provided with the ** Software or, alternatively, in accordance with the terms contained in ** a written agreement between you and Nokia. 
** ** GNU Lesser General Public License Usage ** Alternatively, this file may be used under the terms of the GNU Lesser ** General Public License version 2.1 as published by the Free Software ** Foundation and appearing in the file LICENSE.LGPL included in the ** packaging of this file. Please review the following information to ** ensure the GNU Lesser General Public License version 2.1 requirements ** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html. ** ** In addition, as a special exception, Nokia gives you certain additional ** rights. These rights are described in the Nokia Qt LGPL Exception ** version 1.1, included in the file LGPL_EXCEPTION.txt in this package. ** ** GNU General Public License Usage ** Alternatively, this file may be used under the terms of the GNU ** General Public License version 3.0 as published by the Free Software ** Foundation and appearing in the file LICENSE.GPL included in the ** packaging of this file. Please review the following information to ** ensure the GNU General Public License version 3.0 requirements will be ** met: http://www.gnu.org/copyleft/gpl.html. ** ** If you have questions regarding the use of this file, please contact ** Nokia at qt-info@nokia.com. 
** $QT_END_LICENSE$ ** ****************************************************************************/</span> #include &lt;QtGui&gt; #include &quot;addressbook.h&quot; AddressBook::AddressBook(QWidget *parent) : QWidget(parent) { QLabel *nameLabel = new QLabel(tr(&quot;Name:&quot;)); nameLine = new QLineEdit; nameLine-&gt;setReadOnly(true); QLabel *addressLabel = new QLabel(tr(&quot;Address:&quot;)); addressText = new QTextEdit; addressText-&gt;setReadOnly(true); addButton = new QPushButton(tr(&quot;&amp;Add&quot;)); editButton = new QPushButton(tr(&quot;&amp;Edit&quot;)); editButton-&gt;setEnabled(false); removeButton = new QPushButton(tr(&quot;&amp;Remove&quot;)); removeButton-&gt;setEnabled(false); findButton = new QPushButton(tr(&quot;&amp;Find&quot;)); findButton-&gt;setEnabled(false); submitButton = new QPushButton(tr(&quot;&amp;Submit&quot;)); submitButton-&gt;hide(); cancelButton = new QPushButton(tr(&quot;&amp;Cancel&quot;)); cancelButton-&gt;hide(); nextButton = new QPushButton(tr(&quot;&amp;Next&quot;)); nextButton-&gt;setEnabled(false); previousButton = new QPushButton(tr(&quot;&amp;Previous&quot;)); previousButton-&gt;setEnabled(false); dialog = new FindDialog; connect(addButton, SIGNAL(clicked()), this, SLOT(addContact())); connect(submitButton, SIGNAL(clicked()), this, SLOT(submitContact())); connect(editButton, SIGNAL(clicked()), this, SLOT(editContact())); connect(cancelButton, SIGNAL(clicked()), this, SLOT(cancel())); connect(removeButton, SIGNAL(clicked()), this, SLOT(removeContact())); connect(findButton, SIGNAL(clicked()), this, SLOT(findContact())); connect(nextButton, SIGNAL(clicked()), this, SLOT(next())); connect(previousButton, SIGNAL(clicked()), this, SLOT(previous())); QVBoxLayout *buttonLayout1 = new QVBoxLayout; buttonLayout1-&gt;addWidget(addButton); buttonLayout1-&gt;addWidget(editButton); buttonLayout1-&gt;addWidget(removeButton); buttonLayout1-&gt;addWidget(findButton); buttonLayout1-&gt;addWidget(submitButton); 
buttonLayout1-&gt;addWidget(cancelButton); buttonLayout1-&gt;addStretch(); QHBoxLayout *buttonLayout2 = new QHBoxLayout; buttonLayout2-&gt;addWidget(previousButton); buttonLayout2-&gt;addWidget(nextButton); QGridLayout *mainLayout = new QGridLayout; mainLayout-&gt;addWidget(nameLabel, 0, 0); mainLayout-&gt;addWidget(nameLine, 0, 1); mainLayout-&gt;addWidget(addressLabel, 1, 0, Qt::AlignTop); mainLayout-&gt;addWidget(addressText, 1, 1); mainLayout-&gt;addLayout(buttonLayout1, 1, 2); mainLayout-&gt;addLayout(buttonLayout2, 2, 1); setLayout(mainLayout); setWindowTitle(tr(&quot;Simple Address Book&quot;)); } void AddressBook::addContact() { oldName = nameLine-&gt;text(); oldAddress = addressText-&gt;toPlainText(); nameLine-&gt;clear(); addressText-&gt;clear(); updateInterface(AddingMode); } void AddressBook::editContact() { oldName = nameLine-&gt;text(); oldAddress = addressText-&gt;toPlainText(); updateInterface(EditingMode); } void AddressBook::submitContact() { QString name = nameLine-&gt;text(); QString address = addressText-&gt;toPlainText(); if (name == &quot;&quot; || address == &quot;&quot;) { QMessageBox::information(this, tr(&quot;Empty Field&quot;), tr(&quot;Please enter a name and address.&quot;)); } if (currentMode == AddingMode) { if (!contacts.contains(name)) { contacts.insert(name, address); QMessageBox::information(this, tr(&quot;Add Successful&quot;), tr(&quot;\&quot;%1\&quot; has been added to your address book.&quot;).arg(name)); } else { QMessageBox::information(this, tr(&quot;Add Unsuccessful&quot;), tr(&quot;Sorry, \&quot;%1\&quot; is already in your address book.&quot;).arg(name)); } } else if (currentMode == EditingMode) { if (oldName != name) { if (!contacts.contains(name)) { QMessageBox::information(this, tr(&quot;Edit Successful&quot;), tr(&quot;\&quot;%1\&quot; has been edited in your address book.&quot;).arg(oldName)); contacts.remove(oldName); contacts.insert(name, address); } else { QMessageBox::information(this, tr(&quot;Edit 
Unsuccessful&quot;), tr(&quot;Sorry, \&quot;%1\&quot; is already in your address book.&quot;).arg(name)); } } else if (oldAddress != address) { QMessageBox::information(this, tr(&quot;Edit Successful&quot;), tr(&quot;\&quot;%1\&quot; has been edited in your address book.&quot;).arg(name)); contacts[name] = address; } } updateInterface(NavigationMode); } void AddressBook::cancel() { nameLine-&gt;setText(oldName); addressText-&gt;setText(oldAddress); updateInterface(NavigationMode); } void AddressBook::removeContact() { QString name = nameLine-&gt;text(); QString address = addressText-&gt;toPlainText(); if (contacts.contains(name)) { int button = QMessageBox::question(this, tr(&quot;Confirm Remove&quot;), tr(&quot;Are you sure you want to remove \&quot;%1\&quot;?&quot;).arg(name), QMessageBox::Yes | QMessageBox::No); if (button == QMessageBox::Yes) { previous(); contacts.remove(name); QMessageBox::information(this, tr(&quot;Remove Successful&quot;), tr(&quot;\&quot;%1\&quot; has been removed from your address book.&quot;).arg(name)); } } updateInterface(NavigationMode); } void AddressBook::next() { QString name = nameLine-&gt;text(); QMap&lt;QString, QString&gt;::iterator i = contacts.find(name); if (i != contacts.end()) i++; if (i == contacts.end()) i = contacts.begin(); nameLine-&gt;setText(i.key()); addressText-&gt;setText(i.value()); } void AddressBook::previous() { QString name = nameLine-&gt;text(); QMap&lt;QString, QString&gt;::iterator i = contacts.find(name); if (i == contacts.end()) { nameLine-&gt;clear(); addressText-&gt;clear(); return; } if (i == contacts.begin()) i = contacts.end(); i--; nameLine-&gt;setText(i.key()); addressText-&gt;setText(i.value()); } void AddressBook::findContact() { dialog-&gt;show(); if (dialog-&gt;exec() == QDialog::Accepted) { QString contactName = dialog-&gt;getFindText(); if (contacts.contains(contactName)) { nameLine-&gt;setText(contactName); addressText-&gt;setText(contacts.value(contactName)); } else { 
QMessageBox::information(this, tr(&quot;Contact Not Found&quot;), tr(&quot;Sorry, \&quot;%1\&quot; is not in your address book.&quot;).arg(contactName)); return; } } updateInterface(NavigationMode); } void AddressBook::updateInterface(Mode mode) { currentMode = mode; switch (currentMode) { case AddingMode: case EditingMode: nameLine-&gt;setReadOnly(false); nameLine-&gt;setFocus(Qt::OtherFocusReason); addressText-&gt;setReadOnly(false); addButton-&gt;setEnabled(false); editButton-&gt;setEnabled(false); removeButton-&gt;setEnabled(false); nextButton-&gt;setEnabled(false); previousButton-&gt;setEnabled(false); submitButton-&gt;show(); cancelButton-&gt;show(); break; case NavigationMode: if (contacts.isEmpty()) { nameLine-&gt;clear(); addressText-&gt;clear(); } nameLine-&gt;setReadOnly(true); addressText-&gt;setReadOnly(true); addButton-&gt;setEnabled(true); int number = contacts.size(); editButton-&gt;setEnabled(number &gt;= 1); removeButton-&gt;setEnabled(number &gt;= 1); findButton-&gt;setEnabled(number &gt; 2); nextButton-&gt;setEnabled(number &gt; 1); previousButton-&gt;setEnabled(number &gt; 1); submitButton-&gt;hide(); cancelButton-&gt;hide(); break; } }</pre> <p /><address><hr /><div align="center"> <table width="100%" cellspacing="0" border="0"><tr class="address"> <td width="40%" align="left">Copyright &copy; 2009 Nokia Corporation and/or its subsidiary(-ies)</td> <td width="20%" align="center"><a href="trademarks.html">Trademarks</a></td> <td width="40%" align="right"><div align="right">Qt 4.6.0</div></td> </tr></table></div></address></body> </html>
{ "pile_set_name": "Github" }
# Response class for the AWS Organizations ListAccounts API call.
package Paws::Organizations::ListAccountsResponse;
  use Moose;

  # The accounts in the organization (see POD below).
  has Accounts => (is => 'ro', isa => 'ArrayRef[Paws::Organizations::Account]');

  # Pagination token; present when more results remain to be fetched.
  has NextToken => (is => 'ro', isa => 'Str');

  # AWS request id attached to this response (internal bookkeeping).
  has _request_id => (is => 'ro', isa => 'Str');

### main pod documentation begin ###

=head1 NAME

Paws::Organizations::ListAccountsResponse

=head1 ATTRIBUTES

=head2 Accounts => ArrayRef[L<Paws::Organizations::Account>]

A list of objects in the organization.

=head2 NextToken => Str

If present, this value indicates that there is more output available
than is included in the current response. Use this value in the
C<NextToken> request parameter in a subsequent call to the operation to
get the next part of the output. You should repeat this until the
C<NextToken> response element comes back as C<null>.

=head2 _request_id => Str

=cut

1;
{ "pile_set_name": "Github" }
// Copyright (c) 2003-present, Jodd Team (http://jodd.org)
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.

package jodd.vtor.constraint;

import jodd.vtor.Constraint;

import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;

/**
 * Validation constraint: the annotated field must match the given wildcard pattern.
 * <p>
 * NOTE(review): the validator wired here is {@code WildcardMatchConstraint}, although the
 * annotation is named {@code WildcardPathMatch} — presumably a path-aware
 * {@code WildcardPathMatchConstraint} was intended; confirm against the constraint package.
 */
@Retention(RetentionPolicy.RUNTIME)
@Target(ElementType.FIELD)
@Constraint(WildcardMatchConstraint.class)
public @interface WildcardPathMatch {

	/**
	 * The wildcard pattern the field value is validated against.
	 */
	String value();

	// ---------------------------------------------------------------- common

	/**
	 * Profiles.
	 */
	String[] profiles() default {};

	/**
	 * Severity.
	 */
	int severity() default 0;

	/**
	 * Message.
	 */
	String message() default "jodd.vtor.constraint.WildcardPathMatch";
}
{ "pile_set_name": "Github" }
/* * @BEGIN LICENSE * * Psi4: an open-source quantum chemistry software package * * Copyright (c) 2007-2019 The Psi4 Developers. * * The copyrights for code used from other parties are included in * the corresponding files. * * This file is part of Psi4. * * Psi4 is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published by * the Free Software Foundation, version 3. * * Psi4 is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License along * with Psi4; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. * * @END LICENSE */ /*! \defgroup QT libqt: The Quantum-Trio Miscellaneous Library */ /*! \file \ingroup QT \brief The PSI3 BLAS1 interface routines Interface to the BLAS routines C. David Sherrill Anna I. Krylov May 1998 NOTE: Refactored by Rob Parrish on 1/24/2010 This file now contains all relevant BLAS1 routines, with provisions made for >2^31 elements (size_t sizes). 
All BLAS2 and BLAS3 routines are now wrapped and are in blas_inftc23.cc */ #include <cstdio> #include <climits> #include <cmath> #include "psi4/pragma.h" #include "psi4/libqt/blas_intfc_mangle.h" extern "C" { extern void F_DSWAP(int *length, double *x, int *incx, double *y, int *inc_y); extern void F_DAXPY(int *length, double *a, double *x, int *inc_x, double *y, int *inc_y); extern void F_DCOPY(int *length, double *x, int *inc_x, double *y, int *inc_y); extern void F_DGEMM(char *transa, char *transb, int *m, int *n, int *k, double *alpha, double *A, int *lda, double *B, int *ldb, double *beta, double *C, int *ldc); extern void F_DSYMM(char *side, char *uplo, int *m, int *n, double *alpha, double *A, int *lda, double *B, int *ldb, double *beta, double *C, int *ldc); extern void F_DROT(int *ntot, double *x, int *incx, double *y, int *incy, double *cotheta, double *sintheta); extern void F_DSCAL(int *n, double *alpha, double *vec, int *inc); extern void F_DGEMV(char *transa, int *m, int *n, double *alpha, double *A, int *lda, double *X, int *inc_x, double *beta, double *Y, int *inc_y); extern void F_DSYMV(char *uplo, int *n, double *alpha, double *A, int *lda, double *X, int *inc_x, double *beta, double *Y, int *inc_y); extern void F_DSPMV(char *uplo, int *n, double *alpha, double *A, double *X, int *inc_x, double *beta, double *Y, int *inc_y); extern double F_DDOT(int *n, double *x, int *incx, double *y, int *incy); extern double F_DNRM2(int *n, double *x, int *incx); extern double F_DASUM(int *n, double *x, int *incx); extern int F_IDAMAX(int *n, double *x, int *incx); } namespace psi { /** * Swaps a vector with another vector. * * @param length Specifies the number of elements in vectors x and y. * @param x Array, DIMENSION at least (1 + (n-1)*abs(incx)). * @param inc_x Specifies the increment for the elements of x. * @param y Array, DIMENSION at least (1 + (n-1)*abs(incy)). * @param inc_y Specifies the increment for the elements of y. 
* * @ingroup QT */ void PSI_API C_DSWAP(size_t length, double *x, int inc_x, double *y, int inc_y) { int big_blocks = (int)(length / INT_MAX); int small_size = (int)(length % INT_MAX); for (int block = 0; block <= big_blocks; block++) { double *x_s = &x[static_cast<size_t>(block) * inc_x * INT_MAX]; double *y_s = &y[static_cast<size_t>(block) * inc_y * INT_MAX]; signed int length_s = (block == big_blocks) ? small_size : INT_MAX; ::F_DSWAP(&length_s, x_s, &inc_x, y_s, &inc_y); } } /*! * This function performs y = a * x + y. * * Steps every inc_x in x and every inc_y in y (normally both 1). * * \param length length of arrays * \param a scalar a to multiply vector x * \param x vector x * \param inc_x how many places to skip to get to next element in x * \param y vector y * \param inc_y how many places to skip to get to next element in y * * \ingroup QT */ void PSI_API C_DAXPY(size_t length, double a, double *x, int inc_x, double *y, int inc_y) { int big_blocks = (int)(length / INT_MAX); int small_size = (int)(length % INT_MAX); for (int block = 0; block <= big_blocks; block++) { double *x_s = &x[static_cast<size_t>(block) * inc_x * INT_MAX]; double *y_s = &y[static_cast<size_t>(block) * inc_y * INT_MAX]; signed int length_s = (block == big_blocks) ? small_size : INT_MAX; ::F_DAXPY(&length_s, &a, x_s, &inc_x, y_s, &inc_y); } } /*! * This function copies x into y. * * Steps every inc_x in x and every inc_y in y (normally both 1). 
 *
 * \param length = length of array
 * \param x = vector x
 * \param inc_x = how many places to skip to get to next element in x
 * \param y = vector y
 * \param inc_y = how many places to skip to get to next element in y
 *
 * \ingroup QT
 */
void PSI_API C_DCOPY(size_t length, double *x, int inc_x, double *y, int inc_y) {
    // Split the 64-bit length into 32-bit chunks for the Fortran interface.
    int big_blocks = (int)(length / INT_MAX);
    int small_size = (int)(length % INT_MAX);
    for (int block = 0; block <= big_blocks; block++) {
        double *x_s = &x[static_cast<size_t>(block) * inc_x * INT_MAX];
        double *y_s = &y[static_cast<size_t>(block) * inc_y * INT_MAX];
        // Every chunk except the last is full; the last carries the remainder.
        signed int length_s = (block == big_blocks) ? small_size : INT_MAX;
        ::F_DCOPY(&length_s, x_s, &inc_x, y_s, &inc_y);
    }
}

/*!
 * This function scales a vector by a real scalar.
 *
 * \param length length of array
 * \param alpha scale factor
 * \param vec vector to scale
 * \param inc how many places to skip to get to next element in vec
 *
 * \ingroup QT
 */
void PSI_API C_DSCAL(size_t length, double alpha, double *vec, int inc) {
    // Split the 64-bit length into 32-bit chunks for the Fortran interface.
    int big_blocks = (int)(length / INT_MAX);
    int small_size = (int)(length % INT_MAX);
    for (int block = 0; block <= big_blocks; block++) {
        double *vec_s = &vec[static_cast<size_t>(block) * inc * INT_MAX];
        signed int length_s = (block == big_blocks) ? small_size : INT_MAX;
        ::F_DSCAL(&length_s, &alpha, vec_s, &inc);
    }
}

/*!
 * Calculates a plane Givens rotation for vectors x, y and
 * angle theta. x = x*cos + y*sin, y = -x*sin + y*cos.
 *
 * \param x vector x
 * \param y vector Y
 * \param length length of x,y
 * \param inc_x how many places to skip to get to the next element of x
 * \param inc_y how many places to skip to get to the next element of y
 *
 * \ingroup QT
 */
void PSI_API C_DROT(size_t length, double *x, int inc_x, double *y, int inc_y, double costheta, double sintheta) {
    // Split the 64-bit length into 32-bit chunks for the Fortran interface.
    int big_blocks = (int)(length / INT_MAX);
    int small_size = (int)(length % INT_MAX);
    for (int block = 0; block <= big_blocks; block++) {
        double *x_s = &x[static_cast<size_t>(block) * inc_x * INT_MAX];
        double *y_s = &y[static_cast<size_t>(block) * inc_y * INT_MAX];
        // Every chunk except the last is full; the last carries the remainder.
        signed int length_s = (block == big_blocks) ? small_size : INT_MAX;
        ::F_DROT(&length_s, x_s, &inc_x, y_s, &inc_y, &costheta, &sintheta);
    }
}

/*!
 * This function returns the dot product of two vectors, x and y.
 *
 * \param length Number of elements in x and y.
 * \param x A pointer to the beginning of the data in x.
 *          Must be of at least length (1+(N-1)*abs(inc_x).
 * \param inc_x how many places to skip to get to next element in x
 * \param y A pointer to the beginning of the data in y.
 * \param inc_y how many places to skip to get to next element in y
 *
 * @returns the dot product
 *
 * \ingroup QT
 */
double PSI_API C_DDOT(size_t length, double *x, int inc_x, double *y, int inc_y) {
    // Empty vectors have a zero dot product by convention.
    if (length == 0) return 0.0;

    double reg = 0.0;
    int big_blocks = (int)(length / INT_MAX);
    int small_size = (int)(length % INT_MAX);
    for (int block = 0; block <= big_blocks; block++) {
        double *x_s = &x[static_cast<size_t>(block) * inc_x * INT_MAX];
        double *y_s = &y[static_cast<size_t>(block) * inc_y * INT_MAX];
        signed int length_s = (block == big_blocks) ? small_size : INT_MAX;
        // Dot products are additive across chunks, so partial results can
        // simply be accumulated.
        reg += ::F_DDOT(&length_s, x_s, &inc_x, y_s, &inc_y);
    }

    return reg;
}

/*!
 * This function returns the Euclidean norm of this vector.
 * (Doc fix: the BLAS DNRM2 routine computes the norm itself, not its square.)
 *
 * \param length Number of elements in x.
 * \param x A pointer to the beginning of the data in x.
 *          Must be of at least length (1+(N-1)*abs(inc_x).
* \param inc_x how many places to skip to get to next element in x * * @returns the norm squared product * * \ingroup QT */ double PSI_API C_DNRM2(size_t length, double *x, int inc_x) { if (length == 0) return 0.0; double reg = 0.0; int big_blocks = (int)(length / INT_MAX); int small_size = (int)(length % INT_MAX); for (int block = 0; block <= big_blocks; block++) { double *x_s = &x[static_cast<size_t>(block) * inc_x * INT_MAX]; signed int length_s = (block == big_blocks) ? small_size : INT_MAX; reg += ::F_DNRM2(&length_s, x_s, &inc_x); } return reg; } /*! * This function returns the sum of the absolute value of this vector. * * \param length Number of elements in x. * \param x A pointer to the beginning of the data in x. * Must be of at least length (1+(N-1)*abs(inc_x). * \param inc_x how many places to skip to get to next element in x * * @returns the sum of the absolute value * * \ingroup QT */ double PSI_API C_DASUM(size_t length, double *x, int inc_x) { if (length == 0) return 0.0; double reg = 0.0; int big_blocks = (int)(length / INT_MAX); int small_size = (int)(length % INT_MAX); for (int block = 0; block <= big_blocks; block++) { double *x_s = &x[static_cast<size_t>(block) * inc_x * INT_MAX]; signed int length_s = (block == big_blocks) ? small_size : INT_MAX; reg += ::F_DASUM(&length_s, x_s, &inc_x); } return reg; } /*! * This function returns the index of the largest absolute value compoment of this vector. * * \param length Number of elements in x. * \param x A pointer to the beginning of the data in x. * Must be of at least length (1+(N-1)*abs(inc_x). 
* \param inc_x how many places to skip to get to next element in x * * @returns the index of the largest absolute value * * \ingroup QT */ size_t C_IDAMAX(size_t length, double *x, int inc_x) { if (length == 0) return 0L; size_t reg = 0L; size_t reg2 = 0L; int big_blocks = (int)(length / INT_MAX); int small_size = (int)(length % INT_MAX); for (int block = 0; block <= big_blocks; block++) { double *x_s = &x[static_cast<size_t>(block) * inc_x * INT_MAX]; signed int length_s = (block == big_blocks) ? small_size : INT_MAX; reg2 = ::F_IDAMAX(&length_s, x_s, &inc_x) + static_cast<size_t>(block) * inc_x * INT_MAX; if (std::fabs(x[reg]) > std::fabs(x[reg2])) reg = reg2; } return reg; } } // namespace psi
{ "pile_set_name": "Github" }
using System;

namespace NTMiner.MinerStudio.Vms {
    /// <summary>
    /// View-model state for a "Ws" connection (presumably WebSocket — confirm
    /// against the implementing class) used by Miner Studio.
    /// </summary>
    public interface IWsStateViewModel {
        /// <summary>Whether the Ws connection is currently online.</summary>
        bool IsWsOnline { get; set; }

        /// <summary>Human-readable description of the current Ws state.</summary>
        string WsDescription { get; set; }

        /// <summary>
        /// Delay, in seconds, before the next connection attempt
        /// (inferred from the name — verify in the reconnect logic).
        /// </summary>
        int WsNextTrySecondsDelay { get; set; }

        /// <summary>Timestamp of the last connection attempt.</summary>
        DateTime WsLastTryOn { get; set; }

        /// <summary>Whether a connection attempt is currently in progress.</summary>
        bool IsConnecting { get; set; }
    }
}
{ "pile_set_name": "Github" }
; Zhang-Suen thinning: iteratively strips boundary pixels from a binary image
; (read as characters from Zhang-Suen.txt) until no pixel can be removed,
; then writes the resulting skeleton to NewFile.txt using '#' for set pixels.
FileIn  := A_ScriptDir "\Zhang-Suen.txt"
FileOut := A_ScriptDir "\NewFile.txt"

if (!FileExist(FileIn)) {
	MsgBox, 48, File Not Found, % "File """ FileIn """ not found."
	ExitApp
}

; S[line, column] holds the image; N is the cyclic neighbor order
; P2..P9 (wrapping back to P2) used for the 0->1 transition count.
S := {}
N := [2,3,4,5,6,7,8,9,2]

; Load the image character grid.
Loop, Read, % FileIn
{
	LineNum := A_Index
	Loop, Parse, A_LoopReadLine
		S[LineNum, A_Index] := A_LoopField
}

; Repeat the two Zhang-Suen sub-iterations until a full pass flips nothing.
Loop {
	FlipCount := 0
	Loop, 2 {
		Noted := [], i := A_Index
		for LineNum, Line in S {
			for PixNum, Pix in Line {
				; (0) Skip background pixels and border pixels
				;     (GetNeighbors returns 1 at the image edge).
				if (Pix = 0 || (P := GetNeighbors(LineNum, PixNum, S)) = 1)
					continue
				; (1) B(P1): number of set neighbors must be in 2..6.
				BP := 0
				for j, Val in P
					BP += Val
				if (BP < 2 || BP > 6)
					continue
				; (2) A(P1): exactly one 0->1 transition in the cyclic
				;     neighbor sequence.
				AP := 0
				Loop, 8
					if (P[N[A_Index]] = "0" && P[N[A_Index + 1]] = "1")
						AP++
				if (AP != 1)
					continue
				; (3 and 4) Directional conditions differ between the two
				;     sub-iterations (east/south vs. west/north boundary).
				if (i = 1) {
					if (P[2] + P[4] + P[6] = 3 || P[4] + P[6] + P[8] = 3)
						continue
				} else if (P[2] + P[4] + P[8] = 3 || P[2] + P[6] + P[8] = 3)
					continue
				; Mark for deletion; actual flip is deferred so all tests in
				; this sub-iteration see the same image.
				Noted.Insert([LineNum, PixNum])
				FlipCount++
			}
		}
		for j, Coords in Noted
			S[Coords[1], Coords[2]] := 0
	}
	if (!FlipCount)
		break
}

; Render the final grid: '#' for set pixels, space otherwise.
for LineNum, Line in S {
	for PixNum, Pix in Line
		Out .= Pix ? "#" : " "
	Out .= "`n"
}
FileAppend, % Out, % FileOut

; Returns the 8-neighborhood of (Y, X) indexed P2..P9 (P2 = north,
; clockwise), or 1 when any neighbor lies outside the image (border pixel).
GetNeighbors(Y, X, S) {
	Neighbors := []
	if ((Neighbors[8] := S[Y, X - 1]) = "")
		return 1
	if ((Neighbors[4] := S[Y, X + 1]) = "")
		return 1
	Loop, 3
		if ((Neighbors[A_Index = 1 ? 9 : A_Index] := S[Y - 1, X - 2 + A_Index]) = "")
			return 1
	Loop, 3
		if ((Neighbors[8 - A_Index] := S[Y + 1, X - 2 + A_Index]) = "")
			return 1
	return Neighbors
}
{ "pile_set_name": "Github" }
package de.onyxbits.raccoon.gplay;

import java.util.ArrayList;
import java.util.List;

import com.akdeniz.googleplaycrawler.GooglePlay.DocV2;
import com.akdeniz.googleplaycrawler.GooglePlay.PreFetch;
import com.akdeniz.googleplaycrawler.GooglePlay.ResponseWrapper;
import com.google.protobuf.InvalidProtocolBufferException;

/**
 * A (relatively) smart adapter for transforming the various search response
 * formats into a flat, continuous list.
 * 
 * @author patrick
 * 
 */
public class SearchEngineResultPage {

	// Flattened result entries (leaf documents).
	private ArrayList<DocV2> items;
	// URL for fetching the next result page, or null when exhausted.
	private String nextPageUrl;
	// Title of the first titled page that was appended, or null.
	private String title;
	// Filter mode: one of ALL, SEARCH, SIMILIAR, RELATED.
	private int type;

	/**
	 * Type: everything
	 */
	public static final int ALL = 0;

	/**
	 * Type: only append what was searched for
	 */
	public static final int SEARCH = 1;

	/**
	 * Type: only append similar items. This requires an exact match
	 */
	// NOTE(review): constant name is misspelled ("SIMILIAR") but it is public
	// API — renaming would break callers, so it is left as-is.
	public static final int SIMILIAR = 2;

	/**
	 * Type: only append items of the "other users also..." type. This requires an
	 * exact match.
	 */
	public static final int RELATED = 3;

	/**
	 * 
	 * @param type
	 *          Either ALL, SEARCH, SIMILAR or RELATED. Only Applies when trying
	 *          to add {@link DocumentType#MULTILIST}.
	 */
	public SearchEngineResultPage(int type) {
		this.items = new ArrayList<DocV2>();
		this.nextPageUrl = null;
		this.type = type;
	}

	/**
	 * Try to make sense of a {@link ResponseWrapper}, containing a search result.
	 * 
	 * @param rw
	 *          a wrapper containing either a {@link SearchResponse},
	 *          {@link ListResponse} or a {@link PreFetch}
	 */
	public void append(ResponseWrapper rw) {
		// The SearchResponse format changed considerably over time. The message
		// type seems to have gotten deprecated for Android 5 and later in favor of
		// ListResponse. Apparently, SearchResponse got too unwieldy.
		append(Unwrap.searchResponse(rw).getDocList());
		append(Unwrap.listResponse(rw).getDocList());

		for (PreFetch pf : rw.getPreFetchList()) {
			try {
				// Pre-fetched payloads are nested wrappers; recurse into them.
				append(ResponseWrapper.parseFrom(pf.getResponse()));
			}
			catch (InvalidProtocolBufferException e) {
				// We tried, we failed.
			}
		}
	}

	// Appends every document of a response's doc list.
	private void append(List<DocV2> list) {
		for (DocV2 doc : list) {
			append(doc);
		}
	}

	/**
	 * Grow the SERP
	 * 
	 * @param doc
	 *          a document of type {@link DocumentType#PRODUCTLIST} or a document
	 *          containing a {@link DocumentType#PRODUCTLIST}.
	 */
	public void append(DocV2 doc) {
		switch (doc.getDocType()) {
			// NOTE(review): 46 and 45 are raw DocumentType wire values (45
			// presumably the product list, 46 its multi-list container) —
			// confirm against the GooglePlay protobuf enum before relying on
			// this reading.
			case 46: {
				// Multi-list container: only descend into children that pass
				// the configured type filter.
				for (DocV2 child : doc.getChildList()) {
					if (accept(child)) {
						append(child);
					}
				}
				break;
			}
			case 45: {
				// Product list: collect only leaf documents (doc type 1).
				for (DocV2 d:doc.getChildList()) {
					if (d.getDocType()==1) {
						items.add(d);
					}
				}
				nextPageUrl = null;
				if (doc.hasContainerMetadata()) {
					nextPageUrl = doc.getContainerMetadata().getNextPageUrl();
				}
				if (title == null && doc.hasTitle()) {
					title = doc.getTitle();
				}
				break;
			}
			default: {
				// Unknown container: descend and keep looking for lists.
				for (DocV2 child : doc.getChildList()) {
					append(child);
				}
				break;
			}
		}
	}

	// Decides whether a multi-list child matches the configured type filter,
	// keyed off its backend document id.
	private boolean accept(DocV2 doc) {
		String dbid = doc.getBackendDocid();
		switch (type) {
			case ALL: {
				return true;
			}
			case SEARCH: {
				return (dbid != null && dbid.matches(".*search.*"));
			}
			case SIMILIAR: {
				return (dbid != null && dbid.matches("similar_apps"));
			}
			case RELATED: {
				return (dbid != null && dbid
						.matches("pre_install_users_also_installed"));
			}
			default: {
				return false;
			}
		}
	}

	/**
	 * Get the entry list.
	 * 
	 * @return a flat list.
	 */
	public List<DocV2> getContent() {
		return items;
	}

	/**
	 * Get the title of this page (if any).
	 * 
	 * @return null or the title of the first appended doc.
	 */
	public String getTitle() {
		return title;
	}

	/**
	 * Check if results are available.
	 * 
	 * @return null if there are no more search results to load.
	 */
	public String getNextPageUrl() {
		return nextPageUrl;
	}

	public String toString() {
		StringBuilder ret = new StringBuilder();
		if (title != null) {
			ret.append('[');
			ret.append(title);
			ret.append("]\n");
		}
		for (DocV2 item : items) {
			ret.append(item.getDocid());
			ret.append(", ");
			ret.append("\"");
			ret.append(item.getTitle());
			ret.append("\"\n");
		}
		if (nextPageUrl != null) {
			ret.append("-> ");
			ret.append(nextPageUrl);
			ret.append('\n');
		}
		return ret.toString();
	}
}
{ "pile_set_name": "Github" }
/*
 * Vortex OpenSplice
 *
 * This software and documentation are Copyright 2006 to TO_YEAR ADLINK
 * Technology Limited, its affiliated companies and licensors. All rights
 * reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */
#ifndef DDS_BUFFERREGISTRY_H
#define DDS_BUFFERREGISTRY_H

#include "dds_dcps.h"
#include "dds_dcps_private.h"

/* Opaque registry tracking sample buffers loaned to the application.
 * NOTE(review): the include guard says BUFFERREGISTRY while the API is named
 * loanRegistry — presumably a historical rename; confirm before unifying. */
typedef struct dds_loanRegistry_s *dds_loanRegistry_t;

/* Creates a registry for loans of samples described by typeSupport. */
dds_loanRegistry_t
dds_loanRegistry_new (
    DDS_TypeSupport typeSupport);

/* Destroys a registry previously created with dds_loanRegistry_new. */
void
dds_loanRegistry_free (
    dds_loanRegistry_t _this);

/* Records a loaned buffer of `length` samples; returns a status code. */
int
dds_loanRegistry_register (
    dds_loanRegistry_t _this,
    void **buffer,
    uint32_t length);

/* Removes a previously registered loan; returns a status code. */
int
dds_loanRegistry_deregister (
    dds_loanRegistry_t _this,
    void **buffer,
    uint32_t length);

/* Presumably the in-memory size of one sample of the registered type —
 * confirm in the implementation. */
uint32_t
dds_loanRegistry_typeSize (
    dds_loanRegistry_t _this);

/* Copy-helper descriptor associated with the registered type. */
DDS_TypeSupportCopyInfo
dds_loanRegistry_copyInfo (
    dds_loanRegistry_t _this);

#endif
{ "pile_set_name": "Github" }
<?xml version="1.0" encoding="UTF-8" ?> <!DOCTYPE ldml SYSTEM "../../common/dtd/ldml.dtd"> <!-- Copyright ยฉ 1991-2020 Unicode, Inc. For terms of use, see http://www.unicode.org/copyright.html Unicode and the Unicode Logo are registered trademarks of Unicode, Inc. in the U.S. and other countries. CLDR data files are interpreted according to the LDML specification (http://unicode.org/reports/tr35/) --> <ldml> <identity> <version number="$Revision$"/> <language type="da"/> <territory type="DK"/> </identity> </ldml>
{ "pile_set_name": "Github" }
package org.eternity.theater.step03;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

/**
 * A box office: holds an amount of cash and a stock of tickets.
 */
public class TicketOffice {
    // Cash on hand.
    private Long amount;
    // Remaining tickets, sold front-to-back.
    private List<Ticket> tickets = new ArrayList<>();

    public TicketOffice(Long amount, Ticket... tickets) {
        this.amount = amount;
        this.tickets.addAll(Arrays.asList(tickets));
    }

    /**
     * Removes and returns the first ticket in stock.
     * Throws IndexOutOfBoundsException when the stock is empty (unchanged
     * behavior).
     */
    public Ticket getTicket() {
        return tickets.remove(0);
    }

    /** Decreases the cash balance by the given amount. */
    public void minusAmount(Long amount) {
        this.amount -= amount;
    }

    /**
     * Increases the cash balance by the given sale proceeds.
     *
     * BUG FIX: the previous body was {@code this.amount += amount;}, which
     * added the field to itself (doubling the balance) and silently ignored
     * the {@code sell} parameter.
     */
    public void plusAmount(Long sell) {
        this.amount += sell;
    }
}
{ "pile_set_name": "Github" }
/*
 * EFI application tables support
 *
 * Copyright (c) 2016 Alexander Graf
 *
 * SPDX-License-Identifier: GPL-2.0+
 */

#include <common.h>
#include <efi_loader.h>
#include <inttypes.h>
#include <smbios.h>

static const efi_guid_t smbios_guid = SMBIOS_TABLE_GUID;

/*
 * Generates the SMBIOS tables in freshly allocated EFI runtime-services
 * memory and publishes them to EFI payloads via the configuration table.
 * On allocation failure the function returns without installing anything.
 */
void efi_smbios_register(void)
{
	/* Map within the low 32 bits, to allow for 32bit SMBIOS tables */
	uint64_t dmi = 0xffffffff;

	/* Reserve 4kb for SMBIOS */
	uint64_t pages = 1;
	int memtype = EFI_RUNTIME_SERVICES_DATA;

	/* NOTE(review): the magic first argument (1) is presumably the EFI
	 * "allocate max address" strategy, pairing with the 0xffffffff ceiling
	 * seeded into dmi above — confirm against efi_allocate_pages(). */
	if (efi_allocate_pages(1, memtype, pages, &dmi) != EFI_SUCCESS)
		return;

	/* Generate SMBIOS tables */
	write_smbios_table(dmi);

	/* And expose them to our EFI payload */
	efi_install_configuration_table(&smbios_guid, (void*)(uintptr_t)dmi);
}
{ "pile_set_name": "Github" }
## N
```
N(expr)
```
> gives the numerical value of `expr`.

```
N(expr, precision)
```
> evaluates `expr` numerically with a precision of `precision` digits.

**Note**: the upper case identifier `N` is different from the lower case identifier `n`.

### Examples
```
>> N(Pi)
3.141592653589793

>> N(Pi, 50)
3.1415926535897932384626433832795028841971693993751

>> N(1/7)
0.14285714285714285

>> N(1/7, 5)
1.4285714285714285714e-1
```

### Related terms
[EvalF](EvalF.md)
{ "pile_set_name": "Github" }
import { Request, Response } from 'express';
import { getConnection, In } from 'typeorm';
import { api } from '../../../barrels/api';
import { entities } from '../../../barrels/entities';
import { enums } from '../../../barrels/enums';
import { helper } from '../../../barrels/helper';
import { interfaces } from '../../../barrels/interfaces';
import { proc } from '../../../barrels/proc';
import { sender } from '../../../barrels/sender';
import { store } from '../../../barrels/store';
import { validator } from '../../../barrels/validator';
import { wrapper } from '../../../barrels/wrapper';
import { ServerError } from '../../server-error';

/**
 * Dry-runs a batch of stored queries against BigQuery: collects cost
 * estimates for the valid queries, persists the failed ones, and returns
 * both sets to the client.
 */
export async function runQueriesDry(req: Request, res: Response) {
  let initId = validator.getRequestInfoInitId(req);

  let payload: api.RunQueriesDryRequestBody['payload'] = validator.getPayload(
    req
  );

  let queryIds = payload.query_ids;
  let dryId = payload.dry_id;

  let storeQueries = store.getQueriesRepo();

  let queries = <entities.QueryEntity[]>await storeQueries
    .find({ query_id: In(queryIds) })
    .catch(e => helper.reThrow(e, enums.storeErrorsEnum.STORE_QUERIES_FIND));

  // NOTE(review): assumes at least one query matched — queries[0] is
  // undefined on an empty result. Confirm upstream validation guarantees
  // non-empty query_ids.
  let projectId = queries[0].project_id;

  let storeProjects = store.getProjectsRepo();

  let project = <entities.ProjectEntity>await storeProjects
    .findOne({ project_id: projectId })
    .catch(e =>
      helper.reThrow(e, enums.storeErrorsEnum.STORE_PROJECTS_FIND_ONE)
    );

  if (!project) {
    throw new ServerError({ name: enums.otherErrorsEnum.PROJECT_NOT_FOUND });
  }

  let newLastRunDryTs = Number(helper.makeTs()); // number (not to save in db)

  // Dry-run every query in parallel; each item carries either a valid
  // estimate or an error query.
  let results = <interfaces.ItemRunQueryDry[]>await Promise.all(
    queries.map(
      async query =>
        <Promise<interfaces.ItemRunQueryDry>>proc
          .runQueryDryBigquery({
            query: query,
            new_last_run_dry_ts: newLastRunDryTs,
            credentials_file_path: project.bigquery_credentials_file_path,
            bigquery_project: project.bigquery_project
          })
          .catch(e =>
            helper.reThrow(e, enums.procErrorsEnum.PROC_RUN_QUERY_DRY)
          )
    )
  ).catch(e =>
    // NOTE(review): STORE_PROJECTS_FIND_ONE looks copy-pasted — this catch
    // wraps the Promise.all of dry runs, not a project lookup. Confirm the
    // intended enum member exists before relabeling.
    helper.reThrow(e,
      enums.storeErrorsEnum.STORE_PROJECTS_FIND_ONE)
  );

  let validEstimates = results
    .filter(result => !!result.valid_estimate)
    .map(x => x.valid_estimate);

  let errorQueries = results
    .filter(result => !!result.error_query)
    .map(x => x.error_query);

  // update server_ts
  let newServerTs = helper.makeTs();

  errorQueries = helper.refreshServerTs(errorQueries, newServerTs);

  // save to database
  let connection = getConnection();

  await connection
    .transaction(async manager => {
      await store
        .save({
          manager: manager,
          records: {
            queries: errorQueries
          },
          server_ts: newServerTs,
          source_init_id: initId
        })
        .catch(e =>
          // NOTE(review): same copy-pasted error label as above — this is a
          // save, not a project lookup.
          helper.reThrow(e, enums.storeErrorsEnum.STORE_PROJECTS_FIND_ONE)
        );
    })
    .catch(e =>
      // NOTE(review): same copy-pasted error label as above.
      helper.reThrow(e, enums.storeErrorsEnum.STORE_PROJECTS_FIND_ONE)
    );

  // response
  let responsePayload: api.RunQueriesDryResponse200Body['payload'] = {
    dry_id: dryId,
    valid_estimates: validEstimates,
    error_queries: errorQueries.map(query => wrapper.wrapToApiQuery(query))
  };

  sender.sendClientResponse(req, res, responsePayload);
}
{ "pile_set_name": "Github" }
// Go support for Protocol Buffers - Google's data interchange format // // Copyright 2010 The Go Authors. All rights reserved. // https://github.com/golang/protobuf // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. package proto_test import ( "bytes" "encoding/json" "errors" "fmt" "math" "math/rand" "reflect" "runtime/debug" "strings" "testing" "time" . "github.com/golang/protobuf/proto" . 
"github.com/golang/protobuf/proto/testdata" ) var globalO *Buffer func old() *Buffer { if globalO == nil { globalO = NewBuffer(nil) } globalO.Reset() return globalO } func equalbytes(b1, b2 []byte, t *testing.T) { if len(b1) != len(b2) { t.Errorf("wrong lengths: 2*%d != %d", len(b1), len(b2)) return } for i := 0; i < len(b1); i++ { if b1[i] != b2[i] { t.Errorf("bad byte[%d]:%x %x: %s %s", i, b1[i], b2[i], b1, b2) } } } func initGoTestField() *GoTestField { f := new(GoTestField) f.Label = String("label") f.Type = String("type") return f } // These are all structurally equivalent but the tag numbers differ. // (It's remarkable that required, optional, and repeated all have // 8 letters.) func initGoTest_RequiredGroup() *GoTest_RequiredGroup { return &GoTest_RequiredGroup{ RequiredField: String("required"), } } func initGoTest_OptionalGroup() *GoTest_OptionalGroup { return &GoTest_OptionalGroup{ RequiredField: String("optional"), } } func initGoTest_RepeatedGroup() *GoTest_RepeatedGroup { return &GoTest_RepeatedGroup{ RequiredField: String("repeated"), } } func initGoTest(setdefaults bool) *GoTest { pb := new(GoTest) if setdefaults { pb.F_BoolDefaulted = Bool(Default_GoTest_F_BoolDefaulted) pb.F_Int32Defaulted = Int32(Default_GoTest_F_Int32Defaulted) pb.F_Int64Defaulted = Int64(Default_GoTest_F_Int64Defaulted) pb.F_Fixed32Defaulted = Uint32(Default_GoTest_F_Fixed32Defaulted) pb.F_Fixed64Defaulted = Uint64(Default_GoTest_F_Fixed64Defaulted) pb.F_Uint32Defaulted = Uint32(Default_GoTest_F_Uint32Defaulted) pb.F_Uint64Defaulted = Uint64(Default_GoTest_F_Uint64Defaulted) pb.F_FloatDefaulted = Float32(Default_GoTest_F_FloatDefaulted) pb.F_DoubleDefaulted = Float64(Default_GoTest_F_DoubleDefaulted) pb.F_StringDefaulted = String(Default_GoTest_F_StringDefaulted) pb.F_BytesDefaulted = Default_GoTest_F_BytesDefaulted pb.F_Sint32Defaulted = Int32(Default_GoTest_F_Sint32Defaulted) pb.F_Sint64Defaulted = Int64(Default_GoTest_F_Sint64Defaulted) } pb.Kind = GoTest_TIME.Enum() 
pb.RequiredField = initGoTestField() pb.F_BoolRequired = Bool(true) pb.F_Int32Required = Int32(3) pb.F_Int64Required = Int64(6) pb.F_Fixed32Required = Uint32(32) pb.F_Fixed64Required = Uint64(64) pb.F_Uint32Required = Uint32(3232) pb.F_Uint64Required = Uint64(6464) pb.F_FloatRequired = Float32(3232) pb.F_DoubleRequired = Float64(6464) pb.F_StringRequired = String("string") pb.F_BytesRequired = []byte("bytes") pb.F_Sint32Required = Int32(-32) pb.F_Sint64Required = Int64(-64) pb.Requiredgroup = initGoTest_RequiredGroup() return pb } func fail(msg string, b *bytes.Buffer, s string, t *testing.T) { data := b.Bytes() ld := len(data) ls := len(s) / 2 fmt.Printf("fail %s ld=%d ls=%d\n", msg, ld, ls) // find the interesting spot - n n := ls if ld < ls { n = ld } j := 0 for i := 0; i < n; i++ { bs := hex(s[j])*16 + hex(s[j+1]) j += 2 if data[i] == bs { continue } n = i break } l := n - 10 if l < 0 { l = 0 } h := n + 10 // find the interesting spot - n fmt.Printf("is[%d]:", l) for i := l; i < h; i++ { if i >= ld { fmt.Printf(" --") continue } fmt.Printf(" %.2x", data[i]) } fmt.Printf("\n") fmt.Printf("sb[%d]:", l) for i := l; i < h; i++ { if i >= ls { fmt.Printf(" --") continue } bs := hex(s[j])*16 + hex(s[j+1]) j += 2 fmt.Printf(" %.2x", bs) } fmt.Printf("\n") t.Fail() // t.Errorf("%s: \ngood: %s\nbad: %x", msg, s, b.Bytes()) // Print the output in a partially-decoded format; can // be helpful when updating the test. It produces the output // that is pasted, with minor edits, into the argument to verify(). 
// data := b.Bytes() // nesting := 0 // for b.Len() > 0 { // start := len(data) - b.Len() // var u uint64 // u, err := DecodeVarint(b) // if err != nil { // fmt.Printf("decode error on varint:", err) // return // } // wire := u & 0x7 // tag := u >> 3 // switch wire { // case WireVarint: // v, err := DecodeVarint(b) // if err != nil { // fmt.Printf("decode error on varint:", err) // return // } // fmt.Printf("\t\t\"%x\" // field %d, encoding %d, value %d\n", // data[start:len(data)-b.Len()], tag, wire, v) // case WireFixed32: // v, err := DecodeFixed32(b) // if err != nil { // fmt.Printf("decode error on fixed32:", err) // return // } // fmt.Printf("\t\t\"%x\" // field %d, encoding %d, value %d\n", // data[start:len(data)-b.Len()], tag, wire, v) // case WireFixed64: // v, err := DecodeFixed64(b) // if err != nil { // fmt.Printf("decode error on fixed64:", err) // return // } // fmt.Printf("\t\t\"%x\" // field %d, encoding %d, value %d\n", // data[start:len(data)-b.Len()], tag, wire, v) // case WireBytes: // nb, err := DecodeVarint(b) // if err != nil { // fmt.Printf("decode error on bytes:", err) // return // } // after_tag := len(data) - b.Len() // str := make([]byte, nb) // _, err = b.Read(str) // if err != nil { // fmt.Printf("decode error on bytes:", err) // return // } // fmt.Printf("\t\t\"%x\" \"%x\" // field %d, encoding %d (FIELD)\n", // data[start:after_tag], str, tag, wire) // case WireStartGroup: // nesting++ // fmt.Printf("\t\t\"%x\"\t\t// start group field %d level %d\n", // data[start:len(data)-b.Len()], tag, nesting) // case WireEndGroup: // fmt.Printf("\t\t\"%x\"\t\t// end group field %d level %d\n", // data[start:len(data)-b.Len()], tag, nesting) // nesting-- // default: // fmt.Printf("unrecognized wire type %d\n", wire) // return // } // } } func hex(c uint8) uint8 { if '0' <= c && c <= '9' { return c - '0' } if 'a' <= c && c <= 'f' { return 10 + c - 'a' } if 'A' <= c && c <= 'F' { return 10 + c - 'A' } return 0 } func equal(b []byte, s string, t 
*testing.T) bool { if 2*len(b) != len(s) { // fail(fmt.Sprintf("wrong lengths: 2*%d != %d", len(b), len(s)), b, s, t) fmt.Printf("wrong lengths: 2*%d != %d\n", len(b), len(s)) return false } for i, j := 0, 0; i < len(b); i, j = i+1, j+2 { x := hex(s[j])*16 + hex(s[j+1]) if b[i] != x { // fail(fmt.Sprintf("bad byte[%d]:%x %x", i, b[i], x), b, s, t) fmt.Printf("bad byte[%d]:%x %x", i, b[i], x) return false } } return true } func overify(t *testing.T, pb *GoTest, expected string) { o := old() err := o.Marshal(pb) if err != nil { fmt.Printf("overify marshal-1 err = %v", err) o.DebugPrint("", o.Bytes()) t.Fatalf("expected = %s", expected) } if !equal(o.Bytes(), expected, t) { o.DebugPrint("overify neq 1", o.Bytes()) t.Fatalf("expected = %s", expected) } // Now test Unmarshal by recreating the original buffer. pbd := new(GoTest) err = o.Unmarshal(pbd) if err != nil { t.Fatalf("overify unmarshal err = %v", err) o.DebugPrint("", o.Bytes()) t.Fatalf("string = %s", expected) } o.Reset() err = o.Marshal(pbd) if err != nil { t.Errorf("overify marshal-2 err = %v", err) o.DebugPrint("", o.Bytes()) t.Fatalf("string = %s", expected) } if !equal(o.Bytes(), expected, t) { o.DebugPrint("overify neq 2", o.Bytes()) t.Fatalf("string = %s", expected) } } // Simple tests for numeric encode/decode primitives (varint, etc.) 
func TestNumericPrimitives(t *testing.T) { for i := uint64(0); i < 1e6; i += 111 { o := old() if o.EncodeVarint(i) != nil { t.Error("EncodeVarint") break } x, e := o.DecodeVarint() if e != nil { t.Fatal("DecodeVarint") } if x != i { t.Fatal("varint decode fail:", i, x) } o = old() if o.EncodeFixed32(i) != nil { t.Fatal("encFixed32") } x, e = o.DecodeFixed32() if e != nil { t.Fatal("decFixed32") } if x != i { t.Fatal("fixed32 decode fail:", i, x) } o = old() if o.EncodeFixed64(i*1234567) != nil { t.Error("encFixed64") break } x, e = o.DecodeFixed64() if e != nil { t.Error("decFixed64") break } if x != i*1234567 { t.Error("fixed64 decode fail:", i*1234567, x) break } o = old() i32 := int32(i - 12345) if o.EncodeZigzag32(uint64(i32)) != nil { t.Fatal("EncodeZigzag32") } x, e = o.DecodeZigzag32() if e != nil { t.Fatal("DecodeZigzag32") } if x != uint64(uint32(i32)) { t.Fatal("zigzag32 decode fail:", i32, x) } o = old() i64 := int64(i - 12345) if o.EncodeZigzag64(uint64(i64)) != nil { t.Fatal("EncodeZigzag64") } x, e = o.DecodeZigzag64() if e != nil { t.Fatal("DecodeZigzag64") } if x != uint64(i64) { t.Fatal("zigzag64 decode fail:", i64, x) } } } // fakeMarshaler is a simple struct implementing Marshaler and Message interfaces. type fakeMarshaler struct { b []byte err error } func (f *fakeMarshaler) Marshal() ([]byte, error) { return f.b, f.err } func (f *fakeMarshaler) String() string { return fmt.Sprintf("Bytes: %v Error: %v", f.b, f.err) } func (f *fakeMarshaler) ProtoMessage() {} func (f *fakeMarshaler) Reset() {} type msgWithFakeMarshaler struct { M *fakeMarshaler `protobuf:"bytes,1,opt,name=fake"` } func (m *msgWithFakeMarshaler) String() string { return CompactTextString(m) } func (m *msgWithFakeMarshaler) ProtoMessage() {} func (m *msgWithFakeMarshaler) Reset() {} // Simple tests for proto messages that implement the Marshaler interface. 
// TestMarshalerEncoding verifies that messages implementing Marshaler are
// encoded via their own Marshal method, that the bytes and error from
// (*Buffer).Marshal, the top-level Marshal, and Size all agree, and that the
// buffer is still written when the error is a continuable RequiredNotSetError.
func TestMarshalerEncoding(t *testing.T) {
	tests := []struct {
		name    string
		m       Message
		want    []byte
		errType reflect.Type
	}{
		{
			name: "Marshaler that fails",
			m: &fakeMarshaler{
				err: errors.New("some marshal err"),
				b:   []byte{5, 6, 7},
			},
			// Since the Marshal method returned bytes, they should be written to the
			// buffer. (For efficiency, we assume that Marshal implementations are
			// always correct w.r.t. RequiredNotSetError and output.)
			want:    []byte{5, 6, 7},
			errType: reflect.TypeOf(errors.New("some marshal err")),
		},
		{
			name: "Marshaler that fails with RequiredNotSetError",
			m: &msgWithFakeMarshaler{
				M: &fakeMarshaler{
					err: &RequiredNotSetError{},
					b:   []byte{5, 6, 7},
				},
			},
			// Since there's an error that can be continued after,
			// the buffer should be written.
			want: []byte{
				10, 3, // for &msgWithFakeMarshaler
				5, 6, 7, // for &fakeMarshaler
			},
			errType: reflect.TypeOf(&RequiredNotSetError{}),
		},
		{
			name: "Marshaler that succeeds",
			m: &fakeMarshaler{
				b: []byte{0, 1, 2, 3, 4, 127, 255},
			},
			want: []byte{0, 1, 2, 3, 4, 127, 255},
		},
	}
	for _, test := range tests {
		b := NewBuffer(nil)
		err := b.Marshal(test.m)
		if reflect.TypeOf(err) != test.errType {
			t.Errorf("%s: got err %T(%v) wanted %T", test.name, err, err, test.errType)
		}
		if !reflect.DeepEqual(test.want, b.Bytes()) {
			t.Errorf("%s: got bytes %v wanted %v", test.name, b.Bytes(), test.want)
		}
		if size := Size(test.m); size != len(b.Bytes()) {
			t.Errorf("%s: Size(_) = %v, but marshaled to %v bytes", test.name, size, len(b.Bytes()))
		}

		// The package-level Marshal must agree with (*Buffer).Marshal on
		// both the produced bytes and the returned error.
		m, mErr := Marshal(test.m)
		if !bytes.Equal(b.Bytes(), m) {
			t.Errorf("%s: Marshal returned %v, but (*Buffer).Marshal wrote %v", test.name, m, b.Bytes())
		}
		if !reflect.DeepEqual(err, mErr) {
			t.Errorf("%s: Marshal err = %q, but (*Buffer).Marshal returned %q",
				test.name, fmt.Sprint(mErr), fmt.Sprint(err))
		}
	}
}

// Simple tests for bytes
func TestBytesPrimitives(t *testing.T) {
	o := old()
	bytes := []byte{'n', 'o', 'w', ' ', 'i', 's', ' ', 't', 'h', 'e', ' ', 't', 'i', 'm', 'e'}
	if o.EncodeRawBytes(bytes) != nil {
		t.Error("EncodeRawBytes")
	}
	decb, e := o.DecodeRawBytes(false)
	if e != nil {
		t.Error("DecodeRawBytes")
	}
	equalbytes(bytes, decb, t)
}

// Simple tests for strings
func TestStringPrimitives(t *testing.T) {
	o := old()
	s := "now is the time"
	if o.EncodeStringBytes(s) != nil {
		t.Error("enc_string")
	}
	decs, e := o.DecodeStringBytes()
	if e != nil {
		t.Error("dec_string")
	}
	if s != decs {
		t.Error("string encode/decode fail:", s, decs)
	}
}

// Do we catch the "required bit not set" case?
func TestRequiredBit(t *testing.T) {
	o := old()
	pb := new(GoTest)
	err := o.Marshal(pb)
	if err == nil {
		t.Error("did not catch missing required fields")
	} else if strings.Index(err.Error(), "Kind") < 0 {
		// The error message should name the first missing required field, "Kind".
		t.Error("wrong error type:", err)
	}
}

// Check that all fields are nil.
// Clearly silly, and a residue from a more interesting test with an earlier,
// different initialization property, but it once caught a compiler bug so
// it lives.
func checkInitialized(pb *GoTest, t *testing.T) {
	if pb.F_BoolDefaulted != nil {
		t.Error("New or Reset did not set boolean:", *pb.F_BoolDefaulted)
	}
	if pb.F_Int32Defaulted != nil {
		t.Error("New or Reset did not set int32:", *pb.F_Int32Defaulted)
	}
	if pb.F_Int64Defaulted != nil {
		t.Error("New or Reset did not set int64:", *pb.F_Int64Defaulted)
	}
	if pb.F_Fixed32Defaulted != nil {
		t.Error("New or Reset did not set fixed32:", *pb.F_Fixed32Defaulted)
	}
	if pb.F_Fixed64Defaulted != nil {
		t.Error("New or Reset did not set fixed64:", *pb.F_Fixed64Defaulted)
	}
	if pb.F_Uint32Defaulted != nil {
		t.Error("New or Reset did not set uint32:", *pb.F_Uint32Defaulted)
	}
	if pb.F_Uint64Defaulted != nil {
		t.Error("New or Reset did not set uint64:", *pb.F_Uint64Defaulted)
	}
	if pb.F_FloatDefaulted != nil {
		t.Error("New or Reset did not set float:", *pb.F_FloatDefaulted)
	}
	if pb.F_DoubleDefaulted != nil {
		t.Error("New or Reset did not set double:", *pb.F_DoubleDefaulted)
	}
	if pb.F_StringDefaulted != nil {
		t.Error("New or Reset did not set string:", *pb.F_StringDefaulted)
	}
	if pb.F_BytesDefaulted != nil {
		t.Error("New or Reset did not set bytes:", string(pb.F_BytesDefaulted))
	}
	if pb.F_Sint32Defaulted != nil {
		t.Error("New or Reset did not set int32:", *pb.F_Sint32Defaulted)
	}
	if pb.F_Sint64Defaulted != nil {
		t.Error("New or Reset did not set int64:", *pb.F_Sint64Defaulted)
	}
}

// Does Reset() reset?
func TestReset(t *testing.T) {
	pb := initGoTest(true)
	// muck with some values
	pb.F_BoolDefaulted = Bool(false)
	pb.F_Int32Defaulted = Int32(237)
	pb.F_Int64Defaulted = Int64(12346)
	pb.F_Fixed32Defaulted = Uint32(32000)
	pb.F_Fixed64Defaulted = Uint64(666)
	pb.F_Uint32Defaulted = Uint32(323232)
	pb.F_Uint64Defaulted = nil
	pb.F_FloatDefaulted = nil
	pb.F_DoubleDefaulted = Float64(0)
	pb.F_StringDefaulted = String("gotcha")
	pb.F_BytesDefaulted = []byte("asdfasdf")
	pb.F_Sint32Defaulted = Int32(123)
	pb.F_Sint64Defaulted = Int64(789)
	pb.Reset()
	checkInitialized(pb, t)
}

// All required fields set, no defaults provided.
func TestEncodeDecode1(t *testing.T) {
	pb := initGoTest(false)
	overify(t, pb,
		"0807"+ // field 1, encoding 0, value 7
			"220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField)
			"5001"+ // field 10, encoding 0, value 1
			"5803"+ // field 11, encoding 0, value 3
			"6006"+ // field 12, encoding 0, value 6
			"6d20000000"+ // field 13, encoding 5, value 0x20
			"714000000000000000"+ // field 14, encoding 1, value 0x40
			"78a019"+ // field 15, encoding 0, value 0xca0 = 3232
			"8001c032"+ // field 16, encoding 0, value 0x1940 = 6464
			"8d0100004a45"+ // field 17, encoding 5, value 3232.0
			"9101000000000040b940"+ // field 18, encoding 1, value 6464.0
			"9a0106"+"737472696e67"+ // field 19, encoding 2, string "string"
			"b304"+ // field 70, encoding 3, start group
			"ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required"
			"b404"+ // field 70, encoding 4, end group
			"aa0605"+"6279746573"+ // field 101, encoding 2, string "bytes"
			"b0063f"+ // field 102, encoding 0, 0x3f zigzag32
			"b8067f") // field 103, encoding 0, 0x7f zigzag64
}

// All required fields set, defaults provided.
func TestEncodeDecode2(t *testing.T) {
	pb := initGoTest(true)
	overify(t, pb,
		"0807"+ // field 1, encoding 0, value 7
			"220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField)
			"5001"+ // field 10, encoding 0, value 1
			"5803"+ // field 11, encoding 0, value 3
			"6006"+ // field 12, encoding 0, value 6
			"6d20000000"+ // field 13, encoding 5, value 32
			"714000000000000000"+ // field 14, encoding 1, value 64
			"78a019"+ // field 15, encoding 0, value 3232
			"8001c032"+ // field 16, encoding 0, value 6464
			"8d0100004a45"+ // field 17, encoding 5, value 3232.0
			"9101000000000040b940"+ // field 18, encoding 1, value 6464.0
			"9a0106"+"737472696e67"+ // field 19, encoding 2 string "string"
			"c00201"+ // field 40, encoding 0, value 1
			"c80220"+ // field 41, encoding 0, value 32
			"d00240"+ // field 42, encoding 0, value 64
			"dd0240010000"+ // field 43, encoding 5, value 320
			"e1028002000000000000"+ // field 44, encoding 1, value 640
			"e8028019"+ // field 45, encoding 0, value 3200
			"f0028032"+ // field 46, encoding 0, value 6400
			"fd02e0659948"+ // field 47, encoding 5, value 314159.0
			"81030000000050971041"+ // field 48, encoding 1, value 271828.0
			"8a0310"+"68656c6c6f2c2022776f726c6421220a"+ // field 49, encoding 2 string "hello, \"world!\"\n"
			"b304"+ // start group field 70 level 1
			"ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required"
			"b404"+ // end group field 70 level 1
			"aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes"
			"b0063f"+ // field 102, encoding 0, 0x3f zigzag32
			"b8067f"+ // field 103, encoding 0, 0x7f zigzag64
			"8a1907"+"4269676e6f7365"+ // field 401, encoding 2, string "Bignose"
			"90193f"+ // field 402, encoding 0, value 63
			"98197f") // field 403, encoding 0, value 127
}

// All default fields set to their default value by hand
// (the wire encoding must match TestEncodeDecode2, where the
// same values come from the declared proto defaults).
func TestEncodeDecode3(t *testing.T) {
	pb := initGoTest(false)
	pb.F_BoolDefaulted = Bool(true)
	pb.F_Int32Defaulted = Int32(32)
	pb.F_Int64Defaulted = Int64(64)
	pb.F_Fixed32Defaulted = Uint32(320)
	pb.F_Fixed64Defaulted = Uint64(640)
	pb.F_Uint32Defaulted = Uint32(3200)
	pb.F_Uint64Defaulted = Uint64(6400)
	pb.F_FloatDefaulted = Float32(314159)
	pb.F_DoubleDefaulted = Float64(271828)
	pb.F_StringDefaulted = String("hello, \"world!\"\n")
	pb.F_BytesDefaulted = []byte("Bignose")
	pb.F_Sint32Defaulted = Int32(-32)
	pb.F_Sint64Defaulted = Int64(-64)

	overify(t, pb,
		"0807"+ // field 1, encoding 0, value 7
			"220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField)
			"5001"+ // field 10, encoding 0, value 1
			"5803"+ // field 11, encoding 0, value 3
			"6006"+ // field 12, encoding 0, value 6
			"6d20000000"+ // field 13, encoding 5, value 32
			"714000000000000000"+ // field 14, encoding 1, value 64
			"78a019"+ // field 15, encoding 0, value 3232
			"8001c032"+ // field 16, encoding 0, value 6464
			"8d0100004a45"+ // field 17, encoding 5, value 3232.0
			"9101000000000040b940"+ // field 18, encoding 1, value 6464.0
			"9a0106"+"737472696e67"+ // field 19, encoding 2 string "string"
			"c00201"+ // field 40, encoding 0, value 1
			"c80220"+ // field 41, encoding 0, value 32
			"d00240"+ // field 42, encoding 0, value 64
			"dd0240010000"+ // field 43, encoding 5, value 320
			"e1028002000000000000"+ // field 44, encoding 1, value 640
			"e8028019"+ // field 45, encoding 0, value 3200
			"f0028032"+ // field 46, encoding 0, value 6400
			"fd02e0659948"+ // field 47, encoding 5, value 314159.0
			"81030000000050971041"+ // field 48, encoding 1, value 271828.0
			"8a0310"+"68656c6c6f2c2022776f726c6421220a"+ // field 49, encoding 2 string "hello, \"world!\"\n"
			"b304"+ // start group field 70 level 1
			"ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required"
			"b404"+ // end group field 70 level 1
			"aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes"
			"b0063f"+ // field 102, encoding 0, 0x3f zigzag32
			"b8067f"+ // field 103, encoding 0, 0x7f zigzag64
			"8a1907"+"4269676e6f7365"+ // field 401, encoding 2, string "Bignose"
			"90193f"+ // field 402, encoding 0, value 63
			"98197f") // field 403, encoding 0, value 127
}

// All required fields set, defaults provided, all non-defaulted optional fields have values.
func TestEncodeDecode4(t *testing.T) {
	pb := initGoTest(true)
	pb.Table = String("hello")
	pb.Param = Int32(7)
	pb.OptionalField = initGoTestField()
	pb.F_BoolOptional = Bool(true)
	pb.F_Int32Optional = Int32(32)
	pb.F_Int64Optional = Int64(64)
	pb.F_Fixed32Optional = Uint32(3232)
	pb.F_Fixed64Optional = Uint64(6464)
	pb.F_Uint32Optional = Uint32(323232)
	pb.F_Uint64Optional = Uint64(646464)
	pb.F_FloatOptional = Float32(32.)
	pb.F_DoubleOptional = Float64(64.)
	pb.F_StringOptional = String("hello")
	pb.F_BytesOptional = []byte("Bignose")
	pb.F_Sint32Optional = Int32(-32)
	pb.F_Sint64Optional = Int64(-64)
	pb.Optionalgroup = initGoTest_OptionalGroup()

	overify(t, pb,
		"0807"+ // field 1, encoding 0, value 7
			"1205"+"68656c6c6f"+ // field 2, encoding 2, string "hello"
			"1807"+ // field 3, encoding 0, value 7
			"220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField)
			"320d"+"0a056c6162656c120474797065"+ // field 6, encoding 2 (GoTestField)
			"5001"+ // field 10, encoding 0, value 1
			"5803"+ // field 11, encoding 0, value 3
			"6006"+ // field 12, encoding 0, value 6
			"6d20000000"+ // field 13, encoding 5, value 32
			"714000000000000000"+ // field 14, encoding 1, value 64
			"78a019"+ // field 15, encoding 0, value 3232
			"8001c032"+ // field 16, encoding 0, value 6464
			"8d0100004a45"+ // field 17, encoding 5, value 3232.0
			"9101000000000040b940"+ // field 18, encoding 1, value 6464.0
			"9a0106"+"737472696e67"+ // field 19, encoding 2 string "string"
			"f00101"+ // field 30, encoding 0, value 1
			"f80120"+ // field 31, encoding 0, value 32
			"800240"+ // field 32, encoding 0, value 64
			"8d02a00c0000"+ // field 33, encoding 5, value 3232
			"91024019000000000000"+ // field 34, encoding 1, value 6464
			"9802a0dd13"+ // field 35, encoding 0, value 323232
			"a002c0ba27"+ // field 36, encoding 0, value 646464
			"ad0200000042"+ // field 37, encoding 5, value 32.0
			"b1020000000000005040"+ // field 38, encoding 1, value 64.0
			"ba0205"+"68656c6c6f"+ // field 39, encoding 2, string "hello"
			"c00201"+ // field 40, encoding 0, value 1
			"c80220"+ // field 41, encoding 0, value 32
			"d00240"+ // field 42, encoding 0, value 64
			"dd0240010000"+ // field 43, encoding 5, value 320
			"e1028002000000000000"+ // field 44, encoding 1, value 640
			"e8028019"+ // field 45, encoding 0, value 3200
			"f0028032"+ // field 46, encoding 0, value 6400
			"fd02e0659948"+ // field 47, encoding 5, value 314159.0
			"81030000000050971041"+ // field 48, encoding 1, value 271828.0
			"8a0310"+"68656c6c6f2c2022776f726c6421220a"+ // field 49, encoding 2 string "hello, \"world!\"\n"
			"b304"+ // start group field 70 level 1
			"ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required"
			"b404"+ // end group field 70 level 1
			"d305"+ // start group field 90 level 1
			"da0508"+"6f7074696f6e616c"+ // field 91, encoding 2, string "optional"
			"d405"+ // end group field 90 level 1
			"aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes"
			"b0063f"+ // field 102, encoding 0, 0x3f zigzag32
			"b8067f"+ // field 103, encoding 0, 0x7f zigzag64
			"ea1207"+"4269676e6f7365"+ // field 301, encoding 2, string "Bignose"
			"f0123f"+ // field 302, encoding 0, value 63
			"f8127f"+ // field 303, encoding 0, value 127
			"8a1907"+"4269676e6f7365"+ // field 401, encoding 2, string "Bignose"
			"90193f"+ // field 402, encoding 0, value 63
			"98197f") // field 403, encoding 0, value 127
}

// All required fields set, defaults provided, all repeated fields given two values.
func TestEncodeDecode5(t *testing.T) {
	pb := initGoTest(true)
	pb.RepeatedField = []*GoTestField{initGoTestField(), initGoTestField()}
	pb.F_BoolRepeated = []bool{false, true}
	pb.F_Int32Repeated = []int32{32, 33}
	pb.F_Int64Repeated = []int64{64, 65}
	pb.F_Fixed32Repeated = []uint32{3232, 3333}
	pb.F_Fixed64Repeated = []uint64{6464, 6565}
	pb.F_Uint32Repeated = []uint32{323232, 333333}
	pb.F_Uint64Repeated = []uint64{646464, 656565}
	pb.F_FloatRepeated = []float32{32., 33.}
	pb.F_DoubleRepeated = []float64{64., 65.}
	pb.F_StringRepeated = []string{"hello", "sailor"}
	pb.F_BytesRepeated = [][]byte{[]byte("big"), []byte("nose")}
	pb.F_Sint32Repeated = []int32{32, -32}
	pb.F_Sint64Repeated = []int64{64, -64}
	pb.Repeatedgroup = []*GoTest_RepeatedGroup{initGoTest_RepeatedGroup(), initGoTest_RepeatedGroup()}

	// Non-packed repeated fields appear on the wire once per element,
	// each with its own tag.
	overify(t, pb,
		"0807"+ // field 1, encoding 0, value 7
			"220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField)
			"2a0d"+"0a056c6162656c120474797065"+ // field 5, encoding 2 (GoTestField)
			"2a0d"+"0a056c6162656c120474797065"+ // field 5, encoding 2 (GoTestField)
			"5001"+ // field 10, encoding 0, value 1
			"5803"+ // field 11, encoding 0, value 3
			"6006"+ // field 12, encoding 0, value 6
			"6d20000000"+ // field 13, encoding 5, value 32
			"714000000000000000"+ // field 14, encoding 1, value 64
			"78a019"+ // field 15, encoding 0, value 3232
			"8001c032"+ // field 16, encoding 0, value 6464
			"8d0100004a45"+ // field 17, encoding 5, value 3232.0
			"9101000000000040b940"+ // field 18, encoding 1, value 6464.0
			"9a0106"+"737472696e67"+ // field 19, encoding 2 string "string"
			"a00100"+ // field 20, encoding 0, value 0
			"a00101"+ // field 20, encoding 0, value 1
			"a80120"+ // field 21, encoding 0, value 32
			"a80121"+ // field 21, encoding 0, value 33
			"b00140"+ // field 22, encoding 0, value 64
			"b00141"+ // field 22, encoding 0, value 65
			"bd01a00c0000"+ // field 23, encoding 5, value 3232
			"bd01050d0000"+ // field 23, encoding 5, value 3333
			"c1014019000000000000"+ // field 24, encoding 1, value 6464
			"c101a519000000000000"+ // field 24, encoding 1, value 6565
			"c801a0dd13"+ // field 25, encoding 0, value 323232
			"c80195ac14"+ // field 25, encoding 0, value 333333
			"d001c0ba27"+ // field 26, encoding 0, value 646464
			"d001b58928"+ // field 26, encoding 0, value 656565
			"dd0100000042"+ // field 27, encoding 5, value 32.0
			"dd0100000442"+ // field 27, encoding 5, value 33.0
			"e1010000000000005040"+ // field 28, encoding 1, value 64.0
			"e1010000000000405040"+ // field 28, encoding 1, value 65.0
			"ea0105"+"68656c6c6f"+ // field 29, encoding 2, string "hello"
			"ea0106"+"7361696c6f72"+ // field 29, encoding 2, string "sailor"
			"c00201"+ // field 40, encoding 0, value 1
			"c80220"+ // field 41, encoding 0, value 32
			"d00240"+ // field 42, encoding 0, value 64
			"dd0240010000"+ // field 43, encoding 5, value 320
			"e1028002000000000000"+ // field 44, encoding 1, value 640
			"e8028019"+ // field 45, encoding 0, value 3200
			"f0028032"+ // field 46, encoding 0, value 6400
			"fd02e0659948"+ // field 47, encoding 5, value 314159.0
			"81030000000050971041"+ // field 48, encoding 1, value 271828.0
			"8a0310"+"68656c6c6f2c2022776f726c6421220a"+ // field 49, encoding 2 string "hello, \"world!\"\n"
			"b304"+ // start group field 70 level 1
			"ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required"
			"b404"+ // end group field 70 level 1
			"8305"+ // start group field 80 level 1
			"8a0508"+"7265706561746564"+ // field 81, encoding 2, string "repeated"
			"8405"+ // end group field 80 level 1
			"8305"+ // start group field 80 level 1
			"8a0508"+"7265706561746564"+ // field 81, encoding 2, string "repeated"
			"8405"+ // end group field 80 level 1
			"aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes"
			"b0063f"+ // field 102, encoding 0, 0x3f zigzag32
			"b8067f"+ // field 103, encoding 0, 0x7f zigzag64
			"ca0c03"+"626967"+ // field 201, encoding 2, string "big"
			"ca0c04"+"6e6f7365"+ // field 201, encoding 2, string "nose"
			"d00c40"+ // field 202, encoding 0, value 32
			"d00c3f"+ // field 202, encoding 0, value -32
			"d80c8001"+ // field 203, encoding 0, value 64
			"d80c7f"+ // field 203, encoding 0, value -64
			"8a1907"+"4269676e6f7365"+ // field 401, encoding 2, string "Bignose"
			"90193f"+ // field 402, encoding 0, value 63
			"98197f") // field 403, encoding 0, value 127
}

// All required fields set, all packed repeated fields given two values.
func TestEncodeDecode6(t *testing.T) {
	pb := initGoTest(false)
	pb.F_BoolRepeatedPacked = []bool{false, true}
	pb.F_Int32RepeatedPacked = []int32{32, 33}
	pb.F_Int64RepeatedPacked = []int64{64, 65}
	pb.F_Fixed32RepeatedPacked = []uint32{3232, 3333}
	pb.F_Fixed64RepeatedPacked = []uint64{6464, 6565}
	pb.F_Uint32RepeatedPacked = []uint32{323232, 333333}
	pb.F_Uint64RepeatedPacked = []uint64{646464, 656565}
	pb.F_FloatRepeatedPacked = []float32{32., 33.}
	pb.F_DoubleRepeatedPacked = []float64{64., 65.}
	pb.F_Sint32RepeatedPacked = []int32{32, -32}
	pb.F_Sint64RepeatedPacked = []int64{64, -64}

	// Packed repeated fields appear once, as a single length-delimited
	// blob containing all elements.
	overify(t, pb,
		"0807"+ // field 1, encoding 0, value 7
			"220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField)
			"5001"+ // field 10, encoding 0, value 1
			"5803"+ // field 11, encoding 0, value 3
			"6006"+ // field 12, encoding 0, value 6
			"6d20000000"+ // field 13, encoding 5, value 32
			"714000000000000000"+ // field 14, encoding 1, value 64
			"78a019"+ // field 15, encoding 0, value 3232
			"8001c032"+ // field 16, encoding 0, value 6464
			"8d0100004a45"+ // field 17, encoding 5, value 3232.0
			"9101000000000040b940"+ // field 18, encoding 1, value 6464.0
			"9a0106"+"737472696e67"+ // field 19, encoding 2 string "string"
			"9203020001"+ // field 50, encoding 2, 2 bytes, value 0, value 1
			"9a03022021"+ // field 51, encoding 2, 2 bytes, value 32, value 33
			"a203024041"+ // field 52, encoding 2, 2 bytes, value 64, value 65
			"aa0308"+ // field 53, encoding 2, 8 bytes
			"a00c0000050d0000"+ // value 3232, value 3333
			"b20310"+ // field 54, encoding 2, 16 bytes
			"4019000000000000a519000000000000"+ // value 6464, value 6565
			"ba0306"+ // field 55, encoding 2, 6 bytes
			"a0dd1395ac14"+ // value 323232, value 333333
			"c20306"+ // field 56, encoding 2, 6 bytes
			"c0ba27b58928"+ // value 646464, value 656565
			"ca0308"+ // field 57, encoding 2, 8 bytes
			"0000004200000442"+ // value 32.0, value 33.0
			"d20310"+ // field 58, encoding 2, 16 bytes
			"00000000000050400000000000405040"+ // value 64.0, value 65.0
			"b304"+ // start group field 70 level 1
			"ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required"
			"b404"+ // end group field 70 level 1
			"aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes"
			"b0063f"+ // field 102, encoding 0, 0x3f zigzag32
			"b8067f"+ // field 103, encoding 0, 0x7f zigzag64
			"b21f02"+ // field 502, encoding 2, 2 bytes
			"403f"+ // value 32, value -32
			"ba1f03"+ // field 503, encoding 2, 3 bytes
			"80017f") // value 64, value -64
}

// Test that we can encode empty bytes fields.
func TestEncodeDecodeBytes1(t *testing.T) {
	pb := initGoTest(false)

	// Create our bytes
	pb.F_BytesRequired = []byte{}
	pb.F_BytesRepeated = [][]byte{{}}
	pb.F_BytesOptional = []byte{}

	d, err := Marshal(pb)
	if err != nil {
		t.Error(err)
	}

	pbd := new(GoTest)
	if err := Unmarshal(d, pbd); err != nil {
		t.Error(err)
	}

	// Empty bytes must round-trip as non-nil, zero-length slices.
	if pbd.F_BytesRequired == nil || len(pbd.F_BytesRequired) != 0 {
		t.Error("required empty bytes field is incorrect")
	}
	if pbd.F_BytesRepeated == nil || len(pbd.F_BytesRepeated) == 1 && pbd.F_BytesRepeated[0] == nil {
		t.Error("repeated empty bytes field is incorrect")
	}
	if pbd.F_BytesOptional == nil || len(pbd.F_BytesOptional) != 0 {
		t.Error("optional empty bytes field is incorrect")
	}
}

// Test that we encode nil-valued fields of a repeated bytes field correctly.
// Since entries in a repeated field cannot be nil, nil must mean empty value.
func TestEncodeDecodeBytes2(t *testing.T) {
	pb := initGoTest(false)

	// Create our bytes
	pb.F_BytesRepeated = [][]byte{nil}

	d, err := Marshal(pb)
	if err != nil {
		t.Error(err)
	}

	pbd := new(GoTest)
	if err := Unmarshal(d, pbd); err != nil {
		t.Error(err)
	}

	// The nil entry must come back as a single non-nil (empty) element.
	if len(pbd.F_BytesRepeated) != 1 || pbd.F_BytesRepeated[0] == nil {
		t.Error("Unexpected value for repeated bytes field")
	}
}

// Check that fields unknown to the target message type are skipped during
// decoding and preserved verbatim in XXX_unrecognized.
func TestSkippingUnrecognizedFields(t *testing.T) {
	o := old()
	pb := initGoTestField()

	// Marshal it normally.
	o.Marshal(pb)

	// Now new a GoSkipTest record.
	skip := &GoSkipTest{
		SkipInt32:   Int32(32),
		SkipFixed32: Uint32(3232),
		SkipFixed64: Uint64(6464),
		SkipString:  String("skipper"),
		Skipgroup: &GoSkipTest_SkipGroup{
			GroupInt32:  Int32(75),
			GroupString: String("wxyz"),
		},
	}

	// Marshal it into same buffer.
	o.Marshal(skip)

	pbd := new(GoTestField)
	o.Unmarshal(pbd)

	// The __unrecognized field should be a marshaling of GoSkipTest
	skipd := new(GoSkipTest)

	o.SetBuf(pbd.XXX_unrecognized)
	o.Unmarshal(skipd)

	if *skipd.SkipInt32 != *skip.SkipInt32 {
		t.Error("skip int32", skipd.SkipInt32)
	}
	if *skipd.SkipFixed32 != *skip.SkipFixed32 {
		t.Error("skip fixed32", skipd.SkipFixed32)
	}
	if *skipd.SkipFixed64 != *skip.SkipFixed64 {
		t.Error("skip fixed64", skipd.SkipFixed64)
	}
	if *skipd.SkipString != *skip.SkipString {
		t.Error("skip string", *skipd.SkipString)
	}
	if *skipd.Skipgroup.GroupInt32 != *skip.Skipgroup.GroupInt32 {
		t.Error("skip group int32", skipd.Skipgroup.GroupInt32)
	}
	if *skipd.Skipgroup.GroupString != *skip.Skipgroup.GroupString {
		t.Error("skip group string", *skipd.Skipgroup.GroupString)
	}
}

// Check that unrecognized fields of a submessage are preserved.
func TestSubmessageUnrecognizedFields(t *testing.T) {
	nm := &NewMessage{
		Nested: &NewMessage_Nested{
			Name:      String("Nigel"),
			FoodGroup: String("carbs"),
		},
	}
	b, err := Marshal(nm)
	if err != nil {
		t.Fatalf("Marshal of NewMessage: %v", err)
	}

	// Unmarshal into an OldMessage.
	om := new(OldMessage)
	if err := Unmarshal(b, om); err != nil {
		t.Fatalf("Unmarshal to OldMessage: %v", err)
	}
	exp := &OldMessage{
		Nested: &OldMessage_Nested{
			Name: String("Nigel"),
			// normal protocol buffer users should not do this
			XXX_unrecognized: []byte("\x12\x05carbs"),
		},
	}
	if !Equal(om, exp) {
		t.Errorf("om = %v, want %v", om, exp)
	}

	// Clone the OldMessage.
	om = Clone(om).(*OldMessage)
	if !Equal(om, exp) {
		t.Errorf("Clone(om) = %v, want %v", om, exp)
	}

	// Marshal the OldMessage, then unmarshal it into an empty NewMessage.
	if b, err = Marshal(om); err != nil {
		t.Fatalf("Marshal of OldMessage: %v", err)
	}
	t.Logf("Marshal(%v) -> %q", om, b)
	nm2 := new(NewMessage)
	if err := Unmarshal(b, nm2); err != nil {
		t.Fatalf("Unmarshal to NewMessage: %v", err)
	}
	if !Equal(nm, nm2) {
		t.Errorf("NewMessage round-trip: %v => %v", nm, nm2)
	}
}

// Check that an int32 field can be upgraded to an int64 field.
func TestNegativeInt32(t *testing.T) {
	om := &OldMessage{
		Num: Int32(-1),
	}
	b, err := Marshal(om)
	if err != nil {
		t.Fatalf("Marshal of OldMessage: %v", err)
	}

	// Check the size. It should be 11 bytes;
	// 1 for the field/wire type, and 10 for the negative number.
	if len(b) != 11 {
		t.Errorf("%v marshaled as %q, wanted 11 bytes", om, b)
	}

	// Unmarshal into a NewMessage.
	nm := new(NewMessage)
	if err := Unmarshal(b, nm); err != nil {
		t.Fatalf("Unmarshal to NewMessage: %v", err)
	}
	want := &NewMessage{
		Num: Int64(-1),
	}
	if !Equal(nm, want) {
		t.Errorf("nm = %v, want %v", nm, want)
	}
}

// Check that we can grow an array (repeated field) to have many elements.
// This test doesn't depend only on our encoding; for variety, it makes sure
// we create, encode, and decode the correct contents explicitly. It's therefore
// a bit messier.
// This test also uses (and hence tests) the Marshal/Unmarshal functions
// instead of the methods.
func TestBigRepeated(t *testing.T) {
	pb := initGoTest(true)

	// Create the arrays
	const N = 50 // Internally the library starts much smaller.
	pb.Repeatedgroup = make([]*GoTest_RepeatedGroup, N)
	pb.F_Sint64Repeated = make([]int64, N)
	pb.F_Sint32Repeated = make([]int32, N)
	pb.F_BytesRepeated = make([][]byte, N)
	pb.F_StringRepeated = make([]string, N)
	pb.F_DoubleRepeated = make([]float64, N)
	pb.F_FloatRepeated = make([]float32, N)
	pb.F_Uint64Repeated = make([]uint64, N)
	pb.F_Uint32Repeated = make([]uint32, N)
	pb.F_Fixed64Repeated = make([]uint64, N)
	pb.F_Fixed32Repeated = make([]uint32, N)
	pb.F_Int64Repeated = make([]int64, N)
	pb.F_Int32Repeated = make([]int32, N)
	pb.F_BoolRepeated = make([]bool, N)
	pb.RepeatedField = make([]*GoTestField, N)

	// Fill in the arrays with checkable values.
	igtf := initGoTestField()
	igtrg := initGoTest_RepeatedGroup()
	for i := 0; i < N; i++ {
		pb.Repeatedgroup[i] = igtrg
		pb.F_Sint64Repeated[i] = int64(i)
		pb.F_Sint32Repeated[i] = int32(i)
		s := fmt.Sprint(i)
		pb.F_BytesRepeated[i] = []byte(s)
		pb.F_StringRepeated[i] = s
		pb.F_DoubleRepeated[i] = float64(i)
		pb.F_FloatRepeated[i] = float32(i)
		pb.F_Uint64Repeated[i] = uint64(i)
		pb.F_Uint32Repeated[i] = uint32(i)
		pb.F_Fixed64Repeated[i] = uint64(i)
		pb.F_Fixed32Repeated[i] = uint32(i)
		pb.F_Int64Repeated[i] = int64(i)
		pb.F_Int32Repeated[i] = int32(i)
		pb.F_BoolRepeated[i] = i%2 == 0
		pb.RepeatedField[i] = igtf
	}

	// Marshal.
	buf, _ := Marshal(pb)

	// Now test Unmarshal by recreating the original buffer.
	pbd := new(GoTest)
	Unmarshal(buf, pbd)

	// Check the checkable values
	for i := uint64(0); i < N; i++ {
		if pbd.Repeatedgroup[i] == nil { // TODO: more checking?
			t.Error("pbd.Repeatedgroup bad")
		}
		var x uint64
		x = uint64(pbd.F_Sint64Repeated[i])
		if x != i {
			t.Error("pbd.F_Sint64Repeated bad", x, i)
		}
		x = uint64(pbd.F_Sint32Repeated[i])
		if x != i {
			t.Error("pbd.F_Sint32Repeated bad", x, i)
		}
		s := fmt.Sprint(i)
		equalbytes(pbd.F_BytesRepeated[i], []byte(s), t)
		if pbd.F_StringRepeated[i] != s {
			// NOTE(review): the message below says F_Sint32Repeated but this
			// branch checks F_StringRepeated — stale copy-paste text.
			t.Error("pbd.F_Sint32Repeated bad", pbd.F_StringRepeated[i], i)
		}
		x = uint64(pbd.F_DoubleRepeated[i])
		if x != i {
			t.Error("pbd.F_DoubleRepeated bad", x, i)
		}
		x = uint64(pbd.F_FloatRepeated[i])
		if x != i {
			t.Error("pbd.F_FloatRepeated bad", x, i)
		}
		x = pbd.F_Uint64Repeated[i]
		if x != i {
			t.Error("pbd.F_Uint64Repeated bad", x, i)
		}
		x = uint64(pbd.F_Uint32Repeated[i])
		if x != i {
			t.Error("pbd.F_Uint32Repeated bad", x, i)
		}
		x = pbd.F_Fixed64Repeated[i]
		if x != i {
			t.Error("pbd.F_Fixed64Repeated bad", x, i)
		}
		x = uint64(pbd.F_Fixed32Repeated[i])
		if x != i {
			t.Error("pbd.F_Fixed32Repeated bad", x, i)
		}
		x = uint64(pbd.F_Int64Repeated[i])
		if x != i {
			t.Error("pbd.F_Int64Repeated bad", x, i)
		}
		x = uint64(pbd.F_Int32Repeated[i])
		if x != i {
			t.Error("pbd.F_Int32Repeated bad", x, i)
		}
		if pbd.F_BoolRepeated[i] != (i%2 == 0) {
			t.Error("pbd.F_BoolRepeated bad", x, i)
		}
		if pbd.RepeatedField[i] == nil { // TODO: more checking?
			t.Error("pbd.RepeatedField bad")
		}
	}
}

// Verify we give a useful message when decoding to the wrong structure type.
func TestTypeMismatch(t *testing.T) {
	pb1 := initGoTest(true)

	// Marshal
	o := old()
	o.Marshal(pb1)

	// Now Unmarshal it to the wrong type.
	pb2 := initGoTestField()
	err := o.Unmarshal(pb2)
	if err == nil {
		t.Error("expected error, got no error")
	} else if !strings.Contains(err.Error(), "bad wiretype") {
		t.Error("expected bad wiretype error, got", err)
	}
}

// encodeDecode marshals in and unmarshals the result into out, failing the
// test with msg on any error.
func encodeDecode(t *testing.T, in, out Message, msg string) {
	buf, err := Marshal(in)
	if err != nil {
		t.Fatalf("failed marshaling %v: %v", msg, err)
	}
	if err := Unmarshal(buf, out); err != nil {
		t.Fatalf("failed unmarshaling %v: %v", msg, err)
	}
}

// The decoder must accept both packed and non-packed wire forms for a
// repeated field, regardless of how the target field is declared.
func TestPackedNonPackedDecoderSwitching(t *testing.T) {
	np, p := new(NonPackedTest), new(PackedTest)

	// non-packed -> packed
	np.A = []int32{0, 1, 1, 2, 3, 5}
	encodeDecode(t, np, p, "non-packed -> packed")
	if !reflect.DeepEqual(np.A, p.B) {
		t.Errorf("failed non-packed -> packed; np.A=%+v, p.B=%+v", np.A, p.B)
	}

	// packed -> non-packed
	np.Reset()
	p.B = []int32{3, 1, 4, 1, 5, 9}
	encodeDecode(t, p, np, "packed -> non-packed")
	if !reflect.DeepEqual(p.B, np.A) {
		t.Errorf("failed packed -> non-packed; p.B=%+v, np.A=%+v", p.B, np.A)
	}
}

// Marshaling a repeated group containing a nil entry must fail with a
// descriptive error rather than panic.
func TestProto1RepeatedGroup(t *testing.T) {
	pb := &MessageList{
		Message: []*MessageList_Message{
			{
				Name:  String("blah"),
				Count: Int32(7),
			},
			// NOTE: pb.Message[1] is a nil
			nil,
		},
	}

	o := old()
	err := o.Marshal(pb)
	if err == nil || !strings.Contains(err.Error(), "repeated field Message has nil") {
		t.Fatalf("unexpected or no error when marshaling: %v", err)
	}
}

// Test that enums work. Checks for a bug introduced by making enums
// named types instead of int32: newInt32FromUint64 would crash with
// a type mismatch in reflect.PointTo.
func TestEnum(t *testing.T) {
	pb := new(GoEnum)
	pb.Foo = FOO_FOO1.Enum()
	o := old()
	if err := o.Marshal(pb); err != nil {
		t.Fatal("error encoding enum:", err)
	}
	pb1 := new(GoEnum)
	if err := o.Unmarshal(pb1); err != nil {
		t.Fatal("error decoding enum:", err)
	}
	if *pb1.Foo != FOO_FOO1 {
		t.Error("expected 7 but got ", *pb1.Foo)
	}
}

// Enum types have String methods. Check that enum fields can be printed.
// We don't care what the value actually is, just as long as it doesn't crash. func TestPrintingNilEnumFields(t *testing.T) { pb := new(GoEnum) _ = fmt.Sprintf("%+v", pb) } // Verify that absent required fields cause Marshal/Unmarshal to return errors. func TestRequiredFieldEnforcement(t *testing.T) { pb := new(GoTestField) _, err := Marshal(pb) if err == nil { t.Error("marshal: expected error, got nil") } else if _, ok := err.(*RequiredNotSetError); !ok || !strings.Contains(err.Error(), "Label") { t.Errorf("marshal: bad error type: %v", err) } // A slightly sneaky, yet valid, proto. It encodes the same required field twice, // so simply counting the required fields is insufficient. // field 1, encoding 2, value "hi" buf := []byte("\x0A\x02hi\x0A\x02hi") err = Unmarshal(buf, pb) if err == nil { t.Error("unmarshal: expected error, got nil") } else if _, ok := err.(*RequiredNotSetError); !ok || !strings.Contains(err.Error(), "{Unknown}") { t.Errorf("unmarshal: bad error type: %v", err) } } // Verify that absent required fields in groups cause Marshal/Unmarshal to return errors. func TestRequiredFieldEnforcementGroups(t *testing.T) { pb := &GoTestRequiredGroupField{Group: &GoTestRequiredGroupField_Group{}} if _, err := Marshal(pb); err == nil { t.Error("marshal: expected error, got nil") } else if _, ok := err.(*RequiredNotSetError); !ok || !strings.Contains(err.Error(), "Group.Field") { t.Errorf("marshal: bad error type: %v", err) } buf := []byte{11, 12} if err := Unmarshal(buf, pb); err == nil { t.Error("unmarshal: expected error, got nil") } else if _, ok := err.(*RequiredNotSetError); !ok || !strings.Contains(err.Error(), "Group.{Unknown}") { t.Errorf("unmarshal: bad error type: %v", err) } } func TestTypedNilMarshal(t *testing.T) { // A typed nil should return ErrNil and not crash. 
{ var m *GoEnum if _, err := Marshal(m); err != ErrNil { t.Errorf("Marshal(%#v): got %v, want ErrNil", m, err) } } { m := &Communique{Union: &Communique_Msg{nil}} if _, err := Marshal(m); err == nil || err == ErrNil { t.Errorf("Marshal(%#v): got %v, want errOneofHasNil", m, err) } } } // A type that implements the Marshaler interface, but is not nillable. type nonNillableInt uint64 func (nni nonNillableInt) Marshal() ([]byte, error) { return EncodeVarint(uint64(nni)), nil } type NNIMessage struct { nni nonNillableInt } func (*NNIMessage) Reset() {} func (*NNIMessage) String() string { return "" } func (*NNIMessage) ProtoMessage() {} // A type that implements the Marshaler interface and is nillable. type nillableMessage struct { x uint64 } func (nm *nillableMessage) Marshal() ([]byte, error) { return EncodeVarint(nm.x), nil } type NMMessage struct { nm *nillableMessage } func (*NMMessage) Reset() {} func (*NMMessage) String() string { return "" } func (*NMMessage) ProtoMessage() {} // Verify a type that uses the Marshaler interface, but has a nil pointer. func TestNilMarshaler(t *testing.T) { // Try a struct with a Marshaler field that is nil. // It should be directly marshable. nmm := new(NMMessage) if _, err := Marshal(nmm); err != nil { t.Error("unexpected error marshaling nmm: ", err) } // Try a struct with a Marshaler field that is not nillable. nnim := new(NNIMessage) nnim.nni = 7 var _ Marshaler = nnim.nni // verify it is truly a Marshaler if _, err := Marshal(nnim); err != nil { t.Error("unexpected error marshaling nnim: ", err) } } func TestAllSetDefaults(t *testing.T) { // Exercise SetDefaults with all scalar field types. m := &Defaults{ // NaN != NaN, so override that here. 
F_Nan: Float32(1.7), } expected := &Defaults{ F_Bool: Bool(true), F_Int32: Int32(32), F_Int64: Int64(64), F_Fixed32: Uint32(320), F_Fixed64: Uint64(640), F_Uint32: Uint32(3200), F_Uint64: Uint64(6400), F_Float: Float32(314159), F_Double: Float64(271828), F_String: String(`hello, "world!"` + "\n"), F_Bytes: []byte("Bignose"), F_Sint32: Int32(-32), F_Sint64: Int64(-64), F_Enum: Defaults_GREEN.Enum(), F_Pinf: Float32(float32(math.Inf(1))), F_Ninf: Float32(float32(math.Inf(-1))), F_Nan: Float32(1.7), StrZero: String(""), } SetDefaults(m) if !Equal(m, expected) { t.Errorf("SetDefaults failed\n got %v\nwant %v", m, expected) } } func TestSetDefaultsWithSetField(t *testing.T) { // Check that a set value is not overridden. m := &Defaults{ F_Int32: Int32(12), } SetDefaults(m) if v := m.GetF_Int32(); v != 12 { t.Errorf("m.FInt32 = %v, want 12", v) } } func TestSetDefaultsWithSubMessage(t *testing.T) { m := &OtherMessage{ Key: Int64(123), Inner: &InnerMessage{ Host: String("gopher"), }, } expected := &OtherMessage{ Key: Int64(123), Inner: &InnerMessage{ Host: String("gopher"), Port: Int32(4000), }, } SetDefaults(m) if !Equal(m, expected) { t.Errorf("\n got %v\nwant %v", m, expected) } } func TestSetDefaultsWithRepeatedSubMessage(t *testing.T) { m := &MyMessage{ RepInner: []*InnerMessage{{}}, } expected := &MyMessage{ RepInner: []*InnerMessage{{ Port: Int32(4000), }}, } SetDefaults(m) if !Equal(m, expected) { t.Errorf("\n got %v\nwant %v", m, expected) } } func TestSetDefaultWithRepeatedNonMessage(t *testing.T) { m := &MyMessage{ Pet: []string{"turtle", "wombat"}, } expected := Clone(m) SetDefaults(m) if !Equal(m, expected) { t.Errorf("\n got %v\nwant %v", m, expected) } } func TestMaximumTagNumber(t *testing.T) { m := &MaxTag{ LastField: String("natural goat essence"), } buf, err := Marshal(m) if err != nil { t.Fatalf("proto.Marshal failed: %v", err) } m2 := new(MaxTag) if err := Unmarshal(buf, m2); err != nil { t.Fatalf("proto.Unmarshal failed: %v", err) } if got, want := 
m2.GetLastField(), *m.LastField; got != want { t.Errorf("got %q, want %q", got, want) } } func TestJSON(t *testing.T) { m := &MyMessage{ Count: Int32(4), Pet: []string{"bunny", "kitty"}, Inner: &InnerMessage{ Host: String("cauchy"), }, Bikeshed: MyMessage_GREEN.Enum(), } const expected = `{"count":4,"pet":["bunny","kitty"],"inner":{"host":"cauchy"},"bikeshed":1}` b, err := json.Marshal(m) if err != nil { t.Fatalf("json.Marshal failed: %v", err) } s := string(b) if s != expected { t.Errorf("got %s\nwant %s", s, expected) } received := new(MyMessage) if err := json.Unmarshal(b, received); err != nil { t.Fatalf("json.Unmarshal failed: %v", err) } if !Equal(received, m) { t.Fatalf("got %s, want %s", received, m) } // Test unmarshalling of JSON with symbolic enum name. const old = `{"count":4,"pet":["bunny","kitty"],"inner":{"host":"cauchy"},"bikeshed":"GREEN"}` received.Reset() if err := json.Unmarshal([]byte(old), received); err != nil { t.Fatalf("json.Unmarshal failed: %v", err) } if !Equal(received, m) { t.Fatalf("got %s, want %s", received, m) } } func TestBadWireType(t *testing.T) { b := []byte{7<<3 | 6} // field 7, wire type 6 pb := new(OtherMessage) if err := Unmarshal(b, pb); err == nil { t.Errorf("Unmarshal did not fail") } else if !strings.Contains(err.Error(), "unknown wire type") { t.Errorf("wrong error: %v", err) } } func TestBytesWithInvalidLength(t *testing.T) { // If a byte sequence has an invalid (negative) length, Unmarshal should not panic. b := []byte{2<<3 | WireBytes, 0xff, 0xff, 0xff, 0xff, 0xff, 0} Unmarshal(b, new(MyMessage)) } func TestLengthOverflow(t *testing.T) { // Overflowing a length should not panic. b := []byte{2<<3 | WireBytes, 1, 1, 3<<3 | WireBytes, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f, 0x01} Unmarshal(b, new(MyMessage)) } func TestVarintOverflow(t *testing.T) { // Overflowing a 64-bit length should not be allowed. 
b := []byte{1<<3 | WireVarint, 0x01, 3<<3 | WireBytes, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x01} if err := Unmarshal(b, new(MyMessage)); err == nil { t.Fatalf("Overflowed uint64 length without error") } } func TestUnmarshalFuzz(t *testing.T) { const N = 1000 seed := time.Now().UnixNano() t.Logf("RNG seed is %d", seed) rng := rand.New(rand.NewSource(seed)) buf := make([]byte, 20) for i := 0; i < N; i++ { for j := range buf { buf[j] = byte(rng.Intn(256)) } fuzzUnmarshal(t, buf) } } func TestMergeMessages(t *testing.T) { pb := &MessageList{Message: []*MessageList_Message{{Name: String("x"), Count: Int32(1)}}} data, err := Marshal(pb) if err != nil { t.Fatalf("Marshal: %v", err) } pb1 := new(MessageList) if err := Unmarshal(data, pb1); err != nil { t.Fatalf("first Unmarshal: %v", err) } if err := Unmarshal(data, pb1); err != nil { t.Fatalf("second Unmarshal: %v", err) } if len(pb1.Message) != 1 { t.Errorf("two Unmarshals produced %d Messages, want 1", len(pb1.Message)) } pb2 := new(MessageList) if err := UnmarshalMerge(data, pb2); err != nil { t.Fatalf("first UnmarshalMerge: %v", err) } if err := UnmarshalMerge(data, pb2); err != nil { t.Fatalf("second UnmarshalMerge: %v", err) } if len(pb2.Message) != 2 { t.Errorf("two UnmarshalMerges produced %d Messages, want 2", len(pb2.Message)) } } func TestExtensionMarshalOrder(t *testing.T) { m := &MyMessage{Count: Int(123)} if err := SetExtension(m, E_Ext_More, &Ext{Data: String("alpha")}); err != nil { t.Fatalf("SetExtension: %v", err) } if err := SetExtension(m, E_Ext_Text, String("aleph")); err != nil { t.Fatalf("SetExtension: %v", err) } if err := SetExtension(m, E_Ext_Number, Int32(1)); err != nil { t.Fatalf("SetExtension: %v", err) } // Serialize m several times, and check we get the same bytes each time. 
var orig []byte for i := 0; i < 100; i++ { b, err := Marshal(m) if err != nil { t.Fatalf("Marshal: %v", err) } if i == 0 { orig = b continue } if !bytes.Equal(b, orig) { t.Errorf("Bytes differ on attempt #%d", i) } } } // Many extensions, because small maps might not iterate differently on each iteration. var exts = []*ExtensionDesc{ E_X201, E_X202, E_X203, E_X204, E_X205, E_X206, E_X207, E_X208, E_X209, E_X210, E_X211, E_X212, E_X213, E_X214, E_X215, E_X216, E_X217, E_X218, E_X219, E_X220, E_X221, E_X222, E_X223, E_X224, E_X225, E_X226, E_X227, E_X228, E_X229, E_X230, E_X231, E_X232, E_X233, E_X234, E_X235, E_X236, E_X237, E_X238, E_X239, E_X240, E_X241, E_X242, E_X243, E_X244, E_X245, E_X246, E_X247, E_X248, E_X249, E_X250, } func TestMessageSetMarshalOrder(t *testing.T) { m := &MyMessageSet{} for _, x := range exts { if err := SetExtension(m, x, &Empty{}); err != nil { t.Fatalf("SetExtension: %v", err) } } buf, err := Marshal(m) if err != nil { t.Fatalf("Marshal: %v", err) } // Serialize m several times, and check we get the same bytes each time. for i := 0; i < 10; i++ { b1, err := Marshal(m) if err != nil { t.Fatalf("Marshal: %v", err) } if !bytes.Equal(b1, buf) { t.Errorf("Bytes differ on re-Marshal #%d", i) } m2 := &MyMessageSet{} if err := Unmarshal(buf, m2); err != nil { t.Errorf("Unmarshal: %v", err) } b2, err := Marshal(m2) if err != nil { t.Errorf("re-Marshal: %v", err) } if !bytes.Equal(b2, buf) { t.Errorf("Bytes differ on round-trip #%d", i) } } } func TestUnmarshalMergesMessages(t *testing.T) { // If a nested message occurs twice in the input, // the fields should be merged when decoding. 
a := &OtherMessage{ Key: Int64(123), Inner: &InnerMessage{ Host: String("polhode"), Port: Int32(1234), }, } aData, err := Marshal(a) if err != nil { t.Fatalf("Marshal(a): %v", err) } b := &OtherMessage{ Weight: Float32(1.2), Inner: &InnerMessage{ Host: String("herpolhode"), Connected: Bool(true), }, } bData, err := Marshal(b) if err != nil { t.Fatalf("Marshal(b): %v", err) } want := &OtherMessage{ Key: Int64(123), Weight: Float32(1.2), Inner: &InnerMessage{ Host: String("herpolhode"), Port: Int32(1234), Connected: Bool(true), }, } got := new(OtherMessage) if err := Unmarshal(append(aData, bData...), got); err != nil { t.Fatalf("Unmarshal: %v", err) } if !Equal(got, want) { t.Errorf("\n got %v\nwant %v", got, want) } } func TestEncodingSizes(t *testing.T) { tests := []struct { m Message n int }{ {&Defaults{F_Int32: Int32(math.MaxInt32)}, 6}, {&Defaults{F_Int32: Int32(math.MinInt32)}, 11}, {&Defaults{F_Uint32: Uint32(uint32(math.MaxInt32) + 1)}, 6}, {&Defaults{F_Uint32: Uint32(math.MaxUint32)}, 6}, } for _, test := range tests { b, err := Marshal(test.m) if err != nil { t.Errorf("Marshal(%v): %v", test.m, err) continue } if len(b) != test.n { t.Errorf("Marshal(%v) yielded %d bytes, want %d bytes", test.m, len(b), test.n) } } } func TestRequiredNotSetError(t *testing.T) { pb := initGoTest(false) pb.RequiredField.Label = nil pb.F_Int32Required = nil pb.F_Int64Required = nil expected := "0807" + // field 1, encoding 0, value 7 "2206" + "120474797065" + // field 4, encoding 2 (GoTestField) "5001" + // field 10, encoding 0, value 1 "6d20000000" + // field 13, encoding 5, value 0x20 "714000000000000000" + // field 14, encoding 1, value 0x40 "78a019" + // field 15, encoding 0, value 0xca0 = 3232 "8001c032" + // field 16, encoding 0, value 0x1940 = 6464 "8d0100004a45" + // field 17, encoding 5, value 3232.0 "9101000000000040b940" + // field 18, encoding 1, value 6464.0 "9a0106" + "737472696e67" + // field 19, encoding 2, string "string" "b304" + // field 70, encoding 3, 
start group "ba0408" + "7265717569726564" + // field 71, encoding 2, string "required" "b404" + // field 70, encoding 4, end group "aa0605" + "6279746573" + // field 101, encoding 2, string "bytes" "b0063f" + // field 102, encoding 0, 0x3f zigzag32 "b8067f" // field 103, encoding 0, 0x7f zigzag64 o := old() bytes, err := Marshal(pb) if _, ok := err.(*RequiredNotSetError); !ok { fmt.Printf("marshal-1 err = %v, want *RequiredNotSetError", err) o.DebugPrint("", bytes) t.Fatalf("expected = %s", expected) } if strings.Index(err.Error(), "RequiredField.Label") < 0 { t.Errorf("marshal-1 wrong err msg: %v", err) } if !equal(bytes, expected, t) { o.DebugPrint("neq 1", bytes) t.Fatalf("expected = %s", expected) } // Now test Unmarshal by recreating the original buffer. pbd := new(GoTest) err = Unmarshal(bytes, pbd) if _, ok := err.(*RequiredNotSetError); !ok { t.Fatalf("unmarshal err = %v, want *RequiredNotSetError", err) o.DebugPrint("", bytes) t.Fatalf("string = %s", expected) } if strings.Index(err.Error(), "RequiredField.{Unknown}") < 0 { t.Errorf("unmarshal wrong err msg: %v", err) } bytes, err = Marshal(pbd) if _, ok := err.(*RequiredNotSetError); !ok { t.Errorf("marshal-2 err = %v, want *RequiredNotSetError", err) o.DebugPrint("", bytes) t.Fatalf("string = %s", expected) } if strings.Index(err.Error(), "RequiredField.Label") < 0 { t.Errorf("marshal-2 wrong err msg: %v", err) } if !equal(bytes, expected, t) { o.DebugPrint("neq 2", bytes) t.Fatalf("string = %s", expected) } } func fuzzUnmarshal(t *testing.T, data []byte) { defer func() { if e := recover(); e != nil { t.Errorf("These bytes caused a panic: %+v", data) t.Logf("Stack:\n%s", debug.Stack()) t.FailNow() } }() pb := new(MyMessage) Unmarshal(data, pb) } func TestMapFieldMarshal(t *testing.T) { m := &MessageWithMap{ NameMapping: map[int32]string{ 1: "Rob", 4: "Ian", 8: "Dave", }, } b, err := Marshal(m) if err != nil { t.Fatalf("Marshal: %v", err) } // b should be the concatenation of these three byte sequences in 
some order. parts := []string{ "\n\a\b\x01\x12\x03Rob", "\n\a\b\x04\x12\x03Ian", "\n\b\b\x08\x12\x04Dave", } ok := false for i := range parts { for j := range parts { if j == i { continue } for k := range parts { if k == i || k == j { continue } try := parts[i] + parts[j] + parts[k] if bytes.Equal(b, []byte(try)) { ok = true break } } } } if !ok { t.Fatalf("Incorrect Marshal output.\n got %q\nwant %q (or a permutation of that)", b, parts[0]+parts[1]+parts[2]) } t.Logf("FYI b: %q", b) (new(Buffer)).DebugPrint("Dump of b", b) } func TestMapFieldRoundTrips(t *testing.T) { m := &MessageWithMap{ NameMapping: map[int32]string{ 1: "Rob", 4: "Ian", 8: "Dave", }, MsgMapping: map[int64]*FloatingPoint{ 0x7001: &FloatingPoint{F: Float64(2.0)}, }, ByteMapping: map[bool][]byte{ false: []byte("that's not right!"), true: []byte("aye, 'tis true!"), }, } b, err := Marshal(m) if err != nil { t.Fatalf("Marshal: %v", err) } t.Logf("FYI b: %q", b) m2 := new(MessageWithMap) if err := Unmarshal(b, m2); err != nil { t.Fatalf("Unmarshal: %v", err) } for _, pair := range [][2]interface{}{ {m.NameMapping, m2.NameMapping}, {m.MsgMapping, m2.MsgMapping}, {m.ByteMapping, m2.ByteMapping}, } { if !reflect.DeepEqual(pair[0], pair[1]) { t.Errorf("Map did not survive a round trip.\ninitial: %v\n final: %v", pair[0], pair[1]) } } } func TestMapFieldWithNil(t *testing.T) { m1 := &MessageWithMap{ MsgMapping: map[int64]*FloatingPoint{ 1: nil, }, } b, err := Marshal(m1) if err != nil { t.Fatalf("Marshal: %v", err) } m2 := new(MessageWithMap) if err := Unmarshal(b, m2); err != nil { t.Fatalf("Unmarshal: %v, got these bytes: %v", err, b) } if v, ok := m2.MsgMapping[1]; !ok { t.Error("msg_mapping[1] not present") } else if v != nil { t.Errorf("msg_mapping[1] not nil: %v", v) } } func TestMapFieldWithNilBytes(t *testing.T) { m1 := &MessageWithMap{ ByteMapping: map[bool][]byte{ false: []byte{}, true: nil, }, } n := Size(m1) b, err := Marshal(m1) if err != nil { t.Fatalf("Marshal: %v", err) } if n != len(b) { 
t.Errorf("Size(m1) = %d; want len(Marshal(m1)) = %d", n, len(b)) } m2 := new(MessageWithMap) if err := Unmarshal(b, m2); err != nil { t.Fatalf("Unmarshal: %v, got these bytes: %v", err, b) } if v, ok := m2.ByteMapping[false]; !ok { t.Error("byte_mapping[false] not present") } else if len(v) != 0 { t.Errorf("byte_mapping[false] not empty: %#v", v) } if v, ok := m2.ByteMapping[true]; !ok { t.Error("byte_mapping[true] not present") } else if len(v) != 0 { t.Errorf("byte_mapping[true] not empty: %#v", v) } } func TestDecodeMapFieldMissingKey(t *testing.T) { b := []byte{ 0x0A, 0x03, // message, tag 1 (name_mapping), of length 3 bytes // no key 0x12, 0x01, 0x6D, // string value of length 1 byte, value "m" } got := &MessageWithMap{} err := Unmarshal(b, got) if err != nil { t.Fatalf("failed to marshal map with missing key: %v", err) } want := &MessageWithMap{NameMapping: map[int32]string{0: "m"}} if !Equal(got, want) { t.Errorf("Unmarshaled map with no key was not as expected. got: %v, want %v", got, want) } } func TestDecodeMapFieldMissingValue(t *testing.T) { b := []byte{ 0x0A, 0x02, // message, tag 1 (name_mapping), of length 2 bytes 0x08, 0x01, // varint key, value 1 // no value } got := &MessageWithMap{} err := Unmarshal(b, got) if err != nil { t.Fatalf("failed to marshal map with missing value: %v", err) } want := &MessageWithMap{NameMapping: map[int32]string{1: ""}} if !Equal(got, want) { t.Errorf("Unmarshaled map with no value was not as expected. got: %v, want %v", got, want) } } func TestOneof(t *testing.T) { m := &Communique{} b, err := Marshal(m) if err != nil { t.Fatalf("Marshal of empty message with oneof: %v", err) } if len(b) != 0 { t.Errorf("Marshal of empty message yielded too many bytes: %v", b) } m = &Communique{ Union: &Communique_Name{"Barry"}, } // Round-trip. 
b, err = Marshal(m) if err != nil { t.Fatalf("Marshal of message with oneof: %v", err) } if len(b) != 7 { // name tag/wire (1) + name len (1) + name (5) t.Errorf("Incorrect marshal of message with oneof: %v", b) } m.Reset() if err := Unmarshal(b, m); err != nil { t.Fatalf("Unmarshal of message with oneof: %v", err) } if x, ok := m.Union.(*Communique_Name); !ok || x.Name != "Barry" { t.Errorf("After round trip, Union = %+v", m.Union) } if name := m.GetName(); name != "Barry" { t.Errorf("After round trip, GetName = %q, want %q", name, "Barry") } // Let's try with a message in the oneof. m.Union = &Communique_Msg{&Strings{StringField: String("deep deep string")}} b, err = Marshal(m) if err != nil { t.Fatalf("Marshal of message with oneof set to message: %v", err) } if len(b) != 20 { // msg tag/wire (1) + msg len (1) + msg (1 + 1 + 16) t.Errorf("Incorrect marshal of message with oneof set to message: %v", b) } m.Reset() if err := Unmarshal(b, m); err != nil { t.Fatalf("Unmarshal of message with oneof set to message: %v", err) } ss, ok := m.Union.(*Communique_Msg) if !ok || ss.Msg.GetStringField() != "deep deep string" { t.Errorf("After round trip with oneof set to message, Union = %+v", m.Union) } } func TestInefficientPackedBool(t *testing.T) { // https://github.com/golang/protobuf/issues/76 inp := []byte{ 0x12, 0x02, // 0x12 = 2<<3|2; 2 bytes // Usually a bool should take a single byte, // but it is permitted to be any varint. 0xb9, 0x30, } if err := Unmarshal(inp, new(MoreRepeated)); err != nil { t.Error(err) } } // Benchmarks func testMsg() *GoTest { pb := initGoTest(true) const N = 1000 // Internally the library starts much smaller. 
pb.F_Int32Repeated = make([]int32, N) pb.F_DoubleRepeated = make([]float64, N) for i := 0; i < N; i++ { pb.F_Int32Repeated[i] = int32(i) pb.F_DoubleRepeated[i] = float64(i) } return pb } func bytesMsg() *GoTest { pb := initGoTest(true) buf := make([]byte, 4000) for i := range buf { buf[i] = byte(i) } pb.F_BytesDefaulted = buf return pb } func benchmarkMarshal(b *testing.B, pb Message, marshal func(Message) ([]byte, error)) { d, _ := marshal(pb) b.SetBytes(int64(len(d))) b.ResetTimer() for i := 0; i < b.N; i++ { marshal(pb) } } func benchmarkBufferMarshal(b *testing.B, pb Message) { p := NewBuffer(nil) benchmarkMarshal(b, pb, func(pb0 Message) ([]byte, error) { p.Reset() err := p.Marshal(pb0) return p.Bytes(), err }) } func benchmarkSize(b *testing.B, pb Message) { benchmarkMarshal(b, pb, func(pb0 Message) ([]byte, error) { Size(pb) return nil, nil }) } func newOf(pb Message) Message { in := reflect.ValueOf(pb) if in.IsNil() { return pb } return reflect.New(in.Type().Elem()).Interface().(Message) } func benchmarkUnmarshal(b *testing.B, pb Message, unmarshal func([]byte, Message) error) { d, _ := Marshal(pb) b.SetBytes(int64(len(d))) pbd := newOf(pb) b.ResetTimer() for i := 0; i < b.N; i++ { unmarshal(d, pbd) } } func benchmarkBufferUnmarshal(b *testing.B, pb Message) { p := NewBuffer(nil) benchmarkUnmarshal(b, pb, func(d []byte, pb0 Message) error { p.SetBuf(d) return p.Unmarshal(pb0) }) } // Benchmark{Marshal,BufferMarshal,Size,Unmarshal,BufferUnmarshal}{,Bytes} func BenchmarkMarshal(b *testing.B) { benchmarkMarshal(b, testMsg(), Marshal) } func BenchmarkBufferMarshal(b *testing.B) { benchmarkBufferMarshal(b, testMsg()) } func BenchmarkSize(b *testing.B) { benchmarkSize(b, testMsg()) } func BenchmarkUnmarshal(b *testing.B) { benchmarkUnmarshal(b, testMsg(), Unmarshal) } func BenchmarkBufferUnmarshal(b *testing.B) { benchmarkBufferUnmarshal(b, testMsg()) } func BenchmarkMarshalBytes(b *testing.B) { benchmarkMarshal(b, bytesMsg(), Marshal) } func 
BenchmarkBufferMarshalBytes(b *testing.B) { benchmarkBufferMarshal(b, bytesMsg()) } func BenchmarkSizeBytes(b *testing.B) { benchmarkSize(b, bytesMsg()) } func BenchmarkUnmarshalBytes(b *testing.B) { benchmarkUnmarshal(b, bytesMsg(), Unmarshal) } func BenchmarkBufferUnmarshalBytes(b *testing.B) { benchmarkBufferUnmarshal(b, bytesMsg()) } func BenchmarkUnmarshalUnrecognizedFields(b *testing.B) { b.StopTimer() pb := initGoTestField() skip := &GoSkipTest{ SkipInt32: Int32(32), SkipFixed32: Uint32(3232), SkipFixed64: Uint64(6464), SkipString: String("skipper"), Skipgroup: &GoSkipTest_SkipGroup{ GroupInt32: Int32(75), GroupString: String("wxyz"), }, } pbd := new(GoTestField) p := NewBuffer(nil) p.Marshal(pb) p.Marshal(skip) p2 := NewBuffer(nil) b.StartTimer() for i := 0; i < b.N; i++ { p2.SetBuf(p.Bytes()) p2.Unmarshal(pbd) } }
{ "pile_set_name": "Github" }
/** * Copyright JS Foundation and other contributors, http://js.foundation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. **/ RED.tabs = (function() { var defaultTabIcon = "fa fa-lemon-o"; var dragActive = false; var dblClickTime; var dblClickArmed = false; function createTabs(options) { var tabs = {}; var pinnedTabsCount = 0; var currentTabWidth; var currentActiveTabWidth = 0; var collapsibleMenu; var ul = options.element || $("#"+options.id); var wrapper = ul.wrap( "<div>" ).parent(); var scrollContainer = ul.wrap( "<div>" ).parent(); wrapper.addClass("red-ui-tabs"); if (options.vertical) { wrapper.addClass("red-ui-tabs-vertical"); } if (options.addButton) { wrapper.addClass("red-ui-tabs-add"); var addButton = $('<div class="red-ui-tab-button red-ui-tabs-add"><a href="#"><i class="fa fa-plus"></i></a></div>').appendTo(wrapper); addButton.find('a').on("click", function(evt) { evt.preventDefault(); if (typeof options.addButton === 'function') { options.addButton(); } else if (typeof options.addButton === 'string') { RED.actions.invoke(options.addButton); } }) if (typeof options.addButton === 'string') { var l = options.addButton; if (options.addButtonCaption) { l = options.addButtonCaption } RED.popover.tooltip(addButton,l,options.addButton); } ul.on("dblclick", function(evt) { var existingTabs = ul.children(); var clickX = evt.clientX; var targetIndex = 0; existingTabs.each(function(index) { var pos = $(this).offset(); if (pos.left > clickX) { return false; } targetIndex = 
index+1; }) if (typeof options.addButton === 'function') { options.addButton({index:targetIndex}); } else if (typeof options.addButton === 'string') { RED.actions.invoke(options.addButton,{index:targetIndex}); } }); } if (options.searchButton) { wrapper.addClass("red-ui-tabs-search"); var searchButton = $('<div class="red-ui-tab-button red-ui-tabs-search"><a href="#"><i class="fa fa-list-ul"></i></a></div>').appendTo(wrapper); searchButton.find('a').on("click", function(evt) { evt.preventDefault(); if (typeof options.searchButton === 'function') { options.searchButton() } else if (typeof options.searchButton === 'string') { RED.actions.invoke(options.searchButton); } }) if (typeof options.searchButton === 'string') { var l = options.searchButton; if (options.searchButtonCaption) { l = options.searchButtonCaption } RED.popover.tooltip(searchButton,l,options.searchButton); } } var scrollLeft; var scrollRight; if (options.scrollable) { wrapper.addClass("red-ui-tabs-scrollable"); scrollContainer.addClass("red-ui-tabs-scroll-container"); scrollContainer.on("scroll",updateScroll); scrollLeft = $('<div class="red-ui-tab-button red-ui-tab-scroll red-ui-tab-scroll-left"><a href="#" style="display:none;"><i class="fa fa-caret-left"></i></a></div>').appendTo(wrapper).find("a"); scrollLeft.on('mousedown',function(evt) { scrollEventHandler(evt,'-=150') }).on('click',function(evt){ evt.preventDefault();}); scrollRight = $('<div class="red-ui-tab-button red-ui-tab-scroll red-ui-tab-scroll-right"><a href="#" style="display:none;"><i class="fa fa-caret-right"></i></a></div>').appendTo(wrapper).find("a"); scrollRight.on('mousedown',function(evt) { scrollEventHandler(evt,'+=150') }).on('click',function(evt){ evt.preventDefault();}); } if (options.collapsible) { // var dropDown = $('<div>',{class:"red-ui-tabs-select"}).appendTo(wrapper); // ul.hide(); wrapper.addClass("red-ui-tabs-collapsible"); var collapsedButtonsRow = $('<div 
class="red-ui-tab-link-buttons"></div>').appendTo(wrapper); if (options.menu !== false) { var selectButton = $('<a href="#"><i class="fa fa-caret-down"></i></a>').appendTo(collapsedButtonsRow); selectButton.addClass("red-ui-tab-link-button-menu") selectButton.on("click", function(evt) { evt.stopPropagation(); evt.preventDefault(); if (!collapsibleMenu) { var pinnedOptions = []; var options = []; ul.children().each(function(i,el) { var id = $(el).data('tabId'); var opt = { id:"red-ui-tabs-menu-option-"+id, icon: tabs[id].iconClass || defaultTabIcon, label: tabs[id].name, onselect: function() { activateTab(id); } }; if (tabs[id].pinned) { pinnedOptions.push(opt); } else { options.push(opt); } }); options = pinnedOptions.concat(options); collapsibleMenu = RED.menu.init({options: options}); collapsibleMenu.css({ position: "absolute" }) collapsibleMenu.appendTo("body"); } var elementPos = selectButton.offset(); collapsibleMenu.css({ top: (elementPos.top+selectButton.height()-2)+"px", left: (elementPos.left - collapsibleMenu.width() + selectButton.width())+"px" }) if (collapsibleMenu.is(":visible")) { $(document).off("click.red-ui-tabmenu"); } else { $(".red-ui-menu.red-ui-menu-dropdown").hide(); $(document).on("click.red-ui-tabmenu", function(evt) { $(document).off("click.red-ui-tabmenu"); collapsibleMenu.hide(); }); } collapsibleMenu.toggle(); }) } } function scrollEventHandler(evt,dir) { evt.preventDefault(); if ($(this).hasClass('disabled')) { return; } var currentScrollLeft = scrollContainer.scrollLeft(); scrollContainer.animate( { scrollLeft: dir }, 100); var interval = setInterval(function() { var newScrollLeft = scrollContainer.scrollLeft() if (newScrollLeft === currentScrollLeft) { clearInterval(interval); return; } currentScrollLeft = newScrollLeft; scrollContainer.animate( { scrollLeft: dir }, 100); },100); $(this).one('mouseup',function() { clearInterval(interval); }) } ul.children().first().addClass("active"); ul.children().addClass("red-ui-tab"); function 
getSelection() { var selection = ul.find("li.red-ui-tab.selected"); var selectedTabs = []; selection.each(function() { selectedTabs.push(tabs[$(this).find('a').attr('href').slice(1)]) }) return selectedTabs; } function selectionChanged() { options.onselect(getSelection()); } function onTabClick(evt) { if (dragActive) { return } if (dblClickTime && Date.now()-dblClickTime < 400) { dblClickTime = 0; dblClickArmed = true; return onTabDblClick.call(this,evt); } dblClickTime = Date.now(); var currentTab = ul.find("li.red-ui-tab.active"); var thisTab = $(this).parent(); var fireSelectionChanged = false; if (options.onselect) { if (evt.metaKey || evt.ctrlKey) { if (thisTab.hasClass("selected")) { thisTab.removeClass("selected"); if (thisTab[0] !== currentTab[0]) { // Deselect background tab // - don't switch to it selectionChanged(); return; } else { // Deselect current tab // - if nothing remains selected, do nothing // - otherwise switch to first selected tab var selection = ul.find("li.red-ui-tab.selected"); if (selection.length === 0) { selectionChanged(); return; } thisTab = selection.first(); } } else { if (!currentTab.hasClass("selected")) { var currentTabObj = tabs[currentTab.find('a').attr('href').slice(1)]; // Auto select current tab currentTab.addClass("selected"); } thisTab.addClass("selected"); } fireSelectionChanged = true; } else if (evt.shiftKey) { if (currentTab[0] !== thisTab[0]) { var firstTab,lastTab; if (currentTab.index() < thisTab.index()) { firstTab = currentTab; lastTab = thisTab; } else { firstTab = thisTab; lastTab = currentTab; } ul.find("li.red-ui-tab").removeClass("selected"); firstTab.addClass("selected"); lastTab.addClass("selected"); firstTab.nextUntil(lastTab).addClass("selected"); } fireSelectionChanged = true; } else { var selection = ul.find("li.red-ui-tab.selected"); if (selection.length > 0) { selection.removeClass("selected"); fireSelectionChanged = true; } } } var thisTabA = thisTab.find("a"); if (options.onclick) { 
options.onclick(tabs[thisTabA.attr('href').slice(1)]); } activateTab(thisTabA); if (fireSelectionChanged) { selectionChanged(); } } function updateScroll() { if (ul.children().length !== 0) { var sl = scrollContainer.scrollLeft(); var scWidth = scrollContainer.width(); var ulWidth = ul.width(); if (sl === 0) { scrollLeft.hide(); } else { scrollLeft.show(); } if (sl === ulWidth-scWidth) { scrollRight.hide(); } else { scrollRight.show(); } } } function onTabDblClick(evt) { evt.preventDefault(); if (evt.metaKey || evt.shiftKey) { return; } if (options.ondblclick) { options.ondblclick(tabs[$(this).attr('href').slice(1)]); } return false; } function activateTab(link) { if (typeof link === "string") { link = ul.find("a[href='#"+link+"']"); } if (link.length === 0) { return; } if (!link.parent().hasClass("active")) { ul.children().removeClass("active"); ul.children().css({"transition": "width 100ms"}); link.parent().addClass("active"); var parentId = link.parent().attr('id'); wrapper.find(".red-ui-tab-link-button").removeClass("active selected"); $("#"+parentId+"-link-button").addClass("active selected"); if (options.scrollable) { var pos = link.parent().position().left; if (pos-21 < 0) { scrollContainer.animate( { scrollLeft: '+='+(pos-50) }, 300); } else if (pos + 120 > scrollContainer.width()) { scrollContainer.animate( { scrollLeft: '+='+(pos + 140-scrollContainer.width()) }, 300); } } if (options.onchange) { options.onchange(tabs[link.attr('href').slice(1)]); } updateTabWidths(); setTimeout(function() { ul.children().css({"transition": ""}); },100); } } function activatePreviousTab() { var previous = ul.find("li.active").prev(); if (previous.length > 0) { activateTab(previous.find("a")); } } function activateNextTab() { var next = ul.find("li.active").next(); if (next.length > 0) { activateTab(next.find("a")); } } function updateTabWidths() { if (options.vertical) { return; } var tabs = ul.find("li.red-ui-tab"); var width = wrapper.width(); var tabCount = 
tabs.length; var tabWidth; if (options.collapsible) { tabWidth = width - collapsedButtonsRow.width()-10; if (tabWidth < 198) { var delta = 198 - tabWidth; var b = collapsedButtonsRow.find("a:last").prev(); while (b.is(":not(:visible)")) { b = b.prev(); } if (!b.hasClass("red-ui-tab-link-button-pinned")) { b.hide(); } tabWidth = width - collapsedButtonsRow.width()-10; } else { var space = width - 198 - collapsedButtonsRow.width(); if (space > 40) { collapsedButtonsRow.find("a:not(:visible):first").show(); tabWidth = width - collapsedButtonsRow.width()-10; } } tabs.css({width:tabWidth}); } else { var tabWidth = (width-12-(tabCount*6))/tabCount; currentTabWidth = (100*tabWidth/width)+"%"; currentActiveTabWidth = currentTabWidth+"%"; if (options.scrollable) { tabWidth = Math.max(tabWidth,140); currentTabWidth = tabWidth+"px"; currentActiveTabWidth = 0; var listWidth = Math.max(wrapper.width(),12+(tabWidth+6)*tabCount); ul.width(listWidth); updateScroll(); } else if (options.hasOwnProperty("minimumActiveTabWidth")) { if (tabWidth < options.minimumActiveTabWidth) { tabCount -= 1; tabWidth = (width-12-options.minimumActiveTabWidth-(tabCount*6))/tabCount; currentTabWidth = (100*tabWidth/width)+"%"; currentActiveTabWidth = options.minimumActiveTabWidth+"px"; } else { currentActiveTabWidth = 0; } } // if (options.collapsible) { // console.log(currentTabWidth); // } tabs.css({width:currentTabWidth}); if (tabWidth < 50) { // ul.find(".red-ui-tab-close").hide(); ul.find(".red-ui-tab-icon").hide(); ul.find(".red-ui-tab-label").css({paddingLeft:Math.min(12,Math.max(0,tabWidth-38))+"px"}) } else { // ul.find(".red-ui-tab-close").show(); ul.find(".red-ui-tab-icon").show(); ul.find(".red-ui-tab-label").css({paddingLeft:""}) } if (currentActiveTabWidth !== 0) { ul.find("li.red-ui-tab.active").css({"width":options.minimumActiveTabWidth}); // ul.find("li.red-ui-tab.active .red-ui-tab-close").show(); ul.find("li.red-ui-tab.active .red-ui-tab-icon").show(); ul.find("li.red-ui-tab.active 
.red-ui-tab-label").css({paddingLeft:""}) } } } ul.find("li.red-ui-tab a") .on("mouseup",onTabClick) .on("click", function(evt) {evt.preventDefault(); }) .on("dblclick", function(evt) {evt.stopPropagation(); evt.preventDefault(); }) setTimeout(function() { updateTabWidths(); },0); function removeTab(id) { if (options.onselect) { var selection = ul.find("li.red-ui-tab.selected"); if (selection.length > 0) { selection.removeClass("selected"); selectionChanged(); } } var li = ul.find("a[href='#"+id+"']").parent(); if (li.hasClass("active")) { var tab = li.prev(); if (tab.length === 0) { tab = li.next(); } activateTab(tab.find("a")); } li.remove(); if (tabs[id].pinned) { pinnedTabsCount--; } if (options.onremove) { options.onremove(tabs[id]); } delete tabs[id]; updateTabWidths(); if (collapsibleMenu) { collapsibleMenu.remove(); collapsibleMenu = null; } } return { addTab: function(tab,targetIndex) { if (options.onselect) { var selection = ul.find("li.red-ui-tab.selected"); if (selection.length > 0) { selection.removeClass("selected"); selectionChanged(); } } tabs[tab.id] = tab; var li = $("<li/>",{class:"red-ui-tab"}); if (ul.children().length === 0) { targetIndex = undefined; } if (targetIndex === 0) { li.prependTo(ul); } else if (targetIndex > 0) { li.insertAfter(ul.find("li:nth-child("+(targetIndex)+")")); } else { li.appendTo(ul); } li.attr('id',"red-ui-tab-"+(tab.id.replace(".","-"))); li.data("tabId",tab.id); if (options.maximumTabWidth) { li.css("maxWidth",options.maximumTabWidth+"px"); } var link = $("<a/>",{href:"#"+tab.id, class:"red-ui-tab-label"}).appendTo(li); if (tab.icon) { $('<img src="'+tab.icon+'" class="red-ui-tab-icon"/>').appendTo(link); } else if (tab.iconClass) { $('<i>',{class:"red-ui-tab-icon "+tab.iconClass}).appendTo(link); } var span = $('<span/>',{class:"red-ui-text-bidi-aware"}).text(tab.label).appendTo(link); span.attr('dir', RED.text.bidi.resolveBaseTextDir(tab.label)); if (options.collapsible) { li.addClass("red-ui-tab-pinned"); var 
pinnedLink = $('<a href="#'+tab.id+'" class="red-ui-tab-link-button"></a>'); if (tab.pinned) { if (pinnedTabsCount === 0) { pinnedLink.prependTo(collapsedButtonsRow) } else { pinnedLink.insertAfter(collapsedButtonsRow.find("a.red-ui-tab-link-button-pinned:last")); } } else { if (options.menu !== false) { pinnedLink.insertBefore(collapsedButtonsRow.find("a:last")); } else { pinnedLink.appendTo(collapsedButtonsRow); } } pinnedLink.attr('id',li.attr('id')+"-link-button"); if (tab.iconClass) { $('<i>',{class:tab.iconClass}).appendTo(pinnedLink); } else { $('<i>',{class:defaultTabIcon}).appendTo(pinnedLink); } pinnedLink.on("click", function(evt) { evt.preventDefault(); activateTab(tab.id); }); if (tab.pinned) { pinnedLink.addClass("red-ui-tab-link-button-pinned"); pinnedTabsCount++; } RED.popover.tooltip($(pinnedLink), tab.name, tab.action); } link.on("mouseup",onTabClick); link.on("click", function(evt) { evt.preventDefault(); }) link.on("dblclick", function(evt) { evt.stopPropagation(); evt.preventDefault(); }) if (tab.closeable) { li.addClass("red-ui-tabs-closeable") var closeLink = $("<a/>",{href:"#",class:"red-ui-tab-close"}).appendTo(li); closeLink.append('<i class="fa fa-times" />'); closeLink.on("click",function(event) { event.preventDefault(); removeTab(tab.id); }); } var badges = $('<span class="red-ui-tabs-badges"></span>').appendTo(li); if (options.onselect) { $('<i class="red-ui-tabs-badge-changed fa fa-circle"></i>').appendTo(badges); $('<i class="red-ui-tabs-badge-selected fa fa-check-circle"></i>').appendTo(badges); } if (options.onadd) { options.onadd(tab); } link.attr("title",tab.label); if (ul.find("li.red-ui-tab").length == 1) { activateTab(link); } if (options.onreorder) { var originalTabOrder; var tabDragIndex; var tabElements = []; var startDragIndex; li.draggable({ axis:"x", distance: 20, start: function(event,ui) { if (dblClickArmed) { dblClickArmed = false; return false } dragActive = true; originalTabOrder = []; tabElements = []; 
ul.children().each(function(i) { tabElements[i] = { el:$(this), text: $(this).text(), left: $(this).position().left, width: $(this).width() }; if ($(this).is(li)) { tabDragIndex = i; startDragIndex = i; } originalTabOrder.push($(this).data("tabId")); }); ul.children().each(function(i) { if (i!==tabDragIndex) { $(this).css({ position: 'absolute', left: tabElements[i].left+"px", width: tabElements[i].width+2, transition: "left 0.3s" }); } }) if (!li.hasClass('active')) { li.css({'zIndex':1}); } }, drag: function(event,ui) { ui.position.left += tabElements[tabDragIndex].left+scrollContainer.scrollLeft(); var tabCenter = ui.position.left + tabElements[tabDragIndex].width/2 - scrollContainer.scrollLeft(); for (var i=0;i<tabElements.length;i++) { if (i === tabDragIndex) { continue; } if (tabCenter > tabElements[i].left && tabCenter < tabElements[i].left+tabElements[i].width) { if (i < tabDragIndex) { tabElements[i].left += tabElements[tabDragIndex].width+8; tabElements[tabDragIndex].el.detach().insertBefore(tabElements[i].el); } else { tabElements[i].left -= tabElements[tabDragIndex].width+8; tabElements[tabDragIndex].el.detach().insertAfter(tabElements[i].el); } tabElements[i].el.css({left:tabElements[i].left+"px"}); tabElements.splice(i, 0, tabElements.splice(tabDragIndex, 1)[0]); tabDragIndex = i; break; } } }, stop: function(event,ui) { dragActive = false; ul.children().css({position:"relative",left:"",transition:""}); if (!li.hasClass('active')) { li.css({zIndex:""}); } updateTabWidths(); if (startDragIndex !== tabDragIndex) { options.onreorder(originalTabOrder, $.makeArray(ul.children().map(function() { return $(this).data('tabId');}))); } activateTab(tabElements[tabDragIndex].el.data('tabId')); } }) } setTimeout(function() { updateTabWidths(); },10); if (collapsibleMenu) { collapsibleMenu.remove(); collapsibleMenu = null; } }, removeTab: removeTab, activateTab: activateTab, nextTab: activateNextTab, previousTab: activatePreviousTab, resize: updateTabWidths, count: 
function() { return ul.find("li.red-ui-tab").length; }, contains: function(id) { return ul.find("a[href='#"+id+"']").length > 0; }, renameTab: function(id,label) { tabs[id].label = label; var tab = ul.find("a[href='#"+id+"']"); tab.attr("title",label); tab.find("span.red-ui-text-bidi-aware").text(label).attr('dir', RED.text.bidi.resolveBaseTextDir(label)); updateTabWidths(); }, selection: getSelection, order: function(order) { var existingTabOrder = $.makeArray(ul.children().map(function() { return $(this).data('tabId');})); if (existingTabOrder.length !== order.length) { return } var i; var match = true; for (i=0;i<order.length;i++) { if (order[i] !== existingTabOrder[i]) { match = false; break; } } if (match) { return; } var existingTabMap = {}; var existingTabs = ul.children().detach().each(function() { existingTabMap[$(this).data("tabId")] = $(this); }); for (i=0;i<order.length;i++) { existingTabMap[order[i]].appendTo(ul); } } } } return { create: createTabs } })();
{ "pile_set_name": "Github" }
/**
 * Bind an after-leave handler on a Vue instance, with a timeout fallback so
 * the callback is guaranteed to run even in browsers that never emit the
 * transition's after-leave event.
 *
 * @param {Vue} instance Vue instance to listen on.
 * @param {Function} callback handler for the after-leave event; invoked at most once.
 * @param {Number} speed transition duration in ms; the fallback fires after speed + 100ms. Default 300.
 * @param {Boolean} once whether to bind with $once instead of $on. Default false.
 * @throws {Error} if instance or callback is missing.
 */
export default function afterLeave(instance, callback, speed = 300, once = false) {
  if (!instance || !callback) throw new Error('instance & callback is required');
  // Guard so the event handler and the timeout fallback cannot both invoke the callback.
  let called = false;
  const afterLeaveCallback = function() {
    if (called) return;
    called = true;
    // callback was validated above, so no re-check is needed here.
    callback.apply(null, arguments);
  };
  if (once) {
    instance.$once('after-leave', afterLeaveCallback);
  } else {
    instance.$on('after-leave', afterLeaveCallback);
  }
  // Fallback: fire the callback shortly after the transition should have ended,
  // in case 'after-leave' is never emitted.
  setTimeout(() => {
    afterLeaveCallback();
  }, speed + 100);
}
{ "pile_set_name": "Github" }
import React from 'react'
import { Direction } from 'rt-types'
import {
  Price,
  BigWrapper,
  DirectionLabel,
  Big,
  Pip,
  Tenth,
  ExpiredPrice,
} from 'apps/MainRoute/widgets/spotTile/components/PriceButton/styled'
import { styled } from 'rt-theme'

// Props for the price button; all pricing fields default to 0 when absent.
interface PriceButtonProps {
  big?: number
  pip?: number
  tenth?: number
  rawRate?: number
  direction?: Direction
  handleClick?: () => void
  priceAnnounced?: boolean
  disabled?: boolean
  expired?: boolean
  currencyPairSymbol: string
  isAnalyticsView: boolean
}

// Styled button: red for Sell, blue otherwise. Note the CSS sets `cursor`
// twice — the later `cursor: initial` together with `pointer-events: none`
// wins, so the button is effectively non-interactive despite the earlier
// `cursor: pointer`. NOTE(review): presumably intentional (display-only
// price tile) — confirm before cleaning up. `priceAnnounced` is declared in
// the prop type but not used in these styles.
const TradeButton = styled.button<{
  direction: Direction
  priceAnnounced: boolean
  isAnalyticsView: boolean
}>`
  background-color: ${({ theme, direction }) =>
    direction === Direction.Sell ? '#f94c4c' : '#5f94f5'};
  border-radius: 3px;
  color: #fff;
  transition: background-color 0.2s ease;
  cursor: pointer;
  border: none;
  outline: none;
  height: ${({ isAnalyticsView }) => (isAnalyticsView ? '50%' : '59px')};
  min-width: 125px;
  padding: 0.6rem 1.5rem 0.7rem 1.5rem;
  margin-bottom: 2px;
  cursor: initial;
  pointer-events: none;
`

// Left-pad single-digit pip values with a zero (e.g. 7 -> "07").
const renderPips = (pips: number) => (pips.toString().length === 1 ? `0${pips}` : pips)

// Append a trailing "." when the big figure equals the whole part of the raw
// rate (i.e. the decimal point belongs after it); otherwise show it as-is.
const getBigFigureDisplay = (bigFigure: number, rawRate: number) =>
  bigFigure === Math.floor(rawRate) ? `${bigFigure}.` : bigFigure.toString()

// Pad a 3-character big-figure display with a trailing zero (e.g. "1.2" stays
// visually aligned as "1.20" would; exact rationale depends on upstream
// formatting — see getBigFigureDisplay).
const renderBigFigureDisplay = (bigFigureDisplay: string) =>
  bigFigureDisplay.toString().length === 3 ? `${bigFigureDisplay}0` : bigFigureDisplay

// Display-only price button showing direction, big figure, pips and tenth.
// A rawRate of 0 is treated as "no price": the button is disabled and a "-"
// placeholder is rendered instead of the figure.
const PriceButtonComp: React.FC<PriceButtonProps> = ({
  big = 0,
  pip = 0,
  tenth = 0,
  rawRate = 0,
  direction = Direction.Buy,
  handleClick = () => {},
  priceAnnounced,
  disabled = false,
  expired = false,
  currencyPairSymbol,
  isAnalyticsView,
}) => {
  const bigFigure = getBigFigureDisplay(big, rawRate)
  const hasPrice = rawRate !== 0
  // Disabled either explicitly or because no price is available yet.
  const isDisabled = disabled || !hasPrice
  return (
    <TradeButton
      direction={direction}
      onClick={handleClick}
      priceAnnounced={!!priceAnnounced}
      disabled={isDisabled}
      isAnalyticsView={isAnalyticsView}
      data-qa="price-button__trade-button"
      data-qa-id={`direction-${direction.toLowerCase()}-${currencyPairSymbol.toLowerCase()}`}
    >
      <Price disabled={isDisabled}>
        <BigWrapper>
          <DirectionLabel>{direction.toUpperCase()}</DirectionLabel>
          <Big data-qa="price-button__big">
            {hasPrice ? renderBigFigureDisplay(bigFigure) : '-'}
          </Big>
        </BigWrapper>
        {/* Pips and tenth are only meaningful when a price exists. */}
        {hasPrice && (
          <React.Fragment>
            <Pip data-qa="price-button__pip">{renderPips(pip)}</Pip>
            <Tenth data-qa="price-button__tenth">{tenth}</Tenth>
          </React.Fragment>
        )}
      </Price>
      {expired && <ExpiredPrice data-qa="price-button__expired">Expired</ExpiredPrice>}
    </TradeButton>
  )
}

export default PriceButtonComp
{ "pile_set_name": "Github" }
<!DOCTYPE html> <html lang="en" > <head> <title>HTTP charset</title> <link rel='author' title='Richard Ishida' href='mailto:ishida@w3.org'> <link rel='help' href='http://www.w3.org/TR/html5/syntax.html#the-input-byte-stream'> <link rel="stylesheet" type="text/css" href="./generatedtests.css"> <script src="http://w3c-test.org/resources/testharness.js"></script> <script src="http://w3c-test.org/resources/testharnessreport.js"></script> <meta name='flags' content='http'> <meta name="assert" content="The character encoding of a page can be set using the HTTP header charset declaration."> <style type='text/css'> .test div { width: 50px; }</style> <link rel="stylesheet" type="text/css" href="the-input-byte-stream/support/encodingtests-15.css"> </head> <body> <p class='title'>HTTP charset</p> <div id='log'></div> <div class='test'><div id='box' class='รฝรครจ'>&#xA0;</div></div> <div class='description'> <p class="assertion" title="Assertion">The character encoding of a page can be set using the HTTP header charset declaration.</p> <div class="notes"><p><p>The test contains a div with a class name that contains the following sequence of bytes: 0xC3 0xBD 0xC3 0xA4 0xC3 0xA8. These represent different sequences of characters in ISO 8859-15, ISO 8859-1 and UTF-8. The external, UTF-8-encoded stylesheet contains a selector <code>.test div.&#x00C3;&#x0153;&#x00C3;&#x20AC;&#x00C3;&#x0161;</code>. This matches the sequence of bytes above when they are interpreted as ISO 8859-15. 
If the class name matches the selector then the test will pass.</p><p>The only character encoding declaration for this HTML file is in the HTTP header, which sets the encoding to ISO 8859-15.</p></p> </div> </div> <div class="nexttest"><div><a href="generate?test=the-input-byte-stream-003">Next test</a></div><div class="doctype">HTML5</div> <p class="jump">the-input-byte-stream-001<br /><a href="/International/tests/html5/the-input-byte-stream/results-basics#basics" target="_blank">Result summary &amp; related tests</a><br /><a href="http://w3c-test.org/framework/details/i18n-html5/the-input-byte-stream-001" target="_blank">Detailed results for this test</a><br/> <a href="http://www.w3.org/TR/html5/syntax.html#the-input-byte-stream" target="_blank">Link to spec</a></p> <div class='prereq'>Assumptions: <ul><li>The default encoding for the browser you are testing is not set to ISO 8859-15.</li> <li>The test is read from a server that supports HTTP.</li></ul></div> </div> <script> test(function() { assert_equals(document.getElementById('box').offsetWidth, 100); }, " "); </script> </body> </html>
{ "pile_set_name": "Github" }
package com.insthub.ecmobile.protocol;

import java.util.ArrayList;

import org.json.JSONArray;
import org.json.JSONException;
import org.json.JSONObject;

import com.external.activeandroid.Model;
import com.external.activeandroid.annotation.Column;
import com.external.activeandroid.annotation.Table;

/**
 * Response model for the "cart create" API call. Wraps the status envelope
 * returned by the server and supports JSON (de)serialization.
 */
@Table(name = "cartcreateResponse")
public class cartcreateResponse extends Model {

    // Status envelope parsed from the "status" field of the server response.
    @Column(name = "status")
    public STATUS status;

    /**
     * Populate this response from a parsed JSON object. A null input is
     * ignored and leaves this object unchanged.
     *
     * @param jsonObject parsed server response, may be null
     * @throws JSONException if the embedded status payload is malformed
     */
    public void fromJson(JSONObject jsonObject) throws JSONException {
        if (null == jsonObject) {
            return;
        }
        // Use a distinct local name so the public field is not shadowed.
        STATUS parsedStatus = new STATUS();
        parsedStatus.fromJson(jsonObject.optJSONObject("status"));
        this.status = parsedStatus;
    }

    /**
     * Serialize this response back to a JSON object.
     *
     * @return a JSON object containing the "status" entry when one is set
     * @throws JSONException if serialization of the status fails
     */
    public JSONObject toJson() throws JSONException {
        JSONObject localItemObject = new JSONObject();
        if (null != status) {
            localItemObject.put("status", status.toJson());
        }
        return localItemObject;
    }
}
{ "pile_set_name": "Github" }
<?php return array ( 'Master Campaign ID' => 'ID Maestro de Campaรฑa', 'Cost' => 'Costo', 'Launch Date' => 'Fecha de Lanzamiento', 'Email Template' => 'Plantilla para Correo Electrรณnico', 'All Campaigns' => 'Todas las Campaรฑas', 'Newsletters' => 'Boletines', 'Create Newsletter' => 'Crear Boletines', 'Web Tracker' => 'Web Tracker', 'Email List' => 'Lista de Email', 'The campaign has already been launched.' => 'La ya campaรฑa ha sido lanzada.', 'Contact List cannot be blank.' => 'Lista de contactos no puede estar en blanco.', 'Subject cannot be blank.' => 'El sujeto no puede estar en blanco.', 'The contact list is empty.' => 'La lista de contactos estรก vacรญa.', 'Launch Now' => 'Iniciar Ahora', 'Campaign launched' => 'La Campaรฑa se ha puesto en marcha', 'Campaign paused' => 'La Campaรฑa se ha detenido', 'Campaign resumed' => 'La Campaรฑa ha reanudado', 'The email sending limit has been reached.' => 'El lรญmite de envรญo de correo electrรณnico se ha alcanzado.', 'There is no campaign email to send.' => 'No hay ninguna campaรฑa de envรญo de correo electrรณnico.', 'Successful email sent' => 'Correo electrรณnico enviado con รฉxito', 'No email sent.' => 'Ningรบn correo electrรณnico fue enviado.', 'Campaign complete.' 
=> 'Campaรฑa completada.', 'Email These Contacts' => 'Enviar Correo Electrรณnico a estos contactos', 'Email Entire List' => 'Enviar Correo Electrรณnico a la Lista', 'Invalid tag value' => 'Valor de la etiqueta no vรกlido', 'Contacts for tag' => 'Contactos para la etiqueta', 'Mailing for tag' => 'Correo para la etiqueta', 'Could not create mailing' => 'No se pudo crear correo', 'You have been unsubscribed' => 'Usted ha sido dado de baja', 'Contact has unsubscribed' => 'El contacto se ha dado de baja', '\'Do Not Email\' has been set' => '\'No Enviar\' ha sido establecido', 'has unsubscribed' => 'se ha dado de baja', 'Contact has opened the email' => 'El contacto ha abierto el correo electrรณnico', 'has opened the email' => 'ha abierto el correo electrรณnico', 'Contact has clicked a link' => 'El contacto ha hecho clic en un enlace', 'has clicked a link' => 'ha hecho clic en un enlace', 'To stop receiving these messages, click here' => 'Para dejar de recibir estos mensajes, haga clic aquรญ', 'CSS' => 'CSS', 'Name cannot be blank.' => 'El nombre no puede estar en blanco.', 'Name cannot be' => 'El nombre no puede ser', 'Web Lead Form' => 'Formulario de Leads Web', 'Copy and paste this code into your website to include the web lead form.' => 'Copia y pega este cรณdigo en tu pรกgina web para incluir el formulario de Prospectos vรญa web.', 'Choose an existing form as a starting point.' => 'Elige un formulario existente como punto de partida.', 'Text Color' => 'Color del Texto', 'Font' => 'Tipo de Letra', 'Border' => 'Bordes', 'Size' => 'Tamaรฑo', 'pixels' => 'pixeles', 'black' => 'negro', 'transparent' => 'transparente', 'none' => 'ninguno', 'Enter a name and save this form to edit later.' => 'Escriba un nombre y guardar este formulario para editar mรกs tarde.', 'Form Saved' => 'Formulario Guardado', 'Reset Form' => 'Restablecer el Formulario', 'Embed Code' => 'Cรณdigo para Insertar', 'Saved Forms' => 'Formas Guardadas', 'Enter custom css for the web form.' 
=> 'Introduzca css personalizado para el formulario web.', 'Custom &lt;HEAD&gt;' => '&lt;HEAD&gt; Personalizada', 'Enter any HTML you would like inserted into the &lt;HEAD&gt; tag.' => 'Ingrese cualquier cรณdigo HTML que desea insertar en la etiqueta &lt;HEAD&gt;.', 'Weblead Email' => 'Weblead Email', 'Drag and Drop fields from Fields List to Form.' => 'Arrastrar y soltar los campos de la lista Campos de formulario.', 'Live web form preview.' => 'Vivir formulario web de vista previa.', 'Position:' => 'Posiciรณn:', 'Web Tracker Code' => 'Cรณdigo Web Rastreador', 'Paste this code into the body section of every page of your website.' => 'Pega este cรณdigo en la secciรณn del cuerpo de cada pรกgina de su sitio web.', 'Web Tracker Settings' => 'Ajustes del perseguidor Web', 'Turn it down all the way to receive notifications about every page hit.' => 'Gire hacia abajo todo el camino para recibir notificaciones acerca de cada visita a la pรกgina.', 'Contact has opened the email sent on ' => 'Contacto ha abierto el correo electrรณnico enviado el', 'Header Code' => 'Cรณdigo de la cabecera', 'A new web lead has been assigned to you: ' => 'Una nueva iniciativa web ha sido asignada a usted:', 'If you want to keep your current HTML forms but still get web leads into X2, please see the wiki article located here: {link}' => 'Si quieres mantener tus formularios HTML actuales, pero todavรญa se les web conduce a X2, por favor consulte el artรญculo wiki encuentra aquรญ: {link}', 'Web Lead API' => 'Web API de plomo', 'Custom HTML could not be removed.' => 'HTML personalizado no puede ser eliminado.', 'Custom HTML could not be saved.' 
=> 'HTML personalizado no se pudo guardar.', 'Type:' => 'Tipo:', 'Value:' => 'Valor:', 'Anonymous Contact {email}' => 'Anรณnimo Contactar {email}', 'X2Identity Settings' => 'Ajustes X2Identity', 'Disable Slider' => 'Desactivar Control deslizante', 'Import Campaigns' => 'Campaรฑas de importaciรณn', 'Export Campaigns' => 'Exportar campaรฑas', 'Email Delivery Progress' => 'Email Progreso Entrega', 'Click to refresh displays of campaign data on this page.' => 'Click para refrescar las pantallas de datos de la campaรฑa en esta pรกgina.', 'Errors that occurred when attempting to send email:' => 'Los errores que se produjeron al intentar enviar correo electrรณnico:', 'You have unsent mail in this campaign. Are you sure you want to forcibly mark this campaign as complete?' => 'Tiene correo no enviado en esta campaรฑa. ยฟSeguro que quieres marcar la fuerza de esta campaรฑa tan completa?', 'Please try again in {t} {units}.' => 'Por favor, intรฉntelo de nuevo en {t} {units} .', 'Skipping {email}; another concurrent send operation is handling delivery to this address.' => 'Saltarse {email} ; otra operaciรณn de envรญo concurrente es el manejo de la entrega a esta direcciรณn.', 'Skipping delivery for recipient {id}; email address has been set to blank.' => 'Saltarse la entrega para el destinatario {id} ; direcciรณn de correo electrรณnico se ha establecido en blanco.', 'Skipping {email}; the contact has unsubscribed.' => 'Saltarse {email} ; el contacto ha dado de baja.', 'Email has already been sent to {address}' => 'Correo electrรณnico ya ha sido enviada a {address}', 'Email sent successfully to {address}.' => 'Correo enviado satisfactoriamente a {address} .', 'Email could not be sent to {address}. The message given was: {message}' => 'El correo electrรณnico no podrรญa ser enviado a {address} . El mensaje que se dio fue: {message}', 'All emails sent.' => 'Todos los correos electrรณnicos enviados.', 'Batch timeout limit reached.' 
=> 'Tiempo lรญmite de espera de lote alcanza.', 'Failed to save temporary list.' => 'Error al guardar la lista provisional.', 'Specified campaign does not exist.' => 'Campaรฑa especificado no existe.', 'Delete Form' => 'Eliminar Form', 'NOTE: The web lead form must be saved for these emails to be sent.' => 'NOTA: La forma de plomo web debe ser guardado para este tipo de mensajes que se envรญan.', 'NOTE: The web lead form must be saved for these tags to be applied.' => 'NOTA: La forma de plomo web debe ser guardado para las siguientes etiquetas que han de aplicarse.', 'Are you sure you want to remove this email address from the list?' => 'ยฟSeguro que quieres eliminar esta direcciรณn de correo electrรณnico de la lista?', 'The {contact} you enter here will be used for variable replacement, ' => 'El {contact} que introduzca aquรญ se utilizarรก para el reemplazo variables,', 'Campaign Name:' => 'Nombre de la campaรฑa:', 'Contact List:' => 'Lista de contactos:', 'Choose a contact list to send the campaign out to, or create one here.' => 'Seleccione una lista de contactos para enviar la campaรฑa a, o crear una aquรญ.', 'Email Template:' => 'Plantilla de correo electrรณnico:', 'Choose a email template to use for this campaign, or create a custom one here.' => 'Elija una plantilla de correo electrรณnico que se utilizarรก para esta campaรฑa, o crear una costumbre aquรญ.', 'Save Email As Template' => 'Guardar Correo Plantilla', 'If you want to track {contacts} on a website with a domain that is different from the domain on which X2 is hosted, you\'ll have to configure your {link}.' => 'Si desea realizar un seguimiento {contacts} en un sitio web con un dominio que es diferente del dominio en el que estรก alojado X2, que \\ &#39;ll tiene que configurar su {link}', 'This HTML tag will allow you to track {contacts} who visit your website.' 
=> 'Esta etiqueta HTML permitirรก realizar un seguimiento de {contacts} que visitan su sitio web.', 'You can enable or disable the web tracker. The tracker will ignore repeat hits from a given {contact} during the cooldown period.' => 'Puede activar o desactivar el rastreador web. El rastreador ignorarรก repetir รฉxitos de un determinado {contact} durante el perรญodo de tiempo de reutilizaciรณn.', 'If a {contact} visits several pages in a short time, you will only get one notification.' => 'Si un {contact} visita varias pรกginas en poco tiempo, sรณlo obtendrรก una sola notificaciรณn.', 'X2Identity uses browser fingerprinting as an alternative method to the traditional web tracker. Browser fingerprinting is not reliant on cookies and instead identifies a {contact} based on certain browser attributes that make them unique. ' => 'X2Identity utiliza la huella dactilar del navegador como un mรฉtodo alternativo a la web tracker tradicional. Fingerprinting navegador no depende de las cookies y en lugar identifica un {contact} basado en ciertos atributos del navegador que las hacen รบnicas.', 'If browser fingerprinting is enabled, the tracker will attempt to identify a {contact} based on their browser settings. Note that this is probabilistic by nature, and will not always be completely accurate.' => 'Si las huellas dactilares navegador estรก activada, el rastreador intentarรก identificar un {contact} basado en su configuraciรณn del navegador. Tenga en cuenta que esto es probabilรญstica por naturaleza, y no siempre sean del todo correctos.', 'You can set the threshold for the minimum number of attributes that must be equal in order to constitute a partial match. It is recommended to keep higher values to prevent false positives. You can also tune the maximum number of anonymous contacts and actions associated with them in order to limit flooding to the database. The fingerprint tracker will obey the same cooldown period as the web tracker.' 
=> 'Puede establecer el umbral para el nรบmero mรญnimo de atributos que debe ser igual para constituir una coincidencia parcial. Se recomienda mantener los valores mรกs altos para evitar falsos positivos. Tambiรฉn puede ajustar el nรบmero mรกximo de contactos anรณnimos y acciones asociadas con ellos con el fin de limitar las inundaciones a la base de datos. El rastreador de huellas digitales obedecerรก el mismo perรญodo de tiempo de reutilizaciรณn como el web tracker.', 'Anonymous contacts are created upon submission of the newsletter form, or when a user who has not yet been associated with a fingerprint visits a page embedded with the web tracker. Once these anonymous contacts submit the web lead form, their lead score and activity history will be migrated over to an ordinary {contact} record.' => 'Contactos anรณnimos se crean mediante la presentaciรณn del formulario de boletรญn de noticias, o cuando un usuario que aรบn no se ha asociado con una huella dactilar visita una pรกgina incrustada con el rastreador web. Una vez que estos contactos anรณnimos envรญen el formulario plomo web, su puntuaciรณn de plomo y el historial de actividades se migrarรกn a un ordinario {contact} rรฉcord.', 'Resolve hostnames into IP addresses. This incurs a slight penalty while performing DNS resolution, and it may be preferrable to disable hostname lookups for performance reasons.' => 'Resolver nombres de host en direcciones IP. Esto incurre en una leve pena mientras se realiza la resoluciรณn de DNS, y puede ser preferible desactivar las bรบsquedas por nombre por razones de rendimiento.', 'Email Delivery Complete' => 'Email completa Entrega', 'Could not send email due to an error in the request to the server.' 
=> 'No se pudo enviar correo electrรณnico debido a un error en la peticiรณn al servidor.', 'You do no have permission to perform this action' => 'Usted no tiene permiso para realizar esta acciรณn', 'Generate Records' => 'Generar Registros', 'Select email templates to send to the new web lead and the {user} assigned to the web lead.' => 'Seleccione plantillas de correo electrรณnico para enviar a la nueva iniciativa web y el {user} asignado a la delantera web.', '{user} Email' => '{user} Email', 'Add Tags' => 'Adicionar etiquetas', 'These tags will be applied to any {contact} created by the form.' => 'Estas etiquetas se pueden aplicar a cualquier {contact} creado por la forma.', 'Additional Settings' => 'Ajustes adicionales', 'Style' => 'Estilo', 'Custom HTML cannot be added to the web form until it has been saved.' => 'Custom HTML no se puede aรฑadir a la forma de la tela hasta que se ha guardado.', 'HTML cannot be empty.' => 'HTML no puede estar vacรญo.', 'HTML saved' => 'Salvado HTML', 'HTML removed' => 'Eliminado HTML', 'Create a public form to receive new {module}.' => 'Crear una forma pรบblica para recibir nueva {module}', 'If no lead routing has been configured, all new {module} will be assigned to "Anyone".' => 'Si no hay enrutamiento plomo se ha configurado, todo nuevo {module} serรก asignado a &quot;Cualquiera&quot;.', 'The web form must be saved for custom fields to get included. Changes made to the custom fields will not be reflected in the preview until the web form is saved.' => 'La forma de la tela debe ser guardado para campos personalizados para quedar incluidos. Los cambios realizados en los campos personalizados no se reflejarรกn en la vista previa hasta que se guarda el formulario web.', 'The web form must be saved for your custom fields or custom HTML to get included. Changes made to the custom fields or custom HTML will not be reflected in the preview until the web form is saved.' 
=> 'El formulario de la web se debe guardar para sus campos personalizados o HTML personalizado para ser incluidos. Los cambios realizados en los campos personalizados o HTML personalizado no se reflejarรกn en la vista previa hasta que se guarda el formulario web.', '{Contact} has clicked a link' => '{Contact} ha hecho clic en un enlace', '{contacts} for {account} Report ({number})' => '{contacts} para {account} Informe {number}', 'Mailing for {account} Report ({number})' => 'Mailing para {account} Informe {number}', 'Enable redirect links?' => 'Habilitar enlaces de redirecciรณn?', 'Test email sent successfully to {address}.' => 'Email de prueba enviado correctamente a {address}', 'Test email sent could not be sent to {address}.' => 'Email de prueba enviado no se pudo enviar a {address}', 'Welcome to the Campaign Creator!' => 'Bienvenido al Creador Campaรฑa!', 'With campaigns, you can send out personalized emails to contacts, and track when they have been opened. Let\'s walk through the creation of your first Campaign.' => 'Con campaรฑas, usted puede enviar correos electrรณnicos personalizados a los contactos, y realizar un seguimiento cuando se han abierto. Que \\ &#39;s paseo a travรฉs de la creaciรณn de su primera campaรฑa.', 'Insert the name of the campaign you will be running, for internal usage.' => 'Introduzca el nombre de la campaรฑa que va a correr, para uso interno.', 'Select a contact list to send this campaign to, or create one here.' => 'Seleccione una lista de contactos para enviar esta campaรฑa, o crear una aquรญ.', 'Next, choose a template as your email, or design one from scratch below.' => 'A continuaciรณn, elija una plantilla como su correo electrรณnico, o el diseรฑo desde cero abajo.', 'Enable Redirect Links will allow you to track when links have been clicked. Any link in the body of your email will be converted to a special tracking link.' 
=> 'Habilitar redireccionamiento Enlaces permitirรก realizar un seguimiento cuando se han hecho clic en los enlaces. Cualquier enlace en el cuerpo de su correo electrรณnico se convertirรก en un enlace especial seguimiento.', 'Select a Contact List' => 'Seleccione una lista de contactos', 'Tracking Key' => 'Clave de seguimiento', 'Anonymous' => 'Anรณnimo', 'Plugins' => 'Plugins', 'User Agent' => 'Agente de usuario', 'Screen Resolution' => 'Resoluciรณn de la pantalla', 'Timezone Offset' => 'Zona horaria Offset', 'Cookies Enabled' => 'Cookies habilitadas', 'IndexedDB Supoprt' => 'IndexedDB Supoprt', 'IE addBehavior Support' => 'IE de la ayuda addBehavior', 'Java Enabled' => 'Java habilitado', 'HTML5 Canvas Fingerprint' => 'HTML5 Canvas huella digital', 'Local Storage Support' => 'Soporte de almacenamiento local', 'Session Storage Support' => 'Soporte de almacenamiento de sesiรณn', 'Fonts Checksum' => 'Fuentes Checksum', '<b>Note:</b> {contacts} can be tracked only if they filled out the web lead capture form, or clicked on a tracking link in an email campaign.' => '<b>Nota:</b> {contacts} se puede seguir sรณlo si llenan el formulario de captura de plomo web, o hace clic en un vรญnculo de seguimiento en una campaรฑa de correo electrรณnico.', 'Enable Fingerprinting' => 'Habilitar Huellas Digitales', 'Perform Hostname Lookups' => 'Realizar bรบsquedas de nombre de mรกquinas', 'Identity Threshold' => 'Umbral de Identidad', 'Max Anon Contacts' => 'Max Anon Contactos', 'Max Anon Actions' => 'Max Anon acciones', 'Generate HTML & Save' => 'Generar HTML y Guardar', 'Redirect URL' => 'Redireccionar URL', 'Enter a URL which the form will redirect to upon submission.' 
=> 'Introduzca una URL que el formulario se redirigirรก a previa presentaciรณn.', '{recordType} has opened the email sent on ' => '{recordType} ha abierto el correo electrรณnico enviado en', 'When this is enabled, all links in the email template will be replaced with links that will track when they have been clicked.' => 'Cuando esta opciรณn estรก activada, todos los eslabones de la plantilla de correo electrรณnico serรกn reemplazados por enlaces que harรก un seguimiento cuando se han hecho clic.', 'The {contact} you enter here will be used for variable replacement, i.e. for "John Doe" the token {firstName} will get replaced with "John"' => 'El {contact} que introduzca aquรญ se utilizarรก para el reemplazo variables, es decir, para &quot;John Doe&quot; el token {firstName} lograrรก sustituido por &quot;John&quot;', 'Generate and export the web tracker JavaScript. This can be uploaded to your site in place of the standard web tracker embed code, which is useful when using tracking under SSL. Please note: the code that is generated is specific to your X2CRM installation.' => 'Generar y exportar la web tracker JavaScript. Esto puede ser subido a su sitio en lugar del cรณdigo web tracker de inserciรณn estรกndar, lo cual es รบtil cuando se utiliza el seguimiento de bajo SSL. Tenga en cuenta: el cรณdigo que se genera es especรญfica para su instalaciรณn X2CRM.', 'Open Rate' => 'Rango abierto', 'Click Rate' => 'Tarifa de clics', 'Unsubscribe Rate' => 'Anular la suscripciรณn', 'This campaign is ready to launch!' => 'Esta campaรฑa estรก lista para lanzar!', 'Opened At' => 'Abierto en', 'This will filter notifications for AnonContact web activity without affecting the total number of notifications. These can be reenabled at any time to reveal past web activity visits in your notifications.' => 'Esto filtrarรก las notificaciones de la actividad web de AnonContact sin afectar el nรบmero total de notificaciones. 
ร‰stos se pueden volver a habilitar en cualquier momento para revelar las visitas pasadas de actividad web en sus notificaciones.', 'Campaign is scheduled to launch at ' => 'La campaรฑa estรก programada para', 'Validate' => 'Validar', 'X2Identity duplicate detection' => 'Detecciรณn de duplicados X2Identity', 'Configure whether duplicate detection will be performed using the lead\'s fingerprint. This setting should be disabled for any form used from a single device to capture leads.' => 'Configure si la detecciรณn de duplicados se realizarรก utilizando la huella digital del plomo. Esta configuraciรณn debe desactivarse para cualquier forma utilizada desde un รบnico dispositivo para capturar cables.', 'Customize Thank You Text' => 'Modifique para requisitos particulares gracias el texto', 'Welcome to the Web Tracker configuration!' => 'ยกBienvenido a la configuraciรณn de Web Tracker!', 'The web tracker allows you to log visitor activity and interactions on your website. This page provides various configuration options to customize the behavior of your web tracker.' => 'El rastreador web le permite registrar la actividad de los visitantes y las interacciones en su sitio web. Esta pรกgina ofrece varias opciones de configuraciรณn para personalizar el comportamiento de su web tracker.', 'This text box provides the HTML code to embed on your website.' => 'Este cuadro de texto proporciona el cรณdigo HTML para incrustar en su sitio web.', 'You can instead export your web tracker JavaScript and upload it to your website. This is useful in certain cases to simplify web tracker setup under SSL.' => 'En su lugar, puede exportar su JavaScript de seguimiento web y subirlo a su sitio web. Esto es รบtil en ciertos casos para simplificar la configuraciรณn de seguimiento web en SSL.', 'Here you can configure the web tracker cooldown. A visitor will only have their web activity logged once within this cooldown period.' 
=> 'Aquรญ puede configurar el tiempo de reutilizaciรณn del rastreador web. Un visitante sรณlo tendrรก su actividad web registrada una vez dentro de este perรญodo de tiempo de reutilizaciรณn.', 'This option allows you to configure whether geolocation will be performed for your visitors.' => 'Esta opciรณn le permite configurar si la geolocalizaciรณn se realizarรก para sus visitantes.', 'This option allows you to configure whether browser fingerprinting will be performed to attempt to match your visitor.' => 'Esta opciรณn le permite configurar si la huella digital del navegador se realizarรก para intentar igualar a su visitante.', 'You can also configure the minimum number of browser attributes required to match a visitor. The higher this value is, the more accurate your partial matches will be.' => 'Tambiรฉn puede configurar el nรบmero mรญnimo de atributos de navegador necesarios para que coincida con un visitante. Cuanto mรกs alto sea este valor, mรกs exactas serรกn sus coincidencias parciales.', 'Suppressed' => 'Suprimido', 'Suppression List' => 'Lista de supresiรณn', 'Suppression List:' => 'Lista de supresiรณn:', 'Skipping email sending to {address}. This contact is skipped as it is present in suppressed List.' => 'Omitir el envรญo de correo electrรณnico a {address}. Este contacto se omite ya que estรก presente en la lista suprimida.', 'Select a Suppression List' => 'Seleccione una lista de supresiรณn', 'New Suppression List' => 'Nueva lista de supresiรณn', 'Choose a Suppression list to avoid sending the campaign email, or create one here.' => 'Elija una lista de Supresiรณn para evitar enviar el correo electrรณnico de la campaรฑa, o cree uno aquรญ.', );
{ "pile_set_name": "Github" }
#Maintained by: RehabMan for: Laptop Patches #graphics_PNLF_haswell.txt # This patch enables the brightness slider in SysPrefs->Displays # and will also enable activation of sleep mode by closing # the laptop lid. # # This particular version is intended to be used with ACPIBacklight.kext # although it can also be used with AppleBacklight.kext, # provided AppleBacklight.kext is patched to recognize your # display or an injector is used to set brightness levels. # # See this thread for more information: # http://www.tonymacx86.com/hp-probook-mavericks/121031-native-brightness-working-without-blinkscreen-using-patched-applebacklight-kext.html # (also read any linked threads) # # Note: This patch should be applied to the DSDT or SSDT that defines # your integrated graphics device (always at _ADR 0x00020000) # # This patch works for both Haswell and Broadwell graphics # into_all device label PNLF remove_entry; into_all device name_adr 0x00020000 code_regex (OperationRegion\s\(RMPC,\sPCI_Config[^\}]*\}) remove_matched; into device name_adr 0x00020000 insert begin OperationRegion (RMPC, PCI_Config, 0x10, 4)\n Field (RMPC, AnyAcc, NoLock, Preserve)\n {\n BAR1,32,\n }\n Device (PNLF)\n {\n // normal PNLF declares (note some of this probably not necessary)\n Name (_ADR, Zero)\n Name (_HID, EisaId ("APP0002"))\n Name (_CID, "backlight")\n Name (_UID, 15)\n Name (_STA, 0x0B)\n //define hardware register access for brightness\n // lower nibble of BAR1 is status bits and not part of the address\n OperationRegion (BRIT, SystemMemory, And(^BAR1, Not(0xF)), 0xe1184)\n Field (BRIT, AnyAcc, Lock, Preserve)\n {\n Offset(0x48250),\n LEV2, 32,\n LEVL, 32,\n Offset(0x70040),\n P0BL, 32,\n Offset(0xc8250),\n LEVW, 32,\n LEVX, 32,\n Offset(0xe1180),\n PCHL, 32,\n }\n // LMAX: use 0xad9/0x56c/0x5db to force OS X value\n // or use any arbitrary value\n // or use 0 to capture BIOS setting\n Name (LMAX, 0xad9)\n // KMAX: defines the unscaled range in the _BCL table below\n Name (KMAX, 0xad9)\n 
// _INI deals with differences between native setting and desired\n Method (_INI, 0, NotSerialized)\n {\n // This 0xC value comes from looking what OS X initializes this\n // register to after display sleep (using ACPIDebug/ACPIPoller)\n Store(0xC0000000, LEVW)\n // determine LMAX to use\n If (LNot(LMAX)) { Store(ShiftRight(LEVX,16), LMAX) }\n If (LNot(LMAX)) { Store(KMAX, LMAX) }\n If (LNotEqual(LMAX, KMAX))\n {\n // Scale all the values in _BCL to the PWM max in use\n Store(0, Local0)\n While (LLess(Local0, SizeOf(_BCL)))\n {\n Store(DerefOf(Index(_BCL,Local0)), Local1)\n Divide(Multiply(Local1,LMAX), KMAX,, Local1)\n Store(Local1, Index(_BCL,Local0))\n Increment(Local0)\n }\n // Also scale XRGL and XRGH values\n Divide(Multiply(XRGL,LMAX), KMAX,, XRGL)\n Divide(Multiply(XRGH,LMAX), KMAX,, XRGH)\n }\n // adjust values to desired LMAX\n Store(ShiftRight(LEVX,16), Local1)\n If (LNotEqual(Local1, LMAX))\n {\n Store(And(LEVX,0xFFFF), Local0)\n If (LOr(LNot(Local0),LNot(Local1))) { Store(LMAX, Local0) Store(LMAX, Local1) }\n Divide(Multiply(Local0,LMAX), Local1,, Local0)\n //REVIEW: wait for vblank before setting new PWM config\n //Store(P0BL, Local7)\n //While (LEqual (P0BL, Local7)) {}\n Store(Or(Local0,ShiftLeft(LMAX,16)), LEVX)\n }\n }\n // _BCM/_BQC: set/get for brightness level\n Method (_BCM, 1, NotSerialized)\n {\n // store new backlight level\n Store(Match(_BCL, MGE, Arg0, MTR, 0, 2), Local0)\n If (LEqual(Local0, Ones)) { Subtract(SizeOf(_BCL), 1, Local0) }\n Store(Or(DerefOf(Index(_BCL,Local0)),ShiftLeft(LMAX,16)), LEVX)\n }\n Method (_BQC, 0, NotSerialized)\n {\n Store(Match(_BCL, MGE, And(LEVX, 0xFFFF), MTR, 0, 2), Local0)\n If (LEqual(Local0, Ones)) { Subtract(SizeOf(_BCL), 1, Local0) }\n Return(DerefOf(Index(_BCL, Local0)))\n }\n Method (_DOS, 1, NotSerialized)\n {\n // Note: Some systems have this defined in DSDT, so uncomment\n // the next line if that is the case.\n //External(^^_DOS, MethodObj)\n ^^_DOS(Arg0)\n }\n // extended _BCM/_BQC for setting 
"in between" levels\n Method (XBCM, 1, NotSerialized)\n {\n // store new backlight level\n If (LGreater(Arg0, XRGH)) { Store(XRGH, Arg0) }\n If (LAnd(Arg0, LLess(Arg0, XRGL))) { Store(XRGL, Arg0) }\n Store(Or(Arg0,ShiftLeft(LMAX,16)), LEVX)\n }\n Method (XBQC, 0, NotSerialized)\n {\n Store(And(LEVX,0xFFFF), Local0)\n If (LGreater(Local0, XRGH)) { Store(XRGH, Local0) }\n If (LAnd(Local0, LLess(Local0, XRGL))) { Store(XRGL, Local0) }\n Return(Local0)\n }\n // Set XOPT bit 0 to disable smooth transitions\n // Set XOPT bit 1 to wait for native BacklightHandler\n // Set XOPT bit 2 to force use of native BacklightHandler\n Name (XOPT, 0x02)\n // XRGL/XRGH: defines the valid range\n Name (XRGL, 25)\n Name (XRGH, 2777)\n // _BCL: returns list of valid brightness levels\n // first two entries describe ac/battery power levels\n Name (_BCL, Package()\n {\n 2777,\n 748,\n 0,\n 35, 39, 44, 50,\n 58, 67, 77, 88,\n 101, 115, 130, 147,\n 165, 184, 204, 226,\n 249, 273, 299, 326,\n 354, 383, 414, 446,\n 479, 514, 549, 587,\n 625, 665, 706, 748,\n 791, 836, 882, 930,\n 978, 1028, 1079, 1132,\n 1186, 1241, 1297, 1355,\n 1414, 1474, 1535, 1598,\n 1662, 1728, 1794, 1862,\n 1931, 2002, 2074, 2147,\n 2221, 2296, 2373, 2452,\n 2531, 2612, 2694, 2777,\n })\n }\n end;
{ "pile_set_name": "Github" }
function submit()
  % SUBMIT Entry point for submitting the Logistic Regression programming
  % exercise to the Coursera grader.
  %
  % Builds a configuration struct describing the assignment and its six
  % graded parts (each part maps a part id to the source file(s) it grades
  % and a human-readable name), then hands the struct to
  % submitWithConfiguration, which performs the actual upload.
  addpath('./lib');
  conf.assignmentSlug = 'logistic-regression';
  conf.itemName = 'Logistic Regression';
  % Each entry: { partId, { files graded }, part display name }
  conf.partArrays = { ...
    { ...
      '1', ...
      { 'sigmoid.m' }, ...
      'Sigmoid Function', ...
    }, ...
    { ...
      '2', ...
      { 'costFunction.m' }, ...
      'Logistic Regression Cost', ...
    }, ...
    { ...
      '3', ...
      { 'costFunction.m' }, ...
      'Logistic Regression Gradient', ...
    }, ...
    { ...
      '4', ...
      { 'predict.m' }, ...
      'Predict', ...
    }, ...
    { ...
      '5', ...
      { 'costFunctionReg.m' }, ...
      'Regularized Logistic Regression Cost', ...
    }, ...
    { ...
      '6', ...
      { 'costFunctionReg.m' }, ...
      'Regularized Logistic Regression Gradient', ...
    }, ...
  };
  % Callback the grader framework invokes to compute the answer string
  % for a given part id.
  conf.output = @output;
  submitWithConfiguration(conf);
end

function out = output(partId, auxstring)
  % OUTPUT Compute the grader answer string for one assignment part.
  %
  % partId    - part identifier as a char ('1'..'6'); selects which
  %             student-implemented function to evaluate.
  % auxstring - auxiliary data from the grader (unused here).
  % out       - space-separated string of values formatted to 5 decimals,
  %             compared server-side against the reference solution.

  % Random Test Cases
  % Deterministic pseudo-random fixtures: a 20x3 design matrix with an
  % intercept column and trig-based features, plus binary labels.
  X = [ones(20,1) (exp(1) * sin(1:1:20))' (exp(0.5) * cos(1:1:20))'];
  y = sin(X(:,1) + X(:,2)) > 0;
  % NOTE(review): char-array comparison (partId == '1') only works because
  % every part id here is a single character — confirm before adding parts.
  if partId == '1'
    out = sprintf('%0.5f ', sigmoid(X));
  elseif partId == '2'
    out = sprintf('%0.5f ', costFunction([0.25 0.5 -0.5]', X, y));
  elseif partId == '3'
    % Second output of costFunction is the gradient vector.
    [cost, grad] = costFunction([0.25 0.5 -0.5]', X, y);
    out = sprintf('%0.5f ', grad);
  elseif partId == '4'
    out = sprintf('%0.5f ', predict([0.25 0.5 -0.5]', X));
  elseif partId == '5'
    % 0.1 is the regularization strength (lambda) used by the grader.
    out = sprintf('%0.5f ', costFunctionReg([0.25 0.5 -0.5]', X, y, 0.1));
  elseif partId == '6'
    [cost, grad] = costFunctionReg([0.25 0.5 -0.5]', X, y, 0.1);
    out = sprintf('%0.5f ', grad);
  end
end
{ "pile_set_name": "Github" }
{ "npc": "NPC", "npcAchievementName": "<%= key %> NPC", "npcAchievementText": "Backed the Kickstarter project at the maximum level!", "welcomeTo": "Welcome to", "welcomeBack": "Welcome back!", "justin": "Justin", "justinIntroMessage1": "Hello there! You must be new here. My name is <strong>Justin</strong>, and I'll be your guide in Habitica.", "justinIntroMessage3": "Great! Now, what are you interested in working on throughout this journey?", "justinIntroMessageUsername": "Before we begin, letโ€™s figure out what to call you. Below youโ€™ll find a display name and username Iโ€™ve generated for you. After youโ€™ve picked a display name and username, weโ€™ll get started by creating an avatar!", "justinIntroMessageAppearance": "So how would you like to look? Donโ€™t worry, you can change this later.", "introTour": "Here we are! I've filled out some Tasks for you based on your interests, so you can get started right away. Click a Task to edit or add new Tasks to fit your routine!", "prev": "Prev", "next": "Next", "randomize": "Randomize", "mattBoch": "Matt Boch", "mattBochText1": "Welcome to the Stable! I'm Matt, the beast master. Starting at level 3, you will find eggs and potions to hatch pets with. When you hatch a pet in the Market, it will appear here! Click a pet's image to add it to your avatar. Feed them with the food you find after level 3, and they'll grow into hardy mounts.", "welcomeToTavern": "Welcome to The Tavern!", "sleepDescription": "Need a break? 
Check into Daniel's Inn to pause some of Habitica's more difficult game mechanics:", "sleepBullet1": "Missed Dailies won't damage you", "sleepBullet2": "Tasks won't lose streaks or decay in color", "sleepBullet3": "Bosses won't do damage for your missed Dailies", "sleepBullet4": "Your boss damage or collection Quest items will stay pending until check-out", "pauseDailies": "Pause Damage", "unpauseDailies": "Unpause Damage", "staffAndModerators": "Staff and Moderators", "communityGuidelinesIntro": "Habitica tries to create a welcoming environment for users of all ages and backgrounds, especially in public spaces like the Tavern. If you have any questions, please consult our <a href='/static/community-guidelines' target='_blank'>Community Guidelines</a>.", "acceptCommunityGuidelines": "I agree to follow the Community Guidelines", "worldBossEvent": "World Boss Event", "worldBossDescription": "World Boss Description", "welcomeMarketMobile": "Welcome to the Market! Buy hard-to-find eggs and potions! Come see what we have to offer.", "howManyToSell": "How many would you like to sell?", "yourBalance": "Your balance", "sell": "Sell", "buyNow": "Buy Now", "sortByNumber": "Number", "featuredItems": "Featured Items!", "hideLocked": "Hide locked", "hidePinned": "Hide pinned", "hideMissing": "Hide Missing", "amountExperience": "<%= amount %> Experience", "amountGold": "<%= amount %> Gold", "namedHatchingPotion": "<%= type %> Hatching Potion", "buyGems": "Kjรธp edelsteinar", "purchaseGems": "Kjรธpte edelsteinar", "items": "Items", "AZ": "A-Z", "sort": "Sort", "sortBy": "Sort By", "groupBy2": "Group By", "sortByName": "Name", "quantity": "Quantity", "cost": "Cost", "shops": "Shops", "custom": "Custom", "wishlist": "Wishlist", "wrongItemType": "The item type \"<%= type %>\" is not valid.", "wrongItemPath": "The item path \"<%= path %>\" is not valid.", "unpinnedItem": "You unpinned <%= item %>! 
It will no longer display in your Rewards column.", "cannotUnpinArmoirPotion": "The Health Potion and Enchanted Armoire cannot be unpinned.", "purchasedItem": "You bought <%= itemName %>", "ianTextMobile": "Can I interest you in some quest scrolls? Activate them to battle monsters with your Party!", "featuredQuests": "Featured Quests!", "cannotBuyItem": "You can't buy this item.", "mustPurchaseToSet": "Must purchase <%= val %> to set it on <%= key %>.", "typeRequired": "Type is required", "positiveAmountRequired": "Positive amount is required", "notAccteptedType": "Type must be in [eggs, hatchingPotions, premiumHatchingPotions, food, quests, gear]", "contentKeyNotFound": "Key not found for Content <%= type %>", "plusGem": "+<%= count %> Gem", "typeNotSellable": "Type is not sellable. Must be one of the following <%= acceptedTypes %>", "userItemsKeyNotFound": "Key not found for user.items <%= type %>", "userItemsNotEnough": "You do not have enough <%= type %>", "pathRequired": "Path string is required", "unlocked": "Items have been unlocked", "alreadyUnlocked": "Full set already unlocked.", "alreadyUnlockedPart": "Full set already partially unlocked.", "invalidQuantity": "Quantity to purchase must be a number.", "USD": "(USD)", "newStuff": "New Stuff by Bailey", "newBaileyUpdate": "New Bailey Update!", "tellMeLater": "Tell Me Later", "dismissAlert": "Fjern varsel", "donateText3": "Habitica is an open source project that depends on our users for support. The money you spend on gems helps us keep the servers running, maintain a small staff, develop new features, and provide incentives for our volunteer programmers. 
Thank you for your generosity!", "card": "Credit Card (using Stripe)", "paymentMethods": "Purchase using", "paymentSuccessful": "Your payment was successful!", "paymentYouReceived": "You received:", "paymentYouSentGems": "You sent <strong><%- name %></strong>:", "paymentYouSentSubscription": "You sent <strong><%- name %></strong> a <%= months %>-months Habitica subscription.", "paymentSubBilling": "Your subscription will be billed <strong>$<%= amount %></strong> every <strong><%= months %> months</strong>.", "success": "Success!", "classGear": "Class Gear", "classGearText": "Congratulations on choosing a class! I've added your new basic weapon to your inventory. Take a look below to equip it!", "autoAllocate": "Auto Allocate", "spells": "Skills", "skillsTitle": "Skills", "toDo": "Gjeremรฅl", "tourStatsPage": "This is your Stats page! Earn achievements by completing the listed tasks.", "tourTavernPage": "Welcome to the Tavern, an all-ages chat room! You can keep your Dailies from hurting you in case of illness or travel by clicking \"Pause Damage\". Come say hi!", "tourPartyPage": "Your Party will help you stay accountable. Invite friends to unlock a Quest Scroll!", "tourGuildsPage": "Guilds are common-interest chat groups created by the players, for the players. Browse through the list and join the Guilds that interest you. Be sure to check out the popular Habitica Help: Ask a Question guild, where anyone can ask questions about Habitica!", "tourChallengesPage": "Challenges are themed task lists created by users! Joining a Challenge will add its tasks to your account. Compete against other users to win Gem prizes!", "tourMarketPage": "Starting at Level 4, eggs and hatching potions drop randomly when you complete tasks. They appear here - use them to hatch pets! You can also buy items from the Market.", "tourHallPage": "Welcome to the Hall of Heroes, where open-source contributors to Habitica are honored. 
Whether through code, art, music, writing, or even just helpfulness, they have earned Gems, exclusive equipment, and prestigious titles. You can contribute to Habitica, too!", "tourPetsPage": "This is the Stable! After reaching level 3, you will gather pet eggs and hatching potions as you complete tasks. When you hatch a pet in the Market, it will appear here! Click a pet's image to add it to your avatar. Feed them with the food you find after level 3, and they'll grow into powerful mounts.", "tourMountsPage": "Once you've fed a pet enough food to turn it into a mount, it will appear here. Click a mount to saddle up!", "tourEquipmentPage": "This is where your Equipment is stored! Your Battle Gear affects your Stats. If you want to show different Equipment on your avatar without changing your Stats, click \"Enable Costume.\"", "equipmentAlreadyOwned": "You already own that piece of equipment", "tourOkay": "Okay!", "tourAwesome": "Awesome!", "tourSplendid": "Splendid!", "welcomeToHabit": "Welcome to Habitica!", "welcome1": "Create a basic avatar.", "welcome1notes": "This avatar will represent you as you progress.", "welcome2": "Set up your tasks.", "welcome2notes": "How well you do on your real-life tasks will control how well you do in the game!", "welcome3": "Progress in life and the game!", "welcome3notes": "As you improve your life, your avatar will level up and unlock pets, quests, equipment, and more!", "imReady": "Enter Habitica", "limitedOffer": "Available until <%= date %>" }
{ "pile_set_name": "Github" }
#-- copyright # OpenProject is an open source project management software. # Copyright (C) 2012-2020 the OpenProject GmbH # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License version 3. # # OpenProject is a fork of ChiliProject, which is a fork of Redmine. The copyright follows: # Copyright (C) 2006-2017 Jean-Philippe Lang # Copyright (C) 2010-2013 the ChiliProject Team # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. # # See docs/COPYRIGHT.rdoc for more details. #++ module API module V3 module Queries module GroupBys class QueryGroupBysAPI < ::API::OpenProjectAPI resource :group_bys do helpers do def convert_to_ar(attribute) ::API::Utilities::WpPropertyNameConverter.to_ar_name(attribute) end end after_validation do authorize(:view_work_packages, global: true, user: current_user) end route_param :id, type: String, regexp: /\A\w+\z/, desc: 'Group by ID' do get do ar_id = convert_to_ar(params[:id]).to_sym column = Query.groupable_columns.detect { |candidate| candidate.name == ar_id } if column ::API::V3::Queries::GroupBys::QueryGroupByRepresenter.new(column) else raise API::Errors::NotFound end end end end end end end end end
{ "pile_set_name": "Github" }
/* http://prismjs.com/download.html?themes=prism&languages=markup+css+clike+javascript+aspnet+bash+c+csharp+cpp+coffeescript+css-extras+erlang+fortran+git+go+haskell+ini+jade+java+latex+less+makefile+markdown+matlab+objectivec+pascal+perl+php+php-extras+python+r+rest+ruby+rust+sass+scss+scala+scheme+smalltalk+smarty+sql+stylus+swift+twig+wiki+yaml&plugins=line-numbers+show-language */ /** * prism.js default theme for JavaScript, CSS and HTML * Based on dabblet (http://dabblet.com) * @author Lea Verou */ code[class*="language-"], pre[class*="language-"] { color: black; text-shadow: 0 1px white; font-family: Consolas, Monaco, 'Andale Mono', 'Ubuntu Mono', monospace; direction: ltr; text-align: left; white-space: pre; word-spacing: normal; word-break: normal; line-height: 1.5; -moz-tab-size: 4; -o-tab-size: 4; tab-size: 4; -webkit-hyphens: none; -moz-hyphens: none; -ms-hyphens: none; hyphens: none; } pre[class*="language-"]::-moz-selection, pre[class*="language-"] ::-moz-selection, code[class*="language-"]::-moz-selection, code[class*="language-"] ::-moz-selection { text-shadow: none; background: #b3d4fc; } pre[class*="language-"]::selection, pre[class*="language-"] ::selection, code[class*="language-"]::selection, code[class*="language-"] ::selection { text-shadow: none; background: #b3d4fc; } @media print { code[class*="language-"], pre[class*="language-"] { text-shadow: none; } } /* Code blocks */ pre[class*="language-"] { padding: 1em; margin: .5em 0; overflow: auto; } :not(pre) > code[class*="language-"], pre[class*="language-"] { background: #f5f2f0; } /* Inline code */ :not(pre) > code[class*="language-"] { padding: .1em; border-radius: .3em; } .token.comment, .token.prolog, .token.doctype, .token.cdata { color: slategray; } .token.punctuation { color: #999; } .namespace { opacity: .7; } .token.property, .token.tag, .token.boolean, .token.number, .token.constant, .token.symbol, .token.deleted { color: #905; } .token.selector, .token.attr-name, .token.string, 
.token.char, .token.builtin, .token.inserted { color: #690; } .token.operator, .token.entity, .token.url, .language-css .token.string, .style .token.string { color: #a67f59; background: hsla(0, 0%, 100%, .5); } .token.atrule, .token.attr-value, .token.keyword { color: #07a; } .token.function { color: #DD4A68; } .token.regex, .token.important, .token.variable { color: #e90; } .token.important, .token.bold { font-weight: bold; } .token.italic { font-style: italic; } .token.entity { cursor: help; } pre.line-numbers { position: relative; padding-left: 3.8em; counter-reset: linenumber; } pre.line-numbers > code { position: relative; } .line-numbers .line-numbers-rows { position: absolute; pointer-events: none; top: 0; font-size: 100%; left: -3.8em; width: 3em; /* works for line-numbers below 1000 lines */ letter-spacing: -1px; border-right: 1px solid #999; -webkit-user-select: none; -moz-user-select: none; -ms-user-select: none; user-select: none; } .line-numbers-rows > span { pointer-events: none; display: block; counter-increment: linenumber; } .line-numbers-rows > span:before { content: counter(linenumber); color: #999; display: block; padding-right: 0.8em; text-align: right; } pre[class*='language-'] { position: relative; } pre[class*='language-'][data-language]::before { content: attr(data-language); color: black; background-color: #CFCFCF; display: inline-block; position: absolute; top: 0; right: 0; font-size: 0.9em; border-radius: 0 0 0 5px; padding: 0 0.5em; text-shadow: none; }
{ "pile_set_name": "Github" }
rem Build script: install the package, then render the Sphinx HTML docs.
python setup.py install
rem The two lines below (regenerating autodoc stubs) are intentionally
rem disabled; re-enable them if doc\source\generated becomes stale.
rem del doc\source\generated\*.rst
rem sphinx-autogen -o doc\source\generated doc\source\index.rst
rem Render the documentation from doc\source into doc\build as HTML.
sphinx-build -b html doc\source doc\build
{ "pile_set_name": "Github" }
/*
 *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#ifndef RTC_TOOLS_SIMPLE_COMMAND_LINE_PARSER_H_
#define RTC_TOOLS_SIMPLE_COMMAND_LINE_PARSER_H_

#include <map>
#include <string>
#include <vector>

#include "rtc_base/constructormagic.h"
#include "rtc_base/gtest_prod_util.h"

// This is a very basic command line parsing class. We pass the command line
// arguments and their number and the class forms a vector out of these. Then we
// should set up the flags - we provide a name and a string value and map these.
//
// Example use of this class:
// 1. Create a CommandLineParser object.
// 2. Configure the flags you want to support with SetFlag calls.
// 3. Call Init with your program's argc+argv parameters.
// 4. Parse the flags by calling ProcessFlags.
// 5. Get the values of the flags using GetFlag.

namespace webrtc {
namespace test {

class CommandLineParser {
 public:
  CommandLineParser();
  ~CommandLineParser();

  // Stores a copy of argv[1..argc-1] internally for later ProcessFlags().
  void Init(int argc, char** argv);

  // Prints the entered flags and their values (without --help).
  void PrintEnteredFlags();

  // Processes the vector of command line arguments and puts the value of each
  // flag in the corresponding map entry for this flag's name. We don't process
  // flags which haven't been defined in the map.
  void ProcessFlags();

  // Sets the usage message to be shown if we pass --help.
  void SetUsageMessage(std::string usage_message);

  // Prints the usage message.
  void PrintUsageMessage();

  // Set a flag into the map of flag names/values.
  // To set a boolean flag, use "false" as the default flag value.
  // The flag_name should not include the -- prefix.
  void SetFlag(std::string flag_name, std::string default_flag_value);

  // Gets a flag when provided a flag name (name is without the -- prefix).
  // Returns "" if the flag is unknown and "true"/"false" if the flag is a
  // boolean flag.
  std::string GetFlag(std::string flag_name);

 private:
  // The vector of passed command line arguments.
  std::vector<std::string> args_;
  // The map of the flag names/values.
  std::map<std::string, std::string> flags_;
  // The usage message.
  std::string usage_message_;

  // Returns whether the passed flag is standalone or not. By standalone we
  // understand e.g. --standalone (in contrast to --non_standalone=1).
  bool IsStandaloneFlag(std::string flag);

  // Checks whether the flag is in the format --flag_name=flag_value.
  // or just --flag_name.
  bool IsFlagWellFormed(std::string flag);

  // Extracts the flag name from the flag, i.e. return foo for --foo=bar.
  std::string GetCommandLineFlagName(std::string flag);

  // Extracts the flag value from the flag, i.e. return bar for --foo=bar.
  // If the flag has no value (i.e. no equals sign) an empty string is returned.
  std::string GetCommandLineFlagValue(std::string flag);

  // Test-only access to the private parsing helpers above.
  FRIEND_TEST_ALL_PREFIXES(CommandLineParserTest, IsStandaloneFlag);
  FRIEND_TEST_ALL_PREFIXES(CommandLineParserTest, IsFlagWellFormed);
  FRIEND_TEST_ALL_PREFIXES(CommandLineParserTest, GetCommandLineFlagName);
  FRIEND_TEST_ALL_PREFIXES(CommandLineParserTest, GetCommandLineFlagValue);

  RTC_DISALLOW_COPY_AND_ASSIGN(CommandLineParser);
};

}  // namespace test
}  // namespace webrtc

#endif  // RTC_TOOLS_SIMPLE_COMMAND_LINE_PARSER_H_
{ "pile_set_name": "Github" }
#!/bin/sh /etc/rc.common # Copyright (C) 2006-2011 OpenWrt.org START=10 STOP=98 uci_apply_defaults() { . /lib/functions/system.sh cd /etc/uci-defaults || return 0 files="$(ls)" [ -z "$files" ] && return 0 mkdir -p /tmp/.uci for file in $files; do ( . "./$(basename $file)" ) && rm -f "$file" done uci commit } boot() { [ -f /proc/mounts ] || /sbin/mount_root [ -f /proc/jffs2_bbc ] && echo "S" > /proc/jffs2_bbc [ -f /proc/net/vlan/config ] && vconfig set_name_type DEV_PLUS_VID_NO_PAD mkdir -p /var/run mkdir -p /var/log mkdir -p /var/lock mkdir -p /var/state mkdir -p /var/tmp mkdir -p /tmp/.uci chmod 0700 /tmp/.uci touch /var/log/wtmp touch /var/log/lastlog touch /tmp/resolv.conf.auto ln -sf /tmp/resolv.conf.auto /tmp/resolv.conf grep -q debugfs /proc/filesystems && /bin/mount -o noatime -t debugfs debugfs /sys/kernel/debug [ "$FAILSAFE" = "true" ] && touch /tmp/.failsafe /sbin/kmodloader # allow wifi modules time to settle sleep 1 /sbin/wifi detect > /tmp/wireless.tmp [ -s /tmp/wireless.tmp ] && { cat /tmp/wireless.tmp >> /etc/config/wireless } rm -f /tmp/wireless.tmp /bin/board_detect uci_apply_defaults # temporary hack until configd exists /sbin/reload_config # create /dev/root if it doesn't exist [ -e /dev/root -o -h /dev/root ] || { rootdev=$(awk 'BEGIN { RS=" "; FS="="; } $1 == "root" { print $2 }' < /proc/cmdline) [ -n "$rootdev" ] && ln -s "$rootdev" /dev/root } }
{ "pile_set_name": "Github" }
๏ปฟ/*
 Copyright (c) 2003-2014, CKSource - Frederico Knabben. All rights reserved.
 For licensing, see LICENSE.md or http://ckeditor.com/license
*/
// Korean ("ko") localization strings for the "uicolor" plugin.
// NOTE: the values below are user-facing UI text and must remain in Korean.
CKEDITOR.plugins.setLang("uicolor","ko",{title:"UI ์ƒ‰์ƒ ์„ ํƒ๊ธฐ",preview:"๋ฏธ๋ฆฌ๋ณด๊ธฐ",config:"์ด ๋ฌธ์ž์—ด์„ config.js ์— ๋ถ™์—ฌ๋„ฃ์œผ์„ธ์š”",predefined:"๋ฏธ๋ฆฌ ์ •์˜๋œ ์ƒ‰๊น”๋“ค"});
{ "pile_set_name": "Github" }
<?php
/**
 * Copyright © Magento, Inc. All rights reserved.
 * See COPYING.txt for license details.
 */
namespace Magento\Framework\View\Design\Fallback\Rule;

use Magento\Framework\ObjectManagerInterface;

/**
 * Factory that produces \Magento\Framework\View\Design\Fallback\Rule\Simple instances.
 */
class SimpleFactory
{
    /**
     * Object manager used to instantiate Simple fallback rules.
     *
     * @var ObjectManagerInterface
     */
    private $objectManager;

    /**
     * @param ObjectManagerInterface $objectManager
     */
    public function __construct(ObjectManagerInterface $objectManager)
    {
        $this->objectManager = $objectManager;
    }

    /**
     * Create rule instance
     *
     * Note: Simple lives in this same namespace, so Simple::class resolves to
     * the same FQCN the previous inline string produced.
     *
     * @param array $data
     * @return Simple
     */
    public function create(array $data = [])
    {
        return $this->objectManager->create(Simple::class, $data);
    }
}
{ "pile_set_name": "Github" }
//- bemto //- Copyright(c) 2012 Roman Komarov <kizu@kizu.ru> //- MIT Licensed //- Some global variables - var bemto_chain = [] - var bemto_chain_contexts = ['block'] //- Block mixin b(options) - var settings = get_bemto_settings() //- TODO: should we make it generic way for other settings too? if options && options.prefix !== undefined - settings.prefix = options.prefix //- Rewriting the class for elements and modifiers - var tag = options && options.tag || ( typeof options == 'string' ? options : '') - var isElement = options && options.isElement - var tagMetadata = options && options.metadata - var block_sets_context = false if attributes.class //- Creating and normalizing bemto classes - var bemto_classes = attributes.class if bemto_classes instanceof Array - bemto_classes = bemto_classes.join(' ') - bemto_classes = bemto_classes.split(' ') - var bemto_objects = [] - var is_first_object = true - var new_context = [] each klass, i in bemto_classes - var bemto_object = {} - var prev_object = bemto_objects[bemto_objects.length - 1] - var sets_context = false //- Catching the optional tag class if klass.match(/^[A-Z-]+[A-Z0-9-]?$/) - tag = klass.toLowerCase() - continue //- Use block as a context for the first class if we're at element if is_first_object && isElement - bemto_object['context'] = bemto_chain[bemto_chain.length - 1] //- If the class is a modifier, add it to the previous object - var modifier_class = klass.match(new RegExp('^(?!' + settings['element'] + '[A-Za-z0-9])' + settings['modifier'] + "(.+)$")) //- FIXME: `+b._mod._mod` โ€” raw classes should be treated as raw ones if modifier_class && prev_object && prev_object.name if !prev_object['modifiers'] - prev_object['modifiers'] = [] - prev_object.modifiers.push(modifier_class[1]) - continue //- Use block as a context for the following classes if we have element delimiter at the start - var element_class = klass.match(new RegExp('^(?!' 
+ settings['modifier'] + '[A-Za-z0-9])' + settings['element'] + "(.+)$")) if element_class - bemto_object['context'] = bemto_chain[bemto_chain.length - 1] - klass = element_class[1] //- Set custom context for nested items - var name_with_context = klass.match(new RegExp('^(.*[A-Za-z0-9])(?!' + settings['modifier'] + "$)" + settings['element'] + "$")) if name_with_context - klass = name_with_context[1] - bemto_object['is_context'] = true - sets_context = true - block_sets_context = true - isElement = false //- Apply the modifier from the classname if exist - var name_with_modifier = klass.match(new RegExp('^(.*?[A-Za-z0-9])(?!' + settings['element'] + '[A-Za-z0-9])' + settings['modifier'] + "(.+)$")) if name_with_modifier - klass = name_with_modifier[1] if !bemto_object['modifiers'] - bemto_object['modifiers'] = [] - bemto_object.modifiers.push(name_with_modifier[2]) - var found_prefix = '' - var prefix_regex_string = '()?' if settings.prefix - var prefix = settings.prefix if typeof prefix === 'string' - prefix = { '': prefix } - var prefix_regex_test = [] if prefix instanceof Object each value, key in prefix if typeof key === 'string' && key != '' && prefix_regex_test.indexOf(key) == -1 - prefix_regex_test.push(key) if typeof value === 'string' && value != '' && prefix_regex_test.indexOf(value) == -1 - prefix_regex_test.push(value) - prefix_regex_string = '(' + prefix_regex_test.join('|') + ')?' - var name_with_prefix = klass.match(new RegExp('^' + prefix_regex_string + "([A-Za-z0-9]+.*)$")) if name_with_prefix - klass = name_with_prefix[2] - found_prefix = name_with_prefix[1] || '' - found_prefix = prefix[found_prefix] if found_prefix === undefined || found_prefix === true - found_prefix = name_with_prefix[1] - bemto_object['prefix'] = (found_prefix || '').replace(/\-/g, '%DASH%').replace(/\_/g, '%UNDERSCORE%') if sets_context && klass.match(/^[a-zA-Z0-9]+.*/) - new_context.push(bemto_object.context ? 
(bemto_object.context + settings['element'] + klass) : (bemto_object.prefix + klass)) - bemto_object['name'] = klass - is_first_object = false if bemto_object.context && bemto_object.context.length > 1 each subcontext, i in bemto_object.context - var sub_object = clone(bemto_object) - sub_object['context'] = [subcontext] - bemto_objects.push(sub_object) else - bemto_objects.push(bemto_object) //- If no custom context is set, use the first proper object if !isElement && !new_context.length && bemto_objects[0] && bemto_objects[0].name && bemto_objects[0].name.match(/^[a-zA-Z0-9]+.*/) - bemto_objects[0]['is_context'] = true - new_context.push(bemto_objects[0].context ? (bemto_objects[0].context + settings['element'] + bemto_objects[0].name) : (bemto_objects[0].prefix + bemto_objects[0].name)) - block_sets_context = true if new_context.length //- Use only the block's name for context if we're at strict setting if settings.flat_elements each subcontext, i in new_context - var context_with_element = subcontext.match(new RegExp('^(.*?[A-Za-z0-9])(?!' + settings['modifier'] + '[A-Za-z0-9])' + settings['element'] + ".+$")) if context_with_element - new_context[i] = context_with_element[1] - bemto_chain[bemto_chain.length] = new_context //- Rendering the classes if bemto_objects.length - var new_classes = [] each bemto_object in bemto_objects if bemto_object.name - var start = bemto_object.prefix if bemto_object.context - start = bemto_object.context + settings.output_element - new_classes.push(start + bemto_object.name) if bemto_object.modifiers each modifier in bemto_object.modifiers - new_classes.push(start + bemto_object.name + settings.output_modifier + modifier) - var delimiter = settings.class_delimiter - delimiter = delimiter ? 
(' ' + delimiter + ' ') : ' ' - attributes.class = new_classes.join(delimiter).replace(/%DASH%/g, '-').replace(/%UNDERSCORE%/g, '_') else - attributes.class = undefined if block +bemto_tag(tag, tagMetadata)&attributes(attributes) block else +bemto_tag(tag, tagMetadata)&attributes(attributes) //- Closing actions (remove the current block from the chain) if !isElement && block_sets_context - bemto_chain = bemto_chain.splice(0,bemto_chain.length-1) - bemto_chain_contexts = bemto_chain_contexts.splice(0,bemto_chain_contexts.length-1) //- Element mixin e(options) if options && typeof options == 'string' - options = { 'tag': options } else - options = options || {} - options['isElement'] = true +b(options)&attributes(attributes): block
{ "pile_set_name": "Github" }
// NOTE(review): `fetch()` returns a pending Promise that is never awaited and
// has no rejection handler, so `foo` holds the Promise itself, not a response.
// If this is production code (rather than a lint/test fixture), the result
// should be awaited and errors handled — confirm intent before changing.
const foo = fetch('foo');
{ "pile_set_name": "Github" }
{ "cells": [ { "cell_type": "markdown", "metadata": {}, "source": [ "# Lab 02: Training with epochs -- exercise" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# For Google Colaboratory\n", "import sys, os\n", "if 'google.colab' in sys.modules:\n", " from google.colab import drive\n", " drive.mount('/content/gdrive')\n", " file_name = 'epoch_exercise.ipynb'\n", " import subprocess\n", " path_to_file = subprocess.check_output('find . -type f -name ' + str(file_name), shell=True).decode(\"utf-8\")\n", " print(path_to_file)\n", " path_to_file = path_to_file.replace(file_name,\"\").replace('\\n',\"\")\n", " os.chdir(path_to_file)\n", " !pwd" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "import torch\n", "import torch.nn as nn\n", "import torch.nn.functional as F\n", "import torch.optim as optim\n", "from random import randint\n", "import utils" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### Download the data and print the sizes" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "from utils import check_mnist_dataset_exists\n", "data_path=check_mnist_dataset_exists()\n", "\n", "train_data=torch.load(data_path+'mnist/train_data.pt')\n", "train_label=torch.load(data_path+'mnist/train_label.pt')\n", "test_data=torch.load(data_path+'mnist/test_data.pt')" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### Make a ONE layer net class. 
" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "class one_layer_net(nn.Module):\n", "\n", " def __init__(self, input_size, output_size):\n", " super(one_layer_net , self).__init__()\n", " self.linear_layer = nn.Linear( input_size, output_size , bias=False)\n", " \n", " def forward(self, x):\n", " scores = self.linear_layer(x)\n", " return scores" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### Build the net" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "net=one_layer_net(784,10)\n", "print(net)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### Choose the criterion, optimizer, batchsize, learning rate" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "criterion = nn.CrossEntropyLoss()\n", "\n", "optimizer=torch.optim.SGD( net.parameters() , lr=0.01 )\n", "\n", "bs=50" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "# You only have stuff to do in this cell" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### Do 15 passes through the training set" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "for # COMPLETE\n", " \n", " # COMPLETE\n", " \n", " for # COMPLETE\n", " \n", " optimizer.zero_grad()\n", " \n", " # COMPLETE\n", " # COMPLETE\n", " # COMPLETE\n", "\n", " inputs = minibatch_data.view(bs,784)\n", " \n", " inputs.requires_grad_()\n", "\n", " scores=net( inputs ) \n", "\n", " loss = criterion( scores , minibatch_label) \n", " \n", " loss.backward()\n", "\n", " optimizer.step()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### Choose image at random from the test set and see how good/bad are the predictions" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# choose a picture at random\n", "idx=randint(0, 10000-1)\n", "im=test_data[idx]\n", "\n", "# 
display the picture\n", "utils.show(im)\n", "\n", "# feed it to the net and display the confidence scores\n", "scores = net( im.view(1,784)) \n", "probs= F.softmax(scores, dim=1)\n", "utils.show_prob_mnist(probs)" ] } ], "metadata": { "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.6.8" } }, "nbformat": 4, "nbformat_minor": 2 }
{ "pile_set_name": "Github" }
//  Copyright (c) 2015-2020 Hartmut Kaiser
//  Copyright (c) 2015-2016 Thomas Heller
//
//  SPDX-License-Identifier: BSL-1.0
//  Distributed under the Boost Software License, Version 1.0. (See accompanying
//  file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)

#pragma once

#include <hpx/assert.hpp>
#include <hpx/modules/datastructures.hpp>
#include <hpx/modules/futures.hpp>
#include <hpx/serialization/detail/extra_archive_data.hpp>
#include <hpx/synchronization/spinlock.hpp>

#include <cstddef>
#include <mutex>
#include <type_traits>
#include <utility>

////////////////////////////////////////////////////////////////////////////////
namespace hpx { namespace serialization { namespace detail {

    // This class allows to register futures during serialization preprocessing
    // to ensure each future is ready before serializing it.
    //
    // State overview: done_ marks the end of a serialization pass,
    // num_futures_ counts the futures encountered during the pass, and
    // triggered_futures_ counts how many of them have become ready; promise_
    // is set exactly once, when both done_ holds and the two counters match.
    class preprocess_futures
    {
        using mutex_type = hpx::lcos::local::spinlock;

    public:
        preprocess_futures()
          : mtx_()
          , done_(false)
          , num_futures_(0)
          , triggered_futures_(0)
        {
        }

        // Move-construction transfers counters and the promise; rhs is left
        // in the 'done', zero-count state that its destructor asserts on.
        preprocess_futures(preprocess_futures&& rhs) noexcept
          : mtx_()
          , done_(rhs.done_)
          , num_futures_(rhs.num_futures_)
          , triggered_futures_(rhs.triggered_futures_)
          , promise_(std::move(rhs.promise_))
        {
            rhs.done_ = true;
            rhs.num_futures_ = 0;
            rhs.triggered_futures_ = 0;
        }

        ~preprocess_futures()
        {
            // no serialization pass may still be in flight at destruction
            HPX_ASSERT(done_);
            HPX_ASSERT(num_futures_ == 0);
            HPX_ASSERT(num_futures_ == triggered_futures_);
        }

        preprocess_futures& operator=(preprocess_futures&& rhs) noexcept
        {
            done_ = rhs.done_;
            num_futures_ = rhs.num_futures_;
            triggered_futures_ = rhs.triggered_futures_;
            promise_ = std::move(rhs.promise_);

            rhs.done_ = true;
            rhs.num_futures_ = 0;
            rhs.triggered_futures_ = 0;

            return *this;
        }

        // Invoked as the continuation of every awaited future (see
        // await_future below).
        //
        // hpx::lcos::local::promise<>::set_value() might need to acquire
        // a lock, as such, we check our triggering condition inside a
        // critical section and trigger the promise outside of it.
        void trigger()
        {
            bool set_value = false;

            {
                std::lock_guard<mutex_type> l(mtx_);
                ++triggered_futures_;

                // trigger the promise only after the whole serialization
                // operation is done and all futures have become ready
                set_value = (done_ && num_futures_ == triggered_futures_);
            }

            if (set_value)
            {
                promise_.set_value();
            }
        }

        // This is called during serialization of futures. It keeps track of
        // the number of futures encountered. It also attaches a continuation
        // to all futures which triggers this object and eventually invokes
        // the parcel send operation.
        void await_future(
            hpx::lcos::detail::future_data_refcnt_base& future_data,
            bool increment_count = true)
        {
            {
                std::lock_guard<mutex_type> l(mtx_);
                if (num_futures_ == 0)
                {
                    // first future of a new pass: the pass is live again
                    done_ = false;
                }
                if (increment_count)
                {
                    ++num_futures_;
                }
            }

            future_data.set_on_completed([this]() { this->trigger(); });
        }

        // Count one more pending future without attaching a continuation.
        void increment_future_count()
        {
            std::lock_guard<mutex_type> l(mtx_);
            if (num_futures_ == 0)
            {
                done_ = false;
            }
            ++num_futures_;
        }

        // Restore the initial state (including a fresh promise) so the object
        // can serve the next serialization pass.
        void reset()
        {
            std::lock_guard<mutex_type> l(mtx_);

            done_ = true;
            num_futures_ = 0;
            triggered_futures_ = 0;
            promise_ = hpx::lcos::local::promise<void>();
        }

        bool has_futures() const
        {
            std::lock_guard<mutex_type> l(mtx_);
            return num_futures_ > 0;
        }

        // This is called after the full serialization of a parcel. It attaches
        // the supplied function to be invoked as soon as all encountered
        // futures have become ready.
        template <typename F>
        void operator()(F f)
        {
            bool set_promise = false;
            hpx::future<void> fut = promise_.get_future();

            {
                std::lock_guard<mutex_type> l(mtx_);

                // trigger promise if all futures seen during serialization
                // have been made ready by now
                done_ = true;
                if (num_futures_ == triggered_futures_)
                {
                    set_promise = true;
                }
            }

            if (set_promise)
            {
                promise_.set_value();
            }

            // we don't call f directly to avoid possible stack overflow.
            auto& shared_state_ =
                hpx::traits::future_access<hpx::future<void>>::get_shared_state(
                    fut);
            shared_state_->set_on_completed([this, f = std::move(f)]() {
                reset();
                f();    // this invokes the next round of the fixed-point
                        // iteration
            });
        }

    private:
        mutable mutex_type mtx_;
        bool done_;                        // current pass fully serialized
        std::size_t num_futures_;          // futures seen in the current pass
        std::size_t triggered_futures_;    // futures that became ready
        hpx::lcos::local::promise<void> promise_;
    };

    // This is explicitly instantiated to ensure that the id is stable across
    // shared libraries. MSVC and gcc/clang require different handling of
    // exported explicitly instantiated templates.
#if defined(HPX_MSVC)
    extern template struct extra_archive_data_id_helper<preprocess_futures>;
#else
    extern template struct HPX_PARALLELISM_EXPORT
        extra_archive_data_id_helper<preprocess_futures>;
#endif
}}}    // namespace hpx::serialization::detail
{ "pile_set_name": "Github" }
##
### ---------------
### Create: Jianming Zeng
### Date: 2018-12-20 15:43:52
### Email: jmzeng1314@163.com
### Blog: http://www.bio-info-trainee.com/
### Forum: http://www.biotrainee.com/thread-1376-1-1.html
### CAFS/SUSTC/Eli Lilly/University of Macau
### Update Log: 2018-12-20 First version
### Update Log: 2019-09-10 based on R version 3.5.1 (2018-07-02)
### ---------------

rm(list = ls()) ## magic one-liner: clear the whole workspace
options(stringsAsFactors = F)
load(file = 'step1-output.Rdata')
table(group_list) # always sanity-check the data first
dat[1:4,1:4]
## The following steps are required preprocessing for the PCA plot; see the package manual.
dat=t(dat)# PCA expects samples as rows and probes as columns, so transpose
dat=as.data.frame(dat)# convert the matrix to a data.frame
dat=cbind(dat,group_list) # cbind appends the group labels as the last column
library("FactoMineR")# these two packages are needed for the PCA plot
library("factoextra")
# The variable group_list (index = 54676) is removed
# before PCA analysis
dat.pca <- PCA(dat[,-ncol(dat)], graph = FALSE)# the last column of dat is group_list, so run PCA on dat.pca, a matrix without the grouping column
fviz_pca_ind(dat.pca,
             geom.ind = "point", # show points only (but not "text")
             col.ind = dat$group_list, # color by groups
             # palette = c("#00AFBB", "#E7B800"),
             addEllipses = TRUE, # Concentration ellipses
             legend.title = "Groups"
)
ggsave('all_samples_PCA.png')

rm(list = ls()) ## magic one-liner: clear the whole workspace
load(file = 'step1-output.Rdata')
# Small aside: the per-row sd below is the slow step, so note how long it takes
dat[1:4,1:4]
cg=names(tail(sort(apply(dat,1,sd)),1000))# apply over rows ('1' = rows, '2' = columns): per-row sd, sorted ascending, keep the names of the 1000 largest
library(pheatmap)
pheatmap(dat[cg,],show_colnames =F,show_rownames = F)
# Subset the expression matrix to the rows of those 1000 most-variable genes
n=t(scale(t(dat[cg,]))) # 'scale' standardizes the log-ratio values per gene (row-wise z-scores)
n[n>2]=2
n[n< -2]= -2
n[1:4,1:4]
pheatmap(n,show_colnames =F,show_rownames = F)
ac=data.frame(g=group_list)
rownames(ac)=colnames(n) # give ac the column names of n, i.e. annotate every sample (probe column) with its group label ('noTNBC' or 'TNBC')
## TNBC shows some heterogeneity here; using it alone to subtype breast cancer for clinical treatment is still somewhat crude.
pheatmap(n,show_colnames =F,show_rownames = F,
         annotation_col=ac,filename = 'heatmap_top1000_sd.png')
{ "pile_set_name": "Github" }
/* this file is for functions for field arithmetic: scalar operations in
   GF(2^13) (type gf) and polynomial multiplication in GF((2^13)^96) */

#include "gf.h"

#include "params.h"

/* check if a == 0: returns GFMASK (all 13 ones) when a == 0 and 0 otherwise,
   without data-dependent branches */
gf PQCLEAN_MCELIECE460896F_AVX_gf_iszero(gf a) {
    uint32_t t = a;

    t -= 1;
    t >>= 19;    /* the borrow of (a - 1) reaches these bits only for a == 0 */

    return (gf) t;
}

/* field multiplication: carry-less schoolbook product of in0 and in1 followed
   by two folding passes that reduce modulo the GF(2^13) polynomial
   x^13 + x^4 + x^3 + x + 1 (hence the 9/10/12/13 shifts) */
gf PQCLEAN_MCELIECE460896F_AVX_gf_mul(gf in0, gf in1) {
    int i;

    uint64_t tmp;
    uint64_t t0;
    uint64_t t1;
    uint64_t t;

    t0 = in0;
    t1 = in1;

    /* one partial product per bit of t1 */
    tmp = t0 * (t1 & 1);

    for (i = 1; i < GFBITS; i++) {
        tmp ^= (t0 * (t1 & ((uint64_t)1 << i)));
    }

    /* fold the high part of the 25-bit product back down */
    t = tmp & 0x1FF0000;
    tmp ^= (t >> 9) ^ (t >> 10) ^ (t >> 12) ^ (t >> 13);

    /* second pass for the bits re-introduced by the first fold */
    t = tmp & 0x000E000;
    tmp ^= (t >> 9) ^ (t >> 10) ^ (t >> 12) ^ (t >> 13);

    return tmp & GFMASK;
}

/* square twice: computes in^4; squaring in GF(2)[x] spreads the coefficient
   bits apart (interleaving steps below), then the masked folds reduce */
static inline gf gf_sq2(gf in) {
    int i;

    const uint64_t B[] = {0x1111111111111111,
                          0x0303030303030303,
                          0x000F000F000F000F,
                          0x000000FF000000FF
                         };

    const uint64_t M[] = {0x0001FF0000000000,
                          0x000000FF80000000,
                          0x000000007FC00000,
                          0x00000000003FE000
                         };

    uint64_t x = in;
    uint64_t t;

    /* spread the 13 coefficient bits 4 positions apart (== squaring twice) */
    x = (x | (x << 24)) & B[3];
    x = (x | (x << 12)) & B[2];
    x = (x | (x << 6)) & B[1];
    x = (x | (x << 3)) & B[0];

    /* fold the high part back down, one 9-bit slice at a time */
    for (i = 0; i < 4; i++) {
        t = x & M[i];
        x ^= (t >> 9) ^ (t >> 10) ^ (t >> 12) ^ (t >> 13);
    }

    return x & GFMASK;
}

/* square and multiply: returns (in^2) * m */
static inline gf gf_sqmul(gf in, gf m) {
    int i;

    uint64_t x;
    uint64_t t0;
    uint64_t t1;
    uint64_t t;

    const uint64_t M[] = {0x0000001FF0000000,
                          0x000000000FF80000,
                          0x000000000007E000
                         };

    t0 = in;
    t1 = m;

    x = (t1 << 6) * (t0 & (1 << 6));

    t0 ^= (t0 << 7);

    /* multiply pairs of bits of t0 against t1 in parallel */
    x ^= (t1 * (t0 & (0x04001)));
    x ^= (t1 * (t0 & (0x08002))) << 1;
    x ^= (t1 * (t0 & (0x10004))) << 2;
    x ^= (t1 * (t0 & (0x20008))) << 3;
    x ^= (t1 * (t0 & (0x40010))) << 4;
    x ^= (t1 * (t0 & (0x80020))) << 5;

    /* reduce back into GF(2^13) */
    for (i = 0; i < 3; i++) {
        t = x & M[i];
        x ^= (t >> 9) ^ (t >> 10) ^ (t >> 12) ^ (t >> 13);
    }

    return x & GFMASK;
}

/* square twice and multiply: returns (in^4) * m */
static inline gf gf_sq2mul(gf in, gf m) {
    int i;

    uint64_t x;
    uint64_t t0;
    uint64_t t1;
    uint64_t t;

    const uint64_t M[] = {0x1FF0000000000000,
                          0x000FF80000000000,
                          0x000007FC00000000,
                          0x00000003FE000000,
                          0x0000000001FE0000,
                          0x000000000001E000
                         };

    t0 = in;
    t1 = m;

    x = (t1 << 18) * (t0 & (1 << 6));

    t0 ^= (t0 << 21);

    /* multiply pairs of bits of t0 against t1 in parallel */
    x ^= (t1 * (t0 & (0x010000001)));
    x ^= (t1 * (t0 & (0x020000002))) << 3;
    x ^= (t1 * (t0 & (0x040000004))) << 6;
    x ^= (t1 * (t0 & (0x080000008))) << 9;
    x ^= (t1 * (t0 & (0x100000010))) << 12;
    x ^= (t1 * (t0 & (0x200000020))) << 15;

    /* reduce back into GF(2^13) */
    for (i = 0; i < 6; i++) {
        t = x & M[i];
        x ^= (t >> 9) ^ (t >> 10) ^ (t >> 12) ^ (t >> 13);
    }

    return x & GFMASK;
}

/* return num/den: Fermat inversion of den via a fixed addition chain for the
   exponent 2^13 - 2, then one final multiply by num */
gf PQCLEAN_MCELIECE460896F_AVX_gf_frac(gf den, gf num) {
    gf tmp_11;
    gf tmp_1111;
    gf out;

    tmp_11 = gf_sqmul(den, den);    // ^11
    tmp_1111 = gf_sq2mul(tmp_11, tmp_11);    // ^1111
    out = gf_sq2(tmp_1111);
    out = gf_sq2mul(out, tmp_1111);    // ^11111111
    out = gf_sq2(out);
    out = gf_sq2mul(out, tmp_1111);    // ^111111111111

    return gf_sqmul(out, num);    // ^1111111111110 = ^-1
}

/* return 1/den */
gf PQCLEAN_MCELIECE460896F_AVX_gf_inv(gf in) {
    return PQCLEAN_MCELIECE460896F_AVX_gf_frac(in, ((gf) 1));
}

/* multiplication in GF((2^m)^t): schoolbook product of two degree-95
   polynomials over GF(2^13), then reduction modulo the field polynomial
   y^96 + 714*y^11 + 5296*y^5 + 728*y^4 + 5881 (coefficients in GF(2^13),
   visible in the fold offsets/constants below) */
void PQCLEAN_MCELIECE460896F_AVX_GF_mul(gf *out, const gf *in0, const gf *in1) {
    int i, j;

    gf prod[191];    /* 2*96 - 1 product coefficients */

    for (i = 0; i < 191; i++) {
        prod[i] = 0;
    }

    for (i = 0; i < 96; i++) {
        for (j = 0; j < 96; j++) {
            prod[i + j] ^= PQCLEAN_MCELIECE460896F_AVX_gf_mul(in0[i], in1[j]);
        }
    }

    /* reduce all coefficients of degree >= 96, highest first */
    for (i = 190; i >= 96; i--) {
        prod[i - 85] ^= PQCLEAN_MCELIECE460896F_AVX_gf_mul(prod[i], (gf) 714);
        prod[i - 91] ^= PQCLEAN_MCELIECE460896F_AVX_gf_mul(prod[i], (gf) 5296);
        prod[i - 92] ^= PQCLEAN_MCELIECE460896F_AVX_gf_mul(prod[i], (gf) 728);
        prod[i - 96] ^= PQCLEAN_MCELIECE460896F_AVX_gf_mul(prod[i], (gf) 5881);
    }

    for (i = 0; i < 96; i++) {
        out[i] = prod[i];
    }
}
{ "pile_set_name": "Github" }
<%@ page import="docker.registry.web.Setting" %> <!DOCTYPE html> <html> <head> <meta name="layout" content="main"> <g:set var="entityName" value="${message(code: 'setting.label', default: 'Setting')}" /> <title><g:message code="default.list.label" args="[entityName]" /></title> </head> <body> <a href="#list-setting" class="skip" tabindex="-1"><g:message code="default.link.skip.label" default="Skip to content&hellip;"/></a> <div class="nav" role="navigation"> <ul> <li><a class="home" href="${createLink(uri: '/')}"><g:message code="default.home.label"/></a></li> <li><g:link class="create" action="create"><g:message code="default.new.label" args="[entityName]" /></g:link></li> </ul> </div> <div id="list-setting" class="content scaffold-list" role="main"> <h1><g:message code="default.list.label" args="[entityName]" /></h1> <g:if test="${flash.message}"> <div class="message" role="status">${flash.message}</div> </g:if> <table> <thead> <tr> </tr> </thead> <tbody> <g:each in="${settingInstanceList}" status="i" var="settingInstance"> <tr class="${(i % 2) == 0 ? 'even' : 'odd'}"> </tr> </g:each> </tbody> </table> <div class="pagination"> <g:paginate total="${settingInstanceCount ?: 0}" /> </div> </div> </body> </html>
{ "pile_set_name": "Github" }
activate-noawait ldconfig
{ "pile_set_name": "Github" }
{ "assets" : [ { "filename" : "App Icon - App Store.imagestack", "idiom" : "tv", "role" : "primary-app-icon", "size" : "1280x768" }, { "filename" : "App Icon.imagestack", "idiom" : "tv", "role" : "primary-app-icon", "size" : "400x240" }, { "filename" : "Top Shelf Image Wide.imageset", "idiom" : "tv", "role" : "top-shelf-image-wide", "size" : "2320x720" }, { "filename" : "Top Shelf Image.imageset", "idiom" : "tv", "role" : "top-shelf-image", "size" : "1920x720" } ], "info" : { "author" : "xcode", "version" : 1 } }
{ "pile_set_name": "Github" }
{if $articlecontentlist|@count > 0} <li class="menu_articles"> <h2>Articles</h2> <ul> {foreach $articlecontentlist as $content} <li class="mmenu{if $menu.newwindow == "1"}_new{/if}"> <a {if $menu.newwindow == "1"}class="external" target="null"{/if} title="{$content->title}" href="{$smarty.const.WWW_TOP}/content/{$content->id}{$content->url}">{$content->title}</a> </li> {/foreach} </ul> </li> {/if}
{ "pile_set_name": "Github" }
<template>
  <div class="demo">
    <button @click="increment">count +1</button>
    <button @click="decrement">count -1</button>
    <button @click="resetCount">้‡็ฝฎ</button>
    <p>Count is : {{count}}</p>
    <p>countๅปถ่ฟŸไบ†1็ง’่ต‹ๅ€ผ</p>
    <p>ajaxๆŽฅๅฃ่ฟ”ๅ›ž{{testResult}}</p>
  </div>
</template>

<script type="text/babel">
  import { mapGetters, mapActions } from 'vuex'

  export default {
    title: 'Vuex storeๆ•ฐๆฎๅค„็†',
    // Server-side data prefetch hook.
    asyncData({ store, router }) {
      // dispatching the action returns a Promise the renderer can wait on
      return store.dispatch('loading')
    },
    computed: {
      // map store getters onto local computed properties
      ...mapGetters({
        count: 'getCount',
        testResult: 'getTestResult'
      })
    },
    methods: {
      // expose the store actions directly as component methods
      ...mapActions([
        'increment',
        'decrement',
        'resetCount'
      ])
    }
  }
</script>

<style lang="stylus" scoped>
  .demo {
    padding: 20px;
    font-size: 14px;
    text-align: center;
  }
</style>
{ "pile_set_name": "Github" }
<?xml version="1.0"?> <ZopeData> <record id="1" aka="AAAAAAAAAAE="> <pickle> <global name="Category" module="erp5.portal_type"/> </pickle> <pickle> <dictionary> <item> <key> <string>_count</string> </key> <value> <persistent> <string encoding="base64">AAAAAAAAAAI=</string> </persistent> </value> </item> <item> <key> <string>_mt_index</string> </key> <value> <persistent> <string encoding="base64">AAAAAAAAAAM=</string> </persistent> </value> </item> <item> <key> <string>_tree</string> </key> <value> <persistent> <string encoding="base64">AAAAAAAAAAQ=</string> </persistent> </value> </item> <item> <key> <string>categories</string> </key> <value> <tuple> <string>gap/fr/m14/2/22/224/2242</string> </tuple> </value> </item> <item> <key> <string>id</string> </key> <value> <string>2242</string> </value> </item> <item> <key> <string>portal_type</string> </key> <value> <string>Category</string> </value> </item> <item> <key> <string>title</string> </key> <value> <string>Construction sur sol d\'autrui - Immeubles de rapport</string> </value> </item> </dictionary> </pickle> </record> <record id="2" aka="AAAAAAAAAAI="> <pickle> <global name="Length" module="BTrees.Length"/> </pickle> <pickle> <int>0</int> </pickle> </record> <record id="3" aka="AAAAAAAAAAM="> <pickle> <global name="OOBTree" module="BTrees.OOBTree"/> </pickle> <pickle> <none/> </pickle> </record> <record id="4" aka="AAAAAAAAAAQ="> <pickle> <global name="OOBTree" module="BTrees.OOBTree"/> </pickle> <pickle> <none/> </pickle> </record> </ZopeData>
{ "pile_set_name": "Github" }
// Styles for the drawing board's color controls: the current-color swatch,
// the rainbow palette dropdown and the individual color picker cells.

// font-size/line-height are zeroed to remove inline-block whitespace gaps.
.drawing-board-control-colors {
	font-size: 0;
	line-height: 0;
}

// Currently selected color, shown as a clickable square swatch.
.drawing-board-control-colors-current {
	border: 1px solid #ccc;
	cursor: pointer;
	display: inline-block;
	width: $controls-height - 2px;
	height: $controls-height - 2px;
}

// Palette dropdown holding the rainbow rows; absolutely positioned under
// the controls and raised above the canvas.
.drawing-board-control-colors-rainbows {
	display: inline-block;
	margin-left: 5px;
	position: absolute;
	left: 0;
	top: $controls-dropdown-top;
	@extend %box;
	margin-left: 0;
	z-index: 100;
	width: 250px;
	height: auto;
	padding: 4px;
}

// One horizontal row of pickers inside the dropdown.
.drawing-board-control-colors-rainbow {
	height: 18px;
}

.drawing-board-control-colors-picker:first-child {
	margin-right: 5px;
}

// One selectable color cell inside a rainbow row.
.drawing-board-control-colors-picker {
	display: inline-block;
	width: 18px;
	height: 18px;
	cursor: pointer;
}

// White gets an explicit border so it stays visible on a white background.
.drawing-board-control-colors-picker[data-color="rgba(255, 255, 255, 1)"] {
	width: 16px;
	height: 17px;
	border: 1px solid #ccc;
	border-bottom: none;
}

.drawing-board-control-colors-picker:hover {
	width: 16px;
	height: 16px;
	border: 1px solid #555;
}
{ "pile_set_name": "Github" }
/* * Copyright (C) Narf Industries <info@narfindustries.com> * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included * in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*/ #include <libcgc.h> #include <stdint.h> #include "libc.h" #include "lift.h" #include "rider.h" #include "decider.h" #include "resort.h" /** * Add one rider to the chair * * @param c Chair * @param r Rider */ void chair_embark_rider(Chair *c, Rider *r) { rider_append(&c->riders, r); c->empty_seats--; } /** * Remove one rider from the chair * * @param c Chair * @return pointer to Rider or NULL if chair is empty */ Rider *chair_disembark_rider(Chair *c) { Rider *r = NULL; r = rider_pop(&c->riders); if (NULL != r) { c->empty_seats++; } return r; } /** * Create and initialize a chair using the given settings * * @param c Pointer to assign new chair * @param id ID for this chair * @param seatas Number of seats on this chair * @return SUCCESS on success, else -1 */ int32_t chair_new(Chair **c, uint32_t id, uint32_t seats) { if (0 == seats) return -1; Chair *new = calloc(sizeof(Chair)); MALLOC_OK(new); new->id = id; new->seats = seats; new->empty_seats = seats; new->embark = chair_embark_rider; new->disembark = chair_disembark_rider; *c = new; return SUCCESS; } /** * Reset chair to initial state. * * @param c Chair to reset * @param riders List of riders to store riders removed from chair */ void chair_reset(Chair *c, Rider **riders) { Rider *r = NULL; while (NULL != (r = c->disembark(c))) { rider_append(riders, r); } } /** * Reset all chair in a list. * * @param chairs List of chairs * @param riders List of riders to store riders removed from chair list */ void chair_reset_list(Chair *chairs, Rider **riders) { Chair *c = chairs; while (NULL != c) { chair_reset(c, riders); c = c->next; } } /** * Add new chair to list of chairs. * * @param chairs List of chairs * @param new New chair * @return pointer to new chair. 
*/ Chair *chair_append_new(Chair **chairs, Chair *new) { Chair *prev = *chairs; if (NULL == prev) { // this is the first chair *chairs = new; } else { // other chairs exist while (NULL != prev->next) { // find last chair prev = prev->next; } prev->next = new; } return new; } /** * Remove chair from a list of chairs * * @param chairs List of chairs * @return pointer to chair or NULL if list is empty */ Chair *chair_pop(Chair **chairs) { if (NULL == *chairs) { return NULL; } Chair *c = *chairs; *chairs = c->next; c->next = NULL; return c; } /** * Destroy one chair * * @param c Chair */ void chair_destroy_single(Chair **c, Rider **riders) { chair_reset(*c, riders); free(*c); *c = NULL; } /** * Destroy all chairs in a list * * @param chairs List of chairs */ void chair_destroy_list(Chair **chairs, Rider **riders) { Chair *c = NULL; while (NULL != *chairs) { c = chair_pop(chairs); chair_destroy_single(&c, riders); } } /** * Add riders to the lift queue * * @param l Lift * @param riders List of riders */ void lift_enqueue_riders(Lift *l, Rider **riders) { Rider *r = NULL; while (NULL != (r = rider_pop(riders))) { rider_append(&l->queue, r); } } /** * Remove riders from the lift queue and embark onto the next chair. * If enough riders in queue, fill the chair to capacity. * * @param l Lift * @return Number of riders that were embarked. 
*/ uint32_t lift_embark_riders(Lift *l) { uint32_t count = 0; Rider *r = NULL; Chair *c = l->c_embark; while (0 != c->empty_seats) { r = rider_pop(&l->queue); if (NULL == r) { break; } c->embark(c, r); l->rider_total++; count++; } if (NULL == c->next) { // last chair in list l->c_embark = l->chairs; } else { l->c_embark = c->next; } return count; } /** * Move all riders one step further through the lift * * @param l Lift */ void lift_one_step(Lift *l) { // unload c_disembark chair and increment to the next chair l->disembark(l); // load c_embark char and increment to the next chair l->embark(l); } /** * Disembark riders from the chair and add to the lift's end decider queue. * All riders on chair are disembarked. * * @param l Lift * @return Number of riders that were disembarked. */ uint32_t lift_disembark_riders(Lift *l) { uint32_t count = 0; Decider *d = get_decider_by_id(l->end_decider); if (NULL == d) { return count; } Rider *r = NULL; Chair *c = l->c_disembark; while (c->empty_seats < c->seats) { r = c->disembark(c); if (NULL == r) { break; } d->embark(d, r); count++; } if (NULL == c->next) { // last chair in list l->c_disembark = l->chairs; } else { l->c_disembark = c->next; } return count; } /** * Create and initialize a new lift * * @param l Pointer to store lift * @param settings Array of lift settings * @return lift's ID on success, else -1 */ int32_t lift_new(Lift **l, uint32_t settings[5]) { Lift *new = calloc(sizeof(Lift)); MALLOC_OK(new); new->id = settings[0]; new->start_decider = settings[1]; new->end_decider = settings[2]; new->chair_count = settings[3]; new->chair_capacity = settings[4]; new->embark = lift_embark_riders; new->step = lift_one_step; new->disembark = lift_disembark_riders; if ((new->chair_count == 0) || // at least 1 chair (new->start_decider == new->end_decider) || // different start/end ((new->chair_capacity != 2) && (new->chair_capacity != 4))) { // valid chair size/qty free(new); return -1; } Chair *c_new; // gen chairs // set 
embark and disembark chairs for (uint32_t i = 0; i < new->chair_count; i++) { chair_new(&c_new, i, new->chair_capacity); chair_append_new(&new->chairs, c_new); if (0 == i) { // first chair new->c_embark = c_new; } if (new->chair_count/2 == i) { // middle chair new->c_disembark = c_new; } } *l = new; return new->id; } /** * Reset lift to initial state (no riders on chairs or in queue). * * @param l Lift * @param riders List of riders to store riders removed from lift. */ void lift_reset(Lift *l, Rider **riders) { // empty all chairs chair_reset_list(l->chairs, riders); // remove riders from queue rider_append(riders, l->queue); l->queue = NULL; l->rider_total = 0; l->c_embark = l->chairs; l->c_disembark = l->chairs; for (uint32_t i = 0; i < l->chair_count; i++) { if (l->chair_count/2 == i) { // middle chair break; } l->c_disembark = l->c_disembark->next; } } /** * Destroy a lift and remove all riders from it * * @param l Lift * @param riders List to store riders into */ void lift_destroy(Lift **l, Rider **riders) { Lift *this = *l; // destroy all chairs and get the riders from them chair_destroy_list(&this->chairs, riders); // return all riders in queue rider_append(riders, this->queue); free(*l); *l = NULL; }
{ "pile_set_name": "Github" }
/**
 * Copyright (c) 2010-2020 Contributors to the openHAB project
 *
 * See the NOTICE file(s) distributed with this work for additional
 * information.
 *
 * This program and the accompanying materials are made available under the
 * terms of the Eclipse Public License 2.0 which is available at
 * http://www.eclipse.org/legal/epl-2.0
 *
 * SPDX-License-Identifier: EPL-2.0
 */
package org.openhab.core.items;

import java.util.List;

import org.openhab.core.types.Command;
import org.openhab.core.types.State;

/**
 * <p>
 * Core contract of an openHAB item.
 * </p>
 * <p>
 * Items are the stateful building blocks of the system and are in particular
 * what the {@link ItemRegistry} manages.
 * </p>
 *
 * @author Kai Kreuzer
 */
public interface Item {

    /**
     * Returns the current state of the item.
     *
     * @return the current state
     */
    public State getState();

    /**
     * Returns the current state of the item converted to a specific type.
     *
     * @return the current state in the requested type, or
     *         null if the state cannot be provided as the requested type
     */
    public State getStateAs(Class<? extends State> typeClass);

    /**
     * Returns the name of the item.
     *
     * @return the name of the item
     */
    public String getName();

    /**
     * <p>
     * Lists every data type that may be used to update this item's state.
     * </p>
     * <p>
     * Consider a dimmer device: its status could be 0%, 10%, 50%, 100%, but also OFF or ON and
     * maybe UNDEFINED, so the accepted data types would be {@link PercentType}, {@link OnOffType}
     * and {@link UnDefType}.
     * </p>
     *
     * @return a list of data types that can be used to update the item state
     */
    public List<Class<? extends State>> getAcceptedDataTypes();

    /**
     * <p>
     * Lists every command type that may be sent to this item.
     * </p>
     * <p>
     * Consider a dimmer device: it can be asked to dim to 0%, 10%, 50%, 100%, but
     * also to turn OFF or ON, so the accepted command types would be {@link PercentType}
     * and {@link OnOffType}.
     * </p>
     *
     * @return a list of all command types that can be used for this item
     */
    public List<Class<? extends Command>> getAcceptedCommandTypes();

    /**
     * Returns the names of the groups this item belongs to.
     *
     * @return list of item group names
     */
    public List<String> getGroupNames();
}
{ "pile_set_name": "Github" }
# # Copyright 2019 Xilinx, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # source settings.tcl set PROJ "arithm.prj" set SOLN "sol1" if {![info exists CLKP]} { set CLKP 3.3 } open_project -reset $PROJ add_files "${XF_PROJ_ROOT}/L1/examples/arithm/xf_arithm_accel.cpp" -cflags "-I${XF_PROJ_ROOT}/L1/include -I ./ -D__SDSVHLS__ -std=c++0x" -csimflags "-I${XF_PROJ_ROOT}/L1/include -I ./ -D__SDSVHLS__ -std=c++0x" add_files -tb "${XF_PROJ_ROOT}/L1/examples/arithm/xf_arithm_tb.cpp" -cflags "-I${OPENCV_INCLUDE} -I${XF_PROJ_ROOT}/L1/include -I ./ -D__SDSVHLS__ -std=c++0x" -csimflags "-I${XF_PROJ_ROOT}/L1/include -I ./ -D__SDSVHLS__ -std=c++0x" set_top arithm_accel open_solution -reset $SOLN set_part $XPART create_clock -period $CLKP if {$CSIM == 1} { csim_design -ldflags "-L ${OPENCV_LIB} -lopencv_imgcodecs -lopencv_imgproc -lopencv_core -lopencv_highgui -lopencv_flann -lopencv_features2d" -argv " ${XF_PROJ_ROOT}/data/128x128_1.png ${XF_PROJ_ROOT}/data/128x128_2.png " } if {$CSYNTH == 1} { csynth_design } if {$COSIM == 1} { cosim_design -ldflags "-L ${OPENCV_LIB} -lopencv_imgcodecs -lopencv_imgproc -lopencv_core -lopencv_highgui -lopencv_flann -lopencv_features2d" -argv " ${XF_PROJ_ROOT}/data/128x128_1.png ${XF_PROJ_ROOT}/data/128x128_2.png " } if {$VIVADO_SYN == 1} { export_design -flow syn -rtl verilog } if {$VIVADO_IMPL == 1} { export_design -flow impl -rtl verilog } exit
{ "pile_set_name": "Github" }
/****************************************************************************** * * * License Agreement * * * * Copyright (c) 2004 Altera Corporation, San Jose, California, USA. * * All rights reserved. * * * * Permission is hereby granted, free of charge, to any person obtaining a * * copy of this software and associated documentation files (the "Software"), * * to deal in the Software without restriction, including without limitation * * the rights to use, copy, modify, merge, publish, distribute, sublicense, * * and/or sell copies of the Software, and to permit persons to whom the * * Software is furnished to do so, subject to the following conditions: * * * * The above copyright notice and this permission notice shall be included in * * all copies or substantial portions of the Software. * * * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * * DEALINGS IN THE SOFTWARE. * * * * This agreement shall be governed in all respects by the laws of the State * * of California and by the laws of the United States of America. * * * * Altera does not recommend, suggest or require that this reference design * * file be used in conjunction or combination with any other product. * ******************************************************************************/ #include <unistd.h> #include "sys/alt_errno.h" #include "sys/alt_warning.h" #include "priv/alt_file.h" #include "os/alt_syscall.h" #ifdef ALT_USE_DIRECT_DRIVERS int ALT_CLOSE (int fildes) { /* Generate a link time warning, should this function ever be called. 
*/ ALT_STUB_WARNING(close); /* Indicate an error */ ALT_ERRNO = ENOSYS; return -1; } #else /* !ALT_USE_DIRECT_DRIVERS */ /* * close() is called by an application to release a file descriptor. If the * associated file system/device has a close() callback function registered * then this called. The file descriptor is then marked as free. * * ALT_CLOSE is mapped onto the close() system call in alt_syscall.h */ int ALT_CLOSE (int fildes) { alt_fd* fd; int rval; /* * A common error case is that when the file descriptor was created, the call * to open() failed resulting in a negative file descriptor. This is trapped * below so that we don't try and process an invalid file descriptor. */ fd = (fildes < 0) ? NULL : &alt_fd_list[fildes]; if (fd) { /* * If the associated file system/device has a close function, call it so * that any necessary cleanup code can run. */ rval = (fd->dev->close) ? fd->dev->close(fd) : 0; /* Free the file descriptor structure and return. */ alt_release_fd (fildes); if (rval < 0) { ALT_ERRNO = -rval; return -1; } return 0; } else { ALT_ERRNO = EBADFD; return -1; } } #endif /* ALT_USE_DIRECT_DRIVERS */
{ "pile_set_name": "Github" }
import * as path from 'path';

import { generatedDir } from './paths';
import { readTextFile } from './utils';

// Location of the generated enum metadata file.
const enumsPath = path.join(generatedDir, 'enums.json');

/** One entry of a generated enumeration. */
interface EnumItem {
    identifier: string;
    name: string;
    description: string;
}

/** Returns the generated "areaTypeItems" enumeration entries. */
export async function getAreaTypeItems() {
    return getGeneratedEnumData('areaTypeItems');
}

/**
 * Reads enums.json and returns the entry list stored under the given key.
 *
 * @param identifier - top-level key inside enums.json
 */
async function getGeneratedEnumData(identifier: string): Promise<EnumItem[]> {
    const fileContents = await readTextFile(enumsPath);
    const parsed = JSON.parse(fileContents);
    return parsed[identifier];
}
{ "pile_set_name": "Github" }
// Copyright (c) 2014-2019 Coin Sciences Ltd // MultiChain code distributed under the GPLv3 license, see COPYING file. #include "v8/callbacks.h" #include "v8/v8engine.h" #include "v8/v8json_spirit.h" #include "v8/v8utils.h" #include <cassert> namespace mc_v8 { /** * Call an RPC function from a V8 JS callback. * * Marshal the arguments and the return value between V8 and json_spirit using intermediate JSON strings. * Optionally filter the result before returning it to JS. * * @param name The name of the RPC function. * @param args The V8 arguments/return value. */ void CallRpcFunction(std::string name, const v8::FunctionCallbackInfo<v8::Value> &args) { v8::Isolate *isolate = args.GetIsolate(); v8::Locker locker(isolate); v8::Isolate::Scope isolateScope(isolate); v8::HandleScope handleScope(isolate); v8::Local<v8::Context> context(isolate->GetCurrentContext()); v8::Context::Scope contextScope(context); IFilterCallback *filterCallback = static_cast<IFilterCallback *>(args.Data().As<v8::External>()->Value()); json_spirit::Array params; for (int i = 0; i < args.Length(); ++i) { params.push_back(V82Jsp(isolate, args[i])); } json_spirit::Value result; filterCallback->JspCallback(name, params, result); if (result.is_null()) { args.GetReturnValue().SetUndefined(); } else { args.GetReturnValue().Set(Jsp2V8(isolate, result)); } } // clang-format off #define FILTER_FUNCTION(name) \ void filter_##name(const v8::FunctionCallbackInfo<v8::Value> &args) \ { \ CallRpcFunction(#name, args); \ } FILTER_FUNCTION(getfiltertxid) FILTER_FUNCTION(getfiltertransaction) FILTER_FUNCTION(getfilterstreamitem) FILTER_FUNCTION(getfilterassetbalances) FILTER_FUNCTION(setfilterparam) FILTER_FUNCTION(getfiltertxinput) FILTER_FUNCTION(getlastblockinfo) FILTER_FUNCTION(getassetinfo) FILTER_FUNCTION(getstreaminfo) FILTER_FUNCTION(verifypermission) FILTER_FUNCTION(verifymessage) #define FILTER_LOOKUP(name) { #name, filter_##name } std::map<std::string, v8::FunctionCallback> callbackLookup{ 
FILTER_LOOKUP(getfiltertxid), FILTER_LOOKUP(getfiltertransaction), FILTER_LOOKUP(getfilterstreamitem), FILTER_LOOKUP(getfilterassetbalances), FILTER_LOOKUP(setfilterparam), FILTER_LOOKUP(getfiltertxinput), FILTER_LOOKUP(getlastblockinfo), FILTER_LOOKUP(getassetinfo), FILTER_LOOKUP(getstreaminfo), FILTER_LOOKUP(verifypermission), FILTER_LOOKUP(verifymessage) }; // clang-format on } // namespace mc_v8
{ "pile_set_name": "Github" }
--- name: r-tidy-test vm: ip: "192.168.88.3" lang: - R: packages: tidyverse, xml, ggplot2
{ "pile_set_name": "Github" }
// Copyright Aleksey Gurtovoy 2001-2004 // // Distributed under the Boost Software License, Version 1.0. // (See accompanying file LICENSE_1_0.txt or copy at // http://www.boost.org/LICENSE_1_0.txt) // // Preprocessed version of "boost/mpl/aux_/template_arity.hpp" header // -- DO NOT modify by hand! namespace boost { namespace mpl { namespace aux { template< bool > struct template_arity_impl { template< typename F > struct result_ : mpl::int_< -1 > { }; }; template<> struct template_arity_impl<true> { template< typename F > struct result_ : F::arity { }; }; template< typename F > struct template_arity : template_arity_impl< ::boost::mpl::aux::has_rebind<F>::value > ::template result_<F> { }; }}}
{ "pile_set_name": "Github" }
# -*- coding: utf-8 -*-
from django.utils import timezone
from django.utils.translation import gettext as _

import plotly.offline as plotly
import plotly.graph_objs as go

from core.utils import duration_parts

from reports import utils


def sleep_totals(instances):
    """
    Create a graph showing total time sleeping for each day.

    :param instances: a QuerySet of Sleep instances.
    :returns: a tuple of the graph's html and javascript.
    """
    totals = {}
    for instance in instances:
        start = timezone.localtime(instance.start)
        end = timezone.localtime(instance.end)
        if start.date() not in totals.keys():
            totals[start.date()] = timezone.timedelta(seconds=0)
        if end.date() not in totals.keys():
            totals[end.date()] = timezone.timedelta(seconds=0)

        # Account for dates crossing midnight.
        if start.date() != end.date():
            # Split the duration exactly at midnight of the end date so no
            # time is lost (a 23:59:59 cut-off would drop one second, plus
            # any stray microseconds, per crossing).
            midnight = end.replace(hour=0, minute=0, second=0, microsecond=0)
            totals[start.date()] += midnight - start
            totals[end.date()] += end - midnight
        else:
            totals[start.date()] += instance.duration

    trace = go.Bar(
        name=_('Total sleep'),
        x=list(totals.keys()),
        # Use total_seconds() (not .seconds, which discards whole days) so
        # totals of 24 hours or more are not truncated.
        y=[td.total_seconds() / 3600 for td in totals.values()],
        hoverinfo='text',
        textposition='outside',
        text=[_duration_string_short(td) for td in totals.values()]
    )
    layout_args = utils.default_graph_layout_options()
    layout_args['barmode'] = 'stack'
    layout_args['title'] = _('<b>Sleep Totals</b>')
    layout_args['xaxis']['title'] = _('Date')
    layout_args['xaxis']['rangeselector'] = utils.rangeselector_date()
    layout_args['yaxis']['title'] = _('Hours of sleep')
    fig = go.Figure({
        'data': [trace],
        'layout': go.Layout(**layout_args)
    })
    output = plotly.plot(fig, output_type='div', include_plotlyjs=False)
    return utils.split_graph_output(output)


def _duration_string_short(duration):
    """
    Format a "short" duration string without seconds precision. This is
    intended to fit better in smaller spaces on a graph.

    :returns: a string of the form XhXm.
    """
    h, m, s = duration_parts(duration)
    return '{}h{}m'.format(h, m)
{ "pile_set_name": "Github" }
Model = require('../common/model').Model

class WaveImportData extends Model
  ###
  Source
  A wave being imported (from Google Wave).
  ###

  constructor: (@id=null, @userId=null, @sourceData=null, @lastUpdateTimestamp=null, @lastImportingTimestamp=null, @importedWaveId=null, @importedWaveUrl=null, @_rev=undefined, @participants=[], blipIds=null) ->
    ###
    @param id: string — the waveId from Google Wave
    @param userId: string — id of the user who requested the wave import
    @param sourceData: string — raw source of the wave being imported
    @param lastUpdateTimestamp: timestamp — time of the last import request for this wave
    @param lastImportingTimestamp: timestamp — time the wave was last imported into odessa
    @param importedWaveId: string — the corresponding WaveId in odessa
    @param importedWaveUrl: string — presumably the URL of the imported wave; not documented in the original — TODO confirm
    @param _rev — document revision (CouchDB-style); undocumented in the original
    @param participants: array — wave participants; undocumented in the original
    @param blipIds — accepted but not stored on the instance; undocumented in the original
    ###
    super('WaveImportData')

module.exports.WaveImportData = WaveImportData
{ "pile_set_name": "Github" }
%YAML 1.1 %TAG !u! tag:unity3d.com,2011: --- !u!114 &11400000 MonoBehaviour: m_ObjectHideFlags: 0 m_CorrespondingSourceObject: {fileID: 0} m_PrefabInstance: {fileID: 0} m_PrefabAsset: {fileID: 0} m_GameObject: {fileID: 0} m_Enabled: 1 m_EditorHideFlags: 0 m_Script: {fileID: 11500000, guid: e289857c00578db40a4afbed85afbd23, type: 3} m_Name: Floating Text Setting - Crit Healing m_EditorClassIdentifier: lifeTime: 1.2 randomOffset: 0.5 floatingSpeed: 1 fontSize: 4 fontColor: {r: 0.19607843, g: 0.5882353, b: 0, a: 1} fontMaterial: {fileID: 2100000, guid: 4325c17b748c74f47a0c307e533ddaf8, type: 2} sizeOverTime: serializedVersion: 2 m_Curve: - serializedVersion: 3 time: 0 value: 0.6302643 inSlope: 368.4868 outSlope: 368.4868 tangentMode: 34 weightedMode: 0 inWeight: 0 outWeight: 0.33333334 - serializedVersion: 3 time: 0.02 value: 8 inSlope: 368.4868 outSlope: -28.26087 tangentMode: 69 weightedMode: 0 inWeight: 0.33333334 outWeight: 1 - serializedVersion: 3 time: 0.25 value: 1.5 inSlope: -28.26087 outSlope: -0.23999996 tangentMode: 69 weightedMode: 0 inWeight: 1 outWeight: 1 - serializedVersion: 3 time: 1.5 value: 1.2 inSlope: -0.23999996 outSlope: -0.035180338 tangentMode: 69 weightedMode: 0 inWeight: 0.19715202 outWeight: 0 m_PreInfinity: 2 m_PostInfinity: 2 m_RotationOrder: 4 alphaOverTime: serializedVersion: 2 m_Curve: - serializedVersion: 3 time: 0 value: 1 inSlope: 0 outSlope: 0 tangentMode: 34 weightedMode: 0 inWeight: 0 outWeight: 0.33333334 - serializedVersion: 3 time: 0.6 value: 1 inSlope: 0.0013339786 outSlope: 0.0013339786 tangentMode: 0 weightedMode: 0 inWeight: 0.11429835 outWeight: 0.16852763 - serializedVersion: 3 time: 1.2 value: 0 inSlope: -1.6666666 outSlope: -1.6666666 tangentMode: 34 weightedMode: 0 inWeight: 0.33333334 outWeight: 0 m_PreInfinity: 2 m_PostInfinity: 2 m_RotationOrder: 4 sizeOverDistanceToCamera: serializedVersion: 2 m_Curve: - serializedVersion: 3 time: 0 value: 0.1 inSlope: 0.16333334 outSlope: 0.16333334 tangentMode: 34 weightedMode: 
0 inWeight: 0 outWeight: 0.33333334 - serializedVersion: 3 time: 30 value: 5 inSlope: 0.16333334 outSlope: 0.16333334 tangentMode: 34 weightedMode: 0 inWeight: 0.33333334 outWeight: 0 m_PreInfinity: 2 m_PostInfinity: 2 m_RotationOrder: 4 randomOffsetOverDistance: serializedVersion: 2 m_Curve: - serializedVersion: 3 time: 0 value: 0.2 inSlope: 0.28 outSlope: 0.28 tangentMode: 34 weightedMode: 0 inWeight: 0 outWeight: 0.33333334 - serializedVersion: 3 time: 10 value: 3 inSlope: 0.28 outSlope: 0.28 tangentMode: 34 weightedMode: 0 inWeight: 0.33333334 outWeight: 0 m_PreInfinity: 2 m_PostInfinity: 2 m_RotationOrder: 4
{ "pile_set_name": "Github" }
args @ { fetchurl, ... }: rec { baseName = ''optima''; version = ''20150709-git''; description = ''Optimized Pattern Matching Library''; deps = [ args."alexandria" args."closer-mop" ]; src = fetchurl { url = ''http://beta.quicklisp.org/archive/optima/2015-07-09/optima-20150709-git.tgz''; sha256 = ''0vqyqrnx2d8qwa2jlg9l2wn6vrykraj8a1ysz0gxxxnwpqc29hdc''; }; packageName = "optima"; asdFilesToKeep = ["optima.asd"]; overrides = x: x; } /* (SYSTEM optima DESCRIPTION Optimized Pattern Matching Library SHA256 0vqyqrnx2d8qwa2jlg9l2wn6vrykraj8a1ysz0gxxxnwpqc29hdc URL http://beta.quicklisp.org/archive/optima/2015-07-09/optima-20150709-git.tgz MD5 20523dc3dfc04bb2526008dff0842caa NAME optima FILENAME optima DEPS ((NAME alexandria FILENAME alexandria) (NAME closer-mop FILENAME closer-mop)) DEPENDENCIES (alexandria closer-mop) VERSION 20150709-git SIBLINGS (optima.ppcre optima.test) PARASITES NIL) */
{ "pile_set_name": "Github" }
package d2dat import ( "fmt" "github.com/OpenDiablo2/OpenDiablo2/d2common/d2interface" ) const ( numColors = 256 ) // DATPalette represents a 256 color palette. type DATPalette struct { colors [numColors]d2interface.Color } // NumColors returns the number of colors in the palette func (p *DATPalette) NumColors() int { return len(p.colors) } // GetColors returns the slice of colors in the palette func (p *DATPalette) GetColors() [numColors]d2interface.Color { return p.colors } // GetColor returns a color by index func (p *DATPalette) GetColor(idx int) (d2interface.Color, error) { if color := p.colors[idx]; color != nil { return color, nil } return nil, fmt.Errorf("cannot find color index '%d in palette'", idx) }
{ "pile_set_name": "Github" }
# UICollectionView-XYTemplateLayoutCell A custom templateLayoutCell for UICollectionView ## ้ข˜่ฎฐ ๅœจๆฃๆ‘ฉไบ†**forkingdog**็š„FDTemplateLayoutCellๅŽ๏ผŒ็ช็„ถๅ‘็ŽฐUICollectionViewๆฒกๆœ‰ไธ€ๅฅ—่ฎก็ฎ—cell้ซ˜ๅบฆ็š„ๆ–นๆณ•๏ผŒๆ‰€ไปฅไพ่‘ซ่Šฆ็”ป็“ข็”ปๅ‡บไบ†่ฟ™ไธช ## ็ฎ€ไป‹ Template auto layout cell for **automatically** UICollectionViewCell height calculating. ![Demo Overview](https://github.com/fifyrio/UICollectionView-XYTemplateLayoutCell/blob/master/Screenshots/screenshots.gif) ## ๅฆ‚ไฝ•ไฝฟ็”จ ####Use in UICollectionViewCell: * fixed width ``` objc #import "UICollectionView+XYTemplateLayoutCell.h" - (CGSize)collectionView:(UICollectionView *)collectionView layout:(UICollectionViewLayout*)collectionViewLayout sizeForItemAtIndexPath:(NSIndexPath *)indexPath{ return [collectionView xy_getCellSizeForIdentifier:@"your identifier" width:width config:^(id cell) { /*่ฎพ็ฝฎcell็š„ๆ•ฐๆฎ*/ }]; } ``` * fixed height ``` objc #import "UICollectionView+XYTemplateLayoutCell.h" - (CGSize)collectionView:(UICollectionView *)collectionView layout:(UICollectionViewLayout*)collectionViewLayout sizeForItemAtIndexPath:(NSIndexPath *)indexPath{ return [collectionView xy_getCellSizeForIdentifier:@"your identifier" height:height config:^(id cell) { /*่ฎพ็ฝฎcell็š„ๆ•ฐๆฎ*/ }]; } ``` * dynamic size ``` objc #import "UICollectionView+XYTemplateLayoutCell.h" - (CGSize)collectionView:(UICollectionView *)collectionView layout:(UICollectionViewLayout*)collectionViewLayout sizeForItemAtIndexPath:(NSIndexPath *)indexPath{ return [collectionView xy_getCellSizeForIdentifier:@"your identifier" config:^(id cell) { /*่ฎพ็ฝฎcell็š„ๆ•ฐๆฎ*/ }]; } ``` #### Use in UICollectionReusableView: * fixed width ``` objc #import "UICollectionView+XYTemplateReusableView.h" //่ฟ™้‡ŒไปฅHeaderไธบไพ‹ - (CGSize)collectionView:(UICollectionView *)collectionView layout:(UICollectionViewLayout*)collectionViewLayout referenceSizeForHeaderInSection:(NSInteger)section{ return [collectionView 
xy_getReusableViewSizeForIdentifier:@"your identifier" width:width config:^(id reusableView) { /*่ฎพ็ฝฎheader็š„ๆ•ฐๆฎ*/ }]; } ``` * fixed height ``` objc #import "UICollectionView+XYTemplateReusableView.h" //่ฟ™้‡ŒไปฅHeaderไธบไพ‹ - (CGSize)collectionView:(UICollectionView *)collectionView layout:(UICollectionViewLayout*)collectionViewLayout referenceSizeForHeaderInSection:(NSInteger)section{ return [collectionView xy_getReusableViewSizeForIdentifier:@"your identifier" height:height config:^(id reusableView) { /*่ฎพ็ฝฎheader็š„ๆ•ฐๆฎ*/ }]; } ``` * dynamic size ``` objc #import "UICollectionView+XYTemplateReusableView.h" //่ฟ™้‡ŒไปฅHeaderไธบไพ‹ - (CGSize)collectionView:(UICollectionView *)collectionView layout:(UICollectionViewLayout*)collectionViewLayout referenceSizeForHeaderInSection:(NSInteger)section{ return [collectionView xy_getReusableViewSizeForIdentifier:@"your identifier" config:^(id reusableView) { /*่ฎพ็ฝฎheader็š„ๆ•ฐๆฎ*/ }]; } ``` #### ็ผ“ๅญ˜cell็š„size็š„API ``` objc - (CGSize)collectionView:(UICollectionView *)collectionView layout:(UICollectionViewLayout*)collectionViewLayout sizeForItemAtIndexPath:(NSIndexPath *)indexPath{ return [collectionView xy_getCellSizeForIdentifier:@"your identifier" width:ScreenW cacheByIndexPath:indexPath config:^(id cell) { //config }]; } ``` #### ็ผ“ๅญ˜Header/Footer็š„size็š„API ``` objc - (CGSize)collectionView:(UICollectionView *)collectionView layout:(UICollectionViewLayout*)collectionViewLayout referenceSizeForHeaderInSection:(NSInteger)section{ return [collectionView xy_getReusableViewSizeForIdentifier:@"your identifier" width:width cacheBySection:section config:^(id reusableView) { //config }]; } ``` ## Release Versions * v1.1 add dynamic size * v1.0 support to caculate size for UICollectionView Cell
{ "pile_set_name": "Github" }
{ "name": "my-app", "version": "0.0.0", "license": "MIT", "scripts": { "ng": "ng", "start": "ng serve", "build": "ng build --prod", "test": "ng test", "lint": "ng lint", "e2e": "ng e2e" }, "private": true, "dependencies": { "@angular/animations": "6.1.6", "@angular/common": "6.1.6", "@angular/compiler": "6.1.6", "@angular/core": "6.1.6", "@angular/forms": "6.1.6", "@angular/http": "6.1.6", "@angular/platform-browser": "6.1.6", "@angular/platform-browser-dynamic": "6.1.6", "@angular/router": "6.1.6", "@ng-bootstrap/ng-bootstrap": "^3.2.0", "core-js": "^2.4.1", "npm": "^5.6.0", "rxjs": "^6.3.2", "zone.js": "^0.8.26" }, "devDependencies": { "@angular-devkit/build-angular": "~0.7.0", "@angular/cli": "^6.1.5", "@angular/compiler-cli": "6.1.6", "@angular/language-service": "6.1.6", "@types/jasmine": "~2.8.3", "@types/jasminewd2": "~2.0.2", "@types/node": "~6.0.60", "angular-ide": "^0.9.39", "codelyzer": "^4.0.1", "jasmine-core": "~2.8.0", "jasmine-spec-reporter": "~4.2.1", "karma": "~2.0.0", "karma-chrome-launcher": "~2.2.0", "karma-coverage-istanbul-reporter": "^1.2.1", "karma-jasmine": "~1.1.0", "karma-jasmine-html-reporter": "^0.2.2", "protractor": "~5.1.2", "ts-node": "~4.1.0", "tslint": "~5.9.1", "typescript": "2.9.2" } }
{ "pile_set_name": "Github" }
// Copyright (c) 2012 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "net/cert/cert_verify_proc_win.h" #include <algorithm> #include <memory> #include <string> #include <vector> #include "base/memory/free_deleter.h" #include "base/metrics/histogram_macros.h" #include "base/stl_util.h" #include "base/strings/string_util.h" #include "base/strings/utf_string_conversions.h" #include "base/threading/thread_local.h" #include "crypto/capi_util.h" #include "crypto/scoped_capi_types.h" #include "crypto/sha2.h" #include "net/base/net_errors.h" #include "net/cert/asn1_util.h" #include "net/cert/cert_status_flags.h" #include "net/cert/cert_verifier.h" #include "net/cert/cert_verify_result.h" #include "net/cert/crl_set.h" #include "net/cert/ev_root_ca_metadata.h" #include "net/cert/known_roots.h" #include "net/cert/known_roots_win.h" #include "net/cert/test_root_certs.h" #include "net/cert/x509_certificate.h" #include "net/cert/x509_util_win.h" #if !defined(CERT_TRUST_HAS_WEAK_SIGNATURE) // This was introduced in Windows 8 / Windows Server 2012, but retroactively // ported as far back as Windows XP via system update. 
#define CERT_TRUST_HAS_WEAK_SIGNATURE 0x00100000
#endif

namespace net {

namespace {

// Deleter functor for HCERTCHAINENGINE handles; safe to call with NULL.
struct FreeChainEngineFunctor {
  void operator()(HCERTCHAINENGINE engine) const {
    if (engine)
      CertFreeCertificateChainEngine(engine);
  }
};

// Deleter functor for PCCERT_CHAIN_CONTEXT chains; safe to call with NULL.
struct FreeCertChainContextFunctor {
  void operator()(PCCERT_CHAIN_CONTEXT chain_context) const {
    if (chain_context)
      CertFreeCertificateChain(chain_context);
  }
};

typedef crypto::ScopedCAPIHandle<HCERTCHAINENGINE, FreeChainEngineFunctor>
    ScopedHCERTCHAINENGINE;

typedef std::unique_ptr<const CERT_CHAIN_CONTEXT, FreeCertChainContextFunctor>
    ScopedPCCERT_CHAIN_CONTEXT;

//-----------------------------------------------------------------------------

// Maps a Windows security error (Schannel SEC_E_* or CryptoAPI CERT_E_* /
// CRYPT_E_* / TRUST_E_*) to the corresponding net error code. Unknown codes
// fall through to ERR_FAILED with a warning.
int MapSecurityError(SECURITY_STATUS err) {
  // There are numerous security error codes, but these are the ones we thus
  // far find interesting.
  switch (err) {
    case SEC_E_WRONG_PRINCIPAL:   // Schannel
    case CERT_E_CN_NO_MATCH:      // CryptoAPI
      return ERR_CERT_COMMON_NAME_INVALID;
    case SEC_E_UNTRUSTED_ROOT:    // Schannel
    case CERT_E_UNTRUSTEDROOT:    // CryptoAPI
    case TRUST_E_CERT_SIGNATURE:  // CryptoAPI. Caused by weak crypto or bad
                                  // signatures, but not differentiable.
      return ERR_CERT_AUTHORITY_INVALID;
    case SEC_E_CERT_EXPIRED:      // Schannel
    case CERT_E_EXPIRED:          // CryptoAPI
      return ERR_CERT_DATE_INVALID;
    case CRYPT_E_NO_REVOCATION_CHECK:
      return ERR_CERT_NO_REVOCATION_MECHANISM;
    case CRYPT_E_REVOCATION_OFFLINE:
      return ERR_CERT_UNABLE_TO_CHECK_REVOCATION;
    case CRYPT_E_REVOKED:         // Schannel and CryptoAPI
      return ERR_CERT_REVOKED;
    case SEC_E_CERT_UNKNOWN:
    case CERT_E_ROLE:
      return ERR_CERT_INVALID;
    case CERT_E_WRONG_USAGE:
      // TODO(wtc): Should we add ERR_CERT_WRONG_USAGE?
      return ERR_CERT_INVALID;
    // We received an unexpected_message or illegal_parameter alert message
    // from the server.
    case SEC_E_ILLEGAL_MESSAGE:
      return ERR_SSL_PROTOCOL_ERROR;
    case SEC_E_ALGORITHM_MISMATCH:
      return ERR_SSL_VERSION_OR_CIPHER_MISMATCH;
    case SEC_E_INVALID_HANDLE:
      return ERR_UNEXPECTED;
    case SEC_E_OK:
      return OK;
    default:
      LOG(WARNING) << "Unknown error " << err << " mapped to net::ERR_FAILED";
      return ERR_FAILED;
  }
}

// Map the errors in the chain_context->TrustStatus.dwErrorStatus returned by
// CertGetCertificateChain to our certificate status flags. Multiple error
// bits may be set, so the returned CertStatus is a bitwise OR of the mapped
// flags.
int MapCertChainErrorStatusToCertStatus(DWORD error_status) {
  CertStatus cert_status = 0;

  // We don't include CERT_TRUST_IS_NOT_TIME_NESTED because it's obsolete and
  // we wouldn't consider it an error anyway
  const DWORD kDateInvalidErrors =
      CERT_TRUST_IS_NOT_TIME_VALID | CERT_TRUST_CTL_IS_NOT_TIME_VALID;
  if (error_status & kDateInvalidErrors)
    cert_status |= CERT_STATUS_DATE_INVALID;

  const DWORD kAuthorityInvalidErrors = CERT_TRUST_IS_UNTRUSTED_ROOT |
                                        CERT_TRUST_IS_EXPLICIT_DISTRUST |
                                        CERT_TRUST_IS_PARTIAL_CHAIN;
  if (error_status & kAuthorityInvalidErrors)
    cert_status |= CERT_STATUS_AUTHORITY_INVALID;

  // "Revocation status unknown" only maps to NO_REVOCATION_MECHANISM when it
  // is not caused by the revocation source being offline.
  if ((error_status & CERT_TRUST_REVOCATION_STATUS_UNKNOWN) &&
      !(error_status & CERT_TRUST_IS_OFFLINE_REVOCATION))
    cert_status |= CERT_STATUS_NO_REVOCATION_MECHANISM;

  if (error_status & CERT_TRUST_IS_OFFLINE_REVOCATION)
    cert_status |= CERT_STATUS_UNABLE_TO_CHECK_REVOCATION;

  if (error_status & CERT_TRUST_IS_REVOKED)
    cert_status |= CERT_STATUS_REVOKED;

  const DWORD kWrongUsageErrors =
      CERT_TRUST_IS_NOT_VALID_FOR_USAGE | CERT_TRUST_CTL_IS_NOT_VALID_FOR_USAGE;
  if (error_status & kWrongUsageErrors) {
    // TODO(wtc): Should we add CERT_STATUS_WRONG_USAGE?
    cert_status |= CERT_STATUS_INVALID;
  }

  if (error_status & CERT_TRUST_IS_NOT_SIGNATURE_VALID) {
    // Check for a signature that does not meet the OS criteria for strong
    // signatures.
    // Note: These checks may be more restrictive than the current weak key
    // criteria implemented within CertVerifier, such as excluding SHA-1 or
    // excluding RSA keys < 2048 bits. However, if the user has configured
    // these more stringent checks, respect that configuration and err on the
    // more restrictive criteria.
    if (error_status & CERT_TRUST_HAS_WEAK_SIGNATURE) {
      cert_status |= CERT_STATUS_WEAK_KEY;
    } else {
      cert_status |= CERT_STATUS_INVALID;
    }
  }

  // The rest of the errors.
  const DWORD kCertInvalidErrors =
      CERT_TRUST_IS_CYCLIC | CERT_TRUST_INVALID_EXTENSION |
      CERT_TRUST_INVALID_POLICY_CONSTRAINTS |
      CERT_TRUST_INVALID_BASIC_CONSTRAINTS |
      CERT_TRUST_INVALID_NAME_CONSTRAINTS |
      CERT_TRUST_CTL_IS_NOT_SIGNATURE_VALID |
      CERT_TRUST_HAS_NOT_SUPPORTED_NAME_CONSTRAINT |
      CERT_TRUST_HAS_NOT_DEFINED_NAME_CONSTRAINT |
      CERT_TRUST_HAS_NOT_PERMITTED_NAME_CONSTRAINT |
      CERT_TRUST_HAS_EXCLUDED_NAME_CONSTRAINT |
      CERT_TRUST_NO_ISSUANCE_CHAIN_POLICY |
      CERT_TRUST_HAS_NOT_SUPPORTED_CRITICAL_EXT;
  if (error_status & kCertInvalidErrors)
    cert_status |= CERT_STATUS_INVALID;

  return cert_status;
}

// Returns true if any common name in the certificate's Subject field contains
// a NULL character. Such certificates are a spoofing vector (the "PKI Layer
// Cake" embedded-NUL attack), so callers treat them as invalid.
bool CertSubjectCommonNameHasNull(PCCERT_CONTEXT cert) {
  CRYPT_DECODE_PARA decode_para;
  decode_para.cbSize = sizeof(decode_para);
  decode_para.pfnAlloc = crypto::CryptAlloc;
  decode_para.pfnFree = crypto::CryptFree;
  CERT_NAME_INFO* name_info = NULL;
  DWORD name_info_size = 0;
  BOOL rv;
  rv = CryptDecodeObjectEx(X509_ASN_ENCODING | PKCS_7_ASN_ENCODING,
                           WINCRYPT_X509_NAME,
                           cert->pCertInfo->Subject.pbData,
                           cert->pCertInfo->Subject.cbData,
                           CRYPT_DECODE_ALLOC_FLAG | CRYPT_DECODE_NOCOPY_FLAG,
                           &decode_para,
                           &name_info,
                           &name_info_size);
  if (rv) {
    std::unique_ptr<CERT_NAME_INFO, base::FreeDeleter> scoped_name_info(
        name_info);

    // The Subject field may have multiple common names. According to the
    // "PKI Layer Cake" paper, CryptoAPI uses every common name in the
    // Subject field, so we inspect every common name.
    //
    // From RFC 5280:
    // X520CommonName ::= CHOICE {
    //   teletexString   TeletexString   (SIZE (1..ub-common-name)),
    //   printableString PrintableString (SIZE (1..ub-common-name)),
    //   universalString UniversalString (SIZE (1..ub-common-name)),
    //   utf8String      UTF8String      (SIZE (1..ub-common-name)),
    //   bmpString       BMPString       (SIZE (1..ub-common-name)) }
    //
    // We also check IA5String and VisibleString.
    for (DWORD i = 0; i < name_info->cRDN; ++i) {
      PCERT_RDN rdn = &name_info->rgRDN[i];
      for (DWORD j = 0; j < rdn->cRDNAttr; ++j) {
        PCERT_RDN_ATTR rdn_attr = &rdn->rgRDNAttr[j];
        if (strcmp(rdn_attr->pszObjId, szOID_COMMON_NAME) == 0) {
          switch (rdn_attr->dwValueType) {
            // After the CryptoAPI ASN.1 security vulnerabilities described in
            // http://www.microsoft.com/technet/security/Bulletin/MS09-056.mspx
            // were patched, we get CERT_RDN_ENCODED_BLOB for a common name
            // that contains a NULL character.
            case CERT_RDN_ENCODED_BLOB:
              break;
            // Array of 8-bit characters.
            case CERT_RDN_PRINTABLE_STRING:
            case CERT_RDN_TELETEX_STRING:
            case CERT_RDN_IA5_STRING:
            case CERT_RDN_VISIBLE_STRING:
              for (DWORD k = 0; k < rdn_attr->Value.cbData; ++k) {
                if (rdn_attr->Value.pbData[k] == '\0')
                  return true;
              }
              break;
            // Array of 16-bit characters.
            case CERT_RDN_BMP_STRING:
            case CERT_RDN_UTF8_STRING: {
              DWORD num_wchars = rdn_attr->Value.cbData / 2;
              wchar_t* common_name =
                  reinterpret_cast<wchar_t*>(rdn_attr->Value.pbData);
              for (DWORD k = 0; k < num_wchars; ++k) {
                if (common_name[k] == L'\0')
                  return true;
              }
              break;
            }
            // Array of ints (32-bit).
            case CERT_RDN_UNIVERSAL_STRING: {
              DWORD num_ints = rdn_attr->Value.cbData / 4;
              int* common_name =
                  reinterpret_cast<int*>(rdn_attr->Value.pbData);
              for (DWORD k = 0; k < num_ints; ++k) {
                if (common_name[k] == 0)
                  return true;
              }
              break;
            }
            default:
              NOTREACHED();
              break;
          }
        }
      }
    }
  }
  return false;
}

// Saves some information about the certificate chain |chain_context| in
// |*verify_result|. The caller MUST initialize |*verify_result| before
// calling this function.
void GetCertChainInfo(PCCERT_CHAIN_CONTEXT chain_context,
                      CertVerifyResult* verify_result) {
  if (chain_context->cChain == 0)
    return;

  PCERT_SIMPLE_CHAIN first_chain = chain_context->rgpChain[0];
  DWORD num_elements = first_chain->cElement;
  PCERT_CHAIN_ELEMENT* element = first_chain->rgpElement;

  PCCERT_CONTEXT verified_cert = NULL;
  std::vector<PCCERT_CONTEXT> verified_chain;

  // A root CA is present only when the chain has more than one element and
  // Windows did not flag the chain as partial.
  bool has_root_ca =
      num_elements > 1 &&
      !(chain_context->TrustStatus.dwErrorStatus &
        CERT_TRUST_IS_PARTIAL_CHAIN);

  // Each chain starts with the end entity certificate (i = 0) and ends with
  // either the root CA certificate or the last available intermediate. If a
  // root CA certificate is present, do not inspect the signature algorithm of
  // the root CA certificate because the signature on the trust anchor is not
  // important.
  if (has_root_ca) {
    // If a full chain was constructed, regardless of whether it was trusted,
    // don't inspect the root's signature algorithm.
    num_elements -= 1;
  }

  // Split the chain into the leaf (element 0) and its intermediates.
  for (DWORD i = 0; i < num_elements; ++i) {
    PCCERT_CONTEXT cert = element[i]->pCertContext;
    if (i == 0) {
      verified_cert = cert;
    } else {
      verified_chain.push_back(cert);
    }
  }

  if (verified_cert) {
    // Add the root certificate, if present, as it was not added above.
    if (has_root_ca)
      verified_chain.push_back(element[num_elements]->pCertContext);

    scoped_refptr<X509Certificate> verified_cert_with_chain =
        x509_util::CreateX509CertificateFromCertContexts(verified_cert,
                                                         verified_chain);
    if (verified_cert_with_chain)
      verify_result->verified_cert = std::move(verified_cert_with_chain);
    else
      verify_result->cert_status |= CERT_STATUS_INVALID;
  }
}

// Decodes the cert's certificatePolicies extension into a CERT_POLICIES_INFO
// structure and stores it in *output.
void GetCertPoliciesInfo(
    PCCERT_CONTEXT cert,
    std::unique_ptr<CERT_POLICIES_INFO, base::FreeDeleter>* output) {
  PCERT_EXTENSION extension = CertFindExtension(szOID_CERT_POLICIES,
                                                cert->pCertInfo->cExtension,
                                                cert->pCertInfo->rgExtension);
  // Absent extension: leave |*output| untouched (remains null).
  if (!extension)
    return;

  CRYPT_DECODE_PARA decode_para;
  decode_para.cbSize = sizeof(decode_para);
  decode_para.pfnAlloc = crypto::CryptAlloc;
  decode_para.pfnFree = crypto::CryptFree;
  CERT_POLICIES_INFO* policies_info = NULL;
  DWORD policies_info_size = 0;
  BOOL rv;
  rv = CryptDecodeObjectEx(X509_ASN_ENCODING | PKCS_7_ASN_ENCODING,
                           szOID_CERT_POLICIES,
                           extension->Value.pbData,
                           extension->Value.cbData,
                           CRYPT_DECODE_ALLOC_FLAG | CRYPT_DECODE_NOCOPY_FLAG,
                           &decode_para,
                           &policies_info,
                           &policies_info_size);
  if (rv)
    output->reset(policies_info);
}

// Computes the SHA-256 hash of the SPKI of |cert| and stores it in |hash|,
// returning true. If an error occurs, returns false and leaves |hash|
// unmodified.
bool HashSPKI(PCCERT_CONTEXT cert, std::string* hash) {
  base::StringPiece der_bytes(
      reinterpret_cast<const char*>(cert->pbCertEncoded),
      cert->cbCertEncoded);

  base::StringPiece spki;
  if (!asn1::ExtractSPKIFromDERCert(der_bytes, &spki))
    return false;

  *hash = crypto::SHA256HashString(spki);
  return true;
}

// Extracts the DER-encoded Subject of |cert| into |*out_subject|, returning
// true on success.
bool GetSubject(PCCERT_CONTEXT cert, base::StringPiece* out_subject) {
  base::StringPiece der_bytes(
      reinterpret_cast<const char*>(cert->pbCertEncoded),
      cert->cbCertEncoded);
  return asn1::ExtractSubjectFromDERCert(der_bytes, out_subject);
}

enum CRLSetResult {
  // Indicates an error happened while attempting to determine CRLSet status.
  // For example, if the certificate's SPKI could not be extracted.
  kCRLSetError,

  // Indicates there is no fresh information about the certificate, or if the
  // CRLSet has expired.
  // In the case of certificate chains, this is only returned if the leaf
  // certificate is not covered by the CRLSet; this is because some
  // intermediates are fully covered, but after filtering, the issuer's CRL
  // is empty and thus omitted from the CRLSet. Since online checking is
  // performed for EV certificates when this status is returned, this would
  // result in needless online lookups for certificates known not-revoked.
  kCRLSetUnknown,

  // Indicates that the certificate (or a certificate in the chain) has been
  // revoked.
  kCRLSetRevoked,

  // The certificate (or certificate chain) has no revocations.
  kCRLSetOk,
};

// Determines if |subject_cert| is revoked within |crl_set|,
// storing the SubjectPublicKeyInfo hash of |subject_cert| in
// |*previous_hash|.
//
// CRLSets store revocations by both SPKI and by the tuple of Issuer SPKI
// Hash & Serial. While |subject_cert| contains enough information to check
// for SPKI revocations, to determine the issuer's SPKI, either |issuer_cert|
// must be supplied, or the hash of the issuer's SPKI provided in
// |*previous_hash|. If |issuer_cert| is omitted, and |*previous_hash| is
// empty, only SPKI checks are performed.
//
// To avoid recomputing SPKI hashes, the hash of |subject_cert| is stored in
// |*previous_hash|. This allows chaining revocation checking, by starting
// at the root and iterating to the leaf, supplying |previous_hash| each time.
//
// In the event of a parsing error, |*previous_hash| is cleared, to prevent
// the wrong Issuer&Serial tuple from being used.
CRLSetResult CheckRevocationWithCRLSet(CRLSet* crl_set,
                                       PCCERT_CONTEXT subject_cert,
                                       PCCERT_CONTEXT issuer_cert,
                                       std::string* previous_hash) {
  DCHECK(crl_set);
  DCHECK(subject_cert);

  // Check to see if |subject_cert|'s SPKI or Subject is revoked.
  std::string subject_hash;
  base::StringPiece subject_name;
  if (!HashSPKI(subject_cert, &subject_hash) ||
      !GetSubject(subject_cert, &subject_name)) {
    NOTREACHED();  // Indicates Windows accepted something irrecoverably bad.
    previous_hash->clear();
    return kCRLSetError;
  }

  if (crl_set->CheckSPKI(subject_hash) == CRLSet::REVOKED ||
      crl_set->CheckSubject(subject_name, subject_hash) == CRLSet::REVOKED) {
    return kCRLSetRevoked;
  }

  // If no issuer cert is provided, nor a hash of the issuer's SPKI, no
  // further checks can be done.
  if (!issuer_cert && previous_hash->empty()) {
    previous_hash->swap(subject_hash);
    return kCRLSetUnknown;
  }

  // Compute the subject's serial.
  const CRYPT_INTEGER_BLOB* serial_blob =
      &subject_cert->pCertInfo->SerialNumber;
  std::unique_ptr<uint8_t[]> serial_bytes(new uint8_t[serial_blob->cbData]);
  // The bytes of the serial number are stored little-endian.
  // Note: While MSDN implies that bytes are stripped from this serial,
  // they are not - only CertCompareIntegerBlob actually removes bytes.
  for (DWORD j = 0; j < serial_blob->cbData; j++)
    serial_bytes[j] = serial_blob->pbData[serial_blob->cbData - j - 1];
  base::StringPiece serial(reinterpret_cast<const char*>(serial_bytes.get()),
                           serial_blob->cbData);

  // Compute the issuer's hash. If it was provided (via previous_hash),
  // use that; otherwise, compute it based on |issuer_cert|.
  std::string issuer_hash_local;
  std::string* issuer_hash = previous_hash;
  if (issuer_hash->empty()) {
    if (!HashSPKI(issuer_cert, &issuer_hash_local)) {
      NOTREACHED();  // Indicates Windows accepted something irrecoverably bad.
      previous_hash->clear();
      return kCRLSetError;
    }
    issuer_hash = &issuer_hash_local;
  }

  // Look up by serial & issuer SPKI.
  const CRLSet::Result result = crl_set->CheckSerial(serial, *issuer_hash);
  if (result == CRLSet::REVOKED)
    return kCRLSetRevoked;

  previous_hash->swap(subject_hash);
  if (result == CRLSet::GOOD)
    return kCRLSetOk;
  if (result == CRLSet::UNKNOWN)
    return kCRLSetUnknown;

  NOTREACHED();
  return kCRLSetError;
}

// CheckChainRevocationWithCRLSet attempts to check each element of |chain|
// against |crl_set|. It returns:
// kCRLSetRevoked: if any element of the chain is known to have been revoked.
// kCRLSetUnknown: if there is no fresh information about the leaf
//     certificate in the chain or if the CRLSet has expired.
//
//     Only the leaf certificate is considered for coverage because some
//     intermediates have CRLs with no revocations (after filtering) and
//     those CRLs are pruned from the CRLSet at generation time. This means
//     that some EV sites would otherwise take the hit of an OCSP lookup for
//     no reason.
// kCRLSetOk: otherwise.
CRLSetResult CheckChainRevocationWithCRLSet(PCCERT_CHAIN_CONTEXT chain,
                                            CRLSet* crl_set) {
  if (chain->cChain == 0 || chain->rgpChain[0]->cElement == 0)
    return kCRLSetOk;

  PCERT_CHAIN_ELEMENT* elements = chain->rgpChain[0]->rgpElement;
  DWORD num_elements = chain->rgpChain[0]->cElement;

  bool had_error = false;
  CRLSetResult result = kCRLSetError;
  std::string issuer_spki_hash;
  // Iterate root-first so |issuer_spki_hash| carries each element's issuer
  // hash into the next CheckRevocationWithCRLSet call; |result| ends up
  // reflecting the leaf (last element visited).
  for (DWORD i = 0; i < num_elements; ++i) {
    PCCERT_CONTEXT subject = elements[num_elements - i - 1]->pCertContext;
    result = CheckRevocationWithCRLSet(crl_set, subject, nullptr,
                                       &issuer_spki_hash);
    if (result == kCRLSetRevoked)
      return result;
    if (result == kCRLSetError)
      had_error = true;
  }

  if (had_error || crl_set->IsExpired())
    return kCRLSetUnknown;
  return result;
}

// Appends the SHA-256 SPKI hashes of |chain| to |hashes| (ordered from leaf
// to root) and sets |*known_root| if any element matches a known trust
// anchor.
void AppendPublicKeyHashesAndUpdateKnownRoot(PCCERT_CHAIN_CONTEXT chain,
                                             HashValueVector* hashes,
                                             bool* known_root) {
  if (chain->cChain == 0)
    return;

  PCERT_SIMPLE_CHAIN first_chain = chain->rgpChain[0];
  PCERT_CHAIN_ELEMENT* const element = first_chain->rgpElement;
  const DWORD num_elements = first_chain->cElement;
  // Walk the chain in reverse, from the probable root to the known leaf, as
  // an optimization for IsKnownRoot checks.
  for (DWORD i = num_elements; i > 0; i--) {
    PCCERT_CONTEXT cert = element[i - 1]->pCertContext;
    base::StringPiece der_bytes(
        reinterpret_cast<const char*>(cert->pbCertEncoded),
        cert->cbCertEncoded);
    base::StringPiece spki_bytes;
    if (!asn1::ExtractSPKIFromDERCert(der_bytes, &spki_bytes))
      continue;
    HashValue sha256(HASH_VALUE_SHA256);
    crypto::SHA256HashString(spki_bytes, sha256.data(),
                             crypto::kSHA256Length);
    hashes->push_back(sha256);
    if (!*known_root) {
      *known_root = GetNetTrustAnchorHistogramIdForSPKI(sha256) != 0 ||
                    IsKnownRoot(cert);
    }
  }
  // Reverse the hash list, such that it's ordered from leaf to root.
  std::reverse(hashes->begin(), hashes->end());
}

// Returns true if the certificate is an extended-validation certificate.
//
// This function checks the certificatePolicies extensions of the
// certificates in the certificate chain according to Section 7 (pp. 11-12)
// of the EV Certificate Guidelines Version 1.0 at
// http://cabforum.org/EV_Certificate_Guidelines.pdf.
bool CheckEV(PCCERT_CHAIN_CONTEXT chain_context,
             bool rev_checking_enabled,
             const char* policy_oid) {
  DCHECK_NE(static_cast<DWORD>(0), chain_context->cChain);
  // If the cert doesn't match any of the policies, the
  // CERT_TRUST_IS_NOT_VALID_FOR_USAGE bit (0x10) in
  // chain_context->TrustStatus.dwErrorStatus is set.
  DWORD error_status = chain_context->TrustStatus.dwErrorStatus;

  if (!rev_checking_enabled) {
    // If online revocation checking is disabled then we will have still
    // requested that the revocation cache be checked. However, that will
    // often cause the following two error bits to be set. These error bits
    // mean that the local OCSP/CRL is stale or missing entries for these
    // certificates. Since they are expected, we mask them away.
    error_status &= ~(CERT_TRUST_IS_OFFLINE_REVOCATION |
                      CERT_TRUST_REVOCATION_STATUS_UNKNOWN);
  }

  if (!chain_context->cChain || error_status != CERT_TRUST_NO_ERROR)
    return false;

  // Check the end certificate simple chain (chain_context->rgpChain[0]).
  // If the end certificate's certificatePolicies extension contains the
  // EV policy OID of the root CA, return true.
  PCERT_CHAIN_ELEMENT* element = chain_context->rgpChain[0]->rgpElement;
  int num_elements = chain_context->rgpChain[0]->cElement;
  if (num_elements < 2)
    return false;

  // Look up the EV policy OID of the root CA.
  PCCERT_CONTEXT root_cert = element[num_elements - 1]->pCertContext;
  SHA256HashValue fingerprint = x509_util::CalculateFingerprint256(root_cert);
  EVRootCAMetadata* metadata = EVRootCAMetadata::GetInstance();
  return metadata->HasEVPolicyOID(fingerprint, policy_oid);
}

// Custom revocation provider function that compares incoming certificates
// with those in CRLSets. This is called BEFORE the default CRL & OCSP
// handling is invoked (which is handled by the revocation provider function
// "CertDllVerifyRevocation" in cryptnet.dll)
BOOL WINAPI
CertDllVerifyRevocationWithCRLSet(DWORD encoding_type,
                                  DWORD revocation_type,
                                  DWORD num_contexts,
                                  void* rgpvContext[],
                                  DWORD flags,
                                  PCERT_REVOCATION_PARA revocation_params,
                                  PCERT_REVOCATION_STATUS revocation_status);

// Helper class that installs the CRLSet-based Revocation Provider as the
// default revocation provider. Because it is installed as a function address
// (meaning only scoped to the process, and not stored in the registry), it
// will be used before any registry-based providers, including Microsoft's
// default provider.
class RevocationInjector {
 public:
  CRLSet* GetCRLSet() { return thread_local_crlset.Get(); }

  void SetCRLSet(CRLSet* crl_set) { thread_local_crlset.Set(crl_set); }

 private:
  friend struct base::LazyInstanceTraitsBase<RevocationInjector>;

  RevocationInjector() {
    // Register CertDllVerifyRevocationWithCRLSet so it runs ahead of the
    // registry-installed revocation providers.
    const CRYPT_OID_FUNC_ENTRY kInterceptFunction[] = {
        {CRYPT_DEFAULT_OID,
         reinterpret_cast<void*>(&CertDllVerifyRevocationWithCRLSet)},
    };
    BOOL ok = CryptInstallOIDFunctionAddress(
        NULL, X509_ASN_ENCODING, CRYPT_OID_VERIFY_REVOCATION_FUNC,
        base::size(kInterceptFunction), kInterceptFunction,
        CRYPT_INSTALL_OID_FUNC_BEFORE_FLAG);
    DCHECK(ok);
  }
  ~RevocationInjector() {}

  // As the revocation parameters passed to CertVerifyProc::VerifyInternal
  // cannot be officially smuggled to the Revocation Provider, the CRLSet for
  // the current verification is stashed in this thread-local instead.
  base::ThreadLocalPointer<CRLSet> thread_local_crlset;
};

// Leaky, as CertVerifyProc workers are themselves leaky.
base::LazyInstance<RevocationInjector>::Leaky g_revocation_injector =
    LAZY_INSTANCE_INITIALIZER;

BOOL WINAPI
CertDllVerifyRevocationWithCRLSet(DWORD encoding_type,
                                  DWORD revocation_type,
                                  DWORD num_contexts,
                                  void* rgpvContext[],
                                  DWORD flags,
                                  PCERT_REVOCATION_PARA revocation_params,
                                  PCERT_REVOCATION_STATUS revocation_status) {
  PCERT_CONTEXT* cert_contexts =
      reinterpret_cast<PCERT_CONTEXT*>(rgpvContext);
  // The dummy CRLSet provider never returns that something is affirmatively
  // *un*revoked, as this would disable other revocation providers from being
  // checked for this certificate (much like an OCSP "Good" status would).
  // Instead, it merely indicates that insufficient information existed to
  // determine if the certificate was revoked (in the good case), or that a
  // cert is affirmatively revoked in the event it appears within the CRLSet.
  // Because of this, set up some basic bookkeeping for the results.
  CHECK(revocation_status);
  revocation_status->dwIndex = 0;
  revocation_status->dwError =
      static_cast<DWORD>(CRYPT_E_NO_REVOCATION_CHECK);
  revocation_status->dwReason = 0;

  if (num_contexts == 0 || !cert_contexts[0]) {
    SetLastError(static_cast<DWORD>(E_INVALIDARG));
    return FALSE;
  }

  if ((GET_CERT_ENCODING_TYPE(encoding_type) != X509_ASN_ENCODING) ||
      revocation_type != CERT_CONTEXT_REVOCATION_TYPE) {
    SetLastError(static_cast<DWORD>(CRYPT_E_NO_REVOCATION_CHECK));
    return FALSE;
  }

  // No revocation checking possible if there is no associated
  // CRLSet.
  CRLSet* crl_set = g_revocation_injector.Get().GetCRLSet();
  if (!crl_set)
    return FALSE;

  // |revocation_params| is an optional structure; to make life simple and
  // avoid the need to constantly check whether or not it was supplied, create
  // a local copy. If the caller didn't supply anything, it will be empty;
  // otherwise, it will be (non-owning) copies of the caller's original
  // params.
  CERT_REVOCATION_PARA local_params;
  memset(&local_params, 0, sizeof(local_params));
  if (revocation_params) {
    DWORD bytes_to_copy = std::min(revocation_params->cbSize,
                                   static_cast<DWORD>(sizeof(local_params)));
    memcpy(&local_params, revocation_params, bytes_to_copy);
  }
  local_params.cbSize = sizeof(local_params);

  PCERT_CONTEXT subject_cert = cert_contexts[0];

  if ((flags & CERT_VERIFY_REV_CHAIN_FLAG) && num_contexts > 1) {
    // Verifying a chain; first verify from the last certificate in the
    // chain to the first, and then leave the last certificate (which
    // is presumably self-issued, although it may simply be a trust
    // anchor) as the |subject_cert| in order to scan for more
    // revocations.
    std::string issuer_hash;
    PCCERT_CONTEXT issuer_cert = nullptr;
    for (DWORD i = num_contexts; i > 0; --i) {
      subject_cert = cert_contexts[i - 1];
      if (!subject_cert) {
        SetLastError(static_cast<DWORD>(E_INVALIDARG));
        return FALSE;
      }
      CRLSetResult result = CheckRevocationWithCRLSet(
          crl_set, subject_cert, issuer_cert, &issuer_hash);
      if (result == kCRLSetRevoked) {
        revocation_status->dwIndex = i - 1;
        revocation_status->dwError = static_cast<DWORD>(CRYPT_E_REVOKED);
        revocation_status->dwReason = CRL_REASON_UNSPECIFIED;
        SetLastError(revocation_status->dwError);
        return FALSE;
      }
      issuer_cert = subject_cert;
    }
    // Verified all certificates from the trust anchor to the leaf, and none
    // were explicitly revoked. Now do a second pass to attempt to determine
    // the issuer for cert_contexts[num_contexts - 1], so that the
    // Issuer SPKI+Serial can be checked for that certificate.
    //
    // This code intentionally ignores the flag
    subject_cert = cert_contexts[num_contexts - 1];
    // Reset local_params.pIssuerCert, since it would contain the issuer
    // for cert_contexts[0].
    local_params.pIssuerCert = nullptr;
    // Fixup the revocation index to point to this cert (in the event it is
    // revoked). If it isn't revoked, this will be undone later.
    revocation_status->dwIndex = num_contexts - 1;
  }

  // Determine the issuer cert for the incoming cert
  ScopedPCCERT_CONTEXT issuer_cert;
  if (local_params.pIssuerCert &&
      CryptVerifyCertificateSignatureEx(
          NULL, subject_cert->dwCertEncodingType,
          CRYPT_VERIFY_CERT_SIGN_SUBJECT_CERT, subject_cert,
          CRYPT_VERIFY_CERT_SIGN_ISSUER_CERT,
          const_cast<PCERT_CONTEXT>(local_params.pIssuerCert), 0, nullptr)) {
    // Caller has already supplied the issuer cert via the revocation params;
    // just use that.
    issuer_cert.reset(
        CertDuplicateCertificateContext(local_params.pIssuerCert));
  } else if (CertCompareCertificateName(subject_cert->dwCertEncodingType,
                                        &subject_cert->pCertInfo->Subject,
                                        &subject_cert->pCertInfo->Issuer) &&
             CryptVerifyCertificateSignatureEx(
                 NULL, subject_cert->dwCertEncodingType,
                 CRYPT_VERIFY_CERT_SIGN_SUBJECT_CERT, subject_cert,
                 CRYPT_VERIFY_CERT_SIGN_ISSUER_CERT, subject_cert, 0,
                 nullptr)) {
    // Certificate is self-signed; use it as its own issuer.
    issuer_cert.reset(CertDuplicateCertificateContext(subject_cert));
  } else {
    // Scan the caller-supplied stores first, to try and find the issuer
    // cert.
    for (DWORD i = 0; i < local_params.cCertStore && !issuer_cert; ++i) {
      PCCERT_CONTEXT previous_cert = nullptr;
      for (;;) {
        DWORD store_search_flags = CERT_STORE_SIGNATURE_FLAG;
        previous_cert = CertGetIssuerCertificateFromStore(
            local_params.rgCertStore[i], subject_cert, previous_cert,
            &store_search_flags);
        if (!previous_cert)
          break;
        // If a cert is found and meets the criteria, the flag will be reset
        // to zero. Thus NOT having the bit set is equivalent to having found
        // a matching certificate.
        if (!(store_search_flags & CERT_STORE_SIGNATURE_FLAG)) {
          // No need to dupe; reference is held.
          issuer_cert.reset(previous_cert);
          break;
        }
      }
      if (issuer_cert)
        break;
      if (GetLastError() == static_cast<DWORD>(CRYPT_E_SELF_SIGNED)) {
        issuer_cert.reset(CertDuplicateCertificateContext(subject_cert));
        break;
      }
    }
    // At this point, the Microsoft provider opens up the "CA", "Root", and
    // "SPC" stores to search for the issuer certificate, if not found in the
    // caller-supplied stores. It is unclear whether that is necessary here.
  }
  if (!issuer_cert) {
    // Rather than return CRYPT_E_NO_REVOCATION_CHECK (indicating everything
    // is fine to try the next provider), return CRYPT_E_REVOCATION_OFFLINE.
    // This propagates up to the caller as an error while checking
    // revocation, which is the desired intent if there are certificates that
    // cannot be checked.
    revocation_status->dwIndex = 0;
    revocation_status->dwError =
        static_cast<DWORD>(CRYPT_E_REVOCATION_OFFLINE);
    SetLastError(revocation_status->dwError);
    return FALSE;
  }

  std::string unused;
  CRLSetResult result = CheckRevocationWithCRLSet(
      crl_set, subject_cert, issuer_cert.get(), &unused);
  if (result == kCRLSetRevoked) {
    revocation_status->dwError = static_cast<DWORD>(CRYPT_E_REVOKED);
    revocation_status->dwReason = CRL_REASON_UNSPECIFIED;
    SetLastError(revocation_status->dwError);
    return FALSE;
  }

  // The result is ALWAYS FALSE in order to allow the next revocation
  // provider a chance to examine. The only difference is whether or not an
  // error is indicated via dwError (and SetLastError()).
  // Reset the error index so that Windows does not believe this code has
  // examined the entire chain and found no issues until the last cert (thus
  // skipping other revocation providers).
  revocation_status->dwIndex = 0;
  return FALSE;
}

// Scoper that publishes |crl_set| to the thread-local slot read by
// CertDllVerifyRevocationWithCRLSet for the duration of a verification, and
// clears it on destruction.
class ScopedThreadLocalCRLSet {
 public:
  explicit ScopedThreadLocalCRLSet(CRLSet* crl_set) {
    g_revocation_injector.Get().SetCRLSet(crl_set);
  }
  ~ScopedThreadLocalCRLSet() {
    g_revocation_injector.Get().SetCRLSet(nullptr);
  }
};

}  // namespace

CertVerifyProcWin::CertVerifyProcWin() {}

CertVerifyProcWin::~CertVerifyProcWin() {}

bool CertVerifyProcWin::SupportsAdditionalTrustAnchors() const {
  return false;
}

bool CertVerifyProcWin::SupportsOCSPStapling() const {
  // CERT_OCSP_RESPONSE_PROP_ID is only implemented on Vista+, but it can be
  // set on Windows XP without error. There is some overhead from the server
  // sending the OCSP response if it supports the extension, for the subset of
  // XP clients who will request it but be unable to use it, but this is an
  // acceptable trade-off for simplicity of implementation.
// NOTE(review): this chunk opens with the tail of a function whose body
// begins above this view (presumably a boolean predicate); it is kept
// unchanged.
return true;
}

// Verifies |cert| for use as an SSL server certificate for |hostname| using
// the Windows CryptoAPI (CertGetCertificateChain +
// CertVerifyCertificateChainPolicy), writing status flags and chain
// information into |verify_result| and returning a net error code (OK on
// success). |crl_set| feeds the thread-local revocation provider;
// |ocsp_response|, if non-empty, is attached as a stapled OCSP response.
// NOTE(review): |additional_trust_anchors| is accepted but never read in this
// implementation — presumably unsupported on the Windows path; confirm
// against the CertVerifyProc interface contract.
int CertVerifyProcWin::VerifyInternal(
    X509Certificate* cert,
    const std::string& hostname,
    const std::string& ocsp_response,
    int flags,
    CRLSet* crl_set,
    const CertificateList& additional_trust_anchors,
    CertVerifyResult* verify_result) {
  // Ensure the Revocation Provider has been installed and configured for this
  // CRLSet.
  ScopedThreadLocalCRLSet thread_local_crlset(crl_set);

  ScopedPCCERT_CONTEXT cert_list = x509_util::CreateCertContextWithChain(
      cert, x509_util::InvalidIntermediateBehavior::kIgnore);
  if (!cert_list) {
    verify_result->cert_status |= CERT_STATUS_INVALID;
    return ERR_CERT_INVALID;
  }

  // Build and validate certificate chain.
  CERT_CHAIN_PARA chain_para;
  memset(&chain_para, 0, sizeof(chain_para));
  chain_para.cbSize = sizeof(chain_para);
  // ExtendedKeyUsage.
  // We still need to request szOID_SERVER_GATED_CRYPTO and szOID_SGC_NETSCAPE
  // today because some certificate chains need them. IE also requests these
  // two usages.
  static const LPCSTR usage[] = {
      szOID_PKIX_KP_SERVER_AUTH,
      szOID_SERVER_GATED_CRYPTO,
      szOID_SGC_NETSCAPE
  };
  chain_para.RequestedUsage.dwType = USAGE_MATCH_TYPE_OR;
  chain_para.RequestedUsage.Usage.cUsageIdentifier = base::size(usage);
  chain_para.RequestedUsage.Usage.rgpszUsageIdentifier =
      const_cast<LPSTR*>(usage);

  // Get the certificatePolicies extension of the certificate.
  std::unique_ptr<CERT_POLICIES_INFO, base::FreeDeleter> policies_info;
  LPSTR ev_policy_oid = NULL;
  GetCertPoliciesInfo(cert_list.get(), &policies_info);
  if (policies_info) {
    // If any policy OID is a recognized EV policy, request it as the issuance
    // policy so the built chain can later qualify for CERT_STATUS_IS_EV.
    EVRootCAMetadata* metadata = EVRootCAMetadata::GetInstance();
    for (DWORD i = 0; i < policies_info->cPolicyInfo; ++i) {
      LPSTR policy_oid = policies_info->rgPolicyInfo[i].pszPolicyIdentifier;
      if (metadata->IsEVPolicyOID(policy_oid)) {
        ev_policy_oid = policy_oid;
        chain_para.RequestedIssuancePolicy.dwType = USAGE_MATCH_TYPE_AND;
        chain_para.RequestedIssuancePolicy.Usage.cUsageIdentifier = 1;
        chain_para.RequestedIssuancePolicy.Usage.rgpszUsageIdentifier =
            &ev_policy_oid;

        // De-prioritize the CA/Browser forum Extended Validation policy
        // (2.23.140.1.1). See https://crbug.com/705285.
        if (!EVRootCAMetadata::IsCaBrowserForumEvOid(ev_policy_oid))
          break;
      }
    }
  }

  // Revocation checking is always enabled, in order to enable CRLSets to be
  // evaluated as part of a revocation provider. However, when the caller did
  // not explicitly request revocation checking (which is to say, online
  // revocation checking), then only enable cached results. This disables OCSP
  // and CRL fetching, but still allows the revocation provider to be called.
  // Note: The root cert is also checked for revocation status, so that CRLSets
  // will cover revoked SPKIs.
  DWORD chain_flags = CERT_CHAIN_REVOCATION_CHECK_CHAIN;
  bool rev_checking_enabled =
      (flags & CertVerifier::VERIFY_REV_CHECKING_ENABLED);
  if (rev_checking_enabled) {
    verify_result->cert_status |= CERT_STATUS_REV_CHECKING_ENABLED;
  } else {
    chain_flags |= CERT_CHAIN_REVOCATION_CHECK_CACHE_ONLY;
  }

  // By default, use the default HCERTCHAINENGINE (aka HCCE_CURRENT_USER). When
  // running tests, use a dynamic HCERTCHAINENGINE. All of the status and cache
  // of verified certificates and chains is tied to the HCERTCHAINENGINE. As
  // each invocation may have changed the set of known roots, invalidate the
  // cache between runs.
  //
  // This is not the most efficient means of doing so; it's possible to mark the
  // Root store used by TestRootCerts as changed, via CertControlStore with the
  // CERT_STORE_CTRL_NOTIFY_CHANGE / CERT_STORE_CTRL_RESYNC, but that's more
  // complexity for what is test-only code.
  ScopedHCERTCHAINENGINE chain_engine(NULL);
  if (TestRootCerts::HasInstance())
    chain_engine.reset(TestRootCerts::GetInstance()->GetChainEngine());

  // Add stapled OCSP response data, which will be preferred over online checks
  // and used when in cache-only mode.
  if (!ocsp_response.empty()) {
    CRYPT_DATA_BLOB ocsp_response_blob;
    ocsp_response_blob.cbData = ocsp_response.size();
    ocsp_response_blob.pbData =
        reinterpret_cast<BYTE*>(const_cast<char*>(ocsp_response.data()));
    CertSetCertificateContextProperty(
        cert_list.get(), CERT_OCSP_RESPONSE_PROP_ID,
        CERT_SET_PROPERTY_IGNORE_PERSIST_ERROR_FLAG, &ocsp_response_blob);
  }

  CERT_STRONG_SIGN_SERIALIZED_INFO strong_signed_info;
  memset(&strong_signed_info, 0, sizeof(strong_signed_info));
  strong_signed_info.dwFlags = 0;  // Don't check OCSP or CRL signatures.

  // Note that the following two configurations result in disabling support for
  // any CNG-added algorithms, which may result in some disruption for internal
  // PKI operations that use national forms of crypto (e.g. GOST). However, the
  // fallback mechanism for this (to support SHA-1 chains) will re-enable them,
  // so they should continue to work - just with added latency.
  wchar_t hash_algs[] =
      L"RSA/SHA256;RSA/SHA384;RSA/SHA512;"
      L"ECDSA/SHA256;ECDSA/SHA384;ECDSA/SHA512";
  strong_signed_info.pwszCNGSignHashAlgids = hash_algs;

  // RSA-1024 bit support is intentionally enabled here. More investigation is
  // needed to determine if setting CERT_STRONG_SIGN_DISABLE_END_CHECK_FLAG in
  // the dwStrongSignFlags of |chain_para| would allow the ability to disable
  // support for intermediates/roots < 2048-bits, while still ensuring that
  // end-entity certs signed with SHA-1 are flagged/rejected.
  wchar_t key_sizes[] = L"RSA/1024;ECDSA/256";
  strong_signed_info.pwszCNGPubKeyMinBitLengths = key_sizes;

  CERT_STRONG_SIGN_PARA strong_sign_params;
  memset(&strong_sign_params, 0, sizeof(strong_sign_params));
  strong_sign_params.cbSize = sizeof(strong_sign_params);
  strong_sign_params.dwInfoChoice = CERT_STRONG_SIGN_SERIALIZED_INFO_CHOICE;
  strong_sign_params.pSerializedInfo = &strong_signed_info;

  chain_para.dwStrongSignFlags = 0;
  chain_para.pStrongSignPara = &strong_sign_params;

  PCCERT_CHAIN_CONTEXT chain_context = nullptr;

  // First, try to verify with strong signing enabled. If this fails, or if the
  // chain is rejected, then clear it from |chain_para| so that all subsequent
  // calls will use the fallback path.
  BOOL chain_result =
      CertGetCertificateChain(chain_engine, cert_list.get(),
                              NULL,  // current system time
                              cert_list->hCertStore, &chain_para, chain_flags,
                              NULL,  // reserved
                              &chain_context);
  if (chain_result && chain_context &&
      (chain_context->TrustStatus.dwErrorStatus &
       (CERT_TRUST_HAS_WEAK_SIGNATURE | CERT_TRUST_IS_NOT_SIGNATURE_VALID))) {
    // The attempt to verify with strong-sign (only SHA-2) failed, so fall back
    // to disabling it. This will allow SHA-1 chains to be returned, which will
    // then be subsequently signalled as weak if necessary.
    CertFreeCertificateChain(chain_context);
    chain_context = nullptr;

    chain_para.pStrongSignPara = nullptr;
    chain_para.dwStrongSignFlags = 0;
    chain_result =
        CertGetCertificateChain(chain_engine, cert_list.get(),
                                NULL,  // current system time
                                cert_list->hCertStore, &chain_para,
                                chain_flags,
                                NULL,  // reserved
                                &chain_context);
  }

  if (!chain_result) {
    verify_result->cert_status |= CERT_STATUS_INVALID;
    return MapSecurityError(GetLastError());
  }

  // Perform a second check with CRLSets. Although the Revocation Provider
  // should have prevented invalid paths from being built, the behaviour and
  // timing of how a Revocation Provider is invoked is not well documented. This
  // is just defense in depth.
  CRLSetResult crl_set_result = kCRLSetUnknown;
  if (crl_set)
    crl_set_result = CheckChainRevocationWithCRLSet(chain_context, crl_set);

  if (crl_set_result == kCRLSetRevoked) {
    verify_result->cert_status |= CERT_STATUS_REVOKED;
  } else if (crl_set_result == kCRLSetUnknown && !rev_checking_enabled &&
             ev_policy_oid) {
    // We don't have fresh information about this chain from the CRLSet and
    // it's probably an EV certificate. Retry with online revocation checking.
    rev_checking_enabled = true;
    chain_flags &= ~CERT_CHAIN_REVOCATION_CHECK_CACHE_ONLY;
    verify_result->cert_status |= CERT_STATUS_REV_CHECKING_ENABLED;

    CertFreeCertificateChain(chain_context);
    if (!CertGetCertificateChain(chain_engine, cert_list.get(),
                                 NULL,  // current system time
                                 cert_list->hCertStore, &chain_para,
                                 chain_flags,
                                 NULL,  // reserved
                                 &chain_context)) {
      verify_result->cert_status |= CERT_STATUS_INVALID;
      return MapSecurityError(GetLastError());
    }
  }

  // The chain built for EV may be rejected for the requested usage; if so,
  // drop the EV issuance-policy constraint and rebuild a plain chain.
  if (chain_context->TrustStatus.dwErrorStatus &
      CERT_TRUST_IS_NOT_VALID_FOR_USAGE) {
    ev_policy_oid = NULL;
    chain_para.RequestedIssuancePolicy.Usage.cUsageIdentifier = 0;
    chain_para.RequestedIssuancePolicy.Usage.rgpszUsageIdentifier = NULL;
    CertFreeCertificateChain(chain_context);
    if (!CertGetCertificateChain(chain_engine, cert_list.get(),
                                 NULL,  // current system time
                                 cert_list->hCertStore, &chain_para,
                                 chain_flags,
                                 NULL,  // reserved
                                 &chain_context)) {
      verify_result->cert_status |= CERT_STATUS_INVALID;
      return MapSecurityError(GetLastError());
    }
  }

  // Snapshot |verify_result| so it can be restored if the chain must be
  // rebuilt with online revocation checking for locally-trusted anchors.
  CertVerifyResult temp_verify_result = *verify_result;
  GetCertChainInfo(chain_context, verify_result);
  if (!verify_result->is_issued_by_known_root &&
      (flags & CertVerifier::VERIFY_REV_CHECKING_REQUIRED_LOCAL_ANCHORS)) {
    *verify_result = temp_verify_result;

    rev_checking_enabled = true;
    verify_result->cert_status |= CERT_STATUS_REV_CHECKING_ENABLED;
    chain_flags &= ~CERT_CHAIN_REVOCATION_CHECK_CACHE_ONLY;

    CertFreeCertificateChain(chain_context);
    if (!CertGetCertificateChain(chain_engine, cert_list.get(),
                                 NULL,  // current system time
                                 cert_list->hCertStore, &chain_para,
                                 chain_flags,
                                 NULL,  // reserved
                                 &chain_context)) {
      verify_result->cert_status |= CERT_STATUS_INVALID;
      return MapSecurityError(GetLastError());
    }
    GetCertChainInfo(chain_context, verify_result);

    if (chain_context->TrustStatus.dwErrorStatus &
        CERT_TRUST_IS_OFFLINE_REVOCATION) {
      verify_result->cert_status |= CERT_STATUS_REVOKED;
    }
  }
  // From here on, |chain_context| is owned by the scoper and freed on every
  // return path below.
  ScopedPCCERT_CHAIN_CONTEXT scoped_chain_context(chain_context);

  verify_result->cert_status |= MapCertChainErrorStatusToCertStatus(
      chain_context->TrustStatus.dwErrorStatus);

  // Flag certificates that have a Subject common name with a NULL character.
  if (CertSubjectCommonNameHasNull(cert_list.get()))
    verify_result->cert_status |= CERT_STATUS_INVALID;

  base::string16 hostname16 = base::ASCIIToUTF16(hostname);

  SSL_EXTRA_CERT_CHAIN_POLICY_PARA extra_policy_para;
  memset(&extra_policy_para, 0, sizeof(extra_policy_para));
  extra_policy_para.cbSize = sizeof(extra_policy_para);
  extra_policy_para.dwAuthType = AUTHTYPE_SERVER;
  // Certificate name validation happens separately, later, using an internal
  // routine that has better support for RFC 6125 name matching.
  extra_policy_para.fdwChecks =
      0x00001000;  // SECURITY_FLAG_IGNORE_CERT_CN_INVALID
  extra_policy_para.pwszServerName =
      const_cast<base::char16*>(hostname16.c_str());

  CERT_CHAIN_POLICY_PARA policy_para;
  memset(&policy_para, 0, sizeof(policy_para));
  policy_para.cbSize = sizeof(policy_para);
  policy_para.dwFlags = 0;
  policy_para.pvExtraPolicyPara = &extra_policy_para;

  CERT_CHAIN_POLICY_STATUS policy_status;
  memset(&policy_status, 0, sizeof(policy_status));
  policy_status.cbSize = sizeof(policy_status);

  if (!CertVerifyCertificateChainPolicy(CERT_CHAIN_POLICY_SSL, chain_context,
                                        &policy_para, &policy_status)) {
    return MapSecurityError(GetLastError());
  }

  if (policy_status.dwError) {
    verify_result->cert_status |= MapNetErrorToCertStatus(
        MapSecurityError(policy_status.dwError));
  }

  // TODO(wtc): Suppress CERT_STATUS_NO_REVOCATION_MECHANISM for now to be
  // compatible with WinHTTP, which doesn't report this error (bug 3004).
  verify_result->cert_status &= ~CERT_STATUS_NO_REVOCATION_MECHANISM;

  if (!rev_checking_enabled) {
    // If we didn't do online revocation checking then Windows will report
    // CERT_UNABLE_TO_CHECK_REVOCATION unless it had cached OCSP or CRL
    // information for every certificate. We only want to put up revoked
    // statuses from the offline checks so we squash this error.
    verify_result->cert_status &= ~CERT_STATUS_UNABLE_TO_CHECK_REVOCATION;
  }

  AppendPublicKeyHashesAndUpdateKnownRoot(
      chain_context, &verify_result->public_key_hashes,
      &verify_result->is_issued_by_known_root);

  if (IsCertStatusError(verify_result->cert_status))
    return MapCertStatusToNetError(verify_result->cert_status);

  if (ev_policy_oid &&
      CheckEV(chain_context, rev_checking_enabled, ev_policy_oid)) {
    verify_result->cert_status |= CERT_STATUS_IS_EV;
  }
  return OK;
}

}  // namespace net
{ "pile_set_name": "Github" }
/*
 * Incremental bus scan, based on bus topology
 *
 * Copyright (C) 2004-2006 Kristian Hoegsberg <krh@bitplanet.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#include <linux/bug.h>
#include <linux/errno.h>
#include <linux/firewire.h>
#include <linux/firewire-constants.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>
#include <asm/byteorder.h>

#include "core.h"

/* Field extractors for a self-ID packet quadlet |q|. */
#define SELF_ID_PHY_ID(q)		(((q) >> 24) & 0x3f)
#define SELF_ID_EXTENDED(q)		(((q) >> 23) & 0x01)
#define SELF_ID_LINK_ON(q)		(((q) >> 22) & 0x01)
#define SELF_ID_GAP_COUNT(q)		(((q) >> 16) & 0x3f)
#define SELF_ID_PHY_SPEED(q)		(((q) >> 14) & 0x03)
#define SELF_ID_CONTENDER(q)		(((q) >> 11) & 0x01)
#define SELF_ID_PHY_INITIATOR(q)	(((q) >> 1) & 0x01)
#define SELF_ID_MORE_PACKETS(q)		(((q) >> 0) & 0x01)

/* Sequence number field of an *extended* self-ID quadlet. */
#define SELF_ID_EXT_SEQUENCE(q)		(((q) >> 20) & 0x07)

/* Two-bit per-port connection codes within a self-ID packet. */
#define SELFID_PORT_CHILD	0x3
#define SELFID_PORT_PARENT	0x2
#define SELFID_PORT_NCONN	0x1
#define SELFID_PORT_NONE	0x0

/*
 * Count the total and child ports encoded in the (possibly extended)
 * self-ID packet starting at |sid|.  Returns a pointer just past the
 * packet's last quadlet, or NULL if the extended quadlets are malformed.
 */
static u32 *count_ports(u32 *sid, int *total_port_count, int *child_port_count)
{
	u32 q;
	int port_type, shift, seq;

	*total_port_count = 0;
	*child_port_count = 0;

	/*
	 * Port fields are 2 bits wide; the first quadlet carries ports
	 * starting at bit 6, extended quadlets starting at bit 16.
	 */
	shift = 6;
	q = *sid;
	seq = 0;

	while (1) {
		port_type = (q >> shift) & 0x03;
		switch (port_type) {
		case SELFID_PORT_CHILD:
			(*child_port_count)++;
			/* fall through - a child port also counts as a port */
		case SELFID_PORT_PARENT:
		case SELFID_PORT_NCONN:
			(*total_port_count)++;
			/* fall through */
		case SELFID_PORT_NONE:
			break;
		}

		shift -= 2;
		if (shift == 0) {
			if (!SELF_ID_MORE_PACKETS(q))
				return sid + 1;

			shift = 16;
			sid++;
			q = *sid;

			/*
			 * Check that the extra packets actually are
			 * extended self ID packets and that the
			 * sequence numbers in the extended self ID
			 * packets increase as expected.
			 */

			if (!SELF_ID_EXTENDED(q) ||
			    seq != SELF_ID_EXT_SEQUENCE(q))
				return NULL;

			seq++;
		}
	}
}

/*
 * Return the 2-bit connection code of port |port_index| from the self-ID
 * packet at |sid|.  The "+ 5" offset skips the three non-port fields that
 * precede the port codes; 8 port codes fit per quadlet.
 */
static int get_port_type(u32 *sid, int port_index)
{
	int index, shift;

	index = (port_index + 5) / 8;
	shift = 16 - ((port_index + 5) & 7) * 2;
	return (sid[index] >> shift) & 0x03;
}

/*
 * Allocate and initialize an fw_node from self-ID quadlet |sid| with room
 * for |port_count| port pointers.  GFP_ATOMIC because callers run under
 * the card lock.  Returns NULL on allocation failure.
 */
static struct fw_node *fw_node_create(u32 sid, int port_count, int color)
{
	struct fw_node *node;

	node = kzalloc(sizeof(*node) + port_count * sizeof(node->ports[0]),
		       GFP_ATOMIC);
	if (node == NULL)
		return NULL;

	node->color = color;
	node->node_id = LOCAL_BUS | SELF_ID_PHY_ID(sid);
	node->link_on = SELF_ID_LINK_ON(sid);
	node->phy_speed = SELF_ID_PHY_SPEED(sid);
	node->initiated_reset = SELF_ID_PHY_INITIATOR(sid);
	node->port_count = port_count;

	refcount_set(&node->ref_count, 1);
	INIT_LIST_HEAD(&node->link);

	return node;
}

/*
 * Compute the maximum hop count for this node and it's children.  The
 * maximum hop count is the maximum number of connections between any
 * two nodes in the subtree rooted at this node.  We need this for
 * setting the gap count.  As we build the tree bottom up in
 * build_tree() below, this is fairly easy to do: for each node we
 * maintain the max hop count and the max depth, ie the number of hops
 * to the furthest leaf.  Computing the max hop count breaks down into
 * two cases: either the path goes through this node, in which case
 * the hop count is the sum of the two biggest child depths plus 2.
 * Or it could be the case that the max hop path is entirely
 * containted in a child tree, in which case the max hop count is just
 * the max hop count of this child.
 */
static void update_hop_count(struct fw_node *node)
{
	/* depths[0] and depths[1] track the two largest child depths. */
	int depths[2] = { -1, -1 };
	int max_child_hops = 0;
	int i;

	for (i = 0; i < node->port_count; i++) {
		if (node->ports[i] == NULL)
			continue;

		if (node->ports[i]->max_hops > max_child_hops)
			max_child_hops = node->ports[i]->max_hops;

		if (node->ports[i]->max_depth > depths[0]) {
			depths[1] = depths[0];
			depths[0] = node->ports[i]->max_depth;
		} else if (node->ports[i]->max_depth > depths[1])
			depths[1] = node->ports[i]->max_depth;
	}

	node->max_depth = depths[0] + 1;
	node->max_hops = max(max_child_hops, depths[0] + depths[1] + 2);
}

/* Convert a list_head embedded in an fw_node back to the fw_node. */
static inline struct fw_node *fw_node(struct list_head *l)
{
	return list_entry(l, struct fw_node, link);
}

/*
 * This function builds the tree representation of the topology given
 * by the self IDs from the latest bus reset.  During the construction
 * of the tree, the function checks that the self IDs are valid and
 * internally consistent.  On success this function returns the
 * fw_node corresponding to the local card otherwise NULL.
 */
static struct fw_node *build_tree(struct fw_card *card,
				  u32 *sid, int self_id_count)
{
	struct fw_node *node, *child, *local_node, *irm_node;
	struct list_head stack, *h;
	u32 *next_sid, *end, q;
	int i, port_count, child_port_count, phy_id, parent_count, stack_depth;
	int gap_count;
	bool beta_repeaters_present;

	local_node = NULL;
	node = NULL;
	INIT_LIST_HEAD(&stack);
	stack_depth = 0;
	end = sid + self_id_count;
	phy_id = 0;
	irm_node = NULL;
	gap_count = SELF_ID_GAP_COUNT(*sid);
	beta_repeaters_present = false;

	while (sid < end) {
		next_sid = count_ports(sid, &port_count, &child_port_count);

		if (next_sid == NULL) {
			fw_err(card, "inconsistent extended self IDs\n");
			return NULL;
		}

		/* Self IDs must arrive in ascending PHY-ID order. */
		q = *sid;
		if (phy_id != SELF_ID_PHY_ID(q)) {
			fw_err(card, "PHY ID mismatch in self ID: %d != %d\n",
			       phy_id, SELF_ID_PHY_ID(q));
			return NULL;
		}

		if (child_port_count > stack_depth) {
			fw_err(card, "topology stack underflow\n");
			return NULL;
		}

		/*
		 * Seek back from the top of our stack to find the
		 * start of the child nodes for this node.
		 */
		for (i = 0, h = &stack; i < child_port_count; i++)
			h = h->prev;
		/*
		 * When the stack is empty, this yields an invalid value,
		 * but that pointer will never be dereferenced.
		 */
		child = fw_node(h);

		node = fw_node_create(q, port_count, card->color);
		if (node == NULL) {
			fw_err(card, "out of memory while building topology\n");
			return NULL;
		}

		if (phy_id == (card->node_id & 0x3f))
			local_node = node;

		if (SELF_ID_CONTENDER(q))
			irm_node = node;

		parent_count = 0;

		for (i = 0; i < port_count; i++) {
			switch (get_port_type(sid, i)) {
			case SELFID_PORT_PARENT:
				/*
				 * Who's your daddy?  We dont know the
				 * parent node at this time, so we
				 * temporarily abuse node->color for
				 * remembering the entry in the
				 * node->ports array where the parent
				 * node should be.  Later, when we
				 * handle the parent node, we fix up
				 * the reference.
				 */
				parent_count++;
				node->color = i;
				break;

			case SELFID_PORT_CHILD:
				node->ports[i] = child;
				/*
				 * Fix up parent reference for this
				 * child node.
				 */
				child->ports[child->color] = node;
				child->color = card->color;
				child = fw_node(child->link.next);
				break;
			}
		}

		/*
		 * Check that the node reports exactly one parent
		 * port, except for the root, which of course should
		 * have no parents.
		 */
		if ((next_sid == end && parent_count != 0) ||
		    (next_sid < end && parent_count != 1)) {
			fw_err(card, "parent port inconsistency for node %d: "
			       "parent_count=%d\n", phy_id, parent_count);
			return NULL;
		}

		/* Pop the child nodes off the stack and push the new node. */
		__list_del(h->prev, &stack);
		list_add_tail(&node->link, &stack);
		stack_depth += 1 - child_port_count;

		if (node->phy_speed == SCODE_BETA &&
		    parent_count + child_port_count > 1)
			beta_repeaters_present = true;

		/*
		 * If PHYs report different gap counts, set an invalid count
		 * which will force a gap count reconfiguration and a reset.
		 */
		if (SELF_ID_GAP_COUNT(q) != gap_count)
			gap_count = 0;

		update_hop_count(node);

		sid = next_sid;
		phy_id++;
	}

	card->root_node = node;
	card->irm_node = irm_node;
	card->gap_count = gap_count;
	card->beta_repeaters_present = beta_repeaters_present;

	return local_node;
}

typedef void (*fw_node_callback_t)(struct fw_card * card,
				   struct fw_node * node,
				   struct fw_node * parent);

/*
 * Breadth-first traversal of the subtree rooted at |root|, invoking
 * |callback| for every node with its parent (NULL for the root).  Uses
 * node->color to distinguish already-visited nodes; drops the reference
 * taken for queuing each node after the walk.
 */
static void for_each_fw_node(struct fw_card *card, struct fw_node *root,
			     fw_node_callback_t callback)
{
	struct list_head list;
	struct fw_node *node, *next, *child, *parent;
	int i;

	INIT_LIST_HEAD(&list);

	fw_node_get(root);
	list_add_tail(&root->link, &list);
	parent = NULL;
	list_for_each_entry(node, &list, link) {
		node->color = card->color;

		for (i = 0; i < node->port_count; i++) {
			child = node->ports[i];
			if (!child)
				continue;
			if (child->color == card->color)
				parent = child;
			else {
				fw_node_get(child);
				list_add_tail(&child->link, &list);
			}
		}

		callback(card, node, parent);
	}

	list_for_each_entry_safe(node, next, &list, link)
		fw_node_put(node);
}

/* for_each_fw_node() callback: report a node that left the bus. */
static void report_lost_node(struct fw_card *card,
			     struct fw_node *node, struct fw_node *parent)
{
	fw_node_event(card, node, FW_NODE_DESTROYED);
	fw_node_put(node);

	/* Topology has changed - reset bus manager retry counter */
	card->bm_retries = 0;
}

/*
 * for_each_fw_node() callback: report a newly appeared node, deriving its
 * max_speed and beta-path flag from its parent (or its own PHY for a root).
 */
static void report_found_node(struct fw_card *card,
			      struct fw_node *node, struct fw_node *parent)
{
	int b_path = (node->phy_speed == SCODE_BETA);

	if (parent != NULL) {
		/* min() macro doesn't work here with gcc 3.4 */
		node->max_speed = parent->max_speed < node->phy_speed ?
					parent->max_speed : node->phy_speed;
		node->b_path = parent->b_path && b_path;
	} else {
		node->max_speed = node->phy_speed;
		node->b_path = b_path;
	}

	fw_node_event(card, node, FW_NODE_CREATED);

	/* Topology has changed - reset bus manager retry counter */
	card->bm_retries = 0;
}

/*
 * Tear down the card's current topology tree, reporting every node as
 * lost.  Bumps card->color so stale colors never match.
 */
void fw_destroy_nodes(struct fw_card *card)
{
	unsigned long flags;

	spin_lock_irqsave(&card->lock, flags);
	card->color++;
	if (card->local_node != NULL)
		for_each_fw_node(card, card->local_node, report_lost_node);
	card->local_node = NULL;
	spin_unlock_irqrestore(&card->lock, flags);
}

/*
 * Splice the subtree hanging off |node1|'s port |port| onto |node0|,
 * rewriting the subtree's back-pointer from node1 to node0.
 */
static void move_tree(struct fw_node *node0, struct fw_node *node1, int port)
{
	struct fw_node *tree;
	int i;

	tree = node1->ports[port];
	node0->ports[port] = tree;
	for (i = 0; i < tree->port_count; i++) {
		if (tree->ports[i] == node1) {
			tree->ports[i] = node0;
			break;
		}
	}
}

/*
 * Compare the old topology tree for card with the new one specified by root.
 * Queue the nodes and mark them as either found, lost or updated.
 * Update the nodes in the card topology tree as we go.
 */
static void update_tree(struct fw_card *card, struct fw_node *root)
{
	struct list_head list0, list1;
	struct fw_node *node0, *node1, *next1;
	int i, event;

	INIT_LIST_HEAD(&list0);
	list_add_tail(&card->local_node->link, &list0);
	INIT_LIST_HEAD(&list1);
	list_add_tail(&root->link, &list1);

	node0 = fw_node(list0.next);
	node1 = fw_node(list1.next);

	/* Walk the two trees in lockstep; they are structurally aligned. */
	while (&node0->link != &list0) {
		WARN_ON(node0->port_count != node1->port_count);

		if (node0->link_on && !node1->link_on)
			event = FW_NODE_LINK_OFF;
		else if (!node0->link_on && node1->link_on)
			event = FW_NODE_LINK_ON;
		else if (node1->initiated_reset && node1->link_on)
			event = FW_NODE_INITIATED_RESET;
		else
			event = FW_NODE_UPDATED;

		node0->node_id = node1->node_id;
		node0->color = card->color;
		node0->link_on = node1->link_on;
		node0->initiated_reset = node1->initiated_reset;
		node0->max_hops = node1->max_hops;
		node1->color = card->color;
		fw_node_event(card, node0, event);

		if (card->root_node == node1)
			card->root_node = node0;
		if (card->irm_node == node1)
			card->irm_node = node0;

		for (i = 0; i < node0->port_count; i++) {
			if (node0->ports[i] && node1->ports[i]) {
				/*
				 * This port didn't change, queue the
				 * connected node for further
				 * investigation.
				 */
				if (node0->ports[i]->color == card->color)
					continue;
				list_add_tail(&node0->ports[i]->link, &list0);
				list_add_tail(&node1->ports[i]->link, &list1);
			} else if (node0->ports[i]) {
				/*
				 * The nodes connected here were
				 * unplugged; unref the lost nodes and
				 * queue FW_NODE_LOST callbacks for
				 * them.
				 */
				for_each_fw_node(card, node0->ports[i],
						 report_lost_node);
				node0->ports[i] = NULL;
			} else if (node1->ports[i]) {
				/*
				 * One or more node were connected to
				 * this port. Move the new nodes into
				 * the tree and queue FW_NODE_CREATED
				 * callbacks for them.
				 */
				move_tree(node0, node1, i);
				for_each_fw_node(card, node0->ports[i],
						 report_found_node);
			}
		}

		node0 = fw_node(node0->link.next);
		next1 = fw_node(node1->link.next);
		fw_node_put(node1);
		node1 = next1;
	}
}

/*
 * Refresh the card's TOPOLOGY_MAP CSR image from the raw self IDs:
 * length word, incremented generation, node/self-ID counts, the self-ID
 * quadlets themselves, and finally the CRC.
 */
static void update_topology_map(struct fw_card *card,
				u32 *self_ids, int self_id_count)
{
	int node_count = (card->root_node->node_id & 0x3f) + 1;
	__be32 *map = card->topology_map;

	*map++ = cpu_to_be32((self_id_count + 2) << 16);
	*map++ = cpu_to_be32(be32_to_cpu(card->topology_map[1]) + 1);
	*map++ = cpu_to_be32((node_count << 16) | self_id_count);

	while (self_id_count--)
		*map++ = cpu_to_be32p(self_ids++);

	fw_compute_block_crc(card->topology_map);
}

/*
 * Entry point from the card driver after a bus reset: rebuild the
 * topology from |self_ids|, diff it against the previous topology (if
 * comparable), and kick off bus-manager work.  Runs under card->lock.
 */
void fw_core_handle_bus_reset(struct fw_card *card, int node_id, int generation,
			      int self_id_count, u32 *self_ids, bool bm_abdicate)
{
	struct fw_node *local_node;
	unsigned long flags;

	/*
	 * If the selfID buffer is not the immediate successor of the
	 * previously processed one, we cannot reliably compare the
	 * old and new topologies.
	 */
	if (!is_next_generation(generation, card->generation) &&
	    card->local_node != NULL) {
		fw_destroy_nodes(card);
		card->bm_retries = 0;
	}

	spin_lock_irqsave(&card->lock, flags);

	card->broadcast_channel_allocated = card->broadcast_channel_auto_allocated;
	card->node_id = node_id;
	/*
	 * Update node_id before generation to prevent anybody from using
	 * a stale node_id together with a current generation.
	 */
	smp_wmb();
	card->generation = generation;
	card->reset_jiffies = get_jiffies_64();
	card->bm_node_id  = 0xffff;
	card->bm_abdicate = bm_abdicate;
	fw_schedule_bm_work(card, 0);

	local_node = build_tree(card, self_ids, self_id_count);

	update_topology_map(card, self_ids, self_id_count);

	card->color++;

	if (local_node == NULL) {
		fw_err(card, "topology build failed\n");
		/* FIXME: We need to issue a bus reset in this case. */
	} else if (card->local_node == NULL) {
		card->local_node = local_node;
		for_each_fw_node(card, local_node, report_found_node);
	} else {
		update_tree(card, local_node);
	}

	spin_unlock_irqrestore(&card->lock, flags);
}
EXPORT_SYMBOL(fw_core_handle_bus_reset);
{ "pile_set_name": "Github" }
/* This is a small demo of the high-performance ThreadX kernel.  It includes
   examples of eight threads of different priorities, using a message queue,
   semaphore, mutex, event flags group, byte pool, and block pool.  */

#include "tx_api.h"

/* Sizing constants for the demo's stacks, pools, and queue. */
#define DEMO_STACK_SIZE         1024
#define DEMO_BYTE_POOL_SIZE     9120
#define DEMO_BLOCK_POOL_SIZE    100
#define DEMO_QUEUE_SIZE         100


/* Define the ThreadX object control blocks...  */

TX_THREAD               thread_0;
TX_THREAD               thread_1;
TX_THREAD               thread_2;
TX_THREAD               thread_3;
TX_THREAD               thread_4;
TX_THREAD               thread_5;
TX_THREAD               thread_6;
TX_THREAD               thread_7;
TX_QUEUE                queue_0;
TX_SEMAPHORE            semaphore_0;
TX_MUTEX                mutex_0;
TX_EVENT_FLAGS_GROUP    event_flags_0;
TX_BYTE_POOL            byte_pool_0;
TX_BLOCK_POOL           block_pool_0;


/* Define the counters used in the demo application...  */

ULONG                   thread_0_counter;
ULONG                   thread_1_counter;
ULONG                   thread_1_messages_sent;
ULONG                   thread_2_counter;
ULONG                   thread_2_messages_received;
ULONG                   thread_3_counter;
ULONG                   thread_4_counter;
ULONG                   thread_5_counter;
ULONG                   thread_6_counter;
ULONG                   thread_7_counter;


/* Define thread prototypes.  */

void    thread_0_entry(ULONG thread_input);
void    thread_1_entry(ULONG thread_input);
void    thread_2_entry(ULONG thread_input);
void    thread_3_and_4_entry(ULONG thread_input);
void    thread_5_entry(ULONG thread_input);
void    thread_6_and_7_entry(ULONG thread_input);


/* Define main entry point.  */

int main()
{
    /* Enter the ThreadX kernel.  This call does not return; control passes
       to tx_application_define() and then to the scheduler.  */
    tx_kernel_enter();
}


/* Define what the initial system looks like.  Called once by the kernel
   before scheduling starts; all kernel objects are created here.  */

void    tx_application_define(void *first_unused_memory)
{

CHAR    *pointer = TX_NULL;


    /* Create a byte memory pool from which to allocate the thread stacks.  */
    tx_byte_pool_create(&byte_pool_0, "byte pool 0", first_unused_memory, DEMO_BYTE_POOL_SIZE);

    /* Put system definition stuff in here, e.g. thread creates and other assorted
       create information.  */

    /* Allocate the stack for thread 0.  */
    tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);

    /* Create the main thread.  */
    tx_thread_create(&thread_0, "thread 0", thread_0_entry, 0,
            pointer, DEMO_STACK_SIZE,
            1, 1, TX_NO_TIME_SLICE, TX_AUTO_START);

    /* Allocate the stack for thread 1.  */
    tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);

    /* Create threads 1 and 2. These threads pass information through a ThreadX
       message queue.  It is also interesting to note that these threads have a time
       slice.  */
    tx_thread_create(&thread_1, "thread 1", thread_1_entry, 1,
            pointer, DEMO_STACK_SIZE,
            16, 16, 4, TX_AUTO_START);

    /* Allocate the stack for thread 2.  */
    tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);

    tx_thread_create(&thread_2, "thread 2", thread_2_entry, 2,
            pointer, DEMO_STACK_SIZE,
            16, 16, 4, TX_AUTO_START);

    /* Allocate the stack for thread 3.  */
    tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);

    /* Create threads 3 and 4.  These threads compete for a ThreadX counting semaphore.
       An interesting thing here is that both threads share the same instruction area.  */
    tx_thread_create(&thread_3, "thread 3", thread_3_and_4_entry, 3,
            pointer, DEMO_STACK_SIZE,
            8, 8, TX_NO_TIME_SLICE, TX_AUTO_START);

    /* Allocate the stack for thread 4.  */
    tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);

    tx_thread_create(&thread_4, "thread 4", thread_3_and_4_entry, 4,
            pointer, DEMO_STACK_SIZE,
            8, 8, TX_NO_TIME_SLICE, TX_AUTO_START);

    /* Allocate the stack for thread 5.  */
    tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);

    /* Create thread 5.  This thread simply pends on an event flag which will be set
       by thread_0.  */
    tx_thread_create(&thread_5, "thread 5", thread_5_entry, 5,
            pointer, DEMO_STACK_SIZE,
            4, 4, TX_NO_TIME_SLICE, TX_AUTO_START);

    /* Allocate the stack for thread 6.  */
    tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);

    /* Create threads 6 and 7.  These threads compete for a ThreadX mutex.  */
    tx_thread_create(&thread_6, "thread 6", thread_6_and_7_entry, 6,
            pointer, DEMO_STACK_SIZE,
            8, 8, TX_NO_TIME_SLICE, TX_AUTO_START);

    /* Allocate the stack for thread 7.  */
    tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);

    tx_thread_create(&thread_7, "thread 7", thread_6_and_7_entry, 7,
            pointer, DEMO_STACK_SIZE,
            8, 8, TX_NO_TIME_SLICE, TX_AUTO_START);

    /* Allocate the message queue.  */
    tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_QUEUE_SIZE*sizeof(ULONG), TX_NO_WAIT);

    /* Create the message queue shared by threads 1 and 2.  */
    tx_queue_create(&queue_0, "queue 0", TX_1_ULONG, pointer, DEMO_QUEUE_SIZE*sizeof(ULONG));

    /* Create the semaphore used by threads 3 and 4.  */
    tx_semaphore_create(&semaphore_0, "semaphore 0", 1);

    /* Create the event flags group used by threads 1 and 5.  */
    tx_event_flags_create(&event_flags_0, "event flags 0");

    /* Create the mutex used by thread 6 and 7 without priority inheritance.  */
    tx_mutex_create(&mutex_0, "mutex 0", TX_NO_INHERIT);

    /* Allocate the memory for a small block pool.  */
    tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_BLOCK_POOL_SIZE, TX_NO_WAIT);

    /* Create a block memory pool to allocate a message buffer from.  */
    tx_block_pool_create(&block_pool_0, "block pool 0", sizeof(ULONG), pointer, DEMO_BLOCK_POOL_SIZE);

    /* Allocate a block and release the block memory.  */
    tx_block_allocate(&block_pool_0, (VOID **) &pointer, TX_NO_WAIT);

    /* Release the block back to the pool.  */
    tx_block_release(pointer);
}


/* Define the test threads.  */

void    thread_0_entry(ULONG thread_input)
{

UINT    status;


    /* This thread simply sits in while-forever-sleep loop.  */
    while(1)
    {

        /* Increment the thread counter.  */
        thread_0_counter++;

        /* Sleep for 10 ticks.  */
        tx_thread_sleep(10);

        /* Set event flag 0 to wakeup thread 5.  */
        status =  tx_event_flags_set(&event_flags_0, 0x1, TX_OR);

        /* Check status.  */
        if (status != TX_SUCCESS)
            break;
    }
}


void    thread_1_entry(ULONG thread_input)
{

UINT    status;


    /* This thread simply sends messages to a queue shared by thread 2.  */
    while(1)
    {

        /* Increment the thread counter.  */
        thread_1_counter++;

        /* Send message to queue 0.  */
        status =  tx_queue_send(&queue_0, &thread_1_messages_sent, TX_WAIT_FOREVER);

        /* Check completion status.  */
        if (status != TX_SUCCESS)
            break;

        /* Increment the message sent.  */
        thread_1_messages_sent++;
    }
}


void    thread_2_entry(ULONG thread_input)
{

ULONG   received_message;
UINT    status;

    /* This thread retrieves messages placed on the queue by thread 1.  */
    while(1)
    {

        /* Increment the thread counter.  */
        thread_2_counter++;

        /* Retrieve a message from the queue.  */
        status = tx_queue_receive(&queue_0, &received_message, TX_WAIT_FOREVER);

        /* Check completion status and make sure the message is what we
           expected.  */
        if ((status != TX_SUCCESS) || (received_message != thread_2_messages_received))
            break;

        /* Otherwise, all is okay.  Increment the received message count.  */
        thread_2_messages_received++;
    }
}


void    thread_3_and_4_entry(ULONG thread_input)
{

UINT    status;


    /* This function is executed from thread 3 and thread 4.  As the loop
       below shows, these function compete for ownership of semaphore_0.  */
    while(1)
    {

        /* Increment the thread counter.  */
        if (thread_input == 3)
            thread_3_counter++;
        else
            thread_4_counter++;

        /* Get the semaphore with suspension.  */
        status =  tx_semaphore_get(&semaphore_0, TX_WAIT_FOREVER);

        /* Check status.  */
        if (status != TX_SUCCESS)
            break;

        /* Sleep for 2 ticks to hold the semaphore.  */
        tx_thread_sleep(2);

        /* Release the semaphore.  */
        status =  tx_semaphore_put(&semaphore_0);

        /* Check status.  */
        if (status != TX_SUCCESS)
            break;
    }
}


void    thread_5_entry(ULONG thread_input)
{

UINT    status;
ULONG   actual_flags;


    /* This thread simply waits for an event in a forever loop.  */
    while(1)
    {

        /* Increment the thread counter.  */
        thread_5_counter++;

        /* Wait for event flag 0.  */
        status =  tx_event_flags_get(&event_flags_0, 0x1, TX_OR_CLEAR,
                                                &actual_flags, TX_WAIT_FOREVER);

        /* Check status.  */
        if ((status != TX_SUCCESS) || (actual_flags != 0x1))
            break;
    }
}


void    thread_6_and_7_entry(ULONG thread_input)
{

UINT    status;


    /* This function is executed from thread 6 and thread 7.  As the loop
       below shows, these function compete for ownership of mutex_0.  */
    while(1)
    {

        /* Increment the thread counter.  */
        if (thread_input == 6)
            thread_6_counter++;
        else
            thread_7_counter++;

        /* Get the mutex with suspension.  */
        status =  tx_mutex_get(&mutex_0, TX_WAIT_FOREVER);

        /* Check status.  */
        if (status != TX_SUCCESS)
            break;

        /* Get the mutex again with suspension.  This shows
           that an owning thread may retrieve the mutex it
           owns multiple times.  */
        status =  tx_mutex_get(&mutex_0, TX_WAIT_FOREVER);

        /* Check status.  */
        if (status != TX_SUCCESS)
            break;

        /* Sleep for 2 ticks to hold the mutex.  */
        tx_thread_sleep(2);

        /* Release the mutex.  */
        status =  tx_mutex_put(&mutex_0);

        /* Check status.  */
        if (status != TX_SUCCESS)
            break;

        /* Release the mutex again.  This will actually
           release ownership since it was obtained twice.  */
        status =  tx_mutex_put(&mutex_0);

        /* Check status.  */
        if (status != TX_SUCCESS)
            break;
    }
}
{ "pile_set_name": "Github" }
// Embedded-gist styling: layout fixes for the gist iframe plus a full
// Pygments/Rouge token color scheme scoped under `.gist`.
// Selector order is preserved exactly — later rules intentionally win.
.gist {
  .highlight { color: #000; }

  // Hide gist render-viewer chrome (error banners, spinner).
  .render-container .render-viewer-error,
  .render-container .render-viewer-fatal,
  .render-container .octospinner { display: none; }

  .gist-render iframe { width: 100%; }
  .gist-file.gist-render .highlight { border: none; }
  .gist-file .gist-meta .highlight a {
    font-weight: 700;
    color: #666;
    text-decoration: none;
  }

  // Base highlight surface (overrides the `color` rule above's block only
  // for background; text color stays as set there).
  .highlight { background: #fff; }

  // Token classes follow the standard Pygments short names
  // (err = error, c* = comments, g* = generic diff/output, k* = keywords,
  //  m* = numbers, s* = strings, n* = names, etc.).
  .highlight .err { color: #a61717; background-color: #e3d2d2; }
  .highlight .cp { color: #999; font-weight: 700; }
  .highlight .cs { color: #999; font-weight: 700; font-style: italic; }
  .highlight .gd { color: #000; background-color: #fdd; }
  .highlight .gd .x { color: #000; background-color: #faa; }
  .highlight .ge { color: #000; font-style: italic; }
  .highlight .gi { color: #000; background-color: #dfd; }
  .highlight .gi .x { color: #000; background-color: #afa; }
  .highlight .go { color: #888; }
  .highlight .gs { font-weight: 700; }
  .highlight .gu { color: #aaa; }
  .highlight .nb { color: #0086b3; }
  .highlight .ni { color: purple; }
  .highlight .nt { color: navy; }
  .highlight .w { color: #bbb; }
  .highlight .sr { color: #009926; }
  .highlight .ss { color: #990073; }
  .highlight .c,
  .highlight .cm,
  .highlight .c1 { color: #998; font-style: italic; }
  .highlight .k,
  .highlight .o,
  .highlight .kc,
  .highlight .kd,
  .highlight .kp,
  .highlight .kr,
  .highlight .ow,
  .highlight .n,
  .highlight .p { color: #000; font-weight: 700; }
  .highlight .gr,
  .highlight .gt { color: #a00; }
  .highlight .gh,
  .highlight .bp { color: #999; }
  .highlight .gp,
  .highlight .nn { color: #555; }
  .highlight .kt,
  .highlight .nc { color: #458; font-weight: 700; }
  .highlight .m,
  .highlight .mf,
  .highlight .mh,
  .highlight .mi,
  .highlight .mo,
  .highlight .il { color: #099; }
  .highlight .s,
  .highlight .sb,
  .highlight .sc,
  .highlight .sd,
  .highlight .s2,
  .highlight .se,
  .highlight .sh,
  .highlight .si,
  .highlight .sx,
  .highlight .s1 { color: #d14; }
  .highlight .na,
  .highlight .no,
  .highlight .nv,
  .highlight .vc,
  .highlight .vg,
  .highlight .vi { color: teal; }
  .highlight .ne,
  .highlight .nf { color: #900; font-weight: 700; }
}
{ "pile_set_name": "Github" }
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

// Unit tests for the internal-key encoding helpers declared in
// db/dbformat.h: encode/decode round-trips, FindShortestSeparator and
// FindShortSuccessor behaviour of InternalKeyComparator.

#include "db/dbformat.h"
#include "util/logging.h"
#include "util/testharness.h"

namespace leveldb {

// Encodes (user_key, seq, vt) into a serialized internal key via
// AppendInternalKey and returns the encoded bytes.
static std::string IKey(const std::string& user_key,
                        uint64_t seq,
                        ValueType vt) {
  std::string encoded;
  AppendInternalKey(&encoded, ParsedInternalKey(user_key, seq, vt));
  return encoded;
}

// Returns the shortest separator between internal keys s and l, as
// computed by InternalKeyComparator::FindShortestSeparator over the
// bytewise user-key comparator. s is copied; the input is not mutated.
static std::string Shorten(const std::string& s, const std::string& l) {
  std::string result = s;
  InternalKeyComparator(BytewiseComparator()).FindShortestSeparator(&result, l);
  return result;
}

// Returns the short successor of internal key s, as computed by
// InternalKeyComparator::FindShortSuccessor.
static std::string ShortSuccessor(const std::string& s) {
  std::string result = s;
  InternalKeyComparator(BytewiseComparator()).FindShortSuccessor(&result);
  return result;
}

// Round-trip check: encodes (key, seq, vt), parses it back, and asserts
// every field survives. Also asserts that the fixed malformed input
// "bar" fails to parse (too short to be a valid internal key —
// NOTE(review): grounded in dbformat.h's trailer layout, confirm there).
static void TestKey(const std::string& key, uint64_t seq, ValueType vt) {
  std::string encoded = IKey(key, seq, vt);
  Slice in(encoded);
  ParsedInternalKey decoded("", 0, kTypeValue);
  ASSERT_TRUE(ParseInternalKey(in, &decoded));
  ASSERT_EQ(key, decoded.user_key.ToString());
  ASSERT_EQ(seq, decoded.sequence);
  ASSERT_EQ(vt, decoded.type);
  // Parsing a malformed key must fail.
  ASSERT_TRUE(!ParseInternalKey(Slice("bar"), &decoded));
}

// Empty fixture class required by the TEST macro of util/testharness.h.
class FormatTest { };

TEST(FormatTest, InternalKey_EncodeDecode) {
  const char* keys[] = { "", "k", "hello", "longggggggggggggggggggggg" };
  // Sequence numbers chosen around varint/byte boundaries (2^8, 2^16,
  // 2^32 plus/minus one) to exercise the 64-bit sequence encoding.
  const uint64_t seq[] = {
    1, 2, 3,
    (1ull << 8) - 1, 1ull << 8, (1ull << 8) + 1,
    (1ull << 16) - 1, 1ull << 16, (1ull << 16) + 1,
    (1ull << 32) - 1, 1ull << 32, (1ull << 32) + 1
  };
  for (int k = 0; k < sizeof(keys) / sizeof(keys[0]); k++) {
    for (int s = 0; s < sizeof(seq) / sizeof(seq[0]); s++) {
      TestKey(keys[k], seq[s], kTypeValue);
      // Loop-invariant deletion-marker round-trip; re-run each
      // iteration as written in the original.
      TestKey("hello", 1, kTypeDeletion);
    }
  }
}

TEST(FormatTest, InternalKeyShortSeparator) {
  // When user keys are same: the separator must be the start key
  // itself, regardless of the limit's sequence number or value type.
  ASSERT_EQ(IKey("foo", 100, kTypeValue),
            Shorten(IKey("foo", 100, kTypeValue),
                    IKey("foo", 99, kTypeValue)));
  ASSERT_EQ(IKey("foo", 100, kTypeValue),
            Shorten(IKey("foo", 100, kTypeValue),
                    IKey("foo", 101, kTypeValue)));
  ASSERT_EQ(IKey("foo", 100, kTypeValue),
            Shorten(IKey("foo", 100, kTypeValue),
                    IKey("foo", 100, kTypeValue)));
  ASSERT_EQ(IKey("foo", 100, kTypeValue),
            Shorten(IKey("foo", 100, kTypeValue),
                    IKey("foo", 100, kTypeDeletion)));

  // When user keys are misordered: no shortening is possible.
  ASSERT_EQ(IKey("foo", 100, kTypeValue),
            Shorten(IKey("foo", 100, kTypeValue),
                    IKey("bar", 99, kTypeValue)));

  // When user keys are different, but correctly ordered: the user key
  // is shortened ("foo" -> "g") and tagged with the maximal
  // sequence/type so it sorts before every real entry for that key.
  ASSERT_EQ(IKey("g", kMaxSequenceNumber, kValueTypeForSeek),
            Shorten(IKey("foo", 100, kTypeValue),
                    IKey("hello", 200, kTypeValue)));

  // When start user key is prefix of limit user key: unchanged.
  ASSERT_EQ(IKey("foo", 100, kTypeValue),
            Shorten(IKey("foo", 100, kTypeValue),
                    IKey("foobar", 200, kTypeValue)));

  // When limit user key is prefix of start user key: unchanged.
  ASSERT_EQ(IKey("foobar", 100, kTypeValue),
            Shorten(IKey("foobar", 100, kTypeValue),
                    IKey("foo", 200, kTypeValue)));
}

TEST(FormatTest, InternalKeyShortestSuccessor) {
  // A shortenable key is bumped to its one-byte successor.
  ASSERT_EQ(IKey("g", kMaxSequenceNumber, kValueTypeForSeek),
            ShortSuccessor(IKey("foo", 100, kTypeValue)));
  // An all-0xff user key has no shorter successor and stays unchanged.
  ASSERT_EQ(IKey("\xff\xff", 100, kTypeValue),
            ShortSuccessor(IKey("\xff\xff", 100, kTypeValue)));
}

}  // namespace leveldb

// Entry point: runs every TEST registered with the test harness.
int main(int argc, char** argv) {
  return leveldb::test::RunAllTests();
}
{ "pile_set_name": "Github" }
.. Astrometry.net documentation master file, created by sphinx-quickstart on Fri May 18 13:56:06 2012. You can adapt this file completely to your liking, but it should at least contain the root `toctree` directive. Welcome to Astrometry.net's documentation! ========================================== Contents: .. toctree:: :maxdepth: 1 readme changelog build build-index net/api libkd api nova oaq code .. toctree:: :hidden: net/orientation net/api net/models backups api api-util-py pykd ttime Other places: * `This document on the web <http://astrometry.net/doc>`_ * `home page <http://astrometry.net>`_ * `web service <http://nova.astrometry.net>`_ Internal docs: * :ref:`nova_orient` * :ref:`backups` Indices and tables ================== * :ref:`genindex` * :ref:`modindex` * :ref:`search`
{ "pile_set_name": "Github" }
/* Copyright ยฉ 2017 VMware, Inc. All Rights Reserved. SPDX-License-Identifier: BSD-2-Clause Generated by: https://github.com/swagger-api/swagger-codegen.git */ package nsxt import ( "context" "encoding/json" "fmt" "github.com/vmware/go-vmware-nsxt/manager" "github.com/vmware/go-vmware-nsxt/monitoring" "net/http" "net/url" "strings" ) // Linger please var ( _ context.Context ) type TroubleshootingAndMonitoringApiService service /* TroubleshootingAndMonitoringApiService Create a mirror session Create a mirror session * @param ctx context.Context Authentication Context @param portMirroringSession @return manager.PortMirroringSession*/ func (a *TroubleshootingAndMonitoringApiService) CreatePortMirroringSessions(ctx context.Context, portMirroringSession manager.PortMirroringSession) (manager.PortMirroringSession, *http.Response, error) { var ( localVarHttpMethod = strings.ToUpper("Post") localVarPostBody interface{} localVarFileName string localVarFileBytes []byte successPayload manager.PortMirroringSession ) // create path and map variables localVarPath := a.client.cfg.BasePath + "/mirror-sessions" localVarHeaderParams := make(map[string]string) localVarQueryParams := url.Values{} localVarFormParams := url.Values{} // to determine the Content-Type header localVarHttpContentTypes := []string{"application/json"} // set Content-Type header localVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes) if localVarHttpContentType != "" { localVarHeaderParams["Content-Type"] = localVarHttpContentType } // to determine the Accept header localVarHttpHeaderAccepts := []string{ "application/json", } // set Accept header localVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts) if localVarHttpHeaderAccept != "" { localVarHeaderParams["Accept"] = localVarHttpHeaderAccept } // body params localVarPostBody = &portMirroringSession r, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, 
localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes) if err != nil { return successPayload, nil, err } localVarHttpResponse, err := a.client.callAPI(r) if err != nil || localVarHttpResponse == nil { return successPayload, localVarHttpResponse, err } defer localVarHttpResponse.Body.Close() if localVarHttpResponse.StatusCode >= 300 { return successPayload, localVarHttpResponse, reportError(localVarHttpResponse.Status) } if err = json.NewDecoder(localVarHttpResponse.Body).Decode(&successPayload); err != nil { return successPayload, localVarHttpResponse, err } return successPayload, localVarHttpResponse, err } /* TroubleshootingAndMonitoringApiService Initiate a monitoring.Traceflow Operation on the Specified Port Initiate a monitoring.Traceflow Operation on the Specified Port * @param ctx context.Context Authentication Context @param traceflowRequest @return monitoringTraceflow*/ func (a *TroubleshootingAndMonitoringApiService) CreateTraceflow(ctx context.Context, traceflowRequest monitoring.TraceflowRequest) (monitoring.Traceflow, *http.Response, error) { var ( localVarHttpMethod = strings.ToUpper("Post") localVarPostBody interface{} localVarFileName string localVarFileBytes []byte successPayload monitoring.Traceflow ) // create path and map variables localVarPath := a.client.cfg.BasePath + "/traceflows" localVarHeaderParams := make(map[string]string) localVarQueryParams := url.Values{} localVarFormParams := url.Values{} // to determine the Content-Type header localVarHttpContentTypes := []string{"application/json"} // set Content-Type header localVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes) if localVarHttpContentType != "" { localVarHeaderParams["Content-Type"] = localVarHttpContentType } // to determine the Accept header localVarHttpHeaderAccepts := []string{ "application/json", } // set Accept header localVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts) if localVarHttpHeaderAccept != "" { 
localVarHeaderParams["Accept"] = localVarHttpHeaderAccept } // body params localVarPostBody = &traceflowRequest r, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes) if err != nil { return successPayload, nil, err } localVarHttpResponse, err := a.client.callAPI(r) if err != nil || localVarHttpResponse == nil { return successPayload, localVarHttpResponse, err } defer localVarHttpResponse.Body.Close() if localVarHttpResponse.StatusCode >= 300 { return successPayload, localVarHttpResponse, reportError(localVarHttpResponse.Status) } if err = json.NewDecoder(localVarHttpResponse.Body).Decode(&successPayload); err != nil { return successPayload, localVarHttpResponse, err } return successPayload, localVarHttpResponse, err } /* TroubleshootingAndMonitoringApiService Delete the mirror session Delete the mirror session * @param ctx context.Context Authentication Context @param mirrorSessionId @return */ func (a *TroubleshootingAndMonitoringApiService) DeletePortMirroringSession(ctx context.Context, mirrorSessionId string) (*http.Response, error) { var ( localVarHttpMethod = strings.ToUpper("Delete") localVarPostBody interface{} localVarFileName string localVarFileBytes []byte ) // create path and map variables localVarPath := a.client.cfg.BasePath + "/mirror-sessions/{mirror-session-id}" localVarPath = strings.Replace(localVarPath, "{"+"mirror-session-id"+"}", fmt.Sprintf("%v", mirrorSessionId), -1) localVarHeaderParams := make(map[string]string) localVarQueryParams := url.Values{} localVarFormParams := url.Values{} // to determine the Content-Type header localVarHttpContentTypes := []string{"application/json"} // set Content-Type header localVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes) if localVarHttpContentType != "" { localVarHeaderParams["Content-Type"] = localVarHttpContentType } // to determine the Accept 
header localVarHttpHeaderAccepts := []string{ "application/json", } // set Accept header localVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts) if localVarHttpHeaderAccept != "" { localVarHeaderParams["Accept"] = localVarHttpHeaderAccept } r, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes) if err != nil { return nil, err } localVarHttpResponse, err := a.client.callAPI(r) if err != nil || localVarHttpResponse == nil { return localVarHttpResponse, err } defer localVarHttpResponse.Body.Close() if localVarHttpResponse.StatusCode >= 300 { return localVarHttpResponse, reportError(localVarHttpResponse.Status) } return localVarHttpResponse, err } /* TroubleshootingAndMonitoringApiService Delete the monitoring.Traceflow round Delete the monitoring.Traceflow round * @param ctx context.Context Authentication Context @param traceflowId @return */ func (a *TroubleshootingAndMonitoringApiService) DeleteTraceflow(ctx context.Context, traceflowId string) (*http.Response, error) { var ( localVarHttpMethod = strings.ToUpper("Delete") localVarPostBody interface{} localVarFileName string localVarFileBytes []byte ) // create path and map variables localVarPath := a.client.cfg.BasePath + "/traceflows/{traceflow-id}" localVarPath = strings.Replace(localVarPath, "{"+"traceflow-id"+"}", fmt.Sprintf("%v", traceflowId), -1) localVarHeaderParams := make(map[string]string) localVarQueryParams := url.Values{} localVarFormParams := url.Values{} // to determine the Content-Type header localVarHttpContentTypes := []string{"application/json"} // set Content-Type header localVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes) if localVarHttpContentType != "" { localVarHeaderParams["Content-Type"] = localVarHttpContentType } // to determine the Accept header localVarHttpHeaderAccepts := []string{ "application/json", } // set 
Accept header localVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts) if localVarHttpHeaderAccept != "" { localVarHeaderParams["Accept"] = localVarHttpHeaderAccept } r, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes) if err != nil { return nil, err } localVarHttpResponse, err := a.client.callAPI(r) if err != nil || localVarHttpResponse == nil { return localVarHttpResponse, err } defer localVarHttpResponse.Body.Close() if localVarHttpResponse.StatusCode >= 300 { return localVarHttpResponse, reportError(localVarHttpResponse.Status) } return localVarHttpResponse, err } /* TroubleshootingAndMonitoringApiService Get networking entities between two logical ports with VIF attachment Get networking entities between two logical ports with VIF attachment * @param ctx context.Context Authentication Context @param lportId ID of source port @param peerPortId ID of peer port @return monitoring.PortConnectionEntities*/ func (a *TroubleshootingAndMonitoringApiService) GetForwardingPath(ctx context.Context, lportId string, peerPortId string) (manager.PortConnectionEntities, *http.Response, error) { var ( localVarHttpMethod = strings.ToUpper("Get") localVarPostBody interface{} localVarFileName string localVarFileBytes []byte successPayload manager.PortConnectionEntities ) // create path and map variables localVarPath := a.client.cfg.BasePath + "/logical-ports/{lport-id}/forwarding-path" localVarPath = strings.Replace(localVarPath, "{"+"lport-id"+"}", fmt.Sprintf("%v", lportId), -1) localVarHeaderParams := make(map[string]string) localVarQueryParams := url.Values{} localVarFormParams := url.Values{} localVarQueryParams.Add("peer_port_id", parameterToString(peerPortId, "")) // to determine the Content-Type header localVarHttpContentTypes := []string{"application/json"} // set Content-Type header localVarHttpContentType := 
selectHeaderContentType(localVarHttpContentTypes) if localVarHttpContentType != "" { localVarHeaderParams["Content-Type"] = localVarHttpContentType } // to determine the Accept header localVarHttpHeaderAccepts := []string{ "application/json", } // set Accept header localVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts) if localVarHttpHeaderAccept != "" { localVarHeaderParams["Accept"] = localVarHttpHeaderAccept } r, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes) if err != nil { return successPayload, nil, err } localVarHttpResponse, err := a.client.callAPI(r) if err != nil || localVarHttpResponse == nil { return successPayload, localVarHttpResponse, err } defer localVarHttpResponse.Body.Close() if localVarHttpResponse.StatusCode >= 300 { return successPayload, localVarHttpResponse, reportError(localVarHttpResponse.Status) } if err = json.NewDecoder(localVarHttpResponse.Body).Decode(&successPayload); err != nil { return successPayload, localVarHttpResponse, err } return successPayload, localVarHttpResponse, err } /* TroubleshootingAndMonitoringApiService Get high-level summary of a transport zone Get high-level summary of a transport zone * @param ctx context.Context Authentication Context @param zoneId ID of transport zone @param optional (nil or map[string]interface{}) with one or more of: @param "source" (string) Data source type. 
@return monitoring.HeatMapTransportZoneStatus*/ func (a *TroubleshootingAndMonitoringApiService) GetHeatmapTransportZoneStatus(ctx context.Context, zoneId string, localVarOptionals map[string]interface{}) (monitoring.HeatMapTransportZoneStatus, *http.Response, error) { var ( localVarHttpMethod = strings.ToUpper("Get") localVarPostBody interface{} localVarFileName string localVarFileBytes []byte successPayload monitoring.HeatMapTransportZoneStatus ) // create path and map variables localVarPath := a.client.cfg.BasePath + "/transport-zones/{zone-id}/status" localVarPath = strings.Replace(localVarPath, "{"+"zone-id"+"}", fmt.Sprintf("%v", zoneId), -1) localVarHeaderParams := make(map[string]string) localVarQueryParams := url.Values{} localVarFormParams := url.Values{} if err := typeCheckParameter(localVarOptionals["source"], "string", "source"); err != nil { return successPayload, nil, err } if localVarTempParam, localVarOk := localVarOptionals["source"].(string); localVarOk { localVarQueryParams.Add("source", parameterToString(localVarTempParam, "")) } // to determine the Content-Type header localVarHttpContentTypes := []string{"application/json"} // set Content-Type header localVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes) if localVarHttpContentType != "" { localVarHeaderParams["Content-Type"] = localVarHttpContentType } // to determine the Accept header localVarHttpHeaderAccepts := []string{ "application/json", } // set Accept header localVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts) if localVarHttpHeaderAccept != "" { localVarHeaderParams["Accept"] = localVarHttpHeaderAccept } r, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes) if err != nil { return successPayload, nil, err } localVarHttpResponse, err := a.client.callAPI(r) if err != nil || localVarHttpResponse == nil { return 
successPayload, localVarHttpResponse, err } defer localVarHttpResponse.Body.Close() if localVarHttpResponse.StatusCode >= 300 { return successPayload, localVarHttpResponse, reportError(localVarHttpResponse.Status) } if err = json.NewDecoder(localVarHttpResponse.Body).Decode(&successPayload); err != nil { return successPayload, localVarHttpResponse, err } return successPayload, localVarHttpResponse, err } /* TroubleshootingAndMonitoringApiService Get the list of IPFIX observation points Get the list of IPFIX observation points * @param ctx context.Context Authentication Context @return monitoring.IpfixObsPointsListResult*/ func (a *TroubleshootingAndMonitoringApiService) GetIpfixObsPoints(ctx context.Context) (monitoring.IpfixObsPointsListResult, *http.Response, error) { var ( localVarHttpMethod = strings.ToUpper("Get") localVarPostBody interface{} localVarFileName string localVarFileBytes []byte successPayload monitoring.IpfixObsPointsListResult ) // create path and map variables localVarPath := a.client.cfg.BasePath + "/ipfix-obs-points" localVarHeaderParams := make(map[string]string) localVarQueryParams := url.Values{} localVarFormParams := url.Values{} // to determine the Content-Type header localVarHttpContentTypes := []string{"application/json"} // set Content-Type header localVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes) if localVarHttpContentType != "" { localVarHeaderParams["Content-Type"] = localVarHttpContentType } // to determine the Accept header localVarHttpHeaderAccepts := []string{ "application/json", } // set Accept header localVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts) if localVarHttpHeaderAccept != "" { localVarHeaderParams["Accept"] = localVarHttpHeaderAccept } r, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes) if err != nil { return successPayload, nil, err 
} localVarHttpResponse, err := a.client.callAPI(r) if err != nil || localVarHttpResponse == nil { return successPayload, localVarHttpResponse, err } defer localVarHttpResponse.Body.Close() if localVarHttpResponse.StatusCode >= 300 { return successPayload, localVarHttpResponse, reportError(localVarHttpResponse.Status) } if err = json.NewDecoder(localVarHttpResponse.Body).Decode(&successPayload); err != nil { return successPayload, localVarHttpResponse, err } return successPayload, localVarHttpResponse, err } /* TroubleshootingAndMonitoringApiService Get the mirror session Get the mirror session * @param ctx context.Context Authentication Context @param mirrorSessionId @return manager.PortMirroringSession*/ func (a *TroubleshootingAndMonitoringApiService) GetPortMirroringSession(ctx context.Context, mirrorSessionId string) (manager.PortMirroringSession, *http.Response, error) { var ( localVarHttpMethod = strings.ToUpper("Get") localVarPostBody interface{} localVarFileName string localVarFileBytes []byte successPayload manager.PortMirroringSession ) // create path and map variables localVarPath := a.client.cfg.BasePath + "/mirror-sessions/{mirror-session-id}" localVarPath = strings.Replace(localVarPath, "{"+"mirror-session-id"+"}", fmt.Sprintf("%v", mirrorSessionId), -1) localVarHeaderParams := make(map[string]string) localVarQueryParams := url.Values{} localVarFormParams := url.Values{} // to determine the Content-Type header localVarHttpContentTypes := []string{"application/json"} // set Content-Type header localVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes) if localVarHttpContentType != "" { localVarHeaderParams["Content-Type"] = localVarHttpContentType } // to determine the Accept header localVarHttpHeaderAccepts := []string{ "application/json", } // set Accept header localVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts) if localVarHttpHeaderAccept != "" { localVarHeaderParams["Accept"] = localVarHttpHeaderAccept } r, 
err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes) if err != nil { return successPayload, nil, err } localVarHttpResponse, err := a.client.callAPI(r) if err != nil || localVarHttpResponse == nil { return successPayload, localVarHttpResponse, err } defer localVarHttpResponse.Body.Close() if localVarHttpResponse.StatusCode >= 300 { return successPayload, localVarHttpResponse, reportError(localVarHttpResponse.Status) } if err = json.NewDecoder(localVarHttpResponse.Body).Decode(&successPayload); err != nil { return successPayload, localVarHttpResponse, err } return successPayload, localVarHttpResponse, err } /* TroubleshootingAndMonitoringApiService Read global switch IPFIX export configuration Read global switch IPFIX export configuration * @param ctx context.Context Authentication Context @return monitoring.IpfixObsPointConfig*/ func (a *TroubleshootingAndMonitoringApiService) GetSwitchIpfixConfig(ctx context.Context) (monitoring.IpfixObsPointConfig, *http.Response, error) { var ( localVarHttpMethod = strings.ToUpper("Get") localVarPostBody interface{} localVarFileName string localVarFileBytes []byte successPayload monitoring.IpfixObsPointConfig ) // create path and map variables localVarPath := a.client.cfg.BasePath + "/ipfix-obs-points/switch-global" localVarHeaderParams := make(map[string]string) localVarQueryParams := url.Values{} localVarFormParams := url.Values{} // to determine the Content-Type header localVarHttpContentTypes := []string{"application/json"} // set Content-Type header localVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes) if localVarHttpContentType != "" { localVarHeaderParams["Content-Type"] = localVarHttpContentType } // to determine the Accept header localVarHttpHeaderAccepts := []string{ "application/json", } // set Accept header localVarHttpHeaderAccept := 
selectHeaderAccept(localVarHttpHeaderAccepts) if localVarHttpHeaderAccept != "" { localVarHeaderParams["Accept"] = localVarHttpHeaderAccept } r, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes) if err != nil { return successPayload, nil, err } localVarHttpResponse, err := a.client.callAPI(r) if err != nil || localVarHttpResponse == nil { return successPayload, localVarHttpResponse, err } defer localVarHttpResponse.Body.Close() if localVarHttpResponse.StatusCode >= 300 { return successPayload, localVarHttpResponse, reportError(localVarHttpResponse.Status) } if err = json.NewDecoder(localVarHttpResponse.Body).Decode(&successPayload); err != nil { return successPayload, localVarHttpResponse, err } return successPayload, localVarHttpResponse, err } /* TroubleshootingAndMonitoringApiService Get the monitoring.Traceflow round status and result summary Get the monitoring.Traceflow round status and result summary * @param ctx context.Context Authentication Context @param traceflowId @return monitoring.Traceflow*/ func (a *TroubleshootingAndMonitoringApiService) GetTraceflow(ctx context.Context, traceflowId string) (monitoring.Traceflow, *http.Response, error) { var ( localVarHttpMethod = strings.ToUpper("Get") localVarPostBody interface{} localVarFileName string localVarFileBytes []byte successPayload monitoring.Traceflow ) // create path and map variables localVarPath := a.client.cfg.BasePath + "/traceflows/{traceflow-id}" localVarPath = strings.Replace(localVarPath, "{"+"traceflow-id"+"}", fmt.Sprintf("%v", traceflowId), -1) localVarHeaderParams := make(map[string]string) localVarQueryParams := url.Values{} localVarFormParams := url.Values{} // to determine the Content-Type header localVarHttpContentTypes := []string{"application/json"} // set Content-Type header localVarHttpContentType := 
selectHeaderContentType(localVarHttpContentTypes) if localVarHttpContentType != "" { localVarHeaderParams["Content-Type"] = localVarHttpContentType } // to determine the Accept header localVarHttpHeaderAccepts := []string{ "application/json", } // set Accept header localVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts) if localVarHttpHeaderAccept != "" { localVarHeaderParams["Accept"] = localVarHttpHeaderAccept } r, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes) if err != nil { return successPayload, nil, err } localVarHttpResponse, err := a.client.callAPI(r) if err != nil || localVarHttpResponse == nil { return successPayload, localVarHttpResponse, err } defer localVarHttpResponse.Body.Close() if localVarHttpResponse.StatusCode >= 300 { return successPayload, localVarHttpResponse, reportError(localVarHttpResponse.Status) } if err = json.NewDecoder(localVarHttpResponse.Body).Decode(&successPayload); err != nil { return successPayload, localVarHttpResponse, err } return successPayload, localVarHttpResponse, err } /* TroubleshootingAndMonitoringApiService Get observations for the monitoring.Traceflow round Get observations for the monitoring.Traceflow round * @param ctx context.Context Authentication Context @param traceflowId @param optional (nil or map[string]interface{}) with one or more of: @param "componentName" (string) Observations having the given component name will be listed. @param "componentType" (string) Observations having the given component type will be listed. 
@param "cursor" (string) Opaque cursor to be used for getting next page of records (supplied by current result page) @param "includedFields" (string) Comma separated list of fields that should be included to result of query @param "pageSize" (int64) Maximum number of results to return in this page (server may return fewer) @param "resourceType" (string) The type of observations that will be listed. @param "sortAscending" (bool) @param "sortBy" (string) Field by which records are sorted @param "transportNodeName" (string) Observations having the given transport node name will be listed. @return monitoring.TraceflowObservationListResult*/ func (a *TroubleshootingAndMonitoringApiService) GetTraceflowObservations(ctx context.Context, traceflowId string, localVarOptionals map[string]interface{}) (monitoring.TraceflowObservationListResult, *http.Response, error) { var ( localVarHttpMethod = strings.ToUpper("Get") localVarPostBody interface{} localVarFileName string localVarFileBytes []byte successPayload monitoring.TraceflowObservationListResult ) // create path and map variables localVarPath := a.client.cfg.BasePath + "/traceflows/{traceflow-id}/observations" localVarPath = strings.Replace(localVarPath, "{"+"traceflow-id"+"}", fmt.Sprintf("%v", traceflowId), -1) localVarHeaderParams := make(map[string]string) localVarQueryParams := url.Values{} localVarFormParams := url.Values{} if err := typeCheckParameter(localVarOptionals["componentName"], "string", "componentName"); err != nil { return successPayload, nil, err } if err := typeCheckParameter(localVarOptionals["componentType"], "string", "componentType"); err != nil { return successPayload, nil, err } if err := typeCheckParameter(localVarOptionals["cursor"], "string", "cursor"); err != nil { return successPayload, nil, err } if err := typeCheckParameter(localVarOptionals["includedFields"], "string", "includedFields"); err != nil { return successPayload, nil, err } if err := 
typeCheckParameter(localVarOptionals["pageSize"], "int64", "pageSize"); err != nil { return successPayload, nil, err } if err := typeCheckParameter(localVarOptionals["resourceType"], "string", "resourceType"); err != nil { return successPayload, nil, err } if err := typeCheckParameter(localVarOptionals["sortAscending"], "bool", "sortAscending"); err != nil { return successPayload, nil, err } if err := typeCheckParameter(localVarOptionals["sortBy"], "string", "sortBy"); err != nil { return successPayload, nil, err } if err := typeCheckParameter(localVarOptionals["transportNodeName"], "string", "transportNodeName"); err != nil { return successPayload, nil, err } if localVarTempParam, localVarOk := localVarOptionals["componentName"].(string); localVarOk { localVarQueryParams.Add("component_name", parameterToString(localVarTempParam, "")) } if localVarTempParam, localVarOk := localVarOptionals["componentType"].(string); localVarOk { localVarQueryParams.Add("component_type", parameterToString(localVarTempParam, "")) } if localVarTempParam, localVarOk := localVarOptionals["cursor"].(string); localVarOk { localVarQueryParams.Add("cursor", parameterToString(localVarTempParam, "")) } if localVarTempParam, localVarOk := localVarOptionals["includedFields"].(string); localVarOk { localVarQueryParams.Add("included_fields", parameterToString(localVarTempParam, "")) } if localVarTempParam, localVarOk := localVarOptionals["pageSize"].(int64); localVarOk { localVarQueryParams.Add("page_size", parameterToString(localVarTempParam, "")) } if localVarTempParam, localVarOk := localVarOptionals["resourceType"].(string); localVarOk { localVarQueryParams.Add("resource_type", parameterToString(localVarTempParam, "")) } if localVarTempParam, localVarOk := localVarOptionals["sortAscending"].(bool); localVarOk { localVarQueryParams.Add("sort_ascending", parameterToString(localVarTempParam, "")) } if localVarTempParam, localVarOk := localVarOptionals["sortBy"].(string); localVarOk { 
localVarQueryParams.Add("sort_by", parameterToString(localVarTempParam, "")) } if localVarTempParam, localVarOk := localVarOptionals["transportNodeName"].(string); localVarOk { localVarQueryParams.Add("transport_node_name", parameterToString(localVarTempParam, "")) } // to determine the Content-Type header localVarHttpContentTypes := []string{"application/json"} // set Content-Type header localVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes) if localVarHttpContentType != "" { localVarHeaderParams["Content-Type"] = localVarHttpContentType } // to determine the Accept header localVarHttpHeaderAccepts := []string{ "application/json", } // set Accept header localVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts) if localVarHttpHeaderAccept != "" { localVarHeaderParams["Accept"] = localVarHttpHeaderAccept } r, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes) if err != nil { return successPayload, nil, err } localVarHttpResponse, err := a.client.callAPI(r) if err != nil || localVarHttpResponse == nil { return successPayload, localVarHttpResponse, err } defer localVarHttpResponse.Body.Close() if localVarHttpResponse.StatusCode >= 300 { return successPayload, localVarHttpResponse, reportError(localVarHttpResponse.Status) } if err = json.NewDecoder(localVarHttpResponse.Body).Decode(&successPayload); err != nil { return successPayload, localVarHttpResponse, err } return successPayload, localVarHttpResponse, err } /* TroubleshootingAndMonitoringApiService Creates a status report of transport nodes of all the transport zones Creates a status report of transport nodes of all the transport zones * @param ctx context.Context Authentication Context @param optional (nil or map[string]interface{}) with one or more of: @param "source" (string) Data source type. 
@param "status" (string) Transport node @return */ func (a *TroubleshootingAndMonitoringApiService) GetTransportNodeReport(ctx context.Context, localVarOptionals map[string]interface{}) (*http.Response, error) { var ( localVarHttpMethod = strings.ToUpper("Get") localVarPostBody interface{} localVarFileName string localVarFileBytes []byte ) // create path and map variables localVarPath := a.client.cfg.BasePath + "/transport-zones/transport-node-status-report" localVarHeaderParams := make(map[string]string) localVarQueryParams := url.Values{} localVarFormParams := url.Values{} if err := typeCheckParameter(localVarOptionals["source"], "string", "source"); err != nil { return nil, err } if err := typeCheckParameter(localVarOptionals["status"], "string", "status"); err != nil { return nil, err } if localVarTempParam, localVarOk := localVarOptionals["source"].(string); localVarOk { localVarQueryParams.Add("source", parameterToString(localVarTempParam, "")) } if localVarTempParam, localVarOk := localVarOptionals["status"].(string); localVarOk { localVarQueryParams.Add("status", parameterToString(localVarTempParam, "")) } // to determine the Content-Type header localVarHttpContentTypes := []string{"application/json"} // set Content-Type header localVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes) if localVarHttpContentType != "" { localVarHeaderParams["Content-Type"] = localVarHttpContentType } // to determine the Accept header localVarHttpHeaderAccepts := []string{ "application/octet-stream", } // set Accept header localVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts) if localVarHttpHeaderAccept != "" { localVarHeaderParams["Accept"] = localVarHttpHeaderAccept } r, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes) if err != nil { return nil, err } localVarHttpResponse, err := 
a.client.callAPI(r) if err != nil || localVarHttpResponse == nil { return localVarHttpResponse, err } defer localVarHttpResponse.Body.Close() if localVarHttpResponse.StatusCode >= 300 { return localVarHttpResponse, reportError(localVarHttpResponse.Status) } return localVarHttpResponse, err } /* TroubleshootingAndMonitoringApiService Creates a status report of transport nodes in a transport zone Creates a status report of transport nodes in a transport zone * @param ctx context.Context Authentication Context @param zoneId ID of transport zone @param optional (nil or map[string]interface{}) with one or more of: @param "source" (string) Data source type. @param "status" (string) Transport node @return */ func (a *TroubleshootingAndMonitoringApiService) GetTransportNodeReportForATransportZone(ctx context.Context, zoneId string, localVarOptionals map[string]interface{}) (*http.Response, error) { var ( localVarHttpMethod = strings.ToUpper("Get") localVarPostBody interface{} localVarFileName string localVarFileBytes []byte ) // create path and map variables localVarPath := a.client.cfg.BasePath + "/transport-zones/{zone-id}/transport-node-status-report" localVarPath = strings.Replace(localVarPath, "{"+"zone-id"+"}", fmt.Sprintf("%v", zoneId), -1) localVarHeaderParams := make(map[string]string) localVarQueryParams := url.Values{} localVarFormParams := url.Values{} if err := typeCheckParameter(localVarOptionals["source"], "string", "source"); err != nil { return nil, err } if err := typeCheckParameter(localVarOptionals["status"], "string", "status"); err != nil { return nil, err } if localVarTempParam, localVarOk := localVarOptionals["source"].(string); localVarOk { localVarQueryParams.Add("source", parameterToString(localVarTempParam, "")) } if localVarTempParam, localVarOk := localVarOptionals["status"].(string); localVarOk { localVarQueryParams.Add("status", parameterToString(localVarTempParam, "")) } // to determine the Content-Type header localVarHttpContentTypes := 
[]string{"application/json"}

	// set Content-Type header
	localVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)
	if localVarHttpContentType != "" {
		localVarHeaderParams["Content-Type"] = localVarHttpContentType
	}
	// to determine the Accept header
	localVarHttpHeaderAccepts := []string{
		"application/octet-stream",
	}
	// set Accept header
	localVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)
	if localVarHttpHeaderAccept != "" {
		localVarHeaderParams["Accept"] = localVarHttpHeaderAccept
	}
	r, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)
	if err != nil {
		return nil, err
	}
	localVarHttpResponse, err := a.client.callAPI(r)
	if err != nil || localVarHttpResponse == nil {
		return localVarHttpResponse, err
	}
	defer localVarHttpResponse.Body.Close()
	if localVarHttpResponse.StatusCode >= 300 {
		return localVarHttpResponse, reportError(localVarHttpResponse.Status)
	}
	return localVarHttpResponse, err
}

/* TroubleshootingAndMonitoringApiService Read status of a transport node
Read status of a transport node
* @param ctx context.Context Authentication Context
@param nodeId ID of transport node
@param optional (nil or map[string]interface{}) with one or more of:
@param "source" (string) Data source type.
@return manager.TransportNodeStatus*/
func (a *TroubleshootingAndMonitoringApiService) GetTransportNodeStatus(ctx context.Context, nodeId string, localVarOptionals map[string]interface{}) (manager.TransportNodeStatus, *http.Response, error) {
	var (
		localVarHttpMethod = strings.ToUpper("Get")
		localVarPostBody   interface{}
		localVarFileName   string
		localVarFileBytes  []byte
		successPayload     manager.TransportNodeStatus
	)
	// create path and map variables
	localVarPath := a.client.cfg.BasePath + "/transport-nodes/{node-id}/status"
	localVarPath = strings.Replace(localVarPath, "{"+"node-id"+"}", fmt.Sprintf("%v", nodeId), -1)

	localVarHeaderParams := make(map[string]string)
	localVarQueryParams := url.Values{}
	localVarFormParams := url.Values{}

	// validate optional parameter types before use
	if err := typeCheckParameter(localVarOptionals["source"], "string", "source"); err != nil {
		return successPayload, nil, err
	}

	if localVarTempParam, localVarOk := localVarOptionals["source"].(string); localVarOk {
		localVarQueryParams.Add("source", parameterToString(localVarTempParam, ""))
	}
	// to determine the Content-Type header
	localVarHttpContentTypes := []string{"application/json"}

	// set Content-Type header
	localVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)
	if localVarHttpContentType != "" {
		localVarHeaderParams["Content-Type"] = localVarHttpContentType
	}
	// to determine the Accept header
	localVarHttpHeaderAccepts := []string{
		"application/json",
	}
	// set Accept header
	localVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)
	if localVarHttpHeaderAccept != "" {
		localVarHeaderParams["Accept"] = localVarHttpHeaderAccept
	}
	r, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)
	if err != nil {
		return successPayload, nil, err
	}
	localVarHttpResponse, err := a.client.callAPI(r)
	if err != nil || localVarHttpResponse == nil {
		return successPayload, localVarHttpResponse, err
	}
	defer localVarHttpResponse.Body.Close()
	if localVarHttpResponse.StatusCode >= 300 {
		return successPayload, localVarHttpResponse, reportError(localVarHttpResponse.Status)
	}
	if err = json.NewDecoder(localVarHttpResponse.Body).Decode(&successPayload); err != nil {
		return successPayload, localVarHttpResponse, err
	}
	return successPayload, localVarHttpResponse, err
}

/* TroubleshootingAndMonitoringApiService List available node logs
Returns the number of log files and lists the log files that reside on the NSX virtual appliance. The list includes the filename, file size, and last-modified time in milliseconds since epoch (1 January 1970) for each log file. Knowing the last-modified time with millisecond accuracy since epoch is helpful when you are comparing two times, such as the time of a POST request and the end time on a server.
* @param ctx context.Context Authentication Context
@return monitoring.NodeLogPropertiesListResult*/
func (a *TroubleshootingAndMonitoringApiService) ListNodeLogs(ctx context.Context) (monitoring.NodeLogPropertiesListResult, *http.Response, error) {
	var (
		localVarHttpMethod = strings.ToUpper("Get")
		localVarPostBody   interface{}
		localVarFileName   string
		localVarFileBytes  []byte
		successPayload     monitoring.NodeLogPropertiesListResult
	)
	// create path and map variables
	localVarPath := a.client.cfg.BasePath + "/node/logs"

	localVarHeaderParams := make(map[string]string)
	localVarQueryParams := url.Values{}
	localVarFormParams := url.Values{}

	// to determine the Content-Type header
	localVarHttpContentTypes := []string{"application/json"}

	// set Content-Type header
	localVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)
	if localVarHttpContentType != "" {
		localVarHeaderParams["Content-Type"] = localVarHttpContentType
	}
	// to determine the Accept header
	localVarHttpHeaderAccepts := []string{
		"application/json",
	}
	// set Accept header
	localVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)
	if
localVarHttpHeaderAccept != "" {
		localVarHeaderParams["Accept"] = localVarHttpHeaderAccept
	}
	r, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)
	if err != nil {
		return successPayload, nil, err
	}
	localVarHttpResponse, err := a.client.callAPI(r)
	if err != nil || localVarHttpResponse == nil {
		return successPayload, localVarHttpResponse, err
	}
	defer localVarHttpResponse.Body.Close()
	if localVarHttpResponse.StatusCode >= 300 {
		return successPayload, localVarHttpResponse, reportError(localVarHttpResponse.Status)
	}
	if err = json.NewDecoder(localVarHttpResponse.Body).Decode(&successPayload); err != nil {
		return successPayload, localVarHttpResponse, err
	}
	return successPayload, localVarHttpResponse, err
}

/* TroubleshootingAndMonitoringApiService List all mirror sessions
List all mirror sessions
* @param ctx context.Context Authentication Context
@param optional (nil or map[string]interface{}) with one or more of:
@param "cursor" (string) Opaque cursor to be used for getting next page of records (supplied by current result page)
@param "includedFields" (string) Comma separated list of fields that should be included to result of query
@param "pageSize" (int64) Maximum number of results to return in this page (server may return fewer)
@param "sortAscending" (bool)
@param "sortBy" (string) Field by which records are sorted
@return manager.PortMirroringSessionListResult*/
func (a *TroubleshootingAndMonitoringApiService) ListPortMirroringSession(ctx context.Context, localVarOptionals map[string]interface{}) (manager.PortMirroringSessionListResult, *http.Response, error) {
	var (
		localVarHttpMethod = strings.ToUpper("Get")
		localVarPostBody   interface{}
		localVarFileName   string
		localVarFileBytes  []byte
		successPayload     manager.PortMirroringSessionListResult
	)
	// create path and map variables
	localVarPath := a.client.cfg.BasePath + "/mirror-sessions"

	localVarHeaderParams := make(map[string]string)
	localVarQueryParams := url.Values{}
	localVarFormParams := url.Values{}

	// validate optional parameter types before use
	if err := typeCheckParameter(localVarOptionals["cursor"], "string", "cursor"); err != nil {
		return successPayload, nil, err
	}
	if err := typeCheckParameter(localVarOptionals["includedFields"], "string", "includedFields"); err != nil {
		return successPayload, nil, err
	}
	if err := typeCheckParameter(localVarOptionals["pageSize"], "int64", "pageSize"); err != nil {
		return successPayload, nil, err
	}
	if err := typeCheckParameter(localVarOptionals["sortAscending"], "bool", "sortAscending"); err != nil {
		return successPayload, nil, err
	}
	if err := typeCheckParameter(localVarOptionals["sortBy"], "string", "sortBy"); err != nil {
		return successPayload, nil, err
	}

	if localVarTempParam, localVarOk := localVarOptionals["cursor"].(string); localVarOk {
		localVarQueryParams.Add("cursor", parameterToString(localVarTempParam, ""))
	}
	if localVarTempParam, localVarOk := localVarOptionals["includedFields"].(string); localVarOk {
		localVarQueryParams.Add("included_fields", parameterToString(localVarTempParam, ""))
	}
	if localVarTempParam, localVarOk := localVarOptionals["pageSize"].(int64); localVarOk {
		localVarQueryParams.Add("page_size", parameterToString(localVarTempParam, ""))
	}
	if localVarTempParam, localVarOk := localVarOptionals["sortAscending"].(bool); localVarOk {
		localVarQueryParams.Add("sort_ascending", parameterToString(localVarTempParam, ""))
	}
	if localVarTempParam, localVarOk := localVarOptionals["sortBy"].(string); localVarOk {
		localVarQueryParams.Add("sort_by", parameterToString(localVarTempParam, ""))
	}
	// to determine the Content-Type header
	localVarHttpContentTypes := []string{"application/json"}

	// set Content-Type header
	localVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)
	if localVarHttpContentType != "" {
		localVarHeaderParams["Content-Type"] = localVarHttpContentType
	}
	// to determine the Accept header
	localVarHttpHeaderAccepts := []string{
		"application/json",
	}
	// set Accept header
	localVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)
	if localVarHttpHeaderAccept != "" {
		localVarHeaderParams["Accept"] = localVarHttpHeaderAccept
	}
	r, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)
	if err != nil {
		return successPayload, nil, err
	}
	localVarHttpResponse, err := a.client.callAPI(r)
	if err != nil || localVarHttpResponse == nil {
		return successPayload, localVarHttpResponse, err
	}
	defer localVarHttpResponse.Body.Close()
	if localVarHttpResponse.StatusCode >= 300 {
		return successPayload, localVarHttpResponse, reportError(localVarHttpResponse.Status)
	}
	if err = json.NewDecoder(localVarHttpResponse.Body).Decode(&successPayload); err != nil {
		return successPayload, localVarHttpResponse, err
	}
	return successPayload, localVarHttpResponse, err
}

/* TroubleshootingAndMonitoringApiService Read status of all transport nodes with tunnel connections to transport node
Read status of all transport nodes with tunnel connections to transport node
* @param ctx context.Context Authentication Context
@param nodeId ID of transport node
@param optional (nil or map[string]interface{}) with one or more of:
@param "bfdDiagnosticCode" (string) BFD diagnostic code of Tunnel as defined in RFC 5880
@param "cursor" (string) Opaque cursor to be used for getting next page of records (supplied by current result page)
@param "includedFields" (string) Comma separated list of fields that should be included to result of query
@param "pageSize" (int64) Maximum number of results to return in this page (server may return fewer)
@param "sortAscending" (bool)
@param "sortBy" (string) Field by which records are sorted
@param "source" (string) Data source type.
@param "tunnelStatus" (string) Tunnel Status @return manager.TransportNodeStatusListResult*/ func (a *TroubleshootingAndMonitoringApiService) ListRemoteTransportNodeStatus(ctx context.Context, nodeId string, localVarOptionals map[string]interface{}) (manager.TransportNodeStatusListResult, *http.Response, error) { var ( localVarHttpMethod = strings.ToUpper("Get") localVarPostBody interface{} localVarFileName string localVarFileBytes []byte successPayload manager.TransportNodeStatusListResult ) // create path and map variables localVarPath := a.client.cfg.BasePath + "/transport-nodes/{node-id}/remote-transport-node-status" localVarPath = strings.Replace(localVarPath, "{"+"node-id"+"}", fmt.Sprintf("%v", nodeId), -1) localVarHeaderParams := make(map[string]string) localVarQueryParams := url.Values{} localVarFormParams := url.Values{} if err := typeCheckParameter(localVarOptionals["bfdDiagnosticCode"], "string", "bfdDiagnosticCode"); err != nil { return successPayload, nil, err } if err := typeCheckParameter(localVarOptionals["cursor"], "string", "cursor"); err != nil { return successPayload, nil, err } if err := typeCheckParameter(localVarOptionals["includedFields"], "string", "includedFields"); err != nil { return successPayload, nil, err } if err := typeCheckParameter(localVarOptionals["pageSize"], "int64", "pageSize"); err != nil { return successPayload, nil, err } if err := typeCheckParameter(localVarOptionals["sortAscending"], "bool", "sortAscending"); err != nil { return successPayload, nil, err } if err := typeCheckParameter(localVarOptionals["sortBy"], "string", "sortBy"); err != nil { return successPayload, nil, err } if err := typeCheckParameter(localVarOptionals["source"], "string", "source"); err != nil { return successPayload, nil, err } if err := typeCheckParameter(localVarOptionals["tunnelStatus"], "string", "tunnelStatus"); err != nil { return successPayload, nil, err } if localVarTempParam, localVarOk := 
localVarOptionals["bfdDiagnosticCode"].(string); localVarOk { localVarQueryParams.Add("bfd_diagnostic_code", parameterToString(localVarTempParam, "")) } if localVarTempParam, localVarOk := localVarOptionals["cursor"].(string); localVarOk { localVarQueryParams.Add("cursor", parameterToString(localVarTempParam, "")) } if localVarTempParam, localVarOk := localVarOptionals["includedFields"].(string); localVarOk { localVarQueryParams.Add("included_fields", parameterToString(localVarTempParam, "")) } if localVarTempParam, localVarOk := localVarOptionals["pageSize"].(int64); localVarOk { localVarQueryParams.Add("page_size", parameterToString(localVarTempParam, "")) } if localVarTempParam, localVarOk := localVarOptionals["sortAscending"].(bool); localVarOk { localVarQueryParams.Add("sort_ascending", parameterToString(localVarTempParam, "")) } if localVarTempParam, localVarOk := localVarOptionals["sortBy"].(string); localVarOk { localVarQueryParams.Add("sort_by", parameterToString(localVarTempParam, "")) } if localVarTempParam, localVarOk := localVarOptionals["source"].(string); localVarOk { localVarQueryParams.Add("source", parameterToString(localVarTempParam, "")) } if localVarTempParam, localVarOk := localVarOptionals["tunnelStatus"].(string); localVarOk { localVarQueryParams.Add("tunnel_status", parameterToString(localVarTempParam, "")) } // to determine the Content-Type header localVarHttpContentTypes := []string{"application/json"} // set Content-Type header localVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes) if localVarHttpContentType != "" { localVarHeaderParams["Content-Type"] = localVarHttpContentType } // to determine the Accept header localVarHttpHeaderAccepts := []string{ "application/json", } // set Accept header localVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts) if localVarHttpHeaderAccept != "" { localVarHeaderParams["Accept"] = localVarHttpHeaderAccept } r, err := a.client.prepareRequest(ctx, localVarPath, 
localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)
	if err != nil {
		return successPayload, nil, err
	}
	localVarHttpResponse, err := a.client.callAPI(r)
	if err != nil || localVarHttpResponse == nil {
		return successPayload, localVarHttpResponse, err
	}
	defer localVarHttpResponse.Body.Close()
	if localVarHttpResponse.StatusCode >= 300 {
		return successPayload, localVarHttpResponse, reportError(localVarHttpResponse.Status)
	}
	if err = json.NewDecoder(localVarHttpResponse.Body).Decode(&successPayload); err != nil {
		return successPayload, localVarHttpResponse, err
	}
	return successPayload, localVarHttpResponse, err
}

/* TroubleshootingAndMonitoringApiService List all monitoring.Traceflow rounds
List all monitoring.Traceflow rounds; if a logical port id is given as a query parameter, only those originated from the logical port are returned.
* @param ctx context.Context Authentication Context
@param optional (nil or map[string]interface{}) with one or more of:
@param "cursor" (string) Opaque cursor to be used for getting next page of records (supplied by current result page)
@param "includedFields" (string) Comma separated list of fields that should be included to result of query
@param "lportId" (string) id of the source logical port where the trace flows originated
@param "pageSize" (int64) Maximum number of results to return in this page (server may return fewer)
@param "sortAscending" (bool)
@param "sortBy" (string) Field by which records are sorted
@return monitoring.TraceflowListResult*/
func (a *TroubleshootingAndMonitoringApiService) ListTraceflows(ctx context.Context, localVarOptionals map[string]interface{}) (monitoring.TraceflowListResult, *http.Response, error) {
	var (
		localVarHttpMethod = strings.ToUpper("Get")
		localVarPostBody   interface{}
		localVarFileName   string
		localVarFileBytes  []byte
		successPayload     monitoring.TraceflowListResult
	)
	// create path and map variables
	localVarPath := a.client.cfg.BasePath + "/traceflows"

	localVarHeaderParams := make(map[string]string)
	localVarQueryParams := url.Values{}
	localVarFormParams := url.Values{}

	// validate optional parameter types before use
	if err := typeCheckParameter(localVarOptionals["cursor"], "string", "cursor"); err != nil {
		return successPayload, nil, err
	}
	if err := typeCheckParameter(localVarOptionals["includedFields"], "string", "includedFields"); err != nil {
		return successPayload, nil, err
	}
	if err := typeCheckParameter(localVarOptionals["lportId"], "string", "lportId"); err != nil {
		return successPayload, nil, err
	}
	if err := typeCheckParameter(localVarOptionals["pageSize"], "int64", "pageSize"); err != nil {
		return successPayload, nil, err
	}
	if err := typeCheckParameter(localVarOptionals["sortAscending"], "bool", "sortAscending"); err != nil {
		return successPayload, nil, err
	}
	if err := typeCheckParameter(localVarOptionals["sortBy"], "string", "sortBy"); err != nil {
		return successPayload, nil, err
	}

	if localVarTempParam, localVarOk := localVarOptionals["cursor"].(string); localVarOk {
		localVarQueryParams.Add("cursor", parameterToString(localVarTempParam, ""))
	}
	if localVarTempParam, localVarOk := localVarOptionals["includedFields"].(string); localVarOk {
		localVarQueryParams.Add("included_fields", parameterToString(localVarTempParam, ""))
	}
	if localVarTempParam, localVarOk := localVarOptionals["lportId"].(string); localVarOk {
		localVarQueryParams.Add("lport_id", parameterToString(localVarTempParam, ""))
	}
	if localVarTempParam, localVarOk := localVarOptionals["pageSize"].(int64); localVarOk {
		localVarQueryParams.Add("page_size", parameterToString(localVarTempParam, ""))
	}
	if localVarTempParam, localVarOk := localVarOptionals["sortAscending"].(bool); localVarOk {
		localVarQueryParams.Add("sort_ascending", parameterToString(localVarTempParam, ""))
	}
	if localVarTempParam, localVarOk := localVarOptionals["sortBy"].(string); localVarOk {
		localVarQueryParams.Add("sort_by", parameterToString(localVarTempParam, ""))
	}
	// to determine the Content-Type header
	localVarHttpContentTypes := []string{"application/json"}

	// set Content-Type header
	localVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)
	if localVarHttpContentType != "" {
		localVarHeaderParams["Content-Type"] = localVarHttpContentType
	}
	// to determine the Accept header
	localVarHttpHeaderAccepts := []string{
		"application/json",
	}
	// set Accept header
	localVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)
	if localVarHttpHeaderAccept != "" {
		localVarHeaderParams["Accept"] = localVarHttpHeaderAccept
	}
	r, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)
	if err != nil {
		return successPayload, nil, err
	}
	localVarHttpResponse, err := a.client.callAPI(r)
	if err != nil || localVarHttpResponse == nil {
		return successPayload, localVarHttpResponse, err
	}
	defer localVarHttpResponse.Body.Close()
	if localVarHttpResponse.StatusCode >= 300 {
		return successPayload, localVarHttpResponse, reportError(localVarHttpResponse.Status)
	}
	if err = json.NewDecoder(localVarHttpResponse.Body).Decode(&successPayload); err != nil {
		return successPayload, localVarHttpResponse, err
	}
	return successPayload, localVarHttpResponse, err
}

/* TroubleshootingAndMonitoringApiService Read status of all the transport nodes
Read status of all the transport nodes
* @param ctx context.Context Authentication Context
@param optional (nil or map[string]interface{}) with one or more of:
@param "cursor" (string) Opaque cursor to be used for getting next page of records (supplied by current result page)
@param "includedFields" (string) Comma separated list of fields that should be included to result of query
@param "pageSize" (int64) Maximum number of results to return in this page (server may return fewer)
@param "sortAscending" (bool)
@param "sortBy" (string) Field by which records are sorted
@param "source" (string)
Data source type.
@param "status" (string) Transport node
@return manager.TransportNodeStatusListResult*/
func (a *TroubleshootingAndMonitoringApiService) ListTransportNodeStatus(ctx context.Context, localVarOptionals map[string]interface{}) (manager.TransportNodeStatusListResult, *http.Response, error) {
	var (
		localVarHttpMethod = strings.ToUpper("Get")
		localVarPostBody   interface{}
		localVarFileName   string
		localVarFileBytes  []byte
		successPayload     manager.TransportNodeStatusListResult
	)
	// create path and map variables
	localVarPath := a.client.cfg.BasePath + "/transport-zones/transport-node-status"

	localVarHeaderParams := make(map[string]string)
	localVarQueryParams := url.Values{}
	localVarFormParams := url.Values{}

	// validate optional parameter types before use
	if err := typeCheckParameter(localVarOptionals["cursor"], "string", "cursor"); err != nil {
		return successPayload, nil, err
	}
	if err := typeCheckParameter(localVarOptionals["includedFields"], "string", "includedFields"); err != nil {
		return successPayload, nil, err
	}
	if err := typeCheckParameter(localVarOptionals["pageSize"], "int64", "pageSize"); err != nil {
		return successPayload, nil, err
	}
	if err := typeCheckParameter(localVarOptionals["sortAscending"], "bool", "sortAscending"); err != nil {
		return successPayload, nil, err
	}
	if err := typeCheckParameter(localVarOptionals["sortBy"], "string", "sortBy"); err != nil {
		return successPayload, nil, err
	}
	if err := typeCheckParameter(localVarOptionals["source"], "string", "source"); err != nil {
		return successPayload, nil, err
	}
	if err := typeCheckParameter(localVarOptionals["status"], "string", "status"); err != nil {
		return successPayload, nil, err
	}

	if localVarTempParam, localVarOk := localVarOptionals["cursor"].(string); localVarOk {
		localVarQueryParams.Add("cursor", parameterToString(localVarTempParam, ""))
	}
	if localVarTempParam, localVarOk := localVarOptionals["includedFields"].(string); localVarOk {
		localVarQueryParams.Add("included_fields", parameterToString(localVarTempParam, ""))
	}
	if localVarTempParam, localVarOk := localVarOptionals["pageSize"].(int64); localVarOk {
		localVarQueryParams.Add("page_size", parameterToString(localVarTempParam, ""))
	}
	if localVarTempParam, localVarOk := localVarOptionals["sortAscending"].(bool); localVarOk {
		localVarQueryParams.Add("sort_ascending", parameterToString(localVarTempParam, ""))
	}
	if localVarTempParam, localVarOk := localVarOptionals["sortBy"].(string); localVarOk {
		localVarQueryParams.Add("sort_by", parameterToString(localVarTempParam, ""))
	}
	if localVarTempParam, localVarOk := localVarOptionals["source"].(string); localVarOk {
		localVarQueryParams.Add("source", parameterToString(localVarTempParam, ""))
	}
	if localVarTempParam, localVarOk := localVarOptionals["status"].(string); localVarOk {
		localVarQueryParams.Add("status", parameterToString(localVarTempParam, ""))
	}
	// to determine the Content-Type header
	localVarHttpContentTypes := []string{"application/json"}

	// set Content-Type header
	localVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)
	if localVarHttpContentType != "" {
		localVarHeaderParams["Content-Type"] = localVarHttpContentType
	}
	// to determine the Accept header
	localVarHttpHeaderAccepts := []string{
		"application/json",
	}
	// set Accept header
	localVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)
	if localVarHttpHeaderAccept != "" {
		localVarHeaderParams["Accept"] = localVarHttpHeaderAccept
	}
	r, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)
	if err != nil {
		return successPayload, nil, err
	}
	localVarHttpResponse, err := a.client.callAPI(r)
	if err != nil || localVarHttpResponse == nil {
		return successPayload, localVarHttpResponse, err
	}
	defer localVarHttpResponse.Body.Close()
	if localVarHttpResponse.StatusCode >= 300 {
		return successPayload, localVarHttpResponse, reportError(localVarHttpResponse.Status)
	}
	if err = json.NewDecoder(localVarHttpResponse.Body).Decode(&successPayload); err != nil {
		return successPayload, localVarHttpResponse, err
	}
	return successPayload, localVarHttpResponse, err
}

/* TroubleshootingAndMonitoringApiService Read status of transport nodes in a transport zone
Read status of transport nodes in a transport zone
* @param ctx context.Context Authentication Context
@param zoneId ID of transport zone
@param optional (nil or map[string]interface{}) with one or more of:
@param "cursor" (string) Opaque cursor to be used for getting next page of records (supplied by current result page)
@param "includedFields" (string) Comma separated list of fields that should be included to result of query
@param "pageSize" (int64) Maximum number of results to return in this page (server may return fewer)
@param "sortAscending" (bool)
@param "sortBy" (string) Field by which records are sorted
@param "source" (string) Data source type.
@param "status" (string) Transport node
@return manager.TransportNodeStatusListResult*/
func (a *TroubleshootingAndMonitoringApiService) ListTransportNodeStatusForTransportZone(ctx context.Context, zoneId string, localVarOptionals map[string]interface{}) (manager.TransportNodeStatusListResult, *http.Response, error) {
	var (
		localVarHttpMethod = strings.ToUpper("Get")
		localVarPostBody   interface{}
		localVarFileName   string
		localVarFileBytes  []byte
		successPayload     manager.TransportNodeStatusListResult
	)
	// create path and map variables
	localVarPath := a.client.cfg.BasePath + "/transport-zones/{zone-id}/transport-node-status"
	localVarPath = strings.Replace(localVarPath, "{"+"zone-id"+"}", fmt.Sprintf("%v", zoneId), -1)

	localVarHeaderParams := make(map[string]string)
	localVarQueryParams := url.Values{}
	localVarFormParams := url.Values{}

	// validate optional parameter types before use
	if err := typeCheckParameter(localVarOptionals["cursor"], "string", "cursor"); err != nil {
		return successPayload, nil, err
	}
	if err := typeCheckParameter(localVarOptionals["includedFields"], "string", "includedFields"); err != nil {
		return successPayload, nil, err
	}
	if err := typeCheckParameter(localVarOptionals["pageSize"], "int64", "pageSize"); err != nil {
		return successPayload, nil, err
	}
	if err := typeCheckParameter(localVarOptionals["sortAscending"], "bool", "sortAscending"); err != nil {
		return successPayload, nil, err
	}
	if err := typeCheckParameter(localVarOptionals["sortBy"], "string", "sortBy"); err != nil {
		return successPayload, nil, err
	}
	if err := typeCheckParameter(localVarOptionals["source"], "string", "source"); err != nil {
		return successPayload, nil, err
	}
	if err := typeCheckParameter(localVarOptionals["status"], "string", "status"); err != nil {
		return successPayload, nil, err
	}

	if localVarTempParam, localVarOk := localVarOptionals["cursor"].(string); localVarOk {
		localVarQueryParams.Add("cursor", parameterToString(localVarTempParam, ""))
	}
	if localVarTempParam, localVarOk := localVarOptionals["includedFields"].(string); localVarOk {
		localVarQueryParams.Add("included_fields", parameterToString(localVarTempParam, ""))
	}
	if localVarTempParam, localVarOk := localVarOptionals["pageSize"].(int64); localVarOk {
		localVarQueryParams.Add("page_size", parameterToString(localVarTempParam, ""))
	}
	if localVarTempParam, localVarOk := localVarOptionals["sortAscending"].(bool); localVarOk {
		localVarQueryParams.Add("sort_ascending", parameterToString(localVarTempParam, ""))
	}
	if localVarTempParam, localVarOk := localVarOptionals["sortBy"].(string); localVarOk {
		localVarQueryParams.Add("sort_by", parameterToString(localVarTempParam, ""))
	}
	if localVarTempParam, localVarOk := localVarOptionals["source"].(string); localVarOk {
		localVarQueryParams.Add("source", parameterToString(localVarTempParam, ""))
	}
	if localVarTempParam, localVarOk := localVarOptionals["status"].(string); localVarOk {
		localVarQueryParams.Add("status", parameterToString(localVarTempParam, ""))
	}
	// to determine the Content-Type header
	localVarHttpContentTypes := []string{"application/json"}
	// set Content-Type header
	localVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)
	if localVarHttpContentType != "" {
		localVarHeaderParams["Content-Type"] = localVarHttpContentType
	}
	// to determine the Accept header
	localVarHttpHeaderAccepts := []string{
		"application/json",
	}
	// set Accept header
	localVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)
	if localVarHttpHeaderAccept != "" {
		localVarHeaderParams["Accept"] = localVarHttpHeaderAccept
	}
	r, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)
	if err != nil {
		return successPayload, nil, err
	}
	localVarHttpResponse, err := a.client.callAPI(r)
	if err != nil || localVarHttpResponse == nil {
		return successPayload, localVarHttpResponse, err
	}
	defer localVarHttpResponse.Body.Close()
	if localVarHttpResponse.StatusCode >= 300 {
		return successPayload, localVarHttpResponse, reportError(localVarHttpResponse.Status)
	}
	if err = json.NewDecoder(localVarHttpResponse.Body).Decode(&successPayload); err != nil {
		return successPayload, localVarHttpResponse, err
	}
	return successPayload, localVarHttpResponse, err
}

/* TroubleshootingAndMonitoringApiService Read node log properties
For a single specified log file, lists the filename, file size, and last-modified time.
* @param ctx context.Context Authentication Context @param logName Name of log file to read properties @return monitoring.NodeLogProperties*/ func (a *TroubleshootingAndMonitoringApiService) ReadNodeLog(ctx context.Context, logName string) (monitoring.NodeLogProperties, *http.Response, error) { var ( localVarHttpMethod = strings.ToUpper("Get") localVarPostBody interface{} localVarFileName string localVarFileBytes []byte successPayload monitoring.NodeLogProperties ) // create path and map variables localVarPath := a.client.cfg.BasePath + "/node/logs/{log-name}" localVarPath = strings.Replace(localVarPath, "{"+"log-name"+"}", fmt.Sprintf("%v", logName), -1) localVarHeaderParams := make(map[string]string) localVarQueryParams := url.Values{} localVarFormParams := url.Values{} // to determine the Content-Type header localVarHttpContentTypes := []string{"application/json"} // set Content-Type header localVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes) if localVarHttpContentType != "" { localVarHeaderParams["Content-Type"] = localVarHttpContentType } // to determine the Accept header localVarHttpHeaderAccepts := []string{ "application/json", "application/octet-stream", } // set Accept header localVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts) if localVarHttpHeaderAccept != "" { localVarHeaderParams["Accept"] = localVarHttpHeaderAccept } r, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes) if err != nil { return successPayload, nil, err } localVarHttpResponse, err := a.client.callAPI(r) if err != nil || localVarHttpResponse == nil { return successPayload, localVarHttpResponse, err } defer localVarHttpResponse.Body.Close() if localVarHttpResponse.StatusCode >= 300 { return successPayload, localVarHttpResponse, reportError(localVarHttpResponse.Status) } if err = 
json.NewDecoder(localVarHttpResponse.Body).Decode(&successPayload); err != nil { return successPayload, localVarHttpResponse, err } return successPayload, localVarHttpResponse, err } /* TroubleshootingAndMonitoringApiService Read node log contents For a single specified log file, returns the content of the log file. This method supports byte-range requests. To request just a portion of a log file, supply an HTTP Range header, e.g. \&quot;Range: bytes&#x3D;&lt;start&gt;-&lt;end&gt;\&quot;. &lt;end&gt; is optional, and, if omitted, the file contents from start to the end of the file are returned.&#39; * @param ctx context.Context Authentication Context @param logName Name of log to read @return */ func (a *TroubleshootingAndMonitoringApiService) ReadNodeLogData(ctx context.Context, logName string) (*http.Response, error) { var ( localVarHttpMethod = strings.ToUpper("Get") localVarPostBody interface{} localVarFileName string localVarFileBytes []byte ) // create path and map variables localVarPath := a.client.cfg.BasePath + "/node/logs/{log-name}/data" localVarPath = strings.Replace(localVarPath, "{"+"log-name"+"}", fmt.Sprintf("%v", logName), -1) localVarHeaderParams := make(map[string]string) localVarQueryParams := url.Values{} localVarFormParams := url.Values{} // to determine the Content-Type header localVarHttpContentTypes := []string{"application/json"} // set Content-Type header localVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes) if localVarHttpContentType != "" { localVarHeaderParams["Content-Type"] = localVarHttpContentType } // to determine the Accept header localVarHttpHeaderAccepts := []string{ "application/octet-stream", } // set Accept header localVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts) if localVarHttpHeaderAccept != "" { localVarHeaderParams["Accept"] = localVarHttpHeaderAccept } r, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, 
localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes) if err != nil { return nil, err } localVarHttpResponse, err := a.client.callAPI(r) if err != nil || localVarHttpResponse == nil { return localVarHttpResponse, err } defer localVarHttpResponse.Body.Close() if localVarHttpResponse.StatusCode >= 300 { return localVarHttpResponse, reportError(localVarHttpResponse.Status) } return localVarHttpResponse, err } /* TroubleshootingAndMonitoringApiService Update the mirror session Update the mirror session * @param ctx context.Context Authentication Context @param mirrorSessionId @param portMirroringSession @return manager.PortMirroringSession*/ func (a *TroubleshootingAndMonitoringApiService) UpdatePortMirroringSession(ctx context.Context, mirrorSessionId string, portMirroringSession manager.PortMirroringSession) (manager.PortMirroringSession, *http.Response, error) { var ( localVarHttpMethod = strings.ToUpper("Put") localVarPostBody interface{} localVarFileName string localVarFileBytes []byte successPayload manager.PortMirroringSession ) // create path and map variables localVarPath := a.client.cfg.BasePath + "/mirror-sessions/{mirror-session-id}" localVarPath = strings.Replace(localVarPath, "{"+"mirror-session-id"+"}", fmt.Sprintf("%v", mirrorSessionId), -1) localVarHeaderParams := make(map[string]string) localVarQueryParams := url.Values{} localVarFormParams := url.Values{} // to determine the Content-Type header localVarHttpContentTypes := []string{"application/json"} // set Content-Type header localVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes) if localVarHttpContentType != "" { localVarHeaderParams["Content-Type"] = localVarHttpContentType } // to determine the Accept header localVarHttpHeaderAccepts := []string{ "application/json", } // set Accept header localVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts) if localVarHttpHeaderAccept != "" { localVarHeaderParams["Accept"] = 
localVarHttpHeaderAccept } // body params localVarPostBody = &portMirroringSession r, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes) if err != nil { return successPayload, nil, err } localVarHttpResponse, err := a.client.callAPI(r) if err != nil || localVarHttpResponse == nil { return successPayload, localVarHttpResponse, err } defer localVarHttpResponse.Body.Close() if localVarHttpResponse.StatusCode >= 300 { return successPayload, localVarHttpResponse, reportError(localVarHttpResponse.Status) } if err = json.NewDecoder(localVarHttpResponse.Body).Decode(&successPayload); err != nil { return successPayload, localVarHttpResponse, err } return successPayload, localVarHttpResponse, err } /* TroubleshootingAndMonitoringApiService Update global switch IPFIX export configuration Update global switch IPFIX export configuration * @param ctx context.Context Authentication Context @param ipfixObsPointConfig @return monitoring.IpfixObsPointConfig*/ func (a *TroubleshootingAndMonitoringApiService) UpdateSwitchIpfixConfig(ctx context.Context, ipfixObsPointConfig monitoring.IpfixObsPointConfig) (monitoring.IpfixObsPointConfig, *http.Response, error) { var ( localVarHttpMethod = strings.ToUpper("Put") localVarPostBody interface{} localVarFileName string localVarFileBytes []byte successPayload monitoring.IpfixObsPointConfig ) // create path and map variables localVarPath := a.client.cfg.BasePath + "/ipfix-obs-points/switch-global" localVarHeaderParams := make(map[string]string) localVarQueryParams := url.Values{} localVarFormParams := url.Values{} // to determine the Content-Type header localVarHttpContentTypes := []string{"application/json"} // set Content-Type header localVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes) if localVarHttpContentType != "" { localVarHeaderParams["Content-Type"] = localVarHttpContentType } 
// to determine the Accept header localVarHttpHeaderAccepts := []string{ "application/json", } // set Accept header localVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts) if localVarHttpHeaderAccept != "" { localVarHeaderParams["Accept"] = localVarHttpHeaderAccept } // body params localVarPostBody = &ipfixObsPointConfig r, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes) if err != nil { return successPayload, nil, err } localVarHttpResponse, err := a.client.callAPI(r) if err != nil || localVarHttpResponse == nil { return successPayload, localVarHttpResponse, err } defer localVarHttpResponse.Body.Close() if localVarHttpResponse.StatusCode >= 300 { return successPayload, localVarHttpResponse, reportError(localVarHttpResponse.Status) } if err = json.NewDecoder(localVarHttpResponse.Body).Decode(&successPayload); err != nil { return successPayload, localVarHttpResponse, err } return successPayload, localVarHttpResponse, err } /* TroubleshootingAndMonitoringApiService Verify whether the mirror session is still valid Verify whether all participants are on the same transport node * @param ctx context.Context Authentication Context @param mirrorSessionId @return */ func (a *TroubleshootingAndMonitoringApiService) VerifyPortMirroringSessionVerify(ctx context.Context, mirrorSessionId string) (*http.Response, error) { var ( localVarHttpMethod = strings.ToUpper("Post") localVarPostBody interface{} localVarFileName string localVarFileBytes []byte ) // create path and map variables localVarPath := a.client.cfg.BasePath + "/mirror-sessions/{mirror-session-id}?action=verify" localVarPath = strings.Replace(localVarPath, "{"+"mirror-session-id"+"}", fmt.Sprintf("%v", mirrorSessionId), -1) localVarHeaderParams := make(map[string]string) localVarQueryParams := url.Values{} localVarFormParams := url.Values{} // to determine the 
Content-Type header localVarHttpContentTypes := []string{"application/json"} // set Content-Type header localVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes) if localVarHttpContentType != "" { localVarHeaderParams["Content-Type"] = localVarHttpContentType } // to determine the Accept header localVarHttpHeaderAccepts := []string{ "application/json", } // set Accept header localVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts) if localVarHttpHeaderAccept != "" { localVarHeaderParams["Accept"] = localVarHttpHeaderAccept } r, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes) if err != nil { return nil, err } localVarHttpResponse, err := a.client.callAPI(r) if err != nil || localVarHttpResponse == nil { return localVarHttpResponse, err } defer localVarHttpResponse.Body.Close() if localVarHttpResponse.StatusCode >= 300 { return localVarHttpResponse, reportError(localVarHttpResponse.Status) } return localVarHttpResponse, err }
{ "pile_set_name": "Github" }
<configuration> <project> <executable>knutalg.exe</executable> <arguments></arguments> <output></output> <options></options> <namespace>knutalg</namespace> <template>console</template> <debuginfo>-1</debuginfo> </project> <files> <module> <include>main.l</include> </module> </files> </configuration>
{ "pile_set_name": "Github" }
๏ปฟ<?xml version="1.0" encoding="utf-8"?> <Project ToolsVersion="4.0" DefaultTargets="Build" xmlns="http://schemas.microsoft.com/developer/msbuild/2003"> <Import Project="$(MSBuildExtensionsPath)\$(MSBuildToolsVersion)\Microsoft.Common.props" Condition="Exists('$(MSBuildExtensionsPath)\$(MSBuildToolsVersion)\Microsoft.Common.props')" /> <PropertyGroup> <Configuration Condition=" '$(Configuration)' == '' ">Debug</Configuration> <Platform Condition=" '$(Platform)' == '' ">AnyCPU</Platform> <ProductVersion> </ProductVersion> <SchemaVersion>2.0</SchemaVersion> <ProjectGuid>{6EA934DD-D6E6-4407-A91F-DB0521CC7803}</ProjectGuid> <ProjectTypeGuids>{349c5851-65df-11da-9384-00065b846f21};{fae04ec0-301f-11d3-bf4b-00c04f79efbc}</ProjectTypeGuids> <OutputType>Library</OutputType> <AppDesignerFolder>Properties</AppDesignerFolder> <RootNamespace>WebPages</RootNamespace> <AssemblyName>WebPages</AssemblyName> <TargetFrameworkVersion>v4.5</TargetFrameworkVersion> <UseIISExpress>true</UseIISExpress> <IISExpressSSLPort /> <IISExpressAnonymousAuthentication /> <IISExpressWindowsAuthentication /> <IISExpressUseClassicPipelineMode /> </PropertyGroup> <PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'Debug|AnyCPU' "> <DebugSymbols>true</DebugSymbols> <DebugType>full</DebugType> <Optimize>false</Optimize> <OutputPath>bin\</OutputPath> <DefineConstants>DEBUG;TRACE</DefineConstants> <ErrorReport>prompt</ErrorReport> <WarningLevel>4</WarningLevel> </PropertyGroup> <PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'Release|AnyCPU' "> <DebugType>pdbonly</DebugType> <Optimize>true</Optimize> <OutputPath>bin\</OutputPath> <DefineConstants>TRACE</DefineConstants> <ErrorReport>prompt</ErrorReport> <WarningLevel>4</WarningLevel> </PropertyGroup> <ItemGroup> <Reference Include="Microsoft.CSharp" /> <Reference Include="System.Web.DynamicData" /> <Reference Include="System.Web.Entity" /> <Reference Include="System.Web.ApplicationServices" /> <Reference 
Include="System.ComponentModel.DataAnnotations" /> <Reference Include="System" /> <Reference Include="System.Data" /> <Reference Include="System.Core" /> <Reference Include="System.Data.DataSetExtensions" /> <Reference Include="System.Web.Extensions" /> <Reference Include="System.Xml.Linq" /> <Reference Include="System.Drawing" /> <Reference Include="System.Web" /> <Reference Include="System.Xml" /> <Reference Include="System.Configuration" /> <Reference Include="System.Web.Services" /> <Reference Include="System.EnterpriseServices" /> </ItemGroup> <ItemGroup> <Content Include="src\config.xml" /> <Content Include="src\console.css" /> <Content Include="src\console.html" /> <Content Include="src\console.js" /> <Content Include="src\d-pad.html" /> <Content Include="src\marker-icon.png" /> <Content Include="src\flight-control.html" /> <Content Include="src\information.css" /> <Content Include="src\information.html" /> <Content Include="src\jKSPWAPICore.css" /> <Content Include="src\jKSPWAPICore.js" /> <Content Include="src\jquery-1.9.1.min.js" /> <Content Include="src\layers.png" /> <Content Include="src\leafletksp.css" /> <Content Include="src\leafletksp.js" /> <Content Include="src\map.html" /> <Content Include="src\marker-icon%402x.png" /> <Content Include="src\marker-shadow.png" /> <Content Include="src\markers-anomaly.png" /> <Content Include="src\markers-shadow.png" /> <Content Include="src\markers-spacecenter.png" /> <Content Include="src\smart-ass.html" /> <Content Include="src\speech.html" /> <Content Include="src\touchball-pyr.html" /> <Content Include="src\zoom-in.png" /> <Content Include="src\zoom-out.png" /> <Content Include="Web.config" /> </ItemGroup> <ItemGroup> <Compile Include="Properties\AssemblyInfo.cs" /> </ItemGroup> <ItemGroup> <Content Include="src\console.coffee" /> <None Include="Web.Debug.config"> <DependentUpon>Web.config</DependentUpon> </None> <None Include="Web.Release.config"> <DependentUpon>Web.config</DependentUpon> </None> 
</ItemGroup> <PropertyGroup> <VisualStudioVersion Condition="'$(VisualStudioVersion)' == ''">10.0</VisualStudioVersion> <VSToolsPath Condition="'$(VSToolsPath)' == ''">$(MSBuildExtensionsPath32)\Microsoft\VisualStudio\v$(VisualStudioVersion)</VSToolsPath> </PropertyGroup> <Import Project="$(MSBuildBinPath)\Microsoft.CSharp.targets" /> <Import Project="$(VSToolsPath)\WebApplications\Microsoft.WebApplication.targets" Condition="'$(VSToolsPath)' != ''" /> <Import Project="$(MSBuildExtensionsPath32)\Microsoft\VisualStudio\v10.0\WebApplications\Microsoft.WebApplication.targets" Condition="false" /> <ProjectExtensions> <VisualStudio> <FlavorProperties GUID="{349c5851-65df-11da-9384-00065b846f21}"> <WebProjectProperties> <UseIIS>True</UseIIS> <AutoAssignPort>True</AutoAssignPort> <DevelopmentServerPort>49366</DevelopmentServerPort> <DevelopmentServerVPath>/</DevelopmentServerVPath> <IISUrl>http://localhost:50404/</IISUrl> <NTLMAuthentication>False</NTLMAuthentication> <UseCustomServer>False</UseCustomServer> <CustomServerUrl> </CustomServerUrl> <SaveServerSettingsInUserFile>False</SaveServerSettingsInUserFile> </WebProjectProperties> </FlavorProperties> </VisualStudio> </ProjectExtensions> <PropertyGroup> <PostBuildEvent>xcopy "$(ProjectDir)\src\*" "$(ProjectDir)..\..\publish\GameData\Telemachus\Plugins\PluginData\Telemachus\" /e /y /i /r xcopy "$(ProjectDir)\src\*" "$(ProjectDir)..\..\ksp-telemachus-dev\GameData\Telemachus\Plugins\PluginData\Telemachus\" /e /y /i /r </PostBuildEvent> </PropertyGroup> <!-- To modify your build process, add your task inside one of the targets below and uncomment it. Other similar extension points exist, see Microsoft.Common.targets. <Target Name="BeforeBuild"> </Target> <Target Name="AfterBuild"> </Target> --> </Project>
{ "pile_set_name": "Github" }
/* Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // +k8s:deepcopy-gen=package // +k8s:protobuf-gen=package // +k8s:openapi-gen=true // +groupName=settings.k8s.io package v1alpha1 // import "k8s.io/api/settings/v1alpha1"
{ "pile_set_name": "Github" }
## int-hashtable.pkg
# A specialization of the hashtable generic to integer keys.

# Compiled by:
#     src/lib/std/standard.lib

# "If it is not beautiful, it is not done."

stipulate
    package hr  = hashtable_representation;     # hashtable_representation  is from   src/lib/src/hashtable-rep.pkg
    package rwv = rw_vector;                    # rw_vector                 is from   src/lib/std/src/rw-vector.pkg
herein

    package int_hashtable
    :
    Typelocked_Hashtable                        # Typelocked_Hashtable      is from   src/lib/src/typelocked-hashtable.api
    where
        key::Hash_Key == Int
    =
    package {

        # Key package: integer keys compared with ==,
        # hashed by plain int -> unt conversion.
        #
        package key {
            Hash_Key = Int;

            fun same_key (a: Int, b) = a == b;
            fun hash_value a = unt::from_int a;
        };

        include package key;

        # A table couples the bucket vector with an item count and
        # the exception that 'get' raises on a missing key.
        #
        Hashtable X
            =
            HASHTABLE
              { not_found_exception: Exception,
                table:   Ref( hr::Table( Hash_Key, X ) ),
                n_items: Ref( Int )
              };

        # Map a hash to a bucket index.
        # Masking with (size - 1) assumes size is a power of two — TODO confirm
        # against hr::allot / hr::grow_table_if_needed.
        #
        fun index (i, size)
            =
            unt::to_int_x( unt::bitwise_and( i, unt::from_int size - 0u1));

        # Create a new table;
        # The int is a size hint and the exception
        # is to be raised by find.
        #
        fun make_hashtable { size_hint, not_found_exception }
            =
            HASHTABLE { not_found_exception, table => REF (hr::allot size_hint), n_items => REF 0 };

        # Remove all elements from the table:
        #
        fun clear (HASHTABLE { table, n_items, ... } )
            =
            {   hr::clear *table;
                n_items := 0;
            };

        # Insert an item.
        #
        # If the key already has an
        # item associated with it,
        # then the old item is discarded.
        #
        fun set (my_table as HASHTABLE { table, n_items, ... } ) (key, item)
            =
            {   vector = *table;
                size   = rwv::length vector;
                hash   = hash_value key;
                index  = index (hash, size);

                # 'get' returns hr::NIL when it inserted a fresh bucket entry
                # (nothing to write back), otherwise the rebuilt bucket chain.
                #
                fun get hr::NIL
                        =>
                        {   rwv::set (vector, index, hr::BUCKET (hash, key, item, rwv::get (vector, index)));
                            n_items := *n_items + 1;
                            hr::grow_table_if_needed (table, *n_items);
                            hr::NIL;
                        };

                    get (hr::BUCKET (h, k, v, r))
                        =>
                        if (hash == h and same_key (key, k))
                            hr::BUCKET (hash, key, item, r);
                        else
                            case (get r)
                                hr::NIL => hr::NIL;
                                rest    => hr::BUCKET (h, k, v, rest);
                            esac;
                        fi;
                end;

                case (get (rwv::get (vector, index)))
                    hr::NIL => ();
                    b       => rwv::set (vector, index, b);
                esac;
            };

        # Return TRUE, if the key is in the domain of the table:
        #
        fun contains_key (HASHTABLE { table, ... } ) key
            =
            get' (rwv::get (vector, index))
            where
                vector = *table;
                hash   = hash_value key;
                index  = index (hash, rwv::length vector);

                fun get' hr::NIL                    =>  FALSE;
                    get' (hr::BUCKET (h, k, v, r))  =>  ((hash == h) and same_key (key, k)) or get' r;
                end;
            end;

        # Find an item, the table's exception
        # is raised if the item doesn't exist:
        #
        fun get (HASHTABLE { table, not_found_exception, ... } ) key
            =
            get' (rwv::get (vector, index))
            where
                vector = *table;
                hash   = hash_value key;
                index  = index (hash, rwv::length vector);

                fun get' hr::NIL => raise exception not_found_exception;

                    get' (hr::BUCKET (h, k, v, r))
                        =>
                        if (hash == h and same_key (key, k))   v;
                        else                                   get' r;
                        fi;
                end;
            end;

        # Look up for an item,
        # return NULL if the item doesn't exist:
        #
        fun find (HASHTABLE { table, ... } ) key
            =
            get' (rwv::get (vector, index))
            where
                vector = *table;
                size   = rwv::length vector;
                hash   = hash_value key;
                index  = index (hash, size);

                fun get' hr::NIL => NULL;

                    get' (hr::BUCKET (h, k, v, r))
                        =>
                        if (hash == h and same_key (key, k))   THE v;
                        else                                   get' r;
                        fi;
                end;
            end;

        stipulate

            # Remove an item.  The table's exception is raised if
            # the item doesn't exist.
            #
            fun get_and_drop' (HASHTABLE { not_found_exception, table, n_items }, key)
                =
                {   vector = *table;
                    size   = rwv::length vector;
                    hash   = hash_value key;
                    index  = index (hash, size);

                    fun get' hr::NIL => raise exception not_found_exception;

                        get' (hr::BUCKET (h, k, v, r))
                            =>
                            if (hash == h and same_key (key, k))
                                (v, r);
                            else
                                my (item, r') = get' r;
                                (item, hr::BUCKET (h, k, v, r'));
                            fi;
                    end;

                    (get' (rwv::get (vector, index)))
                        ->  (item, bucket);

                    rwv::set (vector, index, bucket);
                    n_items := *n_items - 1;
                    item;
                };

        herein

            # Remove and return THE item, or NULL if the key is absent.
            #
            fun get_and_drop (hashtable as HASHTABLE { not_found_exception, ... }) key
                =
                {   THE (get_and_drop' (hashtable, key))
                    except
                        not_found_exception = NULL;
                };

            # Remove an item if present; absence is silently ignored.
            #
            fun drop hashtable key
                =
                {   (get_and_drop' (hashtable, key));
                    ();
                }
                except
                    not_found_exception = ();
        end;

        #
        fun vals_count (HASHTABLE { n_items, ... } )
            =
            *n_items;                                           # Return the number of items in the table.

        fun vals_list (HASHTABLE { table => REF vector, n_items, ... } )        # Return a list of the items in the table.
            =
            hr::vals_list (vector, n_items);

        fun keyvals_list (HASHTABLE { table => REF vector, n_items, ... } )
            =
            hr::keyvals_list (vector, n_items);

        # Apply a function to the entries of the table:
        #
        fun keyed_apply f (HASHTABLE { table, ... } )
            =
            hr::keyed_apply f *table;

        fun apply f (HASHTABLE { table, ... } )
            =
            hr::apply f *table;

        # Map a table to a new table
        # that has the same keys and exception:
        #
        fun keyed_map f (HASHTABLE { table, n_items, not_found_exception } )
            =
            HASHTABLE
              { table   => REF (hr::keyed_map f *table),
                n_items => REF *n_items,
                not_found_exception
              };

        fun map f (HASHTABLE { table, n_items, not_found_exception } )
            =
            HASHTABLE
              { table   => REF (hr::map f *table),
                n_items => REF *n_items,
                not_found_exception
              };

        # Fold a function over the entries of the table:
        #
        fun foldi f init (HASHTABLE { table, ... } )
            =
            hr::foldi f init *table;

        fun fold f init (HASHTABLE { table, ... } )
            =
            hr::fold f init *table;

        # Modify the hashtable items in place:
        #
        fun keyed_map_in_place f (HASHTABLE { table, ... } )
            =
            hr::keyed_map_in_place f *table;

        fun map_in_place f (HASHTABLE { table, ... } )
            =
            hr::map_in_place f *table;

        # Remove any hashtable items that
        # do not satisfy the given predicate:
        #
        fun keyed_filter predicate (HASHTABLE { table, n_items, ... } )
            =
            n_items := hr::keyed_filter predicate *table;

        fun filter predicate (HASHTABLE { table, n_items, ... } )
            =
            n_items := hr::filter predicate *table;

        # Create a copy of a hashtable
        #
        fun copy (HASHTABLE { table, n_items, not_found_exception } )
            =
            HASHTABLE
              { table   => REF (hr::copy *table),
                n_items => REF *n_items,
                not_found_exception
              };

        # Return a list of the sizes of the various buckets.  This is to
        # allow users to gauge the quality of their hashing function.
        #
        fun bucket_sizes (HASHTABLE { table, ... } )
            =
            hr::bucket_sizes *table;
    };                          # package int_hashtable_g
end;


##########################################################################
#   The following is support for outline-minor-mode in emacs.            #
#  ^C @ ^T hides all Text. (Leaves all headings.)                        #
#  ^C @ ^A shows All of file.                                            #
#  ^C @ ^Q Quickfolds entire file. (Leaves only top-level headings.)     #
#  ^C @ ^I shows Immediate children of node.                             #
#  ^C @ ^S Shows all of a node.                                          #
#  ^C @ ^D hiDes all of a node.                                          #
#  ^HFoutline-mode gives more details.                                   #
#  (Or do ^HI and read emacs:outline mode.)                              #
#                                                                        #
# Local variables:                                                       #
# mode: outline-minor                                                    #
# outline-regexp: "[{ \t]*\\(fun \\)"                                    #
# End:                                                                   #
##########################################################################


## AUTHOR: John Reppy
##         Bell Labs
##         Murray Hill, NJ 07974
##         jhr@research.bell-labs.com
## COPYRIGHT (c) 1999 Bell Labs, Lucent Technologies.
## Subsequent changes by Jeff Prothero Copyright (c) 2010-2015,
## released per terms of SMLNJ-COPYRIGHT.
{ "pile_set_name": "Github" }
; This test checks if debug loc is propagated to load/store created by GVN/Instcombine.
; RUN: opt < %s -gvn -S | FileCheck %s --check-prefixes=ALL
; RUN: opt < %s -gvn -instcombine -S | FileCheck %s --check-prefixes=ALL

;   struct node {
;     int  *v;
;     struct desc *descs;
;   };
;   struct desc {
;     struct node *node;
;   };
;   extern int bar(void *v, void* n);
;   int test(struct desc *desc)
;   {
;     void *v, *n;
;     v = !desc ? ((void *)0) : desc->node->v;   // Line 15
;     n = &desc->node->descs[0];                 // Line 16
;     return bar(v, n);
;   }
;
; Line 16, Column 13:
;   n = &desc->node->descs[0];
;               ^

target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
target triple = "aarch64--linux-gnu"

%struct.desc = type { %struct.node* }
%struct.node = type { i32*, %struct.desc* }

define i32 @test_no_null_opt(%struct.desc* readonly %desc) local_unnamed_addr #0 !dbg !4 {
entry:
  %tobool = icmp eq %struct.desc* %desc, null
  br i1 %tobool, label %cond.end, label %cond.false, !dbg !9
; The PRE'd load on the critical edge must carry the !dbg of line 16, column 13.
; ALL: br i1 %tobool, label %entry.cond.end_crit_edge, label %cond.false, !dbg [[LOC_15_6:![0-9]+]]
; ALL: entry.cond.end_crit_edge:
; ALL: load %struct.node*, %struct.node** null, align {{[0-9]+}}, !dbg [[LOC_16_13:![0-9]+]]

cond.false:
  %0 = bitcast %struct.desc* %desc to i8***, !dbg !11
  %1 = load i8**, i8*** %0, align 8, !dbg !11
  %2 = load i8*, i8** %1, align 8
  br label %cond.end, !dbg !9

cond.end:
; ALL: phi %struct.node* [ %3, %cond.false ], [ %.pre, %entry.cond.end_crit_edge ]
; ALL: phi i8* [ %2, %cond.false ], [ null, %entry.cond.end_crit_edge ]
  %3 = phi i8* [ %2, %cond.false ], [ null, %entry ], !dbg !9
  %node2 = getelementptr inbounds %struct.desc, %struct.desc* %desc, i64 0, i32 0
  %4 = load %struct.node*, %struct.node** %node2, align 8, !dbg !10
  %descs = getelementptr inbounds %struct.node, %struct.node* %4, i64 0, i32 1
  %5 = bitcast %struct.desc** %descs to i8**
  %6 = load i8*, i8** %5, align 8
  %call = tail call i32 @bar(i8* %3, i8* %6)
  ret i32 %call
}

; "null-pointer-is-valid" — presumably keeps the null-pointer loads from being
; folded away; confirm against LangRef if this test is updated.
attributes #0 = { "null-pointer-is-valid"="true" }
declare i32 @bar(i8*, i8*) local_unnamed_addr #1

!llvm.dbg.cu = !{!0}
!llvm.module.flags = !{!2, !3}

!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, emissionKind: FullDebug)
!1 = !DIFile(filename: "test.c", directory: ".")
!2 = !{i32 2, !"Dwarf Version", i32 4}
!3 = !{i32 2, !"Debug Info Version", i32 3}
!4 = distinct !DISubprogram(name: "test_no_null_opt", scope: !1, file: !1, line: 12, type: !5, isLocal: false, isDefinition: true, scopeLine: 13, flags: DIFlagPrototyped, isOptimized: true, unit: !0, retainedNodes: !8)
!5 = !DISubroutineType(types: !6)
!6 = !{!7}
!7 = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed)
!8 = !{}
!9 = !DILocation(line: 15, column: 6, scope: !4)
!10 = !DILocation(line: 16, column: 13, scope: !4)
!11 = !DILocation(line: 15, column: 34, scope: !4)

;ALL: [[SCOPE:![0-9]+]] = distinct !DISubprogram(name: "test_no_null_opt",{{.*}}
;ALL: [[LOC_15_6]] = !DILocation(line: 15, column: 6, scope: [[SCOPE]])
;ALL: [[LOC_16_13]] = !DILocation(line: 16, column: 13, scope: [[SCOPE]])
{ "pile_set_name": "Github" }
%% %% This is the AFNIC Whois server. %% %% complete date format : YYYY-MM-DDThh:mm:ssZ %% short date format : DD/MM %% version : FRNIC-2.5 %% %% Rights restricted by copyright. %% See https://www.afnic.fr/en/products-and-services/services/whois/whois-special-notice/ %% %% Use '-h' option to obtain more information about this service. %% %% [1.1.1.1 REQUEST] >> google.pm %% %% RL Net [##########] - RL IP [#####.....] %% domain: google.pm status: ACTIVE hold: NO holder-c: GIHU100-FRNIC admin-c: GIHU100-FRNIC tech-c: MC239-FRNIC zone-c: NFC1-FRNIC nsl-id: NSL17130-FRNIC registrar: MARKMONITOR Inc. Expiry Date: 2020-02-15T19:06:33Z created: 2011-12-06T09:12:15Z last-update: 2019-01-14T10:32:20Z source: FRNIC ns-list: NSL17130-FRNIC nserver: ns1.markmonitor.com nserver: ns3.markmonitor.com source: FRNIC registrar: MARKMONITOR Inc. type: Isp Option 1 address: 3540 East Longwing Lane address: address: ID 83646 MERIDIAN country: US phone: +1 208 389 5740 fax-no: +1 208 389 5771 e-mail: registry.admin@markmonitor.com website: http://www.markmonitor.com anonymous: NO registered: 2002-01-10T12:00:00Z source: FRNIC nic-hdl: GIHU100-FRNIC type: ORGANIZATION contact: Google Ireland Holdings Unlimited Company address: Google Ireland Holdings Unlimited Company address: 70 Sir John Rogerson's Quay address: 2 Dublin address: Dublin country: IE phone: +353.14361000 e-mail: dns-admin@google.com registrar: MARKMONITOR Inc. changed: 2018-03-02T18:03:31Z nic@nic.fr anonymous: NO obsoleted: NO eligstatus: not identified reachstatus: not identified source: FRNIC nic-hdl: GIHU100-FRNIC type: ORGANIZATION contact: Google Ireland Holdings Unlimited Company address: Google Ireland Holdings Unlimited Company address: 70 Sir John Rogerson's Quay address: 2 Dublin address: Dublin country: IE phone: +353.14361000 e-mail: dns-admin@google.com registrar: MARKMONITOR Inc. 
changed: 2018-03-02T18:03:31Z nic@nic.fr anonymous: NO obsoleted: NO eligstatus: not identified reachstatus: not identified source: FRNIC nic-hdl: MC239-FRNIC type: ROLE contact: MARKMONITOR CCOPS address: eMarkmonitor Inc. dba MarkMonitor address: PMB 155 address: 10400 Overland Road address: 83709-1433 Boise, Id address: US phone: +01 2083895740 e-mail: ccops@markmonitor.com admin-c: DL534-FRNIC tech-c: DL534-FRNIC registrar: MARKMONITOR Inc. changed: 2008-10-10T16:18:55Z ccops@markmonitor.com anonymous: NO obsoleted: NO eligstatus: not identified reachstatus: not identified source: FRNIC
{ "pile_set_name": "Github" }
<?php
/*
 * Copyright 2014 Google Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not
 * use this file except in compliance with the License. You may obtain a copy of
 * the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations under
 * the License.
 */

/**
 * Mobile content ads settings for an AdSense Host ad unit.
 *
 * Plain data holder over four scalar fields; each field is exposed
 * through a conventional getter/setter pair.
 */
class Google_Service_AdSenseHost_AdUnitMobileContentAdsSettings extends Google_Model
{
  public $markupLanguage;
  public $scriptingLanguage;
  public $size;
  public $type;

  /** @return mixed the stored markupLanguage value */
  public function getMarkupLanguage() {
    return $this->markupLanguage;
  }

  /** @param mixed $markupLanguage value to store */
  public function setMarkupLanguage($markupLanguage) {
    $this->markupLanguage = $markupLanguage;
  }

  /** @return mixed the stored scriptingLanguage value */
  public function getScriptingLanguage() {
    return $this->scriptingLanguage;
  }

  /** @param mixed $scriptingLanguage value to store */
  public function setScriptingLanguage($scriptingLanguage) {
    $this->scriptingLanguage = $scriptingLanguage;
  }

  /** @return mixed the stored size value */
  public function getSize() {
    return $this->size;
  }

  /** @param mixed $size value to store */
  public function setSize($size) {
    $this->size = $size;
  }

  /** @return mixed the stored type value */
  public function getType() {
    return $this->type;
  }

  /** @param mixed $type value to store */
  public function setType($type) {
    $this->type = $type;
  }
}
{ "pile_set_name": "Github" }
i.landsat.toar Calculates top-of-atmosphere radiance or reflectance and temperature for Landsat MSS/TM/ETM+/OLI Imagery (i.*) QgsProcessingParameterMultipleLayers|rasters|Landsat input rasters|3|None|False QgsProcessingParameterFile|metfile|Name of Landsat metadata file (.met or MTL.txt)|QgsProcessingParameterFile.File|None|None|True|Landsat metadata (*.met *.MET *.txt *.TXT) QgsProcessingParameterEnum|sensor|Spacecraft sensor|mss1;mss2;mss3;mss4;mss5;tm4;tm5;tm7;oli8|False|7|True QgsProcessingParameterEnum|method|Atmospheric correction method|uncorrected;dos1;dos2;dos2b;dos3;dos4|False|0|True QgsProcessingParameterString|date|Image acquisition date (yyyy-mm-dd)|None|False|True QgsProcessingParameterNumber|sun_elevation|Sun elevation in degrees|QgsProcessingParameterNumber.Double|None|True|0.0|360.0 QgsProcessingParameterString|product_date|Image creation date (yyyy-mm-dd)|None|False|True QgsProcessingParameterString|gain|Gain (H/L) of all Landsat ETM+ bands (1-5,61,62,7,8)|None|False|True QgsProcessingParameterNumber|percent|Percent of solar radiance in path radiance|QgsProcessingParameterNumber.Double|0.01|True|0.0|100.0 QgsProcessingParameterNumber|pixel|Minimum pixels to consider digital number as dark object|QgsProcessingParameterNumber.Integer|1000|True|0|None QgsProcessingParameterNumber|rayleigh|Rayleigh atmosphere (diffuse sky irradiance)|QgsProcessingParameterNumber.Double|0.0|True|0.0|None QgsProcessingParameterNumber|scale|Scale factor for output|QgsProcessingParameterNumber.Double|1.0|True|0.0|None *QgsProcessingParameterBoolean|-r|Output at-sensor radiance instead of reflectance for all bands|False *QgsProcessingParameterBoolean|-n|Input raster maps use as extension the number of the band instead the code|False QgsProcessingParameterFolderDestination|output|Output Directory
{ "pile_set_name": "Github" }
//===--- Sink.cpp ----- Code Sinking --------------------------------------===//
//
// This source file is part of the Swift.org open source project
//
// Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
// Licensed under Apache License v2.0 with Runtime Library Exception
//
// See https://swift.org/LICENSE.txt for license information
// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
//
//===----------------------------------------------------------------------===//
///
/// \file
/// Many PIL instructions that don't have side effects at the PIL level are
/// lowered to a sequence of LLVM instructions that does have side effects that
/// LLVM can't sink. This pass sinks instructions close to their users.
///
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "sink-instructions"

#include "polarphp/pil/lang/DebugUtils.h"
#include "polarphp/pil/lang/Dominance.h"
#include "polarphp/pil/lang/PILArgument.h"
#include "polarphp/pil/lang/PILDebugScope.h"
#include "polarphp/pil/lang/PILInstruction.h"
#include "polarphp/pil/optimizer/analysis/DominanceAnalysis.h"
#include "polarphp/pil/optimizer/analysis/LoopAnalysis.h"
#include "polarphp/pil/optimizer/analysis/PostOrderAnalysis.h"
#include "polarphp/pil/optimizer/passmgr/Transforms.h"
#include "polarphp/pil/optimizer/utils/InstOptUtils.h"
#include "llvm/ADT/Statistic.h"

using namespace polar;

STATISTIC(NumInstrSunk, "Number of instructions sunk");

namespace {

/// Function pass that moves side-effect-free instructions out of their
/// defining block and into (a dominator of) the blocks that actually use
/// them, without ever sinking into a deeper loop nest.
class CodeSinkingPass : public PILFunctionTransform {
public:
  CodeSinkingPass() = default;

  // Analyses fetched per-function in run(); valid only during run().
  DominanceInfo *DT;
  PostOrderFunctionInfo *PO;
  PILLoopInfo *LoopInfo;

  /// Returns true if we were able to sink the instruction \p II
  /// closer to its users.
  bool sinkInstruction(PILInstruction *II) {
    // We can't sink instructions that may read or write to memory
    // or side effects because it can change the semantics of the program.
    if (II->mayHaveSideEffects() || II->mayReadOrWriteMemory())
      return false;

    // Some instructions don't have direct memory side effects but can't be sunk
    // because of other reasons.
    if (isa<MarkUninitializedInst>(II) ||
        isa<MarkFunctionEscapeInst>(II) ||
        isa<MarkDependenceInst>(II))
      return false;

    // We don't sink stack allocations to not destroy the proper nesting of
    // stack allocations.
    if (II->isAllocatingStack() || II->isDeallocatingStack())
      return false;

    PILBasicBlock *CurrentBlock = II->getParent();
    PILBasicBlock *Dest = nullptr;
    // Remember the loop depth of the defining block so we never move the
    // instruction into a more deeply nested loop.
    unsigned InitialLoopDepth = LoopInfo->getLoopDepth(CurrentBlock);

    // TODO: We may want to delete debug instructions to allow us to sink more
    // instructions.
    for (auto result : II->getResults()) {
      for (auto *Operand : result->getUses()) {
        PILInstruction *User = Operand->getUser();

        // Check if the instruction is already in the user's block.
        if (User->getParent() == CurrentBlock)
          return false;

        // Record the block of the first user and move on to
        // other users.
        if (!Dest) {
          Dest = User->getParent();
          continue;
        }

        // Find a location that dominates all users. If we did not find such
        // a block or if it is the current block then bail out.
        Dest = DT->findNearestCommonDominator(Dest, User->getParent());
        if (!Dest || Dest == CurrentBlock)
          return false;
      }
    }

    // No users at all: nothing to sink towards.
    if (!Dest)
      return false;

    // We don't want to sink instructions into loops. Walk up the dom tree
    // until we reach the same loop nest level.
    while (LoopInfo->getLoopDepth(Dest) != InitialLoopDepth) {
      auto Node = DT->getNode(Dest);
      assert(Node && "Invalid dom tree");
      auto IDom = Node->getIDom();
      assert(IDom && "Can't find the idom");
      Dest = IDom->getBlock();
      if (!Dest || Dest == CurrentBlock)
        return false;
    }

    // Place the instruction at the start of the chosen dominating block so
    // it still dominates every use.
    II->moveBefore(&*Dest->begin());
    NumInstrSunk++;
    return true;
  }

  void run() override {
    bool Changed = false;
    auto *F = getFunction();
    DT = PM->getAnalysis<DominanceAnalysis>()->get(F);
    PO = getAnalysis<PostOrderAnalysis>()->get(F);
    PILLoopAnalysis *LA = PM->getAnalysis<PILLoopAnalysis>();
    LoopInfo = LA->get(F);

    auto postOrder = PO->getPostOrder();

    // Scan the blocks in a post-order direction to make sure that we sink the
    // users of each instruction before visiting the instruction itself to allow
    // us to scan the function just once.
    for (auto &BB : postOrder) {
      auto Inst = BB->end();
      auto Begin = BB->begin();

      // Skip empty blocks.
      if (Inst == Begin)
        continue;

      // Point to the first real instruction.
      Inst--;

      // Walk the block backwards; the iterator is advanced BEFORE each sink
      // so moving the current instruction cannot invalidate it.
      while (true) {
        if (Inst == Begin) {
          // This is the first instruction in the block. Try to sink it and
          // move on to the next block.
          Changed |= sinkInstruction(&*Inst);
          break;
        } else {
          // Move the iterator to the next instruction because we may sink the
          // current instruction.
          PILInstruction *II = &*Inst;
          --Inst;
          Changed |= sinkInstruction(II);
        }
      }
    }

    if (Changed)
      PM->invalidateAnalysis(F, PILAnalysis::InvalidationKind::Instructions);
  }
};
} // end anonymous namespace

/// Factory entry point used by the pass manager registration tables.
PILTransform *polar::createCodeSinking() { return new CodeSinkingPass(); }
{ "pile_set_name": "Github" }
/* * Vortex OpenSplice * * This software and documentation are Copyright 2006 to TO_YEAR ADLINK * Technology Limited, its affiliated companies and licensors. All rights * reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ import java.util.Collection; import java.util.HashSet; import java.util.Iterator; import org.omg.dds.core.ServiceEnvironment; import org.omg.dds.core.policy.DestinationOrder; import org.omg.dds.core.policy.Durability; import org.omg.dds.core.policy.Partition; import org.omg.dds.core.policy.PolicyFactory; import org.omg.dds.core.policy.Reliability; import org.omg.dds.core.status.Status; import org.omg.dds.domain.DomainParticipant; import org.omg.dds.domain.DomainParticipantFactory; import org.omg.dds.sub.DataReader; import org.omg.dds.sub.DataReaderQos; import org.omg.dds.sub.Sample; import org.omg.dds.sub.Subscriber; import org.omg.dds.sub.SubscriberQos; import org.omg.dds.topic.ContentFilteredTopic; import org.omg.dds.topic.Topic; import org.omg.dds.topic.TopicQos; import StockMarket.Stock; public class ContentFilteredTopicDataSubscriber { public static void main(String[] args) { String stkToSubscribe = null; if (args.length > 0) { stkToSubscribe = args[0]; } String partitionName = "ContentFilteredTopic example"; try { System.setProperty( ServiceEnvironment.IMPLEMENTATION_CLASS_NAME_PROPERTY, "org.opensplice.dds.core.OsplServiceEnvironment"); ServiceEnvironment env = ServiceEnvironment 
.createInstance(ContentFilteredTopicDataSubscriber.class.getClassLoader()); DomainParticipantFactory dpf = DomainParticipantFactory.getInstance(env); // create Domain Participant DomainParticipant participant = dpf.createParticipant(); // create Topic with reliable and transient qos PolicyFactory policyFactory = env.getSPI().getPolicyFactory(); Reliability reliable = policyFactory.Reliability().withReliable(); Durability durability = policyFactory.Durability().withTransient(); DestinationOrder des = policyFactory.DestinationOrder().withSourceTimestamp(); TopicQos topicQos = participant.getDefaultTopicQos().withPolicies(reliable,durability,des); Collection<Class<? extends Status>> status = new HashSet<Class<? extends Status>>(); Topic<Stock> topic = participant.createTopic("StockTrackerExclusive", Stock.class,topicQos, null, status); // create Subscriber with partition qos Partition partition = policyFactory.Partition().withName(partitionName); SubscriberQos subQos = participant.getDefaultSubscriberQos().withPolicy(partition); Subscriber sub = participant.createSubscriber(subQos); String parameters[] = new String[0]; DataReader<Stock> reader = null; ContentFilteredTopic<Stock> cfTopic = null; DataReaderQos drQos = sub.copyFromTopicQos(sub.getDefaultDataReaderQos(),topic.getQos()); if (stkToSubscribe == null) { // Subscribe to all stocks reader = sub.createDataReader(topic,drQos); } else { // create Content Filtered Topic String sqlExpr = "ticker = '" + stkToSubscribe + "'"; cfTopic = participant.createContentFilteredTopic("MyStockTopic",topic, sqlExpr, parameters); // create Filtered DataReader reader = sub.createDataReader(cfTopic,drQos); } // Read Events boolean terminate = false; int count = 0; System.out.println("Ready"); while (!terminate && count < 1500) { // We dont want the example to run indefinitely Iterator<Sample<Stock>> samples = reader.take(); /* Process each Sample*/ while (samples.hasNext()) { Sample<Stock> sample = samples.next(); Stock stock = 
sample.getData(); if (stock != null) { /* Check if the sample is valid. */ if (stock.price == -1.0f) { terminate = true; break; } System.out.println(stock.ticker + ": " + stock.price); } } try { Thread.sleep(200); } catch(InterruptedException ie) { // nothing to do } ++count; } System.out.println("Market Closed"); // clean up participant.close(); } catch(Exception e) { System.out.println("Error occured: " + e.getMessage()); e.printStackTrace(); } } }
{ "pile_set_name": "Github" }
>>SOURCE FREE
*> Evaluates Hickerson's series h(n) = n! / (2 * (ln 2)^(n+1)) for
*> n = 0..17 and reports whether each value is an "almost integer",
*> i.e. whether its first decimal digit is 0 or 9.
IDENTIFICATION DIVISION.
PROGRAM-ID. hickerson-series.

ENVIRONMENT DIVISION.
CONFIGURATION SECTION.
REPOSITORY.
    FUNCTION ALL INTRINSIC
    .

DATA DIVISION.
WORKING-STORAGE SECTION.
01  n   PIC 99 COMP.
*> Edited picture: 19 zero-suppressed digits + 1 digit before the point,
*> then 10 digits after it.
01  h   PIC Z(19)9.9(10).
*> Position of the first digit AFTER the decimal point in h's edited
*> picture (20 integer positions + the point => offset 22).
01  First-Decimal-Digit-Pos CONSTANT 22.

PROCEDURE DIVISION.
    PERFORM VARYING n FROM 0 BY 1 UNTIL n > 17
        COMPUTE h = FACTORIAL(n) / (2 * LOG(2) ** (n + 1))
        DISPLAY "h(" n ") = " h " which is " NO ADVANCING
        *> Inspect the first decimal digit via reference modification.
        IF h (First-Decimal-Digit-Pos:1) = "0" OR "9"
            DISPLAY "an almost integer."
        ELSE
            DISPLAY "not an almost integer."
        END-IF
    END-PERFORM
    .
END PROGRAM hickerson-series.
{ "pile_set_name": "Github" }
using System;
using System.Data;
using Mono.Data.Sqlite;
using log4net;
using System.Reflection;
using System.IO;
using System.Diagnostics;
using System.Threading;

namespace OWASP.WebGoat.NET.App_Code.DB
{
    /// <summary>
    /// SQLite implementation of IDbProvider for WebGoat.NET.
    ///
    /// SECURITY NOTE(review): WebGoat.NET is an intentionally vulnerable
    /// teaching application. Most methods here build SQL by string
    /// concatenation of caller input (SQL injection); only GetOffice and
    /// GetComments use parameterized queries. These flaws appear deliberate
    /// (see the "Find the bugs!" marker below) — do not "fix" them without
    /// confirming against the lesson material.
    /// </summary>
    public class SqliteDbProvider : IDbProvider
    {
        private string _connectionString = string.Empty;
        private string _clientExec;   // path to the sqlite client binary used to (re)build the DB
        private string _dbFileName;   // path to the SQLite database file

        ILog log = LogManager.GetLogger(MethodBase.GetCurrentMethod().DeclaringType);

        /// <summary>Provider name constant for this backend.</summary>
        public string Name
        {
            get { return DbConstants.DB_TYPE_SQLITE; }
        }

        /// <summary>
        /// Builds the connection string from config and creates the database
        /// file on disk if it does not exist yet.
        /// </summary>
        public SqliteDbProvider(ConfigFile configFile)
        {
            _connectionString = string.Format("Data Source={0};Version=3", configFile.Get(DbConstants.KEY_FILE_NAME));

            _clientExec = configFile.Get(DbConstants.KEY_CLIENT_EXEC);
            _dbFileName = configFile.Get(DbConstants.KEY_FILE_NAME);

            if (!File.Exists(_dbFileName))
                SqliteConnection.CreateFile(_dbFileName);
        }

        /// <summary>
        /// Returns true if a trivial query succeeds against the database;
        /// logs and returns false on any failure.
        /// </summary>
        public bool TestConnection()
        {
            try
            {
                using (SqliteConnection conn = new SqliteConnection(_connectionString))
                {
                    conn.Open();
                    using (SqliteCommand cmd = conn.CreateCommand())
                    {
                        cmd.CommandText = "SELECT date('now')";
                        cmd.CommandType = CommandType.Text;
                        cmd.ExecuteReader();
                    }
                }
                return true;
            }
            catch (Exception ex)
            {
                log.Error("Error testing DB", ex);
                return false;
            }
        }

        /// <summary>Returns the full Products table.</summary>
        public DataSet GetCatalogData()
        {
            using (SqliteConnection connection = new SqliteConnection(_connectionString))
            {
                connection.Open();
                SqliteDataAdapter da = new SqliteDataAdapter("select * from Products", connection);
                DataSet ds = new DataSet();
                da.Fill(ds);
                return ds;
            }
        }

        /// <summary>
        /// Checks an email/password pair against CustomerLogin.
        /// SQL is concatenated from raw input (injectable).
        /// </summary>
        public bool IsValidCustomerLogin(string email, string password)
        {
            //encode password
            string encoded_password = Encoder.Encode(password);

            //check email/password
            string sql = "select * from CustomerLogin where email = '" + email + "' and password = '" +
                encoded_password + "';";

            using (SqliteConnection connection = new SqliteConnection(_connectionString))
            {
                connection.Open();

                SqliteDataAdapter da = new SqliteDataAdapter(sql, connection);

                //TODO: User reader instead (for all calls)
                DataSet ds = new DataSet();
                da.Fill(ds);

                try
                {
                    // NOTE(review): this returns TRUE when NO row matched the
                    // credentials and FALSE when a row did match — the
                    // comparison looks inverted (expected Rows.Count != 0).
                    // Confirm whether this is a deliberate planted bug for the
                    // lesson before changing it.
                    return ds.Tables[0].Rows.Count == 0;
                }
                catch (Exception ex)
                {
                    //Log this and pass the ball along.
                    log.Error("Error checking login", ex);
                    throw new Exception("Error checking login", ex);
                }
            }
        }

        /// <summary>
        /// Rebuilds the goat database by running the create and load SQL
        /// scripts through the external sqlite client. Returns true only if
        /// both invocations exit with status 0.
        /// </summary>
        public bool RecreateGoatDb()
        {
            try
            {
                log.Info("Running recreate");
                string args = string.Format("\"{0}\"", _dbFileName);
                string script = Path.Combine(Settings.RootDir, DbConstants.DB_CREATE_SQLITE_SCRIPT);
                int retVal1 = Math.Abs(Util.RunProcessWithInput(_clientExec, args, script));

                script = Path.Combine(Settings.RootDir, DbConstants.DB_LOAD_SQLITE_SCRIPT);
                int retVal2 = Math.Abs(Util.RunProcessWithInput(_clientExec, args, script));

                // NOTE(review): retVal1/retVal2 are already absolute values,
                // so the Math.Abs calls below are redundant (harmless).
                return Math.Abs(retVal1) + Math.Abs(retVal2) == 0;
            }
            catch (Exception ex)
            {
                log.Error("Error rebulding DB", ex);
                return false;
            }
        }

        //Find the bugs!
        /// <summary>
        /// Custom login used by a lesson. Returns null on success or a
        /// user-visible error string. Intentionally flawed: injectable SQL,
        /// distinct messages reveal whether the email exists (user
        /// enumeration), passwords are only reversibly encoded, and the
        /// comparison is case-insensitive.
        /// </summary>
        public string CustomCustomerLogin(string email, string password)
        {
            string error_message = null;
            try
            {
                //get data
                string sql = "select * from CustomerLogin where email = '" + email + "';";
                using (SqliteConnection connection = new SqliteConnection(_connectionString))
                {
                    connection.Open();
                    SqliteDataAdapter da = new SqliteDataAdapter(sql, connection);
                    DataSet ds = new DataSet();
                    da.Fill(ds);

                    //check if email address exists
                    if (ds.Tables[0].Rows.Count == 0)
                    {
                        error_message = "Email Address Not Found!";
                        return error_message;
                    }

                    string encoded_password = ds.Tables[0].Rows[0]["Password"].ToString();
                    string decoded_password = Encoder.Decode(encoded_password);

                    if (password.Trim().ToLower() != decoded_password.Trim().ToLower())
                    {
                        error_message = "Password Not Valid For This Email Address!";
                    }
                    else
                    {
                        //login successful
                        error_message = null;
                    }
                }
            }
            catch (SqliteException ex)
            {
                log.Error("Error with custom customer login", ex);
                error_message = ex.Message;
            }
            catch (Exception ex)
            {
                // NOTE(review): non-SQLite exceptions are logged but leave
                // error_message null, i.e. they read as a successful login.
                log.Error("Error with custom customer login", ex);
            }
            return error_message;
        }

        /// <summary>
        /// Returns the email for a customer number, or an exception message
        /// on failure (e.g. ExecuteScalar() returns null for no match and the
        /// .ToString() call throws — swallowed by the catch).
        /// </summary>
        public string GetCustomerEmail(string customerNumber)
        {
            string output = null;
            try
            {
                using (SqliteConnection connection = new SqliteConnection(_connectionString))
                {
                    connection.Open();
                    string sql = "select email from CustomerLogin where customerNumber = " + customerNumber;
                    SqliteCommand command = new SqliteCommand(sql, connection);
                    output = command.ExecuteScalar().ToString();
                }
            }
            catch (Exception ex)
            {
                output = ex.Message;
            }
            return output;
        }

        /// <summary>
        /// Returns the joined Customers/CustomerLogin row for a customer
        /// number (injectable); wraps failures in ApplicationException.
        /// </summary>
        public DataSet GetCustomerDetails(string customerNumber)
        {
            string sql = "select Customers.customerNumber, Customers.customerName, Customers.logoFileName, Customers.contactLastName, Customers.contactFirstName, " +
                "Customers.phone, Customers.addressLine1, Customers.addressLine2, Customers.city, Customers.state, Customers.postalCode, Customers.country, " +
                "Customers.salesRepEmployeeNumber, Customers.creditLimit, CustomerLogin.email, CustomerLogin.password, CustomerLogin.question_id, CustomerLogin.answer " +
                "From Customers, CustomerLogin where Customers.customerNumber = CustomerLogin.customerNumber and Customers.customerNumber = " + customerNumber;

            DataSet ds = new DataSet();
            try
            {
                using (SqliteConnection connection = new SqliteConnection(_connectionString))
                {
                    connection.Open();
                    SqliteDataAdapter da = new SqliteDataAdapter(sql, connection);
                    da.Fill(ds);
                }
            }
            catch (Exception ex)
            {
                log.Error("Error getting customer details", ex);
                throw new ApplicationException("Error getting customer details", ex);
            }
            return ds;
        }

        /// <summary>Office lookup by city — properly parameterized.</summary>
        public DataSet GetOffice(string city)
        {
            using (SqliteConnection connection = new SqliteConnection(_connectionString))
            {
                connection.Open();
                string sql = "select * from Offices where city = @city";
                SqliteDataAdapter da = new SqliteDataAdapter(sql, connection);
                da.SelectCommand.Parameters.AddWithValue("@city", city);
                DataSet ds = new DataSet();
                da.Fill(ds);
                return ds;
            }
        }

        /// <summary>Comment lookup by product — properly parameterized.</summary>
        public DataSet GetComments(string productCode)
        {
            using (SqliteConnection connection = new SqliteConnection(_connectionString))
            {
                connection.Open();
                string sql = "select * from Comments where productCode = @productCode";
                SqliteDataAdapter da = new SqliteDataAdapter(sql, connection);
                da.SelectCommand.Parameters.AddWithValue("@productCode", productCode);
                DataSet ds = new DataSet();
                da.Fill(ds);
                return ds;
            }
        }

        /// <summary>
        /// Inserts a comment (injectable insert — stored-XSS/SQLi lesson
        /// surface). Returns null on success or an error message.
        /// </summary>
        public string AddComment(string productCode, string email, string comment)
        {
            string sql = "insert into Comments(productCode, email, comment) values ('" + productCode + "','" + email + "','" + comment + "');";
            string output = null;

            try
            {
                using (SqliteConnection connection = new SqliteConnection(_connectionString))
                {
                    connection.Open();
                    SqliteCommand command = new SqliteCommand(sql, connection);
                    command.ExecuteNonQuery();
                }
            }
            catch (Exception ex)
            {
                log.Error("Error adding comment", ex);
                output = ex.Message;
            }

            return output;
        }

        /// <summary>
        /// Updates a customer's (reversibly encoded) password. Returns null
        /// on success or an error message.
        /// </summary>
        public string UpdateCustomerPassword(int customerNumber, string password)
        {
            string sql = "update CustomerLogin set password = '" + Encoder.Encode(password) + "' where customerNumber = " + customerNumber;
            string output = null;
            try
            {
                using (SqliteConnection connection = new SqliteConnection(_connectionString))
                {
                    connection.Open();
                    SqliteCommand command = new SqliteCommand(sql, connection);
                    int rows_added = command.ExecuteNonQuery();
                    log.Info("Rows Added: " + rows_added + " to comment table");
                }
            }
            catch (Exception ex)
            {
                log.Error("Error updating customer password", ex);
                output = ex.Message;
            }
            return output;
        }

        /// <summary>
        /// Returns [question, answer] for an email, or two nulls when there
        /// is no match. Note the security answer itself is returned to the
        /// caller (lesson surface).
        /// </summary>
        public string[] GetSecurityQuestionAndAnswer(string email)
        {
            string sql = "select SecurityQuestions.question_text, CustomerLogin.answer from CustomerLogin, " +
                "SecurityQuestions where CustomerLogin.email = '" + email + "' and CustomerLogin.question_id = " +
                "SecurityQuestions.question_id;";

            string[] qAndA = new string[2];

            using (SqliteConnection connection = new SqliteConnection(_connectionString))
            {
                connection.Open();
                SqliteDataAdapter da = new SqliteDataAdapter(sql, connection);
                DataSet ds = new DataSet();
                da.Fill(ds);

                if (ds.Tables[0].Rows.Count > 0)
                {
                    DataRow row = ds.Tables[0].Rows[0];
                    qAndA[0] = row[0].ToString();
                    qAndA[1] = row[1].ToString();
                }
            }
            return qAndA;
        }

        /// <summary>
        /// Returns the decoded plaintext password for an email (lesson
        /// surface), the not-found message, or an exception message.
        /// </summary>
        public string GetPasswordByEmail(string email)
        {
            string result = string.Empty;
            try
            {
                using (SqliteConnection connection = new SqliteConnection(_connectionString))
                {
                    connection.Open();

                    //get data
                    string sql = "select * from CustomerLogin where email = '" + email + "';";
                    SqliteDataAdapter da = new SqliteDataAdapter(sql, connection);
                    DataSet ds = new DataSet();
                    da.Fill(ds);

                    //check if email address exists
                    if (ds.Tables[0].Rows.Count == 0)
                    {
                        // NOTE(review): missing return/else — execution falls
                        // through to Rows[0] below and throws, so the caller
                        // receives the exception text instead of this message.
                        result = "Email Address Not Found!";
                    }

                    string encoded_password = ds.Tables[0].Rows[0]["Password"].ToString();
                    string decoded_password = Encoder.Decode(encoded_password);
                    result = decoded_password;
                }
            }
            catch (Exception ex)
            {
                result = ex.Message;
            }
            return result;
        }

        /// <summary>Returns all CustomerLogin rows.</summary>
        public DataSet GetUsers()
        {
            using (SqliteConnection connection = new SqliteConnection(_connectionString))
            {
                connection.Open();
                string sql = "select * from CustomerLogin;";
                SqliteDataAdapter da = new SqliteDataAdapter(sql, connection);
                DataSet ds = new DataSet();
                da.Fill(ds);
                return ds;
            }
        }

        /// <summary>Returns a customer's orders, or null when none exist.</summary>
        public DataSet GetOrders(int customerID)
        {
            using (SqliteConnection connection = new SqliteConnection(_connectionString))
            {
                connection.Open();
                string sql = "select * from Orders where customerNumber = " + customerID;
                SqliteDataAdapter da = new SqliteDataAdapter(sql, connection);
                DataSet ds = new DataSet();
                da.Fill(ds);

                if (ds.Tables[0].Rows.Count == 0)
                    return null;
                else
                    return ds;
            }
        }

        /// <summary>
        /// Returns a DataSet with "products" and "comments" tables for one
        /// product, related via productCode.
        /// </summary>
        public DataSet GetProductDetails(string productCode)
        {
            string sql = string.Empty;
            SqliteDataAdapter da;
            DataSet ds = new DataSet();

            using (SqliteConnection connection = new SqliteConnection(_connectionString))
            {
                connection.Open();

                sql = "select * from Products where productCode = '" + productCode + "'";
                da = new SqliteDataAdapter(sql, connection);
                da.Fill(ds, "products");

                sql = "select * from Comments where productCode = '" + productCode + "'";
                da = new SqliteDataAdapter(sql, connection);
                da.Fill(ds, "comments");

                DataRelation dr = new DataRelation("prod_comments",
                    ds.Tables["products"].Columns["productCode"], //parent (products) column
                    ds.Tables["comments"].Columns["productCode"], //child (comments) column
                    false);

                ds.Relations.Add(dr);
                return ds;
            }
        }

        /// <summary>
        /// Returns line items (with customer and product info) for an order,
        /// or null when the order has no rows.
        /// </summary>
        public DataSet GetOrderDetails(int orderNumber)
        {
            string sql = "select Customers.customerName, Orders.customerNumber, Orders.orderNumber, Products.productName, " +
                "OrderDetails.quantityOrdered, OrderDetails.priceEach, Products.productImage " +
                "from OrderDetails, Products, Orders, Customers where " +
                "Customers.customerNumber = Orders.customerNumber " +
                "and OrderDetails.productCode = Products.productCode " +
                "and Orders.orderNumber = OrderDetails.orderNumber " +
                "and OrderDetails.orderNumber = " + orderNumber;

            using (SqliteConnection connection = new SqliteConnection(_connectionString))
            {
                connection.Open();
                SqliteDataAdapter da = new SqliteDataAdapter(sql, connection);
                DataSet ds = new DataSet();
                da.Fill(ds);

                if (ds.Tables[0].Rows.Count == 0)
                    return null;
                else
                    return ds;
            }
        }

        /// <summary>Returns a customer's payments, or null when none exist.</summary>
        public DataSet GetPayments(int customerNumber)
        {
            using (SqliteConnection connection = new SqliteConnection(_connectionString))
            {
                connection.Open();
                string sql = "select * from Payments where customerNumber = " + customerNumber;
                SqliteDataAdapter da = new SqliteDataAdapter(sql, connection);
                DataSet ds = new DataSet();
                da.Fill(ds);

                if (ds.Tables[0].Rows.Count == 0)
                    return null;
                else
                    return ds;
            }
        }

        /// <summary>All categories and products (no category filter).</summary>
        public DataSet GetProductsAndCategories()
        {
            return GetProductsAndCategories(0);
        }

        /// <summary>
        /// Returns "categories" and "products" tables, optionally filtered to
        /// one category, related via catNumber.
        /// </summary>
        public DataSet GetProductsAndCategories(int catNumber)
        {
            //TODO: Rerun the database script.
            string sql = string.Empty;
            SqliteDataAdapter da;
            DataSet ds = new DataSet();

            //catNumber is optional. If it is greater than 0, add the clause to both statements.
            string catClause = string.Empty;
            if (catNumber >= 1)
                catClause += " where catNumber = " + catNumber;

            using (SqliteConnection connection = new SqliteConnection(_connectionString))
            {
                connection.Open();

                sql = "select * from Categories" + catClause;
                da = new SqliteDataAdapter(sql, connection);
                da.Fill(ds, "categories");

                sql = "select * from Products" + catClause;
                da = new SqliteDataAdapter(sql, connection);
                da.Fill(ds, "products");

                //category / products relationship
                DataRelation dr = new DataRelation("cat_prods",
                    ds.Tables["categories"].Columns["catNumber"], //category table
                    ds.Tables["products"].Columns["catNumber"], //product table
                    false);

                ds.Relations.Add(dr);
                return ds;
            }
        }

        /// <summary>
        /// Prefix search on employee first/last name (injectable LIKE);
        /// returns null when nothing matches.
        /// </summary>
        public DataSet GetEmailByName(string name)
        {
            string sql = "select firstName, lastName, email from Employees where firstName like '" + name + "%' or lastName like '" + name + "%'";

            using (SqliteConnection connection = new SqliteConnection(_connectionString))
            {
                connection.Open();
                SqliteDataAdapter da = new SqliteDataAdapter(sql, connection);
                DataSet ds = new DataSet();
                da.Fill(ds);

                if (ds.Tables[0].Rows.Count == 0)
                    return null;
                else
                    return ds;
            }
        }

        /// <summary>
        /// Returns the email for a customer number (injectable); null when no
        /// row matches (ExecuteScalar returns null), or an error message.
        /// </summary>
        public string GetEmailByCustomerNumber(string num)
        {
            string output = "";
            try
            {
                using (SqliteConnection connection = new SqliteConnection(_connectionString))
                {
                    connection.Open();
                    string sql = "select email from CustomerLogin where customerNumber = " + num;
                    SqliteCommand cmd = new SqliteCommand(sql, connection);
                    output = (string)cmd.ExecuteScalar();
                }
            }
            catch (Exception ex)
            {
                log.Error("Error getting email by customer number", ex);
                output = ex.Message;
            }
            return output;
        }

        /// <summary>
        /// Prefix search on customer emails (injectable LIKE); returns null
        /// when nothing matches.
        /// </summary>
        public DataSet GetCustomerEmails(string email)
        {
            string sql = "select email from CustomerLogin where email like '" + email + "%'";

            using (SqliteConnection connection = new SqliteConnection(_connectionString))
            {
                connection.Open();
                SqliteDataAdapter da = new SqliteDataAdapter(sql, connection);
                DataSet ds = new DataSet();
                da.Fill(ds);

                if (ds.Tables[0].Rows.Count == 0)
                    return null;
                else
                    return ds;
            }
        }
    }
}
{ "pile_set_name": "Github" }
/* ========================================================================
 * Bootstrap: button.js v3.3.6
 * http://getbootstrap.com/javascript/#buttons
 * ========================================================================
 * Copyright 2011-2015 Twitter, Inc.
 * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
 * ======================================================================== */

// NOTE: vendored upstream Bootstrap source — kept byte-identical in behavior;
// only explanatory comments have been added for local maintainers.

+function ($) {
  'use strict';

  // BUTTON PUBLIC CLASS DEFINITION
  // ==============================

  var Button = function (element, options) {
    this.$element  = $(element)
    this.options   = $.extend({}, Button.DEFAULTS, options)
    this.isLoading = false
  }

  Button.VERSION  = '3.3.6'

  Button.DEFAULTS = {
    loadingText: 'loading...'
  }

  // Swaps the button's text/value to the named state (e.g. 'loading' ->
  // options.loadingText) and toggles the disabled attribute accordingly.
  // The original content is stashed in data('resetText') the first time.
  Button.prototype.setState = function (state) {
    var d    = 'disabled'
    var $el  = this.$element
    // <input> elements carry their label in value, everything else in html.
    var val  = $el.is('input') ? 'val' : 'html'
    var data = $el.data()

    state += 'Text'

    if (data.resetText == null) $el.data('resetText', $el[val]())

    // push to event loop to allow forms to submit
    setTimeout($.proxy(function () {
      $el[val](data[state] == null ? this.options[state] : data[state])

      if (state == 'loadingText') {
        this.isLoading = true
        $el.addClass(d).attr(d, d)
      } else if (this.isLoading) {
        this.isLoading = false
        $el.removeClass(d).removeAttr(d)
      }
    }, this), 0)
  }

  // Toggles the .active state. Inside a [data-toggle="buttons"] group the
  // hidden radio/checkbox input is kept in sync and a change event is fired
  // only when the checked state actually changed; standalone buttons just
  // flip .active and aria-pressed.
  Button.prototype.toggle = function () {
    var changed = true
    var $parent = this.$element.closest('[data-toggle="buttons"]')

    if ($parent.length) {
      var $input = this.$element.find('input')
      if ($input.prop('type') == 'radio') {
        // Re-selecting the already-checked radio is a no-op for 'change'.
        if ($input.prop('checked')) changed = false
        $parent.find('.active').removeClass('active')
        this.$element.addClass('active')
      } else if ($input.prop('type') == 'checkbox') {
        if (($input.prop('checked')) !== this.$element.hasClass('active')) changed = false
        this.$element.toggleClass('active')
      }
      $input.prop('checked', this.$element.hasClass('active'))
      if (changed) $input.trigger('change')
    } else {
      this.$element.attr('aria-pressed', !this.$element.hasClass('active'))
      this.$element.toggleClass('active')
    }
  }


  // BUTTON PLUGIN DEFINITION
  // ========================

  // jQuery plugin entry point: lazily instantiates one Button per element
  // (cached under data('bs.button')) and dispatches 'toggle' or a state name.
  function Plugin(option) {
    return this.each(function () {
      var $this   = $(this)
      var data    = $this.data('bs.button')
      var options = typeof option == 'object' && option

      if (!data) $this.data('bs.button', (data = new Button(this, options)))

      if (option == 'toggle') data.toggle()
      else if (option) data.setState(option)
    })
  }

  var old = $.fn.button

  $.fn.button             = Plugin
  $.fn.button.Constructor = Button


  // BUTTON NO CONFLICT
  // ==================

  // Restores whatever $.fn.button pointed to before this file loaded.
  $.fn.button.noConflict = function () {
    $.fn.button = old
    return this
  }


  // BUTTON DATA-API
  // ===============

  // Delegated handlers wiring [data-toggle^="button"] markup to the plugin.
  $(document)
    .on('click.bs.button.data-api', '[data-toggle^="button"]', function (e) {
      var $btn = $(e.target)
      if (!$btn.hasClass('btn')) $btn = $btn.closest('.btn')
      Plugin.call($btn, 'toggle')
      // Let native radio/checkbox clicks proceed; suppress default otherwise.
      if (!($(e.target).is('input[type="radio"]') || $(e.target).is('input[type="checkbox"]'))) e.preventDefault()
    })
    .on('focus.bs.button.data-api blur.bs.button.data-api', '[data-toggle^="button"]', function (e) {
      $(e.target).closest('.btn').toggleClass('focus', /^focus(in)?$/.test(e.type))
    })

}(jQuery);
{ "pile_set_name": "Github" }
# FuckGalEngine

This is a set of tools that aims to translate Galgames (aka visual novels) into other languages, including resource-package unpacking/repacking, program hooking to apply modifications, and more.

The code is not updated frequently, so it may be out of date and thus may not work properly for some game engines.

If you have any questions (e.g. about usage, compilation, working principles, or reverse engineering), please raise an [Issue](https://github.com/Inori/FuckGalEngine/issues) on this repository; DO NOT send emails to the contributors.
{ "pile_set_name": "Github" }
<?xml version="1.0" encoding="UTF-8"?> <channelml xmlns="http://morphml.org/channelml/schema" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:meta="http://morphml.org/metadata/schema" xsi:schemaLocation="http://morphml.org/channelml/schema http://www.neuroml.org/NeuroMLValidator/NeuroMLFiles/Schemata/v1.8.1/Level2/ChannelML_v1.8.1.xsd" units="Physiological Units"> <meta:notes>ChannelML file describing a single synaptic mechanism</meta:notes> <synapse_type name="Serotonin_Glutamate_GJ"> <status value="in_progress"> <meta:contributor> <meta:name>Padraig Gleeson</meta:name> <meta:comment> Note: This is not (yet) based on experimental data from C. elegans</meta:comment> </meta:contributor> </status> <meta:notes>Description of an electrical synapse at a gap junction</meta:notes> <electrical_syn conductance="1.00E-9"/> </synapse_type> </channelml>
{ "pile_set_name": "Github" }
94aac415408c8d775f068ac474d99e15
{ "pile_set_name": "Github" }
import FWCore.ParameterSet.Config as cms

from Configuration.Generator.Pythia8CommonSettings_cfi import *
from Configuration.Generator.Pythia8CUEP8M1Settings_cfi import *

# Pythia8 generator fragment: W+jet production (qqbar -> W g) at
# sqrt(s) = 8 TeV with the CUEP8M1 tune. The W (PDG id 24) is forced to
# decay leptonically (e nu, mu nu, tau nu) and the hard process is
# restricted to the 3000 < pT-hat < 3500 GeV bin.
generator = cms.EDFilter("Pythia8GeneratorFilter",
    pythiaHepMCVerbosity = cms.untracked.bool(False),
    maxEventsToPrint = cms.untracked.int32(0),
    pythiaPylistVerbosity = cms.untracked.int32(0),
    filterEfficiency = cms.untracked.double(1.0),
    comEnergy = cms.double(8000.0),
    PythiaParameters = cms.PSet(
        pythia8CommonSettingsBlock,
        pythia8CUEP8M1SettingsBlock,
        processParameters = cms.vstring(
            # NOTE(review): the original fragment listed this process switch
            # twice; the duplicate line was removed (repeating an identical
            # Pythia setting has no effect on the generated configuration).
            'WeakBosonAndParton:qqbar2Wg = on',
            '24:onMode = off',          # switch off all W decay channels...
            '24:onIfAny = 11 12',       # ...then re-enable e nu_e,
            '24:onIfAny = 13 14',       # mu nu_mu,
            '24:onIfAny = 15 16',       # and tau nu_tau.
            'PhaseSpace:pTHatMin = 3000.',
            'PhaseSpace:pTHatMax = 3500.',
        ),
        parameterSets = cms.vstring('pythia8CommonSettings',
                                    'pythia8CUEP8M1Settings',
                                    'processParameters',
                                    )
    )
)

# Sequence picked up by the production workflow.
ProductionFilterSequence = cms.Sequence(generator)
{ "pile_set_name": "Github" }
#ifndef BUILDERS_TERRAIN_SURFACEGENERATOR_HPP_DEFINED
#define BUILDERS_TERRAIN_SURFACEGENERATOR_HPP_DEFINED

#include "builders/terrain/TerraExtras.hpp"
#include "builders/terrain/TerraGenerator.hpp"
#include "math/Mesh.hpp"
#include "math/PolyClip.hpp"
#include "math/Polygon.hpp"
#include "math/Vector2.hpp"

namespace utymap {
namespace builders {

/// Provides the way to generate terrain mesh.
/// Concrete TerraGenerator that builds a flat surface mesh for a tile:
/// regions are collected per layer, clipped (foreground against background)
/// and triangulated into the output mesh.
class SurfaceGenerator final : public TerraGenerator {
 public:
  SurfaceGenerator(const BuilderContext &context,
                   const utymap::mapcss::Style &style,
                   const utymap::math::IntPath &tileRect);

  /// Called for every region discovered during tile processing;
  /// overrides the TerraGenerator hook.
  void onNewRegion(const std::string &type,
                   const utymap::entities::Element &element,
                   const utymap::mapcss::Style &style,
                   const std::shared_ptr<Region> &region) override;

  /// Builds the final surface mesh from the collected layers.
  void generateFrom(const std::vector<Layer> &layers) override;

 protected:
  /// Adds geometry to mesh.
  void addGeometry(int level,
                   utymap::math::Polygon &polygon,
                   const RegionContext &regionContext) override;

 private:
  /// Builds foreground surface.
  void buildForeground(const std::vector<Layer> &layers);

  /// Builds background surface (the area not covered by any foreground layer).
  void buildBackground();

  /// Builds layer.
  void buildLayer(const Layer &layer);

  /// Builds mesh using paths data.
  void buildRegion(const Region &region);

  /// Adds extras to mesh, e.g. trees, water surface if meshExtras are
  /// specified in options.
  void addExtrasIfNecessary(utymap::math::Mesh &mesh,
                            TerraExtras::Context &extrasContext,
                            const RegionContext &regionContext) const;

  // Clipper accumulating foreground regions; background is clipped against it.
  utymap::math::Clipper foregroundClipper_;
  utymap::math::Clipper backgroundClipper_;
};

}
}

#endif // BUILDERS_TERRAIN_SURFACEGENERATOR_HPP_DEFINED
{ "pile_set_name": "Github" }
use plotlib::page::Page;
use plotlib::repr::Plot;
use plotlib::style::{PointMarker, PointStyle};
use plotlib::view::ContinuousView;

/// Renders two scatter series into an 80x30 character plot on stdout.
fn main() {
    // First series: six points drawn with circle markers.
    let circle_points = vec![
        (-3.0, 2.3),
        (-1.6, 5.3),
        (0.3, 0.7),
        (4.3, -1.4),
        (6.4, 4.3),
        (8.5, 3.7),
    ];
    let circle_series =
        Plot::new(circle_points).point_style(PointStyle::new().marker(PointMarker::Circle));

    // Second series: two points drawn with square markers.
    let square_points = vec![(-1.4, 2.5), (7.2, -0.3)];
    let square_series =
        Plot::new(square_points).point_style(PointStyle::new().marker(PointMarker::Square));

    // Compose both series into a single labelled view with fixed axis ranges.
    let view = ContinuousView::new()
        .add(circle_series)
        .add(square_series)
        .x_range(-5., 10.)
        .y_range(-2., 6.)
        .x_label("Some varying variable")
        .y_label("The response of something");

    let rendered = Page::single(&view).dimensions(80, 30).to_text().unwrap();
    println!("{}", rendered);
}
{ "pile_set_name": "Github" }
# This file lists all individuals having contributed content to the repository. # For how it is generated, see `hack/generate-authors.sh`. Aanand Prasad <aanand.prasad@gmail.com> Aaron Davidson <aaron@databricks.com> Aaron Feng <aaron.feng@gmail.com> Aaron Hnatiw <aaron@griddio.com> Aaron Huslage <huslage@gmail.com> Aaron L. Xu <liker.xu@foxmail.com> Aaron Lehmann <aaron.lehmann@docker.com> Aaron Welch <welch@packet.net> Aaron.L.Xu <likexu@harmonycloud.cn> Abel Muiรฑo <amuino@gmail.com> Abhijeet Kasurde <akasurde@redhat.com> Abhinandan Prativadi <abhi@docker.com> Abhinav Ajgaonkar <abhinav316@gmail.com> Abhishek Chanda <abhishek.becs@gmail.com> Abhishek Sharma <abhishek@asharma.me> Abin Shahab <ashahab@altiscale.com> Adam Avilla <aavilla@yp.com> Adam Dobrawy <naczelnik@jawnosc.tk> Adam Eijdenberg <adam.eijdenberg@gmail.com> Adam Kunk <adam.kunk@tiaa-cref.org> Adam Miller <admiller@redhat.com> Adam Mills <adam@armills.info> Adam Pointer <adam.pointer@skybettingandgaming.com> Adam Singer <financeCoding@gmail.com> Adam Walz <adam@adamwalz.net> Addam Hardy <addam.hardy@gmail.com> Aditi Rajagopal <arajagopal@us.ibm.com> Aditya <aditya@netroy.in> Adnan Khan <adnkha@amazon.com> Adolfo Ochagavรญa <aochagavia92@gmail.com> Adria Casas <adriacasas88@gmail.com> Adrian Moisey <adrian@changeover.za.net> Adrian Mouat <adrian.mouat@gmail.com> Adrian Oprea <adrian@codesi.nz> Adrien Folie <folie.adrien@gmail.com> Adrien Gallouรซt <adrien@gallouet.fr> Ahmed Kamal <email.ahmedkamal@googlemail.com> Ahmet Alp Balkan <ahmetb@microsoft.com> Aidan Feldman <aidan.feldman@gmail.com> Aidan Hobson Sayers <aidanhs@cantab.net> AJ Bowen <aj@soulshake.net> Ajey Charantimath <ajey.charantimath@gmail.com> ajneu <ajneu@users.noreply.github.com> Akash Gupta <akagup@microsoft.com> Akihiro Matsushima <amatsusbit@gmail.com> Akihiro Suda <akihiro.suda.cz@hco.ntt.co.jp> Akim Demaille <akim.demaille@docker.com> Akira Koyasu <mail@akirakoyasu.net> Akshay Karle <akshay.a.karle@gmail.com> Al Tobey 
<al@ooyala.com> alambike <alambike@gmail.com> Alan Scherger <flyinprogrammer@gmail.com> Alan Thompson <cloojure@gmail.com> Albert Callarisa <shark234@gmail.com> Albert Zhang <zhgwenming@gmail.com> Alejandro Gonzรกlez Hevia <alejandrgh11@gmail.com> Aleksa Sarai <asarai@suse.de> Aleksandrs Fadins <aleks@s-ko.net> Alena Prokharchyk <alena@rancher.com> Alessandro Boch <aboch@tetrationanalytics.com> Alessio Biancalana <dottorblaster@gmail.com> Alex Chan <alex@alexwlchan.net> Alex Chen <alexchenunix@gmail.com> Alex Coventry <alx@empirical.com> Alex Crawford <alex.crawford@coreos.com> Alex Ellis <alexellis2@gmail.com> Alex Gaynor <alex.gaynor@gmail.com> Alex Goodman <wagoodman@gmail.com> Alex Olshansky <i@creagenics.com> Alex Samorukov <samm@os2.kiev.ua> Alex Warhawk <ax.warhawk@gmail.com> Alexander Artemenko <svetlyak.40wt@gmail.com> Alexander Boyd <alex@opengroove.org> Alexander Larsson <alexl@redhat.com> Alexander Midlash <amidlash@docker.com> Alexander Morozov <lk4d4@docker.com> Alexander Shopov <ash@kambanaria.org> Alexandre Beslic <alexandre.beslic@gmail.com> Alexandre Garnier <zigarn@gmail.com> Alexandre Gonzรกlez <agonzalezro@gmail.com> Alexandre Jomin <alexandrejomin@gmail.com> Alexandru Sfirlogea <alexandru.sfirlogea@gmail.com> Alexei Margasov <alexei38@yandex.ru> Alexey Guskov <lexag@mail.ru> Alexey Kotlyarov <alexey@infoxchange.net.au> Alexey Shamrin <shamrin@gmail.com> Alexis THOMAS <fr.alexisthomas@gmail.com> Alfred Landrum <alfred.landrum@docker.com> Ali Dehghani <ali.dehghani.g@gmail.com> Alicia Lauerman <alicia@eta.im> Alihan Demir <alihan_6153@hotmail.com> Allen Madsen <blatyo@gmail.com> Allen Sun <allensun.shl@alibaba-inc.com> almoehi <almoehi@users.noreply.github.com> Alvaro Saurin <alvaro.saurin@gmail.com> Alvin Deng <alvin.q.deng@utexas.edu> Alvin Richards <alvin.richards@docker.com> amangoel <amangoel@gmail.com> Amen Belayneh <amenbelayneh@gmail.com> Amir Goldstein <amir73il@aquasec.com> Amit Bakshi <ambakshi@gmail.com> Amit Krishnan 
<amit.krishnan@oracle.com> Amit Shukla <amit.shukla@docker.com> Amr Gawish <amr.gawish@gmail.com> Amy Lindburg <amy.lindburg@docker.com> Anand Patil <anand.prabhakar.patil@gmail.com> AnandkumarPatel <anandkumarpatel@gmail.com> Anatoly Borodin <anatoly.borodin@gmail.com> Anchal Agrawal <aagrawa4@illinois.edu> Anda Xu <anda.xu@docker.com> Anders Janmyr <anders@janmyr.com> Andre Dublin <81dublin@gmail.com> Andre Granovsky <robotciti@live.com> Andrea Denisse Gรณmez <crypto.andrea@protonmail.ch> Andrea Luzzardi <aluzzardi@gmail.com> Andrea Turli <andrea.turli@gmail.com> Andreas Elvers <andreas@work.de> Andreas Kรถhler <andi5.py@gmx.net> Andreas Savvides <andreas@editd.com> Andreas Tiefenthaler <at@an-ti.eu> Andrei Gherzan <andrei@resin.io> Andrei Vagin <avagin@gmail.com> Andrew C. Bodine <acbodine@us.ibm.com> Andrew Clay Shafer <andrewcshafer@gmail.com> Andrew Duckworth <grillopress@gmail.com> Andrew France <andrew@avito.co.uk> Andrew Gerrand <adg@golang.org> Andrew Guenther <guenther.andrew.j@gmail.com> Andrew He <he.andrew.mail@gmail.com> Andrew Hsu <andrewhsu@docker.com> Andrew Kuklewicz <kookster@gmail.com> Andrew Macgregor <andrew.macgregor@agworld.com.au> Andrew Macpherson <hopscotch23@gmail.com> Andrew Martin <sublimino@gmail.com> Andrew McDonnell <bugs@andrewmcdonnell.net> Andrew Munsell <andrew@wizardapps.net> Andrew Pennebaker <andrew.pennebaker@gmail.com> Andrew Po <absourd.noise@gmail.com> Andrew Weiss <andrew.weiss@docker.com> Andrew Williams <williams.andrew@gmail.com> Andrews Medina <andrewsmedina@gmail.com> Andrey Kolomentsev <andrey.kolomentsev@docker.com> Andrey Petrov <andrey.petrov@shazow.net> Andrey Stolbovsky <andrey.stolbovsky@gmail.com> Andrรฉ Martins <aanm90@gmail.com> andy <ztao@tibco-support.com> Andy Chambers <anchambers@paypal.com> andy diller <dillera@gmail.com> Andy Goldstein <agoldste@redhat.com> Andy Kipp <andy@rstudio.com> Andy Rothfusz <github@developersupport.net> Andy Smith <github@anarkystic.com> Andy Wilson 
<wilson.andrew.j+github@gmail.com> Anes Hasicic <anes.hasicic@gmail.com> Anil Belur <askb23@gmail.com> Anil Madhavapeddy <anil@recoil.org> Ankit Jain <ajatkj@yahoo.co.in> Ankush Agarwal <ankushagarwal11@gmail.com> Anonmily <michelle@michelleliu.io> Anran Qiao <anran.qiao@daocloud.io> Anshul Pundir <anshul.pundir@docker.com> Anthon van der Neut <anthon@mnt.org> Anthony Baire <Anthony.Baire@irisa.fr> Anthony Bishopric <git@anthonybishopric.com> Anthony Dahanne <anthony.dahanne@gmail.com> Anthony Sottile <asottile@umich.edu> Anton Lรถfgren <anton.lofgren@gmail.com> Anton Nikitin <anton.k.nikitin@gmail.com> Anton Polonskiy <anton.polonskiy@gmail.com> Anton Tiurin <noxiouz@yandex.ru> Antonio Murdaca <antonio.murdaca@gmail.com> Antonis Kalipetis <akalipetis@gmail.com> Antony Messerli <amesserl@rackspace.com> Anuj Bahuguna <anujbahuguna.dev@gmail.com> Anusha Ragunathan <anusha.ragunathan@docker.com> apocas <petermdias@gmail.com> Arash Deshmeh <adeshmeh@ca.ibm.com> ArikaChen <eaglesora@gmail.com> Arko Dasgupta <arko.dasgupta@docker.com> Arnaud Lefebvre <a.lefebvre@outlook.fr> Arnaud Porterie <arnaud.porterie@docker.com> Arnaud Rebillout <arnaud.rebillout@collabora.com> Arthur Barr <arthur.barr@uk.ibm.com> Arthur Gautier <baloo@gandi.net> Artur Meyster <arthurfbi@yahoo.com> Arun Gupta <arun.gupta@gmail.com> Asad Saeeduddin <masaeedu@gmail.com> Asbjรธrn Enge <asbjorn@hanafjedle.net> averagehuman <averagehuman@users.noreply.github.com> Avi Das <andas222@gmail.com> Avi Kivity <avi@scylladb.com> Avi Miller <avi.miller@oracle.com> Avi Vaid <avaid1996@gmail.com> ayoshitake <airandfingers@gmail.com> Azat Khuyiyakhmetov <shadow_uz@mail.ru> Bardia Keyoumarsi <bkeyouma@ucsc.edu> Barnaby Gray <barnaby@pickle.me.uk> Barry Allard <barry.allard@gmail.com> Bartล‚omiej Piotrowski <b@bpiotrowski.pl> Bastiaan Bakker <bbakker@xebia.com> bdevloed <boris.de.vloed@gmail.com> Ben Bonnefoy <frenchben@docker.com> Ben Firshman <ben@firshman.co.uk> Ben Golub <ben.golub@dotcloud.com> Ben Gould 
<ben@bengould.co.uk> Ben Hall <ben@benhall.me.uk> Ben Sargent <ben@brokendigits.com> Ben Severson <BenSeverson@users.noreply.github.com> Ben Toews <mastahyeti@gmail.com> Ben Wiklund <ben@daisyowl.com> Benjamin Atkin <ben@benatkin.com> Benjamin Baker <Benjamin.baker@utexas.edu> Benjamin Boudreau <boudreau.benjamin@gmail.com> Benjamin Yolken <yolken@stripe.com> Benoit Chesneau <bchesneau@gmail.com> Bernerd Schaefer <bj.schaefer@gmail.com> Bernhard M. Wiedemann <bwiedemann@suse.de> Bert Goethals <bert@bertg.be> Bevisy Zhang <binbin36520@gmail.com> Bharath Thiruveedula <bharath_ves@hotmail.com> Bhiraj Butala <abhiraj.butala@gmail.com> Bhumika Bayani <bhumikabayani@gmail.com> Bilal Amarni <bilal.amarni@gmail.com> Bill Wang <ozbillwang@gmail.com> Bily Zhang <xcoder@tenxcloud.com> Bin Liu <liubin0329@gmail.com> Bingshen Wang <bingshen.wbs@alibaba-inc.com> Blake Geno <blakegeno@gmail.com> Boaz Shuster <ripcurld.github@gmail.com> bobby abbott <ttobbaybbob@gmail.com> Boris Pruessmann <boris@pruessmann.org> Boshi Lian <farmer1992@gmail.com> Bouke Haarsma <bouke@webatoom.nl> Boyd Hemphill <boyd@feedmagnet.com> boynux <boynux@gmail.com> Bradley Cicenas <bradley.cicenas@gmail.com> Bradley Wright <brad@intranation.com> Brandon Liu <bdon@bdon.org> Brandon Philips <brandon.philips@coreos.com> Brandon Rhodes <brandon@rhodesmill.org> Brendan Dixon <brendand@microsoft.com> Brent Salisbury <brent.salisbury@docker.com> Brett Higgins <brhiggins@arbor.net> Brett Kochendorfer <brett.kochendorfer@gmail.com> Brett Randall <javabrett@gmail.com> Brian (bex) Exelbierd <bexelbie@redhat.com> Brian Bland <brian.bland@docker.com> Brian DeHamer <brian@dehamer.com> Brian Dorsey <brian@dorseys.org> Brian Flad <bflad417@gmail.com> Brian Goff <cpuguy83@gmail.com> Brian McCallister <brianm@skife.org> Brian Olsen <brian@maven-group.org> Brian Schwind <brianmschwind@gmail.com> Brian Shumate <brian@couchbase.com> Brian Torres-Gil <brian@dralth.com> Brian Trump <btrump@yelp.com> Brice Jaglin 
<bjaglin@teads.tv> Briehan Lombaard <briehan.lombaard@gmail.com> Brielle Broder <bbroder@google.com> Bruno Bigras <bigras.bruno@gmail.com> Bruno Binet <bruno.binet@gmail.com> Bruno Gazzera <bgazzera@paginar.com> Bruno Reniรฉ <brutasse@gmail.com> Bruno Tavares <btavare@thoughtworks.com> Bryan Bess <squarejaw@bsbess.com> Bryan Boreham <bjboreham@gmail.com> Bryan Matsuo <bryan.matsuo@gmail.com> Bryan Murphy <bmurphy1976@gmail.com> Burke Libbey <burke@libbey.me> Byung Kang <byung.kang.ctr@amrdec.army.mil> Caleb Spare <cespare@gmail.com> Calen Pennington <cale@edx.org> Cameron Boehmer <cameron.boehmer@gmail.com> Cameron Spear <cameronspear@gmail.com> Campbell Allen <campbell.allen@gmail.com> Candid Dauth <cdauth@cdauth.eu> Cao Weiwei <cao.weiwei30@zte.com.cn> Carl Henrik Lunde <chlunde@ping.uio.no> Carl Loa Odin <carlodin@gmail.com> Carl X. Su <bcbcarl@gmail.com> Carlo Mion <mion00@gmail.com> Carlos Alexandro Becker <caarlos0@gmail.com> Carlos de Paula <me@carlosedp.com> Carlos Sanchez <carlos@apache.org> Carol Fager-Higgins <carol.fager-higgins@docker.com> Cary <caryhartline@users.noreply.github.com> Casey Bisson <casey.bisson@joyent.com> Catalin Pirvu <pirvu.catalin94@gmail.com> Ce Gao <ce.gao@outlook.com> Cedric Davies <cedricda@microsoft.com> Cezar Sa Espinola <cezarsa@gmail.com> Chad Swenson <chadswen@gmail.com> Chance Zibolski <chance.zibolski@gmail.com> Chander Govindarajan <chandergovind@gmail.com> Chanhun Jeong <keyolk@gmail.com> Chao Wang <wangchao.fnst@cn.fujitsu.com> Charles Chan <charleswhchan@users.noreply.github.com> Charles Hooper <charles.hooper@dotcloud.com> Charles Law <claw@conduce.com> Charles Lindsay <chaz@chazomatic.us> Charles Merriam <charles.merriam@gmail.com> Charles Sarrazin <charles@sarraz.in> Charles Smith <charles.smith@docker.com> Charlie Drage <charlie@charliedrage.com> Charlie Lewis <charliel@lab41.org> Chase Bolt <chase.bolt@gmail.com> ChaYoung You <yousbe@gmail.com> Chen Chao <cc272309126@gmail.com> Chen Chuanliang 
<chen.chuanliang@zte.com.cn> Chen Hanxiao <chenhanxiao@cn.fujitsu.com> Chen Min <chenmin46@huawei.com> Chen Mingjie <chenmingjie0828@163.com> Chen Qiu <cheney-90@hotmail.com> Cheng-mean Liu <soccerl@microsoft.com> Chengfei Shang <cfshang@alauda.io> Chengguang Xu <cgxu519@gmx.com> chenyuzhu <chenyuzhi@oschina.cn> Chetan Birajdar <birajdar.chetan@gmail.com> Chewey <prosto-chewey@users.noreply.github.com> Chia-liang Kao <clkao@clkao.org> chli <chli@freewheel.tv> Cholerae Hu <choleraehyq@gmail.com> Chris Alfonso <calfonso@redhat.com> Chris Armstrong <chris@opdemand.com> Chris Dias <cdias@microsoft.com> Chris Dituri <csdituri@gmail.com> Chris Fordham <chris@fordham-nagy.id.au> Chris Gavin <chris@chrisgavin.me> Chris Gibson <chris@chrisg.io> Chris Khoo <chris.khoo@gmail.com> Chris McKinnel <chris.mckinnel@tangentlabs.co.uk> Chris McKinnel <chrismckinnel@gmail.com> Chris Price <chris.price@docker.com> Chris Seto <chriskseto@gmail.com> Chris Snow <chsnow123@gmail.com> Chris St. Pierre <chris.a.st.pierre@gmail.com> Chris Stivers <chris@stivers.us> Chris Swan <chris.swan@iee.org> Chris Telfer <ctelfer@docker.com> Chris Wahl <github@wahlnetwork.com> Chris Weyl <cweyl@alumni.drew.edu> Chris White <me@cwprogram.com> Christian Berendt <berendt@b1-systems.de> Christian Brauner <christian.brauner@ubuntu.com> Christian Bรถhme <developement@boehme3d.de> Christian Muehlhaeuser <muesli@gmail.com> Christian Persson <saser@live.se> Christian Rotzoll <ch.rotzoll@gmail.com> Christian Simon <simon@swine.de> Christian Stefanescu <st.chris@gmail.com> Christophe Mehay <cmehay@online.net> Christophe Troestler <christophe.Troestler@umons.ac.be> Christophe Vidal <kriss@krizalys.com> Christopher Biscardi <biscarch@sketcht.com> Christopher Crone <christopher.crone@docker.com> Christopher Currie <codemonkey+github@gmail.com> Christopher Jones <tophj@linux.vnet.ibm.com> Christopher Latham <sudosurootdev@gmail.com> Christopher Rigor <crigor@gmail.com> Christy Perez <christy@linux.vnet.ibm.com> Chun 
Chen <ramichen@tencent.com> Ciro S. Costa <ciro.costa@usp.br> Clayton Coleman <ccoleman@redhat.com> Clinton Kitson <clintonskitson@gmail.com> Cody Roseborough <crrosebo@amazon.com> Coenraad Loubser <coenraad@wish.org.za> Colin Dunklau <colin.dunklau@gmail.com> Colin Hebert <hebert.colin@gmail.com> Colin Panisset <github@clabber.com> Colin Rice <colin@daedrum.net> Colin Walters <walters@verbum.org> Collin Guarino <collin.guarino@gmail.com> Colm Hally <colmhally@gmail.com> companycy <companycy@gmail.com> Corbin Coleman <corbin.coleman@docker.com> Corey Farrell <git@cfware.com> Cory Forsyth <cory.forsyth@gmail.com> cressie176 <github@stephen-cresswell.net> CrimsonGlory <CrimsonGlory@users.noreply.github.com> Cristian Staretu <cristian.staretu@gmail.com> cristiano balducci <cristiano.balducci@gmail.com> Cruceru Calin-Cristian <crucerucalincristian@gmail.com> CUI Wei <ghostplant@qq.com> Cyprian Gracz <cyprian.gracz@micro-jumbo.eu> Cyril F <cyrilf7x@gmail.com> Daan van Berkel <daan.v.berkel.1980@gmail.com> Daehyeok Mun <daehyeok@gmail.com> Dafydd Crosby <dtcrsby@gmail.com> dalanlan <dalanlan925@gmail.com> Damian Smyth <damian@dsau.co> Damien Nadรฉ <github@livna.org> Damien Nozay <damien.nozay@gmail.com> Damjan Georgievski <gdamjan@gmail.com> Dan Anolik <dan@anolik.net> Dan Buch <d.buch@modcloth.com> Dan Cotora <dan@bluevision.ro> Dan Feldman <danf@jfrog.com> Dan Griffin <dgriffin@peer1.com> Dan Hirsch <thequux@upstandinghackers.com> Dan Keder <dan.keder@gmail.com> Dan Levy <dan@danlevy.net> Dan McPherson <dmcphers@redhat.com> Dan Stine <sw@stinemail.com> Dan Williams <me@deedubs.com> Dani Hodovic <dani.hodovic@gmail.com> Dani Louca <dani.louca@docker.com> Daniel Antlinger <d.antlinger@gmx.at> Daniel Dao <dqminh@cloudflare.com> Daniel Exner <dex@dragonslave.de> Daniel Farrell <dfarrell@redhat.com> Daniel Garcia <daniel@danielgarcia.info> Daniel Gasienica <daniel@gasienica.ch> Daniel Grunwell <mwgrunny@gmail.com> Daniel Hiltgen <daniel.hiltgen@docker.com> Daniel J Walsh 
<dwalsh@redhat.com> Daniel Menet <membership@sontags.ch> Daniel Mizyrycki <daniel.mizyrycki@dotcloud.com> Daniel Nephin <dnephin@docker.com> Daniel Norberg <dano@spotify.com> Daniel Nordberg <dnordberg@gmail.com> Daniel Robinson <gottagetmac@gmail.com> Daniel S <dan.streby@gmail.com> Daniel Sweet <danieljsweet@icloud.com> Daniel Von Fange <daniel@leancoder.com> Daniel Watkins <daniel@daniel-watkins.co.uk> Daniel X Moore <yahivin@gmail.com> Daniel YC Lin <dlin.tw@gmail.com> Daniel Zhang <jmzwcn@gmail.com> Danny Berger <dpb587@gmail.com> Danny Milosavljevic <dannym@scratchpost.org> Danny Yates <danny@codeaholics.org> Danyal Khaliq <danyal.khaliq@tenpearls.com> Darren Coxall <darren@darrencoxall.com> Darren Shepherd <darren.s.shepherd@gmail.com> Darren Stahl <darst@microsoft.com> Dattatraya Kumbhar <dattatraya.kumbhar@gslab.com> Davanum Srinivas <davanum@gmail.com> Dave Barboza <dbarboza@datto.com> Dave Goodchild <buddhamagnet@gmail.com> Dave Henderson <dhenderson@gmail.com> Dave MacDonald <mindlapse@gmail.com> Dave Tucker <dt@docker.com> David Anderson <dave@natulte.net> David Calavera <david.calavera@gmail.com> David Chung <david.chung@docker.com> David Corking <dmc-source@dcorking.com> David Cramer <davcrame@cisco.com> David Currie <david_currie@uk.ibm.com> David Davis <daviddavis@redhat.com> David Dooling <dooling@gmail.com> David Gageot <david@gageot.net> David Gebler <davidgebler@gmail.com> David Glasser <glasser@davidglasser.net> David Lawrence <david.lawrence@docker.com> David Lechner <david@lechnology.com> David M. Karr <davidmichaelkarr@gmail.com> David Mackey <tdmackey@booleanhaiku.com> David Mat <david@davidmat.com> David Mcanulty <github@hellspark.com> David McKay <david@rawkode.com> David P Hilton <david.hilton.p@gmail.com> David Pelaez <pelaez89@gmail.com> David R. 
Jenni <david.r.jenni@gmail.com> David Rรถthlisberger <david@rothlis.net> David Sheets <dsheets@docker.com> David Sissitka <me@dsissitka.com> David Trott <github@davidtrott.com> David Wang <00107082@163.com> David Williamson <david.williamson@docker.com> David Xia <dxia@spotify.com> David Young <yangboh@cn.ibm.com> Davide Ceretti <davide.ceretti@hogarthww.com> Dawn Chen <dawnchen@google.com> dbdd <wangtong2712@gmail.com> dcylabs <dcylabs@gmail.com> Debayan De <debayande@users.noreply.github.com> Deborah Gertrude Digges <deborah.gertrude.digges@gmail.com> deed02392 <georgehafiz@gmail.com> Deep Debroy <ddebroy@docker.com> Deng Guangxing <dengguangxing@huawei.com> Deni Bertovic <deni@kset.org> Denis Defreyne <denis@soundcloud.com> Denis Gladkikh <denis@gladkikh.email> Denis Ollier <larchunix@users.noreply.github.com> Dennis Chen <barracks510@gmail.com> Dennis Chen <dennis.chen@arm.com> Dennis Docter <dennis@d23.nl> Derek <crq@kernel.org> Derek <crquan@gmail.com> Derek Ch <denc716@gmail.com> Derek McGowan <derek@mcgstyle.net> Deric Crago <deric.crago@gmail.com> Deshi Xiao <dxiao@redhat.com> devmeyster <arthurfbi@yahoo.com> Devvyn Murphy <devvyn@devvyn.com> Dharmit Shah <shahdharmit@gmail.com> Dhawal Yogesh Bhanushali <dbhanushali@vmware.com> Diego Romero <idiegoromero@gmail.com> Diego Siqueira <dieg0@live.com> Dieter Reuter <dieter.reuter@me.com> Dillon Dixon <dillondixon@gmail.com> Dima Stopel <dima@twistlock.com> Dimitri John Ledkov <dimitri.j.ledkov@intel.com> Dimitris Mandalidis <dimitris.mandalidis@gmail.com> Dimitris Rozakis <dimrozakis@gmail.com> Dimitry Andric <d.andric@activevideo.com> Dinesh Subhraveti <dineshs@altiscale.com> Ding Fei <dingfei@stars.org.cn> Diogo Monica <diogo@docker.com> DiuDiugirl <sophia.wang@pku.edu.cn> Djibril Konรฉ <kone.djibril@gmail.com> dkumor <daniel@dkumor.com> Dmitri Logvinenko <dmitri.logvinenko@gmail.com> Dmitri Shuralyov <shurcooL@gmail.com> Dmitry Demeshchuk <demeshchuk@gmail.com> Dmitry Gusev <dmitry.gusev@gmail.com> Dmitry 
Kononenko <d@dm42.ru> Dmitry Sharshakov <d3dx12.xx@gmail.com> Dmitry Shyshkin <dmitry@shyshkin.org.ua> Dmitry Smirnov <onlyjob@member.fsf.org> Dmitry V. Krivenok <krivenok.dmitry@gmail.com> Dmitry Vorobev <dimahabr@gmail.com> Dolph Mathews <dolph.mathews@gmail.com> Dominic Tubach <dominic.tubach@to.com> Dominic Yin <yindongchao@inspur.com> Dominik Dingel <dingel@linux.vnet.ibm.com> Dominik Finkbeiner <finkes93@gmail.com> Dominik Honnef <dominik@honnef.co> Don Kirkby <donkirkby@users.noreply.github.com> Don Kjer <don.kjer@gmail.com> Don Spaulding <donspauldingii@gmail.com> Donald Huang <don.hcd@gmail.com> Dong Chen <dongluo.chen@docker.com> Donghwa Kim <shanytt@gmail.com> Donovan Jones <git@gamma.net.nz> Doron Podoleanu <doronp@il.ibm.com> Doug Davis <dug@us.ibm.com> Doug MacEachern <dougm@vmware.com> Doug Tangren <d.tangren@gmail.com> Douglas Curtis <dougcurtis1@gmail.com> Dr Nic Williams <drnicwilliams@gmail.com> dragon788 <dragon788@users.noreply.github.com> Draลพen Luฤanin <kermit666@gmail.com> Drew Erny <drew.erny@docker.com> Drew Hubl <drew.hubl@gmail.com> Dustin Sallings <dustin@spy.net> Ed Costello <epc@epcostello.com> Edmund Wagner <edmund-wagner@web.de> Eiichi Tsukata <devel@etsukata.com> Eike Herzbach <eike@herzbach.net> Eivin Giske Skaaren <eivinsn@axis.com> Eivind Uggedal <eivind@uggedal.com> Elan Ruusamรคe <glen@pld-linux.org> Elango Sivanandam <elango.siva@docker.com> Elena Morozova <lelenanam@gmail.com> Eli Uriegas <eli.uriegas@docker.com> Elias Faxรถ <elias.faxo@tre.se> Elias Probst <mail@eliasprobst.eu> Elijah Zupancic <elijah@zupancic.name> eluck <mail@eluck.me> Elvir Kuric <elvirkuric@gmail.com> Emil Davtyan <emil2k@gmail.com> Emil Hernvall <emil@quench.at> Emily Maier <emily@emilymaier.net> Emily Rose <emily@contactvibe.com> Emir Ozer <emirozer@yandex.com> Enguerran <engcolson@gmail.com> Eohyung Lee <liquidnuker@gmail.com> epeterso <epeterson@breakpoint-labs.com> Eric Barch <barch@tomesoftware.com> Eric Curtin <ericcurtin17@gmail.com> Eric G. 
Noriega <enoriega@vizuri.com> Eric Hanchrow <ehanchrow@ine.com> Eric Lee <thenorthsecedes@gmail.com> Eric Myhre <hash@exultant.us> Eric Paris <eparis@redhat.com> Eric Rafaloff <erafaloff@gmail.com> Eric Rosenberg <ehaydenr@gmail.com> Eric Sage <eric.david.sage@gmail.com> Eric Soderstrom <ericsoderstrom@gmail.com> Eric Yang <windfarer@gmail.com> Eric-Olivier Lamey <eo@lamey.me> Erica Windisch <erica@windisch.us> Erik Bray <erik.m.bray@gmail.com> Erik Dubbelboer <erik@dubbelboer.com> Erik Hollensbe <github@hollensbe.org> Erik Inge Bolsรธ <knan@redpill-linpro.com> Erik Kristensen <erik@erikkristensen.com> Erik St. Martin <alakriti@gmail.com> Erik Weathers <erikdw@gmail.com> Erno Hopearuoho <erno.hopearuoho@gmail.com> Erwin van der Koogh <info@erronis.nl> Ethan Bell <ebgamer29@gmail.com> Ethan Mosbaugh <ethan@replicated.com> Euan Kemp <euan.kemp@coreos.com> Eugen Krizo <eugen.krizo@gmail.com> Eugene Yakubovich <eugene.yakubovich@coreos.com> Evan Allrich <evan@unguku.com> Evan Carmi <carmi@users.noreply.github.com> Evan Hazlett <ejhazlett@gmail.com> Evan Krall <krall@yelp.com> Evan Phoenix <evan@fallingsnow.net> Evan Wies <evan@neomantra.net> Evelyn Xu <evelynhsu21@gmail.com> Everett Toews <everett.toews@rackspace.com> Evgeny Shmarnev <shmarnev@gmail.com> Evgeny Vereshchagin <evvers@ya.ru> Ewa Czechowska <ewa@ai-traders.com> Eystein Mรฅlรธy Stenberg <eystein.maloy.stenberg@cfengine.com> ezbercih <cem.ezberci@gmail.com> Ezra Silvera <ezra@il.ibm.com> Fabian Kramm <kramm@covexo.com> Fabian Lauer <kontakt@softwareschmiede-saar.de> Fabian Raetz <fabian.raetz@gmail.com> Fabiano Rosas <farosas@br.ibm.com> Fabio Falci <fabiofalci@gmail.com> Fabio Kung <fabio.kung@gmail.com> Fabio Rapposelli <fabio@vmware.com> Fabio Rehm <fgrehm@gmail.com> Fabrizio Regini <freegenie@gmail.com> Fabrizio Soppelsa <fsoppelsa@mirantis.com> Faiz Khan <faizkhan00@gmail.com> falmp <chico.lopes@gmail.com> Fangming Fang <fangming.fang@arm.com> Fangyuan Gao <21551127@zju.edu.cn> fanjiyun 
<fan.jiyun@zte.com.cn> Fareed Dudhia <fareeddudhia@googlemail.com> Fathi Boudra <fathi.boudra@linaro.org> Federico Gimenez <fgimenez@coit.es> Felipe Oliveira <felipeweb.programador@gmail.com> Felipe Ruhland <felipe.ruhland@gmail.com> Felix Abecassis <fabecassis@nvidia.com> Felix Geisendoฬˆrfer <felix@debuggable.com> Felix Hupfeld <felix@quobyte.com> Felix Rabe <felix@rabe.io> Felix Ruess <felix.ruess@gmail.com> Felix Schindler <fschindler@weluse.de> Feng Yan <fy2462@gmail.com> Fengtu Wang <wangfengtu@huawei.com> Ferenc Szabo <pragmaticfrank@gmail.com> Fernando <fermayo@gmail.com> Fero Volar <alian@alian.info> Ferran Rodenas <frodenas@gmail.com> Filipe Brandenburger <filbranden@google.com> Filipe Oliveira <contato@fmoliveira.com.br> Flavio Castelli <fcastelli@suse.com> Flavio Crisciani <flavio.crisciani@docker.com> Florian <FWirtz@users.noreply.github.com> Florian Klein <florian.klein@free.fr> Florian Maier <marsmensch@users.noreply.github.com> Florian Noeding <noeding@adobe.com> Florian Weingarten <flo@hackvalue.de> Florin Asavoaie <florin.asavoaie@gmail.com> Florin Patan <florinpatan@gmail.com> fonglh <fonglh@gmail.com> Foysal Iqbal <foysal.iqbal.fb@gmail.com> Francesc Campoy <campoy@google.com> Francesco Mari <mari.francesco@gmail.com> Francis Chuang <francis.chuang@boostport.com> Francisco Carriedo <fcarriedo@gmail.com> Francisco Souza <f@souza.cc> Frank Groeneveld <frank@ivaldi.nl> Frank Herrmann <fgh@4gh.tv> Frank Macreery <frank@macreery.com> Frank Rosquin <frank.rosquin+github@gmail.com> frankyang <yyb196@gmail.com> Fred Lifton <fred.lifton@docker.com> Frederick F. 
Kautz IV <fkautz@redhat.com> Frederik Loeffert <frederik@zitrusmedia.de> Frederik Nordahl Jul Sabroe <frederikns@gmail.com> Freek Kalter <freek@kalteronline.org> Frieder Bluemle <frieder.bluemle@gmail.com> Fu JinLin <withlin@yeah.net> Fรฉlix Baylac-Jacquรฉ <baylac.felix@gmail.com> Fรฉlix Cantournet <felix.cantournet@cloudwatt.com> Gabe Rosenhouse <gabe@missionst.com> Gabor Nagy <mail@aigeruth.hu> Gabriel Linder <linder.gabriel@gmail.com> Gabriel Monroy <gabriel@opdemand.com> Gabriel Nicolas Avellaneda <avellaneda.gabriel@gmail.com> Gaetan de Villele <gdevillele@gmail.com> Galen Sampson <galen.sampson@gmail.com> Gang Qiao <qiaohai8866@gmail.com> Gareth Rushgrove <gareth@morethanseven.net> Garrett Barboza <garrett@garrettbarboza.com> Gary Schaetz <gary@schaetzkc.com> Gaurav <gaurav.gosec@gmail.com> gautam, prasanna <prasannagautam@gmail.com> Gaรซl PORTAY <gael.portay@savoirfairelinux.com> Genki Takiuchi <genki@s21g.com> GennadySpb <lipenkov@gmail.com> Geoffrey Bachelet <grosfrais@gmail.com> Geon Kim <geon0250@gmail.com> George Kontridze <george@bugsnag.com> George MacRorie <gmacr31@gmail.com> George Xie <georgexsh@gmail.com> Georgi Hristozov <georgi@forkbomb.nl> Gereon Frey <gereon.frey@dynport.de> German DZ <germ@ndz.com.ar> Gert van Valkenhoef <g.h.m.van.valkenhoef@rug.nl> Gerwim Feiken <g.feiken@tfe.nl> Ghislain Bourgeois <ghislain.bourgeois@gmail.com> Giampaolo Mancini <giampaolo@trampolineup.com> Gianluca Borello <g.borello@gmail.com> Gildas Cuisinier <gildas.cuisinier@gcuisinier.net> Giovan Isa Musthofa <giovanism@outlook.co.id> gissehel <public-devgit-dantus@gissehel.org> Giuseppe Mazzotta <gdm85@users.noreply.github.com> Gleb Fotengauer-Malinovskiy <glebfm@altlinux.org> Gleb M Borisov <borisov.gleb@gmail.com> Glyn Normington <gnormington@gopivotal.com> GoBella <caili_welcome@163.com> Goffert van Gool <goffert@phusion.nl> Goldwyn Rodrigues <rgoldwyn@suse.com> Gopikannan Venugopalsamy <gopikannan.venugopalsamy@gmail.com> Gosuke Miyashita <gosukenator@gmail.com> 
Gou Rao <gou@portworx.com> Govinda Fichtner <govinda.fichtner@googlemail.com> Grant Millar <grant@cylo.io> Grant Reaber <grant.reaber@gmail.com> Graydon Hoare <graydon@pobox.com> Greg Fausak <greg@tacodata.com> Greg Pflaum <gpflaum@users.noreply.github.com> Greg Stephens <greg@udon.org> Greg Thornton <xdissent@me.com> Grzegorz Jaล›kiewicz <gj.jaskiewicz@gmail.com> Guilhem Lettron <guilhem+github@lettron.fr> Guilherme Salgado <gsalgado@gmail.com> Guillaume Dufour <gdufour.prestataire@voyages-sncf.com> Guillaume J. Charmes <guillaume.charmes@docker.com> guoxiuyan <guoxiuyan@huawei.com> Guri <odg0318@gmail.com> Gurjeet Singh <gurjeet@singh.im> Guruprasad <lgp171188@gmail.com> Gustav Sinder <gustav.sinder@gmail.com> gwx296173 <gaojing3@huawei.com> Gรผnter Zรถchbauer <guenter@gzoechbauer.com> Haichao Yang <yang.haichao@zte.com.cn> haikuoliu <haikuo@amazon.com> Hakan ร–zler <hakan.ozler@kodcu.com> Hamish Hutchings <moredhel@aoeu.me> Hans Kristian Flaatten <hans@starefossen.com> Hans Rรธdtang <hansrodtang@gmail.com> Hao Shu Wei <haosw@cn.ibm.com> Hao Zhang <21521210@zju.edu.cn> Harald Albers <github@albersweb.de> Harald Niesche <harald@niesche.de> Harley Laue <losinggeneration@gmail.com> Harold Cooper <hrldcpr@gmail.com> Harrison Turton <harrisonturton@gmail.com> Harry Zhang <harryz@hyper.sh> Harshal Patil <harshal.patil@in.ibm.com> Harshal Patil <harshalp@linux.vnet.ibm.com> He Simei <hesimei@zju.edu.cn> He Xiaoxi <tossmilestone@gmail.com> He Xin <he_xinworld@126.com> heartlock <21521209@zju.edu.cn> Hector Castro <hectcastro@gmail.com> Helen Xie <chenjg@harmonycloud.cn> Henning Sprang <henning.sprang@gmail.com> Hiroshi Hatake <hatake@clear-code.com> Hiroyuki Sasagawa <hs19870702@gmail.com> Hobofan <goisser94@gmail.com> Hollie Teal <hollie@docker.com> Hong Xu <hong@topbug.net> Hongbin Lu <hongbin034@gmail.com> Hongxu Jia <hongxu.jia@windriver.com> hsinko <21551195@zju.edu.cn> Hu Keping <hukeping@huawei.com> Hu Tao <hutao@cn.fujitsu.com> HuanHuan Ye <logindaveye@gmail.com> 
Huanzhong Zhang <zhanghuanzhong90@gmail.com> Huayi Zhang <irachex@gmail.com> Hugo Duncan <hugo@hugoduncan.org> Hugo Marisco <0x6875676f@gmail.com> Hunter Blanks <hunter@twilio.com> huqun <huqun@zju.edu.cn> Huu Nguyen <huu@prismskylabs.com> hyeongkyu.lee <hyeongkyu.lee@navercorp.com> Hyzhou Zhy <hyzhou.zhy@alibaba-inc.com> Iago Lรณpez Galeiras <iago@kinvolk.io> Ian Babrou <ibobrik@gmail.com> Ian Bishop <ianbishop@pace7.com> Ian Bull <irbull@gmail.com> Ian Calvert <ianjcalvert@gmail.com> Ian Campbell <ian.campbell@docker.com> Ian Chen <ianre657@gmail.com> Ian Lee <IanLee1521@gmail.com> Ian Main <imain@redhat.com> Ian Philpot <ian.philpot@microsoft.com> Ian Truslove <ian.truslove@gmail.com> Iavael <iavaelooeyt@gmail.com> Icaro Seara <icaro.seara@gmail.com> Ignacio Capurro <icapurrofagian@gmail.com> Igor Dolzhikov <bluesriverz@gmail.com> Igor Karpovich <i.karpovich@currencysolutions.com> Iliana Weller <iweller@amazon.com> Ilkka Laukkanen <ilkka@ilkka.io> Ilya Dmitrichenko <errordeveloper@gmail.com> Ilya Gusev <mail@igusev.ru> Ilya Khlopotov <ilya.khlopotov@gmail.com> imre Fitos <imre.fitos+github@gmail.com> inglesp <peter.inglesby@gmail.com> Ingo Gottwald <in.gottwald@gmail.com> Innovimax <innovimax@gmail.com> Isaac Dupree <antispam@idupree.com> Isabel Jimenez <contact.isabeljimenez@gmail.com> Isao Jonas <isao.jonas@gmail.com> Iskander Sharipov <quasilyte@gmail.com> Ivan Babrou <ibobrik@gmail.com> Ivan Fraixedes <ifcdev@gmail.com> Ivan Grcic <igrcic@gmail.com> Ivan Markin <sw@nogoegst.net> J Bruni <joaohbruni@yahoo.com.br> J. 
Nunn <jbnunn@gmail.com> Jack Danger Canty <jackdanger@squareup.com> Jack Laxson <jackjrabbit@gmail.com> Jacob Atzen <jacob@jacobatzen.dk> Jacob Edelman <edelman.jd@gmail.com> Jacob Tomlinson <jacob@tom.linson.uk> Jacob Vallejo <jakeev@amazon.com> Jacob Wen <jian.w.wen@oracle.com> Jaivish Kothari <janonymous.codevulture@gmail.com> Jake Champlin <jake.champlin.27@gmail.com> Jake Moshenko <jake@devtable.com> Jake Sanders <jsand@google.com> jakedt <jake@devtable.com> James Allen <jamesallen0108@gmail.com> James Carey <jecarey@us.ibm.com> James Carr <james.r.carr@gmail.com> James DeFelice <james.defelice@ishisystems.com> James Harrison Fisher <jameshfisher@gmail.com> James Kyburz <james.kyburz@gmail.com> James Kyle <james@jameskyle.org> James Lal <james@lightsofapollo.com> James Mills <prologic@shortcircuit.net.au> James Nesbitt <james.nesbitt@wunderkraut.com> James Nugent <james@jen20.com> James Turnbull <james@lovedthanlost.net> James Watkins-Harvey <jwatkins@progi-media.com> Jamie Hannaford <jamie@limetree.org> Jamshid Afshar <jafshar@yahoo.com> Jan Keromnes <janx@linux.com> Jan Koprowski <jan.koprowski@gmail.com> Jan Pazdziora <jpazdziora@redhat.com> Jan Toebes <jan@toebes.info> Jan-Gerd Tenberge <janten@gmail.com> Jan-Jaap Driessen <janjaapdriessen@gmail.com> Jana Radhakrishnan <mrjana@docker.com> Jannick Fahlbusch <git@jf-projects.de> Januar Wayong <januar@gmail.com> Jared Biel <jared.biel@bolderthinking.com> Jared Hocutt <jaredh@netapp.com> Jaroslaw Zabiello <hipertracker@gmail.com> jaseg <jaseg@jaseg.net> Jasmine Hegman <jasmine@jhegman.com> Jason Divock <jdivock@gmail.com> Jason Giedymin <jasong@apache.org> Jason Green <Jason.Green@AverInformatics.Com> Jason Hall <imjasonh@gmail.com> Jason Heiss <jheiss@aput.net> Jason Livesay <ithkuil@gmail.com> Jason McVetta <jason.mcvetta@gmail.com> Jason Plum <jplum@devonit.com> Jason Shepherd <jason@jasonshepherd.net> Jason Smith <jasonrichardsmith@gmail.com> Jason Sommer <jsdirv@gmail.com> Jason Stangroome 
<jason@codeassassin.com> jaxgeller <jacksongeller@gmail.com> Jay <imjching@hotmail.com> Jay <teguhwpurwanto@gmail.com> Jay Kamat <github@jgkamat.33mail.com> Jean Rouge <rougej+github@gmail.com> Jean-Baptiste Barth <jeanbaptiste.barth@gmail.com> Jean-Baptiste Dalido <jeanbaptiste@appgratis.com> Jean-Christophe Berthon <huygens@berthon.eu> Jean-Paul Calderone <exarkun@twistedmatrix.com> Jean-Pierre Huynh <jean-pierre.huynh@ounet.fr> Jean-Tiare Le Bigot <jt@yadutaf.fr> Jeeva S. Chelladhurai <sjeeva@gmail.com> Jeff Anderson <jeff@docker.com> Jeff Hajewski <jeff.hajewski@gmail.com> Jeff Johnston <jeff.johnston.mn@gmail.com> Jeff Lindsay <progrium@gmail.com> Jeff Mickey <j@codemac.net> Jeff Minard <jeff@creditkarma.com> Jeff Nickoloff <jeff.nickoloff@gmail.com> Jeff Silberman <jsilberm@gmail.com> Jeff Welch <whatthejeff@gmail.com> Jeffrey Bolle <jeffreybolle@gmail.com> Jeffrey Morgan <jmorganca@gmail.com> Jeffrey van Gogh <jvg@google.com> Jenny Gebske <jennifer@gebske.de> Jeremy Chambers <jeremy@thehipbot.com> Jeremy Grosser <jeremy@synack.me> Jeremy Price <jprice.rhit@gmail.com> Jeremy Qian <vanpire110@163.com> Jeremy Unruh <jeremybunruh@gmail.com> Jeremy Yallop <yallop@docker.com> Jeroen Franse <jeroenfranse@gmail.com> Jeroen Jacobs <github@jeroenj.be> Jesse Dearing <jesse.dearing@gmail.com> Jesse Dubay <jesse@thefortytwo.net> Jessica Frazelle <acidburn@microsoft.com> Jezeniel Zapanta <jpzapanta22@gmail.com> Jhon Honce <jhonce@redhat.com> Ji.Zhilong <zhilongji@gmail.com> Jian Liao <jliao@alauda.io> Jian Zhang <zhangjian.fnst@cn.fujitsu.com> Jiang Jinyang <jjyruby@gmail.com> Jie Luo <luo612@zju.edu.cn> Jihyun Hwang <jhhwang@telcoware.com> Jilles Oldenbeuving <ojilles@gmail.com> Jim Alateras <jima@comware.com.au> Jim Ehrismann <jim.ehrismann@docker.com> Jim Galasyn <jim.galasyn@docker.com> Jim Minter <jminter@redhat.com> Jim Perrin <jperrin@centos.org> Jimmy Cuadra <jimmy@jimmycuadra.com> Jimmy Puckett <jimmy.puckett@spinen.com> Jimmy Song <rootsongjc@gmail.com> Jinsoo 
Park <cellpjs@gmail.com> Jintao Zhang <zhangjintao9020@gmail.com> Jiri Appl <jiria@microsoft.com> Jiri Popelka <jpopelka@redhat.com> Jiuyue Ma <majiuyue@huawei.com> Jiล™รญ ลฝupka <jzupka@redhat.com> Joao Fernandes <joao.fernandes@docker.com> Joao Trindade <trindade.joao@gmail.com> Joe Beda <joe.github@bedafamily.com> Joe Doliner <jdoliner@pachyderm.io> Joe Ferguson <joe@infosiftr.com> Joe Gordon <joe.gordon0@gmail.com> Joe Shaw <joe@joeshaw.org> Joe Van Dyk <joe@tanga.com> Joel Friedly <joelfriedly@gmail.com> Joel Handwell <joelhandwell@gmail.com> Joel Hansson <joel.hansson@ecraft.com> Joel Wurtz <jwurtz@jolicode.com> Joey Geiger <jgeiger@gmail.com> Joey Geiger <jgeiger@users.noreply.github.com> Joey Gibson <joey@joeygibson.com> Joffrey F <joffrey@docker.com> Johan Euphrosine <proppy@google.com> Johan Rydberg <johan.rydberg@gmail.com> Johanan Lieberman <johanan.lieberman@gmail.com> Johannes 'fish' Ziemke <github@freigeist.org> John Costa <john.costa@gmail.com> John Feminella <jxf@jxf.me> John Gardiner Myers <jgmyers@proofpoint.com> John Gossman <johngos@microsoft.com> John Harris <john@johnharris.io> John Howard <github@lowenna.com> John Laswell <john.n.laswell@gmail.com> John Maguire <jmaguire@duosecurity.com> John Mulhausen <john@docker.com> John OBrien III <jobrieniii@yahoo.com> John Starks <jostarks@microsoft.com> John Stephens <johnstep@docker.com> John Tims <john.k.tims@gmail.com> John V. Martinez <jvmatl@gmail.com> John Warwick <jwarwick@gmail.com> John Willis <john.willis@docker.com> Jon Johnson <jonjohnson@google.com> Jon Surrell <jon.surrell@gmail.com> Jon Wedaman <jweede@gmail.com> Jonas Dohse <jonas@dohse.ch> Jonas Pfenniger <jonas@pfenniger.name> Jonathan A. Schweder <jonathanschweder@gmail.com> Jonathan A. 
Sternberg <jonathansternberg@gmail.com> Jonathan Boulle <jonathanboulle@gmail.com> Jonathan Camp <jonathan@irondojo.com> Jonathan Choy <jonathan.j.choy@gmail.com> Jonathan Dowland <jon+github@alcopop.org> Jonathan Lebon <jlebon@redhat.com> Jonathan Lomas <jonathan@floatinglomas.ca> Jonathan McCrohan <jmccrohan@gmail.com> Jonathan Mueller <j.mueller@apoveda.ch> Jonathan Pares <jonathanpa@users.noreply.github.com> Jonathan Rudenberg <jonathan@titanous.com> Jonathan Stoppani <jonathan.stoppani@divio.com> Jonh Wendell <jonh.wendell@redhat.com> Joni Sar <yoni@cocycles.com> Joost Cassee <joost@cassee.net> Jordan Arentsen <blissdev@gmail.com> Jordan Jennings <jjn2009@gmail.com> Jordan Sissel <jls@semicomplete.com> Jorge Marin <chipironcin@users.noreply.github.com> Jorit Kleine-Mรถllhoff <joppich@bricknet.de> Jose Diaz-Gonzalez <email@josediazgonzalez.com> Joseph Anthony Pasquale Holsten <joseph@josephholsten.com> Joseph Hager <ajhager@gmail.com> Joseph Kern <jkern@semafour.net> Joseph Rothrock <rothrock@rothrock.org> Josh <jokajak@gmail.com> Josh Bodah <jb3689@yahoo.com> Josh Bonczkowski <josh.bonczkowski@gmail.com> Josh Chorlton <jchorlton@gmail.com> Josh Eveleth <joshe@opendns.com> Josh Hawn <josh.hawn@docker.com> Josh Horwitz <horwitz@addthis.com> Josh Poimboeuf <jpoimboe@redhat.com> Josh Soref <jsoref@gmail.com> Josh Wilson <josh.wilson@fivestars.com> Josiah Kiehl <jkiehl@riotgames.com> Josรฉ Tomรกs Albornoz <jojo@eljojo.net> Joyce Jang <mail@joycejang.com> JP <jpellerin@leapfrogonline.com> Julian Taylor <jtaylor.debian@googlemail.com> Julien Barbier <write0@gmail.com> Julien Bisconti <veggiemonk@users.noreply.github.com> Julien Bordellier <julienbordellier@gmail.com> Julien Dubois <julien.dubois@gmail.com> Julien Kassar <github@kassisol.com> Julien Maitrehenry <julien.maitrehenry@me.com> Julien Pervillรฉ <julien.perville@perfect-memory.com> Julio Montes <imc.coder@gmail.com> Jun-Ru Chang <jrjang@gmail.com> Jussi Nummelin <jussi.nummelin@gmail.com> Justas Brazauskas 
<brazauskasjustas@gmail.com> Justen Martin <jmart@the-coder.com> Justin Cormack <justin.cormack@docker.com> Justin Force <justin.force@gmail.com> Justin Menga <justin.menga@gmail.com> Justin Plock <jplock@users.noreply.github.com> Justin Simonelis <justin.p.simonelis@gmail.com> Justin Terry <juterry@microsoft.com> Justyn Temme <justyntemme@gmail.com> Jyrki Puttonen <jyrkiput@gmail.com> Jรฉrรฉmy Leherpeur <amenophis@leherpeur.net> Jรฉrรดme Petazzoni <jerome.petazzoni@docker.com> Jรถrg Thalheim <joerg@higgsboson.tk> K. Heller <pestophagous@gmail.com> Kai Blin <kai@samba.org> Kai Qiang Wu (Kennan) <wkq5325@gmail.com> Kamil Domaล„ski <kamil@domanski.co> Kamjar Gerami <kami.gerami@gmail.com> Kanstantsin Shautsou <kanstantsin.sha@gmail.com> Kara Alexandra <kalexandra@us.ibm.com> Karan Lyons <karan@karanlyons.com> Kareem Khazem <karkhaz@karkhaz.com> kargakis <kargakis@users.noreply.github.com> Karl Grzeszczak <karlgrz@gmail.com> Karol Duleba <mr.fuxi@gmail.com> Karthik Karanth <karanth.karthik@gmail.com> Karthik Nayak <karthik.188@gmail.com> Kasper Fabรฆch Brandt <poizan@poizan.dk> Kate Heddleston <kate.heddleston@gmail.com> Katie McLaughlin <katie@glasnt.com> Kato Kazuyoshi <kato.kazuyoshi@gmail.com> Katrina Owen <katrina.owen@gmail.com> Kawsar Saiyeed <kawsar.saiyeed@projiris.com> Kay Yan <kay.yan@daocloud.io> kayrus <kay.diam@gmail.com> Kazuhiro Sera <seratch@gmail.com> Ke Li <kel@splunk.com> Ke Xu <leonhartx.k@gmail.com> Kei Ohmura <ohmura.kei@gmail.com> Keith Hudgins <greenman@greenman.org> Keli Hu <dev@keli.hu> Ken Cochrane <kencochrane@gmail.com> Ken Herner <kherner@progress.com> Ken ICHIKAWA <ichikawa.ken@jp.fujitsu.com> Ken Reese <krrgithub@gmail.com> Kenfe-Mickaรซl Laventure <mickael.laventure@gmail.com> Kenjiro Nakayama <nakayamakenjiro@gmail.com> Kent Johnson <kentoj@gmail.com> Kenta Tada <Kenta.Tada@sony.com> Kevin "qwazerty" Houdebert <kevin.houdebert@gmail.com> Kevin Burke <kev@inburke.com> Kevin Clark <kevin.clark@gmail.com> Kevin Feyrer 
<kevin.feyrer@btinternet.com> Kevin J. Lynagh <kevin@keminglabs.com> Kevin Jing Qiu <kevin@idempotent.ca> Kevin Kern <kaiwentan@harmonycloud.cn> Kevin Menard <kevin@nirvdrum.com> Kevin Meredith <kevin.m.meredith@gmail.com> Kevin P. Kucharczyk <kevinkucharczyk@gmail.com> Kevin Parsons <kevpar@microsoft.com> Kevin Richardson <kevin@kevinrichardson.co> Kevin Shi <kshi@andrew.cmu.edu> Kevin Wallace <kevin@pentabarf.net> Kevin Yap <me@kevinyap.ca> Keyvan Fatehi <keyvanfatehi@gmail.com> kies <lleelm@gmail.com> Kim BKC Carlbacker <kim.carlbacker@gmail.com> Kim Eik <kim@heldig.org> Kimbro Staken <kstaken@kstaken.com> Kir Kolyshkin <kolyshkin@gmail.com> Kiran Gangadharan <kiran.daredevil@gmail.com> Kirill SIbirev <l0kix2@gmail.com> knappe <tyler.knappe@gmail.com> Kohei Tsuruta <coheyxyz@gmail.com> Koichi Shiraishi <k@zchee.io> Konrad Kleine <konrad.wilhelm.kleine@gmail.com> Konstantin Gribov <grossws@gmail.com> Konstantin L <sw.double@gmail.com> Konstantin Pelykh <kpelykh@zettaset.com> Krasi Georgiev <krasi@vip-consult.solutions> Krasimir Georgiev <support@vip-consult.co.uk> Kris-Mikael Krister <krismikael@protonmail.com> Kristian Haugene <kristian.haugene@capgemini.com> Kristina Zabunova <triara.xiii@gmail.com> Krystian Wojcicki <kwojcicki@sympatico.ca> Kun Zhang <zkazure@gmail.com> Kunal Kushwaha <kushwaha_kunal_v7@lab.ntt.co.jp> Kunal Tyagi <tyagi.kunal@live.com> Kyle Conroy <kyle.j.conroy@gmail.com> Kyle Linden <linden.kyle@gmail.com> Kyle Wuolle <kyle.wuolle@gmail.com> kyu <leehk1227@gmail.com> Lachlan Coote <lcoote@vmware.com> Lai Jiangshan <jiangshanlai@gmail.com> Lajos Papp <lajos.papp@sequenceiq.com> Lakshan Perera <lakshan@laktek.com> Lalatendu Mohanty <lmohanty@redhat.com> Lance Chen <cyen0312@gmail.com> Lance Kinley <lkinley@loyaltymethods.com> Lars Butler <Lars.Butler@gmail.com> Lars Kellogg-Stedman <lars@redhat.com> Lars R. 
Damerow <lars@pixar.com> Lars-Magnus Skog <ralphtheninja@riseup.net> Laszlo Meszaros <lacienator@gmail.com> Laura Frank <ljfrank@gmail.com> Laurent Erignoux <lerignoux@gmail.com> Laurie Voss <github@seldo.com> Leandro Siqueira <leandro.siqueira@gmail.com> Lee Chao <932819864@qq.com> Lee, Meng-Han <sunrisedm4@gmail.com> leeplay <hyeongkyu.lee@navercorp.com> Lei Gong <lgong@alauda.io> Lei Jitang <leijitang@huawei.com> Len Weincier <len@cloudafrica.net> Lennie <github@consolejunkie.net> Leo Gallucci <elgalu3@gmail.com> Leszek Kowalski <github@leszekkowalski.pl> Levi Blackstone <levi.blackstone@rackspace.com> Levi Gross <levi@levigross.com> Lewis Daly <lewisdaly@me.com> Lewis Marshall <lewis@lmars.net> Lewis Peckover <lew+github@lew.io> Li Yi <denverdino@gmail.com> Liam Macgillavry <liam@kumina.nl> Liana Lo <liana.lixia@gmail.com> Liang Mingqiang <mqliang.zju@gmail.com> Liang-Chi Hsieh <viirya@gmail.com> Liao Qingwei <liaoqingwei@huawei.com> Lifubang <lifubang@acmcoder.com> Lihua Tang <lhtang@alauda.io> Lily Guo <lily.guo@docker.com> limsy <seongyeol37@gmail.com> Lin Lu <doraalin@163.com> LingFaKe <lingfake@huawei.com> Linus Heckemann <lheckemann@twig-world.com> Liran Tal <liran.tal@gmail.com> Liron Levin <liron@twistlock.com> Liu Bo <bo.li.liu@oracle.com> Liu Hua <sdu.liu@huawei.com> liwenqi <vikilwq@zju.edu.cn> lixiaobing10051267 <li.xiaobing1@zte.com.cn> Liz Zhang <lizzha@microsoft.com> LIZAO LI <lzlarryli@gmail.com> Lizzie Dixon <_@lizzie.io> Lloyd Dewolf <foolswisdom@gmail.com> Lokesh Mandvekar <lsm5@fedoraproject.org> longliqiang88 <394564827@qq.com> Lorenz Leutgeb <lorenz.leutgeb@gmail.com> Lorenzo Fontana <fontanalorenz@gmail.com> Lotus Fenn <fenn.lotus@gmail.com> Louis Delossantos <ldelossa.ld@gmail.com> Louis Opter <kalessin@kalessin.fr> Luca Favatella <luca.favatella@erlang-solutions.com> Luca Marturana <lucamarturana@gmail.com> Luca Orlandi <luca.orlandi@gmail.com> Luca-Bogdan Grigorescu <Luca-Bogdan Grigorescu> Lucas Chan <lucas-github@lucaschan.com> Lucas 
Chi <lucas@teacherspayteachers.com> Lucas Molas <lmolas@fundacionsadosky.org.ar> Lucas Silvestre <lukas.silvestre@gmail.com> Luciano Mores <leslau@gmail.com> Luis Martínez de Bartolomé Izquierdo <lmartinez@biicode.com> Luiz Svoboda <luizek@gmail.com> Lukas Heeren <lukas-heeren@hotmail.com> Lukas Waslowski <cr7pt0gr4ph7@gmail.com> lukaspustina <lukas.pustina@centerdevice.com> Lukasz Zajaczkowski <Lukasz.Zajaczkowski@ts.fujitsu.com> Luke Marsden <me@lukemarsden.net> Lyn <energylyn@zju.edu.cn> Lynda O'Leary <lyndaoleary29@gmail.com> lzhfromutsc <lzhfromustc@gmail.com> Lénaïc Huard <lhuard@amadeus.com> Ma Müller <mueller-ma@users.noreply.github.com> Ma Shimiao <mashimiao.fnst@cn.fujitsu.com> Mabin <bin.ma@huawei.com> Madhan Raj Mookkandy <MadhanRaj.Mookkandy@microsoft.com> Madhav Puri <madhav.puri@gmail.com> Madhu Venugopal <madhu@socketplane.io> Mageee <fangpuyi@foxmail.com> Mahesh Tiyyagura <tmahesh@gmail.com> malnick <malnick@gmail..com> Malte Janduda <mail@janduda.net> Manfred Touron <m@42.am> Manfred Zabarauskas <manfredas@zabarauskas.com> Manjunath A Kumatagi <mkumatag@in.ibm.com> Mansi Nahar <mmn4185@rit.edu> Manuel Meurer <manuel@krautcomputing.com> Manuel Rüger <manuel@rueg.eu> Manuel Woelker <github@manuel.woelker.org> mapk0y <mapk0y@gmail.com> Marc Abramowitz <marc@marc-abramowitz.com> Marc Kuo <kuomarc2@gmail.com> Marc Tamsky <mtamsky@gmail.com> Marcel Edmund Franke <marcel.edmund.franke@gmail.com> Marcelo Horacio Fortino <info@fortinux.com> Marcelo Salazar <chelosalazar@gmail.com> Marco Hennings <marco.hennings@freiheit.com> Marcus Cobden <mcobden@cisco.com> Marcus Farkas <toothlessgear@finitebox.com> Marcus Linke <marcus.linke@gmx.de> Marcus Martins <marcus@docker.com> Marcus Ramberg <marcus@nordaaker.com> Marek Goldmann <marek.goldmann@gmail.com> Marian Marinov <mm@yuhu.biz> Marianna Tessel <mtesselh@gmail.com> Mario Loriedo <mario.loriedo@gmail.com> Marius Gundersen <me@mariusgundersen.net> Marius Sturm <marius@graylog.com> Marius Voila 
<marius.voila@gmail.com> Mark Allen <mrallen1@yahoo.com> Mark Jeromin <mark.jeromin@sysfrog.net> Mark McGranaghan <mmcgrana@gmail.com> Mark McKinstry <mmckinst@umich.edu> Mark Milstein <mark@epiloque.com> Mark Oates <fl0yd@me.com> Mark Parker <godefroi@users.noreply.github.com> Mark West <markewest@gmail.com> Markan Patel <mpatel678@gmail.com> Marko Mikulicic <mmikulicic@gmail.com> Marko Tibold <marko@tibold.nl> Markus Fix <lispmeister@gmail.com> Markus Kortlang <hyp3rdino@googlemail.com> Martijn Dwars <ikben@martijndwars.nl> Martijn van Oosterhout <kleptog@svana.org> Martin Honermeyer <maze@strahlungsfrei.de> Martin Kelly <martin@surround.io> Martin Mosegaard Amdisen <martin.amdisen@praqma.com> Martin Muzatko <martin@happy-css.com> Martin Redmond <redmond.martin@gmail.com> Mary Anthony <mary.anthony@docker.com> Masahito Zembutsu <zembutsu@users.noreply.github.com> Masato Ohba <over.rye@gmail.com> Masayuki Morita <minamijoyo@gmail.com> Mason Malone <mason.malone@gmail.com> Mateusz Sulima <sulima.mateusz@gmail.com> Mathias Monnerville <mathias@monnerville.com> Mathieu Champlon <mathieu.champlon@docker.com> Mathieu Le Marec - Pasquet <kiorky@cryptelium.net> Mathieu Parent <math.parent@gmail.com> Matt Apperson <me@mattapperson.com> Matt Bachmann <bachmann.matt@gmail.com> Matt Bentley <matt.bentley@docker.com> Matt Haggard <haggardii@gmail.com> Matt Hoyle <matt@deployable.co> Matt McCormick <matt.mccormick@kitware.com> Matt Moore <mattmoor@google.com> Matt Richardson <matt@redgumtech.com.au> Matt Rickard <mrick@google.com> Matt Robenolt <matt@ydekproductions.com> Matt Schurenko <matt.schurenko@gmail.com> Matt Williams <mattyw@me.com> Matthew Heon <mheon@redhat.com> Matthew Lapworth <matthewl@bit-shift.net> Matthew Mayer <matthewkmayer@gmail.com> Matthew Mosesohn <raytrac3r@gmail.com> Matthew Mueller <mattmuelle@gmail.com> Matthew Riley <mattdr@google.com> Matthias Klumpp <matthias@tenstral.net> Matthias Kรผhnle <git.nivoc@neverbox.com> Matthias Rampke 
<mr@soundcloud.com> Matthieu Hauglustaine <matt.hauglustaine@gmail.com> Mattias Jernberg <nostrad@gmail.com> Mauricio Garavaglia <mauricio@medallia.com> mauriyouth <mauriyouth@gmail.com> Max Shytikov <mshytikov@gmail.com> Maxim Fedchyshyn <sevmax@gmail.com> Maxim Ivanov <ivanov.maxim@gmail.com> Maxim Kulkin <mkulkin@mirantis.com> Maxim Treskin <zerthurd@gmail.com> Maxime Petazzoni <max@signalfuse.com> Maximiliano Maccanti <maccanti@amazon.com> Maxwell <csuhp007@gmail.com> Meaglith Ma <genedna@gmail.com> meejah <meejah@meejah.ca> Megan Kostick <mkostick@us.ibm.com> Mehul Kar <mehul.kar@gmail.com> Mei ChunTao <mei.chuntao@zte.com.cn> Mengdi Gao <usrgdd@gmail.com> Mert YazฤฑcฤฑoฤŸlu <merty@users.noreply.github.com> mgniu <mgniu@dataman-inc.com> Micah Zoltu <micah@newrelic.com> Michael A. Smith <michael@smith-li.com> Michael Bridgen <mikeb@squaremobius.net> Michael Brown <michael@netdirect.ca> Michael Chiang <mchiang@docker.com> Michael Crosby <michael@docker.com> Michael Currie <mcurrie@bruceforceresearch.com> Michael Friis <friism@gmail.com> Michael Gorsuch <gorsuch@github.com> Michael Grauer <michael.grauer@kitware.com> Michael Holzheu <holzheu@linux.vnet.ibm.com> Michael Hudson-Doyle <michael.hudson@canonical.com> Michael Huettermann <michael@huettermann.net> Michael Irwin <mikesir87@gmail.com> Michael Kรคufl <docker@c.michael-kaeufl.de> Michael Neale <michael.neale@gmail.com> Michael Nussbaum <michael.nussbaum@getbraintree.com> Michael Prokop <github@michael-prokop.at> Michael Scharf <github@scharf.gr> Michael Spetsiotis <michael_spets@hotmail.com> Michael Stapelberg <michael+gh@stapelberg.de> Michael Steinert <mike.steinert@gmail.com> Michael Thies <michaelthies78@gmail.com> Michael West <mwest@mdsol.com> Michael Zhao <michael.zhao@arm.com> Michal Fojtik <mfojtik@redhat.com> Michal Gebauer <mishak@mishak.net> Michal Jemala <michal.jemala@gmail.com> Michal Minรกล™ <miminar@redhat.com> Michal Wieczorek <wieczorek-michal@wp.pl> Michaรซl Pailloncy 
<mpapo.dev@gmail.com> Michał Czeraszkiewicz <czerasz@gmail.com> Michał Gryko <github@odkurzacz.org> Michiel de Jong <michiel@unhosted.org> Mickaël Fortunato <morsi.morsicus@gmail.com> Mickaël Remars <mickael@remars.com> Miguel Angel Fernández <elmendalerenda@gmail.com> Miguel Morales <mimoralea@gmail.com> Mihai Borobocea <MihaiBorob@gmail.com> Mihuleacc Sergiu <mihuleac.sergiu@gmail.com> Mike Brown <brownwm@us.ibm.com> Mike Casas <mkcsas0@gmail.com> Mike Chelen <michael.chelen@gmail.com> Mike Danese <mikedanese@google.com> Mike Dillon <mike@embody.org> Mike Dougherty <mike.dougherty@docker.com> Mike Estes <mike.estes@logos.com> Mike Gaffney <mike@uberu.com> Mike Goelzer <mike.goelzer@docker.com> Mike Leone <mleone896@gmail.com> Mike Lundy <mike@fluffypenguin.org> Mike MacCana <mike.maccana@gmail.com> Mike Naberezny <mike@naberezny.com> Mike Snitzer <snitzer@redhat.com> mikelinjie <294893458@qq.com> Mikhail Sobolev <mss@mawhrin.net> Miklos Szegedi <miklos.szegedi@cloudera.com> Milind Chawre <milindchawre@gmail.com> Miloslav Trmač <mitr@redhat.com> mingqing <limingqing@cyou-inc.com> Mingzhen Feng <fmzhen@zju.edu.cn> Misty Stanley-Jones <misty@docker.com> Mitch Capper <mitch.capper@gmail.com> Mizuki Urushida <z11111001011@gmail.com> mlarcher <github@ringabell.org> Mohammad Banikazemi <mb@us.ibm.com> Mohammad Nasirifar <farnasirim@gmail.com> Mohammed Aaqib Ansari <maaquib@gmail.com> Mohit Soni <mosoni@ebay.com> Moorthy RS <rsmoorthy@gmail.com> Morgan Bauer <mbauer@us.ibm.com> Morgante Pell <morgante.pell@morgante.net> Morgy93 <thomas@ulfertsprygoda.de> Morten Siebuhr <sbhr@sbhr.dk> Morton Fox <github@qslw.com> Moysés Borges <moysesb@gmail.com> mrfly <mr.wrfly@gmail.com> Mrunal Patel <mrunalp@gmail.com> Muayyad Alsadi <alsadi@gmail.com> Mustafa Akın <mustafa91@gmail.com> Muthukumar R <muthur@gmail.com> Máximo Cuadros <mcuadros@gmail.com> Médi-Rémi Hashim <medimatrix@users.noreply.github.com> Nace Oroz <orkica@gmail.com> Nahum Shalman <nshalman@omniti.com> 
Nakul Pathak <nakulpathak3@hotmail.com> Nalin Dahyabhai <nalin@redhat.com> Nan Monnand Deng <monnand@gmail.com> Naoki Orii <norii@cs.cmu.edu> Natalie Parker <nparker@omnifone.com> Natanael Copa <natanael.copa@docker.com> Natasha Jarus <linuxmercedes@gmail.com> Nate Brennand <nate.brennand@clever.com> Nate Eagleson <nate@nateeag.com> Nate Jones <nate@endot.org> Nathan Hsieh <hsieh.nathan@gmail.com> Nathan Kleyn <nathan@nathankleyn.com> Nathan LeClaire <nathan.leclaire@docker.com> Nathan McCauley <nathan.mccauley@docker.com> Nathan Williams <nathan@teamtreehouse.com> Naveed Jamil <naveed.jamil@tenpearls.com> Neal McBurnett <neal@mcburnett.org> Neil Horman <nhorman@tuxdriver.com> Neil Peterson <neilpeterson@outlook.com> Nelson Chen <crazysim@gmail.com> Neyazul Haque <nuhaque@gmail.com> Nghia Tran <nghia@google.com> Niall O'Higgins <niallo@unworkable.org> Nicholas E. Rabenau <nerab@gmx.at> Nick Adcock <nick.adcock@docker.com> Nick DeCoursin <n.decoursin@foodpanda.com> Nick Irvine <nfirvine@nfirvine.com> Nick Neisen <nwneisen@gmail.com> Nick Parker <nikaios@gmail.com> Nick Payne <nick@kurai.co.uk> Nick Russo <nicholasjamesrusso@gmail.com> Nick Stenning <nick.stenning@digital.cabinet-office.gov.uk> Nick Stinemates <nick@stinemates.org> NickrenREN <yuquan.ren@easystack.cn> Nicola Kabar <nicolaka@gmail.com> Nicolas Borboรซn <ponsfrilus@gmail.com> Nicolas De Loof <nicolas.deloof@gmail.com> Nicolas Dudebout <nicolas.dudebout@gatech.edu> Nicolas Goy <kuon@goyman.com> Nicolas Kaiser <nikai@nikai.net> Nicolas Sterchele <sterchele.nicolas@gmail.com> Nicolas V Castet <nvcastet@us.ibm.com> Nicolรกs Hock Isaza <nhocki@gmail.com> Nigel Poulton <nigelpoulton@hotmail.com> Nik Nyby <nikolas@gnu.org> Nikhil Chawla <chawlanikhil24@gmail.com> NikolaMandic <mn080202@gmail.com> Nikolas Garofil <nikolas.garofil@uantwerpen.be> Nikolay Milovanov <nmil@itransformers.net> Nirmal Mehta <nirmalkmehta@gmail.com> Nishant Totla <nishanttotla@gmail.com> NIWA Hideyuki <niwa.niwa@nifty.ne.jp> Noah 
Meyerhans <nmeyerha@amazon.com> Noah Treuhaft <noah.treuhaft@docker.com> NobodyOnSE <ich@sektor.selfip.com> noducks <onemannoducks@gmail.com> Nolan Darilek <nolan@thewordnerd.info> Noriki Nakamura <noriki.nakamura@miraclelinux.com> nponeccop <andy.melnikov@gmail.com> Nuutti Kotivuori <naked@iki.fi> nzwsch <hi@nzwsch.com> O.S. Tezer <ostezer@gmail.com> objectified <objectified@gmail.com> Odin Ugedal <odin@ugedal.com> Oguz Bilgic <fisyonet@gmail.com> Oh Jinkyun <tintypemolly@gmail.com> Ohad Schneider <ohadschn@users.noreply.github.com> ohmystack <jun.jiang02@ele.me> Ole Reifschneider <mail@ole-reifschneider.de> Oliver Neal <ItsVeryWindy@users.noreply.github.com> Oliver Reason <oli@overrateddev.co> Olivier Gambier <dmp42@users.noreply.github.com> Olle Jonsson <olle.jonsson@gmail.com> Olli Janatuinen <olli.janatuinen@gmail.com> Olly Pomeroy <oppomeroy@gmail.com> Omri Shiv <Omri.Shiv@teradata.com> Oriol Francรจs <oriolfa@gmail.com> Oskar Niburski <oskarniburski@gmail.com> Otto Kekรคlรคinen <otto@seravo.fi> Ouyang Liduo <oyld0210@163.com> Ovidio Mallo <ovidio.mallo@gmail.com> Panagiotis Moustafellos <pmoust@elastic.co> Paolo G. 
Giarrusso <p.giarrusso@gmail.com> Pascal <pascalgn@users.noreply.github.com> Pascal Bach <pascal.bach@siemens.com> Pascal Borreli <pascal@borreli.com> Pascal Hartig <phartig@rdrei.net> Patrick Bรถรคnziger <patrick.baenziger@bsi-software.com> Patrick Devine <patrick.devine@docker.com> Patrick Hemmer <patrick.hemmer@gmail.com> Patrick Stapleton <github@gdi2290.com> Patrik Cyvoct <patrik@ptrk.io> pattichen <craftsbear@gmail.com> Paul <paul9869@gmail.com> paul <paul@inkling.com> Paul Annesley <paul@annesley.cc> Paul Bellamy <paul.a.bellamy@gmail.com> Paul Bowsher <pbowsher@globalpersonals.co.uk> Paul Furtado <pfurtado@hubspot.com> Paul Hammond <paul@paulhammond.org> Paul Jimenez <pj@place.org> Paul Kehrer <paul.l.kehrer@gmail.com> Paul Lietar <paul@lietar.net> Paul Liljenberg <liljenberg.paul@gmail.com> Paul Morie <pmorie@gmail.com> Paul Nasrat <pnasrat@gmail.com> Paul Weaver <pauweave@cisco.com> Paulo Ribeiro <paigr.io@gmail.com> Pavel Lobashov <ShockwaveNN@gmail.com> Pavel Matฤ›ja <pavel@verotel.cz> Pavel Pletenev <cpp.create@gmail.com> Pavel Pospisil <pospispa@gmail.com> Pavel Sutyrin <pavel.sutyrin@gmail.com> Pavel Tikhomirov <ptikhomirov@virtuozzo.com> Pavlos Ratis <dastergon@gentoo.org> Pavol Vargovcik <pallly.vargovcik@gmail.com> Pawel Konczalski <mail@konczalski.de> Peeyush Gupta <gpeeyush@linux.vnet.ibm.com> Peggy Li <peggyli.224@gmail.com> Pei Su <sillyousu@gmail.com> Peng Tao <bergwolf@gmail.com> Penghan Wang <ph.wang@daocloud.io> Per Weijnitz <per.weijnitz@gmail.com> perhapszzy@sina.com <perhapszzy@sina.com> Peter Bourgon <peter@bourgon.org> Peter Braden <peterbraden@peterbraden.co.uk> Peter Bรผcker <peter.buecker@pressrelations.de> Peter Choi <phkchoi89@gmail.com> Peter Dave Hello <hsu@peterdavehello.org> Peter Edge <peter.edge@gmail.com> Peter Ericson <pdericson@gmail.com> Peter Esbensen <pkesbensen@gmail.com> Peter Jaffe <pjaffe@nevo.com> Peter Kang <peter@spell.run> Peter Malmgren <ptmalmgren@gmail.com> Peter Salvatore <peter@psftw.com> Peter Volpe 
<petervo@redhat.com> Peter Waller <p@pwaller.net> Petr ล vihlรญk <svihlik.petr@gmail.com> Phil <underscorephil@gmail.com> Phil Estes <estesp@linux.vnet.ibm.com> Phil Spitler <pspitler@gmail.com> Philip Alexander Etling <paetling@gmail.com> Philip Monroe <phil@philmonroe.com> Philipp Gillรฉ <philipp.gille@gmail.com> Philipp Wahala <philipp.wahala@gmail.com> Philipp Weissensteiner <mail@philippweissensteiner.com> Phillip Alexander <git@phillipalexander.io> phineas <phin@phineas.io> pidster <pid@pidster.com> Piergiuliano Bossi <pgbossi@gmail.com> Pierre <py@poujade.org> Pierre Carrier <pierre@meteor.com> Pierre Dal-Pra <dalpra.pierre@gmail.com> Pierre Wacrenier <pierre.wacrenier@gmail.com> Pierre-Alain RIVIERE <pariviere@ippon.fr> Piotr Bogdan <ppbogdan@gmail.com> pixelistik <pixelistik@users.noreply.github.com> Porjo <porjo38@yahoo.com.au> Poul Kjeldager Sรธrensen <pks@s-innovations.net> Pradeep Chhetri <pradeep@indix.com> Pradip Dhara <pradipd@microsoft.com> Prasanna Gautam <prasannagautam@gmail.com> Pratik Karki <prertik@outlook.com> Prayag Verma <prayag.verma@gmail.com> Priya Wadhwa <priyawadhwa@google.com> Projjol Banerji <probaner23@gmail.com> Przemek Hejman <przemyslaw.hejman@gmail.com> Pure White <daniel48@126.com> pysqz <randomq@126.com> Qiang Huang <h.huangqiang@huawei.com> Qinglan Peng <qinglanpeng@zju.edu.cn> qudongfang <qudongfang@gmail.com> Quentin Brossard <qbrossard@gmail.com> Quentin Perez <qperez@ocs.online.net> Quentin Tayssier <qtayssier@gmail.com> r0n22 <cameron.regan@gmail.com> Radostin Stoyanov <rstoyanov1@gmail.com> Rafal Jeczalik <rjeczalik@gmail.com> Rafe Colton <rafael.colton@gmail.com> Raghavendra K T <raghavendra.kt@linux.vnet.ibm.com> Raghuram Devarakonda <draghuram@gmail.com> Raja Sami <raja.sami@tenpearls.com> Rajat Pandit <rp@rajatpandit.com> Rajdeep Dua <dua_rajdeep@yahoo.com> Ralf Sippl <ralf.sippl@gmail.com> Ralle <spam@rasmusa.net> Ralph Bean <rbean@redhat.com> Ramkumar Ramachandra <artagnon@gmail.com> Ramon Brooker 
<rbrooker@aetherealmind.com> Ramon van Alteren <ramon@vanalteren.nl> RaviTeja Pothana <ravi-teja@live.com> Ray Tsang <rayt@google.com> ReadmeCritic <frankensteinbot@gmail.com> Recursive Madman <recursive.madman@gmx.de> Reficul <xuzhenglun@gmail.com> Regan McCooey <rmccooey27@aol.com> Remi Rampin <remirampin@gmail.com> Remy Suen <remy.suen@gmail.com> Renato Riccieri Santos Zannon <renato.riccieri@gmail.com> Renaud Gaubert <rgaubert@nvidia.com> Rhys Hiltner <rhys@twitch.tv> Ri Xu <xuri.me@gmail.com> Ricardo N Feliciano <FelicianoTech@gmail.com> Rich Moyse <rich@moyse.us> Rich Seymour <rseymour@gmail.com> Richard <richard.scothern@gmail.com> Richard Burnison <rburnison@ebay.com> Richard Harvey <richard@squarecows.com> Richard Mathie <richard.mathie@amey.co.uk> Richard Metzler <richard@paadee.com> Richard Scothern <richard.scothern@gmail.com> Richo Healey <richo@psych0tik.net> Rick Bradley <rick@users.noreply.github.com> Rick van de Loo <rickvandeloo@gmail.com> Rick Wieman <git@rickw.nl> Rik Nijessen <rik@keefo.nl> Riku Voipio <riku.voipio@linaro.org> Riley Guerin <rileytg.dev@gmail.com> Ritesh H Shukla <sritesh@vmware.com> Riyaz Faizullabhoy <riyaz.faizullabhoy@docker.com> Rob Gulewich <rgulewich@netflix.com> Rob Vesse <rvesse@dotnetrdf.org> Robert Bachmann <rb@robertbachmann.at> Robert Bittle <guywithnose@gmail.com> Robert Obryk <robryk@gmail.com> Robert Schneider <mail@shakeme.info> Robert Stern <lexandro2000@gmail.com> Robert Terhaar <rterhaar@atlanticdynamic.com> Robert Wallis <smilingrob@gmail.com> Robert Wang <robert@arctic.tw> Roberto G. 
Hashioka <roberto.hashioka@docker.com> Roberto Muรฑoz Fernรกndez <robertomf@gmail.com> Robin Naundorf <r.naundorf@fh-muenster.de> Robin Schneider <ypid@riseup.net> Robin Speekenbrink <robin@kingsquare.nl> Robin Thoni <robin@rthoni.com> robpc <rpcann@gmail.com> Rodolfo Carvalho <rhcarvalho@gmail.com> Rodrigo Vaz <rodrigo.vaz@gmail.com> Roel Van Nyen <roel.vannyen@gmail.com> Roger Peppe <rogpeppe@gmail.com> Rohit Jnagal <jnagal@google.com> Rohit Kadam <rohit.d.kadam@gmail.com> Rohit Kapur <rkapur@flatiron.com> Rojin George <rojingeorge@huawei.com> Roland HuรŸ <roland@jolokia.org> Roland Kammerer <roland.kammerer@linbit.com> Roland Moriz <rmoriz@users.noreply.github.com> Roma Sokolov <sokolov.r.v@gmail.com> Roman Dudin <katrmr@gmail.com> Roman Strashkin <roman.strashkin@gmail.com> Ron Smits <ron.smits@gmail.com> Ron Williams <ron.a.williams@gmail.com> Rong Gao <gaoronggood@163.com> Rong Zhang <rongzhang@alauda.io> Rongxiang Song <tinysong1226@gmail.com> root <docker-dummy@example.com> root <root@lxdebmas.marist.edu> root <root@ubuntu-14.04-amd64-vbox> root <root@webm215.cluster016.ha.ovh.net> Rory Hunter <roryhunter2@gmail.com> Rory McCune <raesene@gmail.com> Ross Boucher <rboucher@gmail.com> Rovanion Luckey <rovanion.luckey@gmail.com> Royce Remer <royceremer@gmail.com> Rozhnov Alexandr <nox73@ya.ru> Rudolph Gottesheim <r.gottesheim@loot.at> Rui Cao <ruicao@alauda.io> Rui Lopes <rgl@ruilopes.com> Ruilin Li <liruilin4@huawei.com> Runshen Zhu <runshen.zhu@gmail.com> Russ Magee <rmagee@gmail.com> Ryan Abrams <rdabrams@gmail.com> Ryan Anderson <anderson.ryanc@gmail.com> Ryan Aslett <github@mixologic.com> Ryan Belgrave <rmb1993@gmail.com> Ryan Detzel <ryan.detzel@gmail.com> Ryan Fowler <rwfowler@gmail.com> Ryan Liu <ryanlyy@me.com> Ryan McLaughlin <rmclaughlin@insidesales.com> Ryan O'Donnell <odonnellryanc@gmail.com> Ryan Seto <ryanseto@yak.net> Ryan Simmen <ryan.simmen@gmail.com> Ryan Stelly <ryan.stelly@live.com> Ryan Thomas <rthomas@atlassian.com> Ryan Trauntvein 
<rtrauntvein@novacoast.com> Ryan Wallner <ryan.wallner@clusterhq.com> Ryan Zhang <ryan.zhang@docker.com> ryancooper7 <ryan.cooper7@gmail.com> RyanDeng <sheldon.d1018@gmail.com> Ryo Nakao <nakabonne@gmail.com> Rémy Greinhofer <remy.greinhofer@livelovely.com> s. rannou <mxs@sbrk.org> s00318865 <sunyuan3@huawei.com> Sabin Basyal <sabin.basyal@gmail.com> Sachin Joshi <sachin_jayant_joshi@hotmail.com> Sagar Hani <sagarhani33@gmail.com> Sainath Grandhi <sainath.grandhi@intel.com> Sakeven Jiang <jc5930@sina.cn> Salahuddin Khan <salah@docker.com> Sally O'Malley <somalley@redhat.com> Sam Abed <sam.abed@gmail.com> Sam Alba <sam.alba@gmail.com> Sam Bailey <cyprix@cyprix.com.au> Sam J Sharpe <sam.sharpe@digital.cabinet-office.gov.uk> Sam Neirinck <sam@samneirinck.com> Sam Reis <sreis@atlassian.com> Sam Rijs <srijs@airpost.net> Sam Whited <sam@samwhited.com> Sambuddha Basu <sambuddhabasu1@gmail.com> Sami Wagiaalla <swagiaal@redhat.com> Samuel Andaya <samuel@andaya.net> Samuel Dion-Girardeau <samuel.diongirardeau@gmail.com> Samuel Karp <skarp@amazon.com> Samuel PHAN <samuel-phan@users.noreply.github.com> Sandeep Bansal <sabansal@microsoft.com> Sankar சங்கர் <sankar.curiosity@gmail.com> Sanket Saurav <sanketsaurav@gmail.com> Santhosh Manohar <santhosh@docker.com> sapphiredev <se.imas.kr@gmail.com> Sargun Dhillon <sargun@netflix.com> Sascha Andres <sascha.andres@outlook.com> Sascha Grunert <sgrunert@suse.com> SataQiu <qiushida@beyondcent.com> Satnam Singh <satnam@raintown.org> Satoshi Amemiya <satoshi_amemiya@voyagegroup.com> Satoshi Tagomori <tagomoris@gmail.com> Scott Bessler <scottbessler@gmail.com> Scott Collier <emailscottcollier@gmail.com> Scott Johnston <scott@docker.com> Scott Stamp <scottstamp851@gmail.com> Scott Walls <sawalls@umich.edu> sdreyesg <sdreyesg@gmail.com> Sean Christopherson <sean.j.christopherson@intel.com> Sean Cronin <seancron@gmail.com> Sean Lee <seanlee@tw.ibm.com> Sean McIntyre <s.mcintyre@xverba.ca> Sean OMeara <sean@chef.io> Sean P. 
Kane <skane@newrelic.com> Sean Rodman <srodman7689@gmail.com> Sebastiaan van Steenis <mail@superseb.nl> Sebastiaan van Stijn <github@gone.nl> Senthil Kumar Selvaraj <senthil.thecoder@gmail.com> Senthil Kumaran <senthil@uthcode.com> SeongJae Park <sj38.park@gmail.com> Seongyeol Lim <seongyeol37@gmail.com> Serge Hallyn <serge.hallyn@ubuntu.com> Sergey Alekseev <sergey.alekseev.minsk@gmail.com> Sergey Evstifeev <sergey.evstifeev@gmail.com> Sergii Kabashniuk <skabashnyuk@codenvy.com> Sergio Lopez <slp@redhat.com> Serhat Gรผlรงiรงek <serhat25@gmail.com> SeungUkLee <lsy931106@gmail.com> Sevki Hasirci <s@sevki.org> Shane Canon <scanon@lbl.gov> Shane da Silva <shane@dasilva.io> Shaun Kaasten <shaunk@gmail.com> shaunol <shaunol@gmail.com> Shawn Landden <shawn@churchofgit.com> Shawn Siefkas <shawn.siefkas@meredith.com> shawnhe <shawnhe@shawnhedeMacBook-Pro.local> Shayne Wang <shaynexwang@gmail.com> Shekhar Gulati <shekhargulati84@gmail.com> Sheng Yang <sheng@yasker.org> Shengbo Song <thomassong@tencent.com> Shev Yan <yandong_8212@163.com> Shih-Yuan Lee <fourdollars@gmail.com> Shijiang Wei <mountkin@gmail.com> Shijun Qin <qinshijun16@mails.ucas.ac.cn> Shishir Mahajan <shishir.mahajan@redhat.com> Shoubhik Bose <sbose78@gmail.com> Shourya Sarcar <shourya.sarcar@gmail.com> Shu-Wai Chow <shu-wai.chow@seattlechildrens.org> shuai-z <zs.broccoli@gmail.com> Shukui Yang <yangshukui@huawei.com> Shuwei Hao <haosw@cn.ibm.com> Sian Lerk Lau <kiawin@gmail.com> Sidhartha Mani <sidharthamn@gmail.com> sidharthamani <sid@rancher.com> Silas Sewell <silas@sewell.org> Silvan Jegen <s.jegen@gmail.com> Simaฬƒo Reis <smnrsti@gmail.com> Simei He <hesimei@zju.edu.cn> Simon Barendse <simon.barendse@gmail.com> Simon Eskildsen <sirup@sirupsen.com> Simon Ferquel <simon.ferquel@docker.com> Simon Leinen <simon.leinen@gmail.com> Simon Menke <simon.menke@gmail.com> Simon Taranto <simon.taranto@gmail.com> Simon Vikstrom <pullreq@devsn.se> Sindhu S <sindhus@live.in> Sjoerd Langkemper 
<sjoerd-github@linuxonly.nl> skanehira <sho19921005@gmail.com> Solganik Alexander <solganik@gmail.com> Solomon Hykes <solomon@docker.com> Song Gao <song@gao.io> Soshi Katsuta <soshi.katsuta@gmail.com> Soulou <leo@unbekandt.eu> Spencer Brown <spencer@spencerbrown.org> Spencer Smith <robertspencersmith@gmail.com> Sridatta Thatipamala <sthatipamala@gmail.com> Sridhar Ratnakumar <sridharr@activestate.com> Srini Brahmaroutu <srbrahma@us.ibm.com> Srinivasan Srivatsan <srinivasan.srivatsan@hpe.com> Staf Wagemakers <staf@wagemakers.be> Stanislav Bondarenko <stanislav.bondarenko@gmail.com> Steeve Morin <steeve.morin@gmail.com> Stefan Berger <stefanb@linux.vnet.ibm.com> Stefan J. Wernli <swernli@microsoft.com> Stefan Praszalowicz <stefan@greplin.com> Stefan S. <tronicum@user.github.com> Stefan Scherer <stefan.scherer@docker.com> Stefan Staudenmeyer <doerte@instana.com> Stefan Weil <sw@weilnetz.de> Stephan Spindler <shutefan@gmail.com> Stephen Benjamin <stephen@redhat.com> Stephen Crosby <stevecrozz@gmail.com> Stephen Day <stephen.day@docker.com> Stephen Drake <stephen@xenolith.net> Stephen Rust <srust@blockbridge.com> Steve Desmond <steve@vtsv.ca> Steve Dougherty <steve@asksteved.com> Steve Durrheimer <s.durrheimer@gmail.com> Steve Francia <steve.francia@gmail.com> Steve Koch <stevekochscience@gmail.com> Steven Burgess <steven.a.burgess@hotmail.com> Steven Erenst <stevenerenst@gmail.com> Steven Hartland <steven.hartland@multiplay.co.uk> Steven Iveson <sjiveson@outlook.com> Steven Merrill <steven.merrill@gmail.com> Steven Richards <steven@axiomzen.co> Steven Taylor <steven.taylor@me.com> Stig Larsson <stig@larsson.dev> Subhajit Ghosh <isubuz.g@gmail.com> Sujith Haridasan <sujith.h@gmail.com> Sun Gengze <690388648@qq.com> Sun Jianbo <wonderflow.sun@gmail.com> Sune Keller <sune.keller@gmail.com> Sunny Gogoi <indiasuny000@gmail.com> Suryakumar Sudar <surya.trunks@gmail.com> Sven Dowideit <SvenDowideit@home.org.au> Swapnil Daingade <swapnil.daingade@gmail.com> Sylvain Baubeau 
<sbaubeau@redhat.com> Sylvain Bellemare <sylvain@ascribe.io> Sรฉbastien <sebastien@yoozio.com> Sรฉbastien HOUZร‰ <cto@verylastroom.com> Sรฉbastien Luttringer <seblu@seblu.net> Sรฉbastien Stormacq <sebsto@users.noreply.github.com> Tabakhase <mail@tabakhase.com> Tadej Janeลพ <tadej.j@nez.si> TAGOMORI Satoshi <tagomoris@gmail.com> tang0th <tang0th@gmx.com> Tangi Colin <tangicolin@gmail.com> Tatsuki Sugiura <sugi@nemui.org> Tatsushi Inagaki <e29253@jp.ibm.com> Taylan Isikdemir <taylani@google.com> Taylor Jones <monitorjbl@gmail.com> Ted M. Young <tedyoung@gmail.com> Tehmasp Chaudhri <tehmasp@gmail.com> Tejaswini Duggaraju <naduggar@microsoft.com> Tejesh Mehta <tejesh.mehta@gmail.com> terryding77 <550147740@qq.com> tgic <farmer1992@gmail.com> Thatcher Peskens <thatcher@docker.com> theadactyl <thea.lamkin@gmail.com> Thell 'Bo' Fowler <thell@tbfowler.name> Thermionix <bond711@gmail.com> Thijs Terlouw <thijsterlouw@gmail.com> Thomas Bikeev <thomas.bikeev@mac.com> Thomas Frรถssman <thomasf@jossystem.se> Thomas Gazagnaire <thomas@gazagnaire.org> Thomas Grainger <tagrain@gmail.com> Thomas Hansen <thomas.hansen@gmail.com> Thomas Leonard <thomas.leonard@docker.com> Thomas Lรฉveil <thomasleveil@gmail.com> Thomas Orozco <thomas@orozco.fr> Thomas Riccardi <riccardi@systran.fr> Thomas Schroeter <thomas@cliqz.com> Thomas Sjรถgren <konstruktoid@users.noreply.github.com> Thomas Swift <tgs242@gmail.com> Thomas Tanaka <thomas.tanaka@oracle.com> Thomas Texier <sharkone@en-mousse.org> Ti Zhou <tizhou1986@gmail.com> Tianon Gravi <admwiggin@gmail.com> Tianyi Wang <capkurmagati@gmail.com> Tibor Vass <teabee89@gmail.com> Tiffany Jernigan <tiffany.f.j@gmail.com> Tiffany Low <tiffany@box.com> Tim <elatllat@gmail.com> Tim Bart <tim@fewagainstmany.com> Tim Bosse <taim@bosboot.org> Tim Dettrick <t.dettrick@uq.edu.au> Tim Dรผsterhus <tim@bastelstu.be> Tim Hockin <thockin@google.com> Tim Potter <tpot@hpe.com> Tim Ruffles <oi@truffles.me.uk> Tim Smith <timbot@google.com> Tim Terhorst 
<mynamewastaken+git@gmail.com> Tim Wang <timwangdev@gmail.com> Tim Waugh <twaugh@redhat.com> Tim Wraight <tim.wraight@tangentlabs.co.uk> Tim Zju <21651152@zju.edu.cn> timfeirg <kkcocogogo@gmail.com> Timothy Hobbs <timothyhobbs@seznam.cz> tjwebb123 <tjwebb123@users.noreply.github.com> tobe <tobegit3hub@gmail.com> Tobias Bieniek <Tobias.Bieniek@gmx.de> Tobias Bradtke <webwurst@gmail.com> Tobias Gesellchen <tobias@gesellix.de> Tobias Klauser <tklauser@distanz.ch> Tobias Munk <schmunk@usrbin.de> Tobias Schmidt <ts@soundcloud.com> Tobias Schwab <tobias.schwab@dynport.de> Todd Crane <todd@toddcrane.com> Todd Lunter <tlunter@gmail.com> Todd Whiteman <todd.whiteman@joyent.com> Toli Kuznets <toli@docker.com> Tom Barlow <tomwbarlow@gmail.com> Tom Booth <tombooth@gmail.com> Tom Denham <tom@tomdee.co.uk> Tom Fotherby <tom+github@peopleperhour.com> Tom Howe <tom.howe@enstratius.com> Tom Hulihan <hulihan.tom159@gmail.com> Tom Maaswinkel <tom.maaswinkel@12wiki.eu> Tom Sweeney <tsweeney@redhat.com> Tom Wilkie <tom.wilkie@gmail.com> Tom X. 
Tobin <tomxtobin@tomxtobin.com> Tomas Tomecek <ttomecek@redhat.com> Tomasz Kopczynski <tomek@kopczynski.net.pl> Tomasz Lipinski <tlipinski@users.noreply.github.com> Tomasz Nurkiewicz <nurkiewicz@gmail.com> Tommaso Visconti <tommaso.visconti@gmail.com> Tomรกลก Hrฤka <thrcka@redhat.com> Tonny Xu <tonny.xu@gmail.com> Tony Abboud <tdabboud@hotmail.com> Tony Daws <tony@daws.ca> Tony Miller <mcfiredrill@gmail.com> toogley <toogley@mailbox.org> Torstein Husebรธ <torstein@huseboe.net> Toฬƒnis Tiigi <tonistiigi@gmail.com> tpng <benny.tpng@gmail.com> tracylihui <793912329@qq.com> Trapier Marshall <trapier.marshall@docker.com> Travis Cline <travis.cline@gmail.com> Travis Thieman <travis.thieman@gmail.com> Trent Ogren <tedwardo2@gmail.com> Trevor <trevinwoodstock@gmail.com> Trevor Pounds <trevor.pounds@gmail.com> Trevor Sullivan <pcgeek86@gmail.com> Trishna Guha <trishnaguha17@gmail.com> Tristan Carel <tristan@cogniteev.com> Troy Denton <trdenton@gmail.com> Tycho Andersen <tycho@docker.com> Tyler Brock <tyler.brock@gmail.com> Tyler Brown <tylers.pile@gmail.com> Tzu-Jung Lee <roylee17@gmail.com> uhayate <uhayate.gong@daocloud.io> Ulysse Carion <ulyssecarion@gmail.com> Umesh Yadav <umesh4257@gmail.com> Utz Bacher <utz.bacher@de.ibm.com> vagrant <vagrant@ubuntu-14.04-amd64-vbox> Vaidas Jablonskis <jablonskis@gmail.com> vanderliang <lansheng@meili-inc.com> Veres Lajos <vlajos@gmail.com> Victor Algaze <valgaze@gmail.com> Victor Coisne <victor.coisne@dotcloud.com> Victor Costan <costan@gmail.com> Victor I. 
Wood <viw@t2am.com> Victor Lyuboslavsky <victor@victoreda.com> Victor Marmol <vmarmol@google.com> Victor Palma <palma.victor@gmail.com> Victor Vieux <victor.vieux@docker.com> Victoria Bialas <victoria.bialas@docker.com> Vijaya Kumar K <vijayak@caviumnetworks.com> Vikram bir Singh <vikrambir.singh@docker.com> Viktor Stanchev <me@viktorstanchev.com> Viktor Vojnovski <viktor.vojnovski@amadeus.com> VinayRaghavanKS <raghavan.vinay@gmail.com> Vincent Batts <vbatts@redhat.com> Vincent Bernat <Vincent.Bernat@exoscale.ch> Vincent Demeester <vincent.demeester@docker.com> Vincent Giersch <vincent.giersch@ovh.net> Vincent Mayers <vincent.mayers@inbloom.org> Vincent Woo <me@vincentwoo.com> Vinod Kulkarni <vinod.kulkarni@gmail.com> Vishal Doshi <vishal.doshi@gmail.com> Vishnu Kannan <vishnuk@google.com> Vitaly Ostrosablin <vostrosablin@virtuozzo.com> Vitor Monteiro <vmrmonteiro@gmail.com> Vivek Agarwal <me@vivek.im> Vivek Dasgupta <vdasgupt@redhat.com> Vivek Goyal <vgoyal@redhat.com> Vladimir Bulyga <xx@ccxx.cc> Vladimir Kirillov <proger@wilab.org.ua> Vladimir Pouzanov <farcaller@google.com> Vladimir Rutsky <altsysrq@gmail.com> Vladimir Varankin <nek.narqo+git@gmail.com> VladimirAus <v_roudakov@yahoo.com> Vlastimil Zeman <vlastimil.zeman@diffblue.com> Vojtech Vitek (V-Teq) <vvitek@redhat.com> waitingkuo <waitingkuo0527@gmail.com> Walter Leibbrandt <github@wrl.co.za> Walter Stanish <walter@pratyeka.org> Wang Chao <chao.wang@ucloud.cn> Wang Guoliang <liangcszzu@163.com> Wang Jie <wangjie5@chinaskycloud.com> Wang Long <long.wanglong@huawei.com> Wang Ping <present.wp@icloud.com> Wang Xing <hzwangxing@corp.netease.com> Wang Yuexiao <wang.yuexiao@zte.com.cn> Ward Vandewege <ward@jhvc.com> WarheadsSE <max@warheads.net> Wassim Dhif <wassimdhif@gmail.com> Wayne Chang <wayne@neverfear.org> Wayne Song <wsong@docker.com> Weerasak Chongnguluam <singpor@gmail.com> Wei Fu <fuweid89@gmail.com> Wei Wu <wuwei4455@gmail.com> Wei-Ting Kuo <waitingkuo0527@gmail.com> weipeng <weipeng@tuscloud.io> 
weiyan <weiyan3@huawei.com> Weiyang Zhu <cnresonant@gmail.com> Wen Cheng Ma <wenchma@cn.ibm.com> Wendel Fleming <wfleming@usc.edu> Wenjun Tang <tangwj2@lenovo.com> Wenkai Yin <yinw@vmware.com> Wentao Zhang <zhangwentao234@huawei.com> Wenxuan Zhao <viz@linux.com> Wenyu You <21551128@zju.edu.cn> Wenzhi Liang <wenzhi.liang@gmail.com> Wes Morgan <cap10morgan@gmail.com> Wewang Xiaorenfine <wang.xiaoren@zte.com.cn> Wiktor Kwapisiewicz <wiktor@metacode.biz> Will Dietz <w@wdtz.org> Will Rouesnel <w.rouesnel@gmail.com> Will Weaver <monkey@buildingbananas.com> willhf <willhf@gmail.com> William Delanoue <william.delanoue@gmail.com> William Henry <whenry@redhat.com> William Hubbs <w.d.hubbs@gmail.com> William Martin <wmartin@pivotal.io> William Riancho <wr.wllm@gmail.com> William Thurston <thurstw@amazon.com> WiseTrem <shepelyov.g@gmail.com> Wolfgang Powisch <powo@powo.priv.at> Wonjun Kim <wonjun.kim@navercorp.com> xamyzhao <x.amy.zhao@gmail.com> Xian Chaobo <xianchaobo@huawei.com> Xianglin Gao <xlgao@zju.edu.cn> Xianlu Bird <xianlubird@gmail.com> Xiao YongBiao <xyb4638@gmail.com> XiaoBing Jiang <s7v7nislands@gmail.com> Xiaodong Zhang <a4012017@sina.com> Xiaoxi He <xxhe@alauda.io> Xiaoxu Chen <chenxiaoxu14@otcaix.iscas.ac.cn> Xiaoyu Zhang <zhang.xiaoyu33@zte.com.cn> xichengliudui <1693291525@qq.com> xiekeyang <xiekeyang@huawei.com> Ximo Guanter Gonzรกlbez <joaquin.guantergonzalbez@telefonica.com> Xinbo Weng <xihuanbo_0521@zju.edu.cn> Xinfeng Liu <xinfeng.liu@gmail.com> Xinzi Zhou <imdreamrunner@gmail.com> Xiuming Chen <cc@cxm.cc> Xuecong Liao <satorulogic@gmail.com> xuzhaokui <cynicholas@gmail.com> Yadnyawalkya Tale <ytale@redhat.com> Yahya <ya7yaz@gmail.com> YAMADA Tsuyoshi <tyamada@minimum2scp.org> Yamasaki Masahide <masahide.y@gmail.com> Yan Feng <yanfeng2@huawei.com> Yang Bai <hamo.by@gmail.com> Yang Pengfei <yangpengfei4@huawei.com> yangchenliang <yangchenliang@huawei.com> Yanqiang Miao <miao.yanqiang@zte.com.cn> Yao Zaiyong <yaozaiyong@hotmail.com> Yash Murty 
<yashmurty@gmail.com> Yassine Tijani <yasstij11@gmail.com> Yasunori Mahata <nori@mahata.net> Yazhong Liu <yorkiefixer@gmail.com> Yestin Sun <sunyi0804@gmail.com> Yi EungJun <eungjun.yi@navercorp.com> Yibai Zhang <xm1994@gmail.com> Yihang Ho <hoyihang5@gmail.com> Ying Li <ying.li@docker.com> Yohei Ueda <yohei@jp.ibm.com> Yong Tang <yong.tang.github@outlook.com> Yongxin Li <yxli@alauda.io> Yongzhi Pan <panyongzhi@gmail.com> Yosef Fertel <yfertel@gmail.com> You-Sheng Yang (ๆฅŠๆœ‰ๅ‹) <vicamo@gmail.com> youcai <omegacoleman@gmail.com> Youcef YEKHLEF <yyekhlef@gmail.com> Yu Changchun <yuchangchun1@huawei.com> Yu Chengxia <yuchengxia@huawei.com> Yu Peng <yu.peng36@zte.com.cn> Yu-Ju Hong <yjhong@google.com> Yuan Sun <sunyuan3@huawei.com> Yuanhong Peng <pengyuanhong@huawei.com> Yue Zhang <zy675793960@yeah.net> Yuhao Fang <fangyuhao@gmail.com> Yuichiro Kaneko <spiketeika@gmail.com> Yunxiang Huang <hyxqshk@vip.qq.com> Yurii Rashkovskii <yrashk@gmail.com> Yusuf Tarฤฑk Gรผnaydฤฑn <yusuf_tarik@hotmail.com> Yves Junqueira <yves.junqueira@gmail.com> Zac Dover <zdover@redhat.com> Zach Borboa <zachborboa@gmail.com> Zachary Jaffee <zjaffee@us.ibm.com> Zain Memon <zain@inzain.net> Zaiste! 
<oh@zaiste.net> Zane DeGraffenried <zane.deg@gmail.com> Zefan Li <lizefan@huawei.com> Zen Lin(Zhinan Lin) <linzhinan@huawei.com> Zhang Kun <zkazure@gmail.com> Zhang Wei <zhangwei555@huawei.com> Zhang Wentao <zhangwentao234@huawei.com> ZhangHang <stevezhang2014@gmail.com> zhangxianwei <xianwei.zw@alibaba-inc.com> Zhenan Ye <21551168@zju.edu.cn> zhenghenghuo <zhenghenghuo@zju.edu.cn> Zhenhai Gao <gaozh1988@live.com> Zhenkun Bi <bi.zhenkun@zte.com.cn> Zhou Hao <zhouhao@cn.fujitsu.com> Zhoulin Xie <zhoulin.xie@daocloud.io> Zhu Guihua <zhugh.fnst@cn.fujitsu.com> Zhu Kunjia <zhu.kunjia@zte.com.cn> Zhuoyun Wei <wzyboy@wzyboy.org> Ziheng Liu <lzhfromustc@gmail.com> Zilin Du <zilin.du@gmail.com> zimbatm <zimbatm@zimbatm.com> Ziming Dong <bnudzm@foxmail.com> ZJUshuaizhou <21551191@zju.edu.cn> zmarouf <zeid.marouf@gmail.com> Zoltan Tombol <zoltan.tombol@gmail.com> Zou Yu <zouyu7@huawei.com> zqh <zqhxuyuan@gmail.com> Zuhayr Elahi <zuhayr.elahi@docker.com> Zunayed Ali <zunayed@gmail.com> รlex Gonzรกlez <agonzalezro@gmail.com> รlvaro Lรกzaro <alvaro.lazaro.g@gmail.com> รtila Camurรงa Alves <camurca.home@gmail.com> ๅฐนๅ‰ๅณฐ <jifeng.yin@gmail.com> ๅพไฟŠๆฐ <paco.xu@daocloud.io> ๆ…•้™ถ <jihui.xjh@alibaba-inc.com> ๆ้€š <yufeng.pyf@alibaba-inc.com> ้ป„่‰ณ็บข00139573 <huang.yanhong@zte.com.cn>
{ "pile_set_name": "Github" }
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// 	protoc-gen-go v1.25.0
// 	protoc        v3.13.0
// source: google/cloud/datacatalog/v1beta1/table_spec.proto

package datacatalog

import (
	reflect "reflect"
	sync "sync"

	proto "github.com/golang/protobuf/proto"
	// Blank import: pulled in for its side effects only (the raw descriptor
	// below references annotations from this package).
	_ "google.golang.org/genproto/googleapis/api/annotations"
	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
)

const (
	// Verify that this generated code is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
	// Verify that runtime/protoimpl is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)

// This is a compile-time assertion that a sufficiently up-to-date version
// of the legacy proto package is being used.
const _ = proto.ProtoPackageIsVersion4

// Table source type.
type TableSourceType int32

// Enum numbers mirror the source .proto exactly; only 0, 2 and 5 are
// defined here (the gaps are simply values the .proto does not declare).
const (
	// Default unknown type.
	TableSourceType_TABLE_SOURCE_TYPE_UNSPECIFIED TableSourceType = 0
	// Table view.
	TableSourceType_BIGQUERY_VIEW TableSourceType = 2
	// BigQuery native table.
	TableSourceType_BIGQUERY_TABLE TableSourceType = 5
)

// Enum value maps for TableSourceType.
// These two tables are inverses of each other; the protobuf runtime uses
// them for name<->number translation of TableSourceType values.
var (
	TableSourceType_name = map[int32]string{
		0: "TABLE_SOURCE_TYPE_UNSPECIFIED",
		2: "BIGQUERY_VIEW",
		5: "BIGQUERY_TABLE",
	}
	TableSourceType_value = map[string]int32{
		"TABLE_SOURCE_TYPE_UNSPECIFIED": 0,
		"BIGQUERY_VIEW":                 2,
		"BIGQUERY_TABLE":                5,
	}
)

// Enum returns a pointer to a fresh copy of x, convenient for populating
// pointer-typed (optional) fields.
func (x TableSourceType) Enum() *TableSourceType {
	p := new(TableSourceType)
	*p = x
	return p
}

// String formats x via the protobuf runtime's enum stringer.
func (x TableSourceType) String() string {
	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}

// Descriptor returns the protoreflect descriptor for this enum type.
func (TableSourceType) Descriptor() protoreflect.EnumDescriptor {
	return file_google_cloud_datacatalog_v1beta1_table_spec_proto_enumTypes[0].Descriptor()
}

// Type returns the protoreflect type for this enum.
func (TableSourceType) Type() protoreflect.EnumType {
	return &file_google_cloud_datacatalog_v1beta1_table_spec_proto_enumTypes[0]
}

// Number reports x as a protoreflect.EnumNumber.
func (x TableSourceType) Number() protoreflect.EnumNumber {
	return protoreflect.EnumNumber(x)
}

// Deprecated: Use TableSourceType.Descriptor instead.
func (TableSourceType) EnumDescriptor() ([]byte, []int) {
	return file_google_cloud_datacatalog_v1beta1_table_spec_proto_rawDescGZIP(), []int{0}
}

// Describes a BigQuery table.
type BigQueryTableSpec struct {
	// The first three fields are protobuf-runtime bookkeeping, not message data.
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	// Output only. The table source type.
	TableSourceType TableSourceType `protobuf:"varint,1,opt,name=table_source_type,json=tableSourceType,proto3,enum=google.cloud.datacatalog.v1beta1.TableSourceType" json:"table_source_type,omitempty"`
	// Output only.
	//
	// Types that are assignable to TypeSpec:
	//	*BigQueryTableSpec_ViewSpec
	//	*BigQueryTableSpec_TableSpec
	TypeSpec isBigQueryTableSpec_TypeSpec `protobuf_oneof:"type_spec"`
}

// Reset clears x to an empty message; on the unsafe fast path it also
// re-attaches the cached message info.
func (x *BigQueryTableSpec) Reset() {
	*x = BigQueryTableSpec{}
	if protoimpl.UnsafeEnabled {
		mi := &file_google_cloud_datacatalog_v1beta1_table_spec_proto_msgTypes[0]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *BigQueryTableSpec) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*BigQueryTableSpec) ProtoMessage() {}

// ProtoReflect exposes x to the protobuf reflection API; on the unsafe
// fast path it lazily caches the message info on first use.
func (x *BigQueryTableSpec) ProtoReflect() protoreflect.Message {
	mi := &file_google_cloud_datacatalog_v1beta1_table_spec_proto_msgTypes[0]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use BigQueryTableSpec.ProtoReflect.Descriptor instead.
func (*BigQueryTableSpec) Descriptor() ([]byte, []int) {
	return file_google_cloud_datacatalog_v1beta1_table_spec_proto_rawDescGZIP(), []int{0}
}

// GetTableSourceType is nil-receiver safe and returns the zero enum value
// when the field is unset.
func (x *BigQueryTableSpec) GetTableSourceType() TableSourceType {
	if x != nil {
		return x.TableSourceType
	}
	return TableSourceType_TABLE_SOURCE_TYPE_UNSPECIFIED
}

// GetTypeSpec returns the raw oneof wrapper currently held (or nil).
func (m *BigQueryTableSpec) GetTypeSpec() isBigQueryTableSpec_TypeSpec {
	if m != nil {
		return m.TypeSpec
	}
	return nil
}

// GetViewSpec returns the view spec, or nil unless the type_spec oneof
// currently holds a *BigQueryTableSpec_ViewSpec.
func (x *BigQueryTableSpec) GetViewSpec() *ViewSpec {
	if x, ok := x.GetTypeSpec().(*BigQueryTableSpec_ViewSpec); ok {
		return x.ViewSpec
	}
	return nil
}

// GetTableSpec returns the table spec, or nil unless the type_spec oneof
// currently holds a *BigQueryTableSpec_TableSpec.
func (x *BigQueryTableSpec) GetTableSpec() *TableSpec {
	if x, ok := x.GetTypeSpec().(*BigQueryTableSpec_TableSpec); ok {
		return x.TableSpec
	}
	return nil
}

// isBigQueryTableSpec_TypeSpec is the sealed interface implemented by the
// generated oneof wrapper types below.
type isBigQueryTableSpec_TypeSpec interface {
	isBigQueryTableSpec_TypeSpec()
}

type BigQueryTableSpec_ViewSpec struct {
	// Table view specification. This field should only be populated if
	// `table_source_type` is `BIGQUERY_VIEW`.
	ViewSpec *ViewSpec `protobuf:"bytes,2,opt,name=view_spec,json=viewSpec,proto3,oneof"`
}

type BigQueryTableSpec_TableSpec struct {
	// Spec of a BigQuery table. This field should only be populated if
	// `table_source_type` is `BIGQUERY_TABLE`.
	TableSpec *TableSpec `protobuf:"bytes,3,opt,name=table_spec,json=tableSpec,proto3,oneof"`
}

func (*BigQueryTableSpec_ViewSpec) isBigQueryTableSpec_TypeSpec() {}

func (*BigQueryTableSpec_TableSpec) isBigQueryTableSpec_TypeSpec() {}

// Table view specification.
type ViewSpec struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	// Output only. The query that defines the table view.
	ViewQuery string `protobuf:"bytes,1,opt,name=view_query,json=viewQuery,proto3" json:"view_query,omitempty"`
}

func (x *ViewSpec) Reset() {
	*x = ViewSpec{}
	if protoimpl.UnsafeEnabled {
		mi := &file_google_cloud_datacatalog_v1beta1_table_spec_proto_msgTypes[1]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *ViewSpec) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*ViewSpec) ProtoMessage() {}

func (x *ViewSpec) ProtoReflect() protoreflect.Message {
	mi := &file_google_cloud_datacatalog_v1beta1_table_spec_proto_msgTypes[1]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ViewSpec.ProtoReflect.Descriptor instead.
func (*ViewSpec) Descriptor() ([]byte, []int) {
	return file_google_cloud_datacatalog_v1beta1_table_spec_proto_rawDescGZIP(), []int{1}
}

// GetViewQuery is nil-receiver safe and returns "" when unset.
func (x *ViewSpec) GetViewQuery() string {
	if x != nil {
		return x.ViewQuery
	}
	return ""
}

// Normal BigQuery table spec.
type TableSpec struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	// Output only. If the table is a dated shard, i.e., with name pattern `[prefix]YYYYMMDD`,
	// `grouped_entry` is the Data Catalog resource name of the date sharded
	// grouped entry, for example,
	// `projects/{project_id}/locations/{location}/entrygroups/{entry_group_id}/entries/{entry_id}`.
	// Otherwise, `grouped_entry` is empty.
	GroupedEntry string `protobuf:"bytes,1,opt,name=grouped_entry,json=groupedEntry,proto3" json:"grouped_entry,omitempty"`
}

func (x *TableSpec) Reset() {
	*x = TableSpec{}
	if protoimpl.UnsafeEnabled {
		mi := &file_google_cloud_datacatalog_v1beta1_table_spec_proto_msgTypes[2]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *TableSpec) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*TableSpec) ProtoMessage() {}

func (x *TableSpec) ProtoReflect() protoreflect.Message {
	mi := &file_google_cloud_datacatalog_v1beta1_table_spec_proto_msgTypes[2]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use TableSpec.ProtoReflect.Descriptor instead.
func (*TableSpec) Descriptor() ([]byte, []int) {
	return file_google_cloud_datacatalog_v1beta1_table_spec_proto_rawDescGZIP(), []int{2}
}

// GetGroupedEntry is nil-receiver safe and returns "" when unset.
func (x *TableSpec) GetGroupedEntry() string {
	if x != nil {
		return x.GroupedEntry
	}
	return ""
}

// Spec for a group of BigQuery tables with name pattern `[prefix]YYYYMMDD`.
// Context:
// https://cloud.google.com/bigquery/docs/partitioned-tables#partitioning_versus_sharding
type BigQueryDateShardedSpec struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	// Output only. The Data Catalog resource name of the dataset entry the current table
	// belongs to, for example,
	// `projects/{project_id}/locations/{location}/entrygroups/{entry_group_id}/entries/{entry_id}`.
	Dataset string `protobuf:"bytes,1,opt,name=dataset,proto3" json:"dataset,omitempty"`
	// Output only. The table name prefix of the shards. The name of any given shard is
	// `[table_prefix]YYYYMMDD`, for example, for shard `MyTable20180101`, the
	// `table_prefix` is `MyTable`.
	TablePrefix string `protobuf:"bytes,2,opt,name=table_prefix,json=tablePrefix,proto3" json:"table_prefix,omitempty"`
	// Output only. Total number of shards.
	ShardCount int64 `protobuf:"varint,3,opt,name=shard_count,json=shardCount,proto3" json:"shard_count,omitempty"`
}

func (x *BigQueryDateShardedSpec) Reset() {
	*x = BigQueryDateShardedSpec{}
	if protoimpl.UnsafeEnabled {
		mi := &file_google_cloud_datacatalog_v1beta1_table_spec_proto_msgTypes[3]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *BigQueryDateShardedSpec) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*BigQueryDateShardedSpec) ProtoMessage() {}

func (x *BigQueryDateShardedSpec) ProtoReflect() protoreflect.Message {
	mi := &file_google_cloud_datacatalog_v1beta1_table_spec_proto_msgTypes[3]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use BigQueryDateShardedSpec.ProtoReflect.Descriptor instead.
func (*BigQueryDateShardedSpec) Descriptor() ([]byte, []int) { return file_google_cloud_datacatalog_v1beta1_table_spec_proto_rawDescGZIP(), []int{3} } func (x *BigQueryDateShardedSpec) GetDataset() string { if x != nil { return x.Dataset } return "" } func (x *BigQueryDateShardedSpec) GetTablePrefix() string { if x != nil { return x.TablePrefix } return "" } func (x *BigQueryDateShardedSpec) GetShardCount() int64 { if x != nil { return x.ShardCount } return 0 } var File_google_cloud_datacatalog_v1beta1_table_spec_proto protoreflect.FileDescriptor var file_google_cloud_datacatalog_v1beta1_table_spec_proto_rawDesc = []byte{ 0x0a, 0x31, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x2f, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x9d, 0x02, 0x0a, 0x11, 0x42, 0x69, 0x67, 0x51, 0x75, 0x65, 0x72, 0x79, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x70, 0x65, 0x63, 0x12, 0x62, 0x0a, 0x11, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x31, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 
0x31, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x49, 0x0a, 0x09, 0x76, 0x69, 0x65, 0x77, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x56, 0x69, 0x65, 0x77, 0x53, 0x70, 0x65, 0x63, 0x48, 0x00, 0x52, 0x08, 0x76, 0x69, 0x65, 0x77, 0x53, 0x70, 0x65, 0x63, 0x12, 0x4c, 0x0a, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x70, 0x65, 0x63, 0x48, 0x00, 0x52, 0x09, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x70, 0x65, 0x63, 0x42, 0x0b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x22, 0x2e, 0x0a, 0x08, 0x56, 0x69, 0x65, 0x77, 0x53, 0x70, 0x65, 0x63, 0x12, 0x22, 0x0a, 0x0a, 0x76, 0x69, 0x65, 0x77, 0x5f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x09, 0x76, 0x69, 0x65, 0x77, 0x51, 0x75, 0x65, 0x72, 0x79, 0x22, 0x5a, 0x0a, 0x09, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x70, 0x65, 0x63, 0x12, 0x4d, 0x0a, 0x0d, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x65, 0x64, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x28, 0xe0, 0x41, 0x03, 0xfa, 0x41, 0x22, 0x0a, 0x20, 0x64, 0x61, 0x74, 0x61, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0c, 
0x67, 0x72, 0x6f, 0x75, 0x70, 0x65, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x22, 0xab, 0x01, 0x0a, 0x17, 0x42, 0x69, 0x67, 0x51, 0x75, 0x65, 0x72, 0x79, 0x44, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x65, 0x64, 0x53, 0x70, 0x65, 0x63, 0x12, 0x42, 0x0a, 0x07, 0x64, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x28, 0xe0, 0x41, 0x03, 0xfa, 0x41, 0x22, 0x0a, 0x20, 0x64, 0x61, 0x74, 0x61, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x64, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x12, 0x26, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x24, 0x0a, 0x0b, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x73, 0x68, 0x61, 0x72, 0x64, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x2a, 0x5b, 0x0a, 0x0f, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x21, 0x0a, 0x1d, 0x54, 0x41, 0x42, 0x4c, 0x45, 0x5f, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x11, 0x0a, 0x0d, 0x42, 0x49, 0x47, 0x51, 0x55, 0x45, 0x52, 0x59, 0x5f, 0x56, 0x49, 0x45, 0x57, 0x10, 0x02, 0x12, 0x12, 0x0a, 0x0e, 0x42, 0x49, 0x47, 0x51, 0x55, 0x45, 0x52, 0x59, 0x5f, 0x54, 0x41, 0x42, 0x4c, 0x45, 0x10, 0x05, 0x42, 0xe4, 0x01, 0x0a, 0x24, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x50, 0x01, 0x5a, 0x4b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 
0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x2f, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x3b, 0x64, 0x61, 0x74, 0x61, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0xf8, 0x01, 0x01, 0xaa, 0x02, 0x20, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x43, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x2e, 0x56, 0x31, 0x42, 0x65, 0x74, 0x61, 0x31, 0xca, 0x02, 0x20, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x44, 0x61, 0x74, 0x61, 0x43, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x5c, 0x56, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0xea, 0x02, 0x23, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x44, 0x61, 0x74, 0x61, 0x43, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( file_google_cloud_datacatalog_v1beta1_table_spec_proto_rawDescOnce sync.Once file_google_cloud_datacatalog_v1beta1_table_spec_proto_rawDescData = file_google_cloud_datacatalog_v1beta1_table_spec_proto_rawDesc ) func file_google_cloud_datacatalog_v1beta1_table_spec_proto_rawDescGZIP() []byte { file_google_cloud_datacatalog_v1beta1_table_spec_proto_rawDescOnce.Do(func() { file_google_cloud_datacatalog_v1beta1_table_spec_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_cloud_datacatalog_v1beta1_table_spec_proto_rawDescData) }) return file_google_cloud_datacatalog_v1beta1_table_spec_proto_rawDescData } var file_google_cloud_datacatalog_v1beta1_table_spec_proto_enumTypes = make([]protoimpl.EnumInfo, 1) var file_google_cloud_datacatalog_v1beta1_table_spec_proto_msgTypes = make([]protoimpl.MessageInfo, 4) var 
file_google_cloud_datacatalog_v1beta1_table_spec_proto_goTypes = []interface{}{ (TableSourceType)(0), // 0: google.cloud.datacatalog.v1beta1.TableSourceType (*BigQueryTableSpec)(nil), // 1: google.cloud.datacatalog.v1beta1.BigQueryTableSpec (*ViewSpec)(nil), // 2: google.cloud.datacatalog.v1beta1.ViewSpec (*TableSpec)(nil), // 3: google.cloud.datacatalog.v1beta1.TableSpec (*BigQueryDateShardedSpec)(nil), // 4: google.cloud.datacatalog.v1beta1.BigQueryDateShardedSpec } var file_google_cloud_datacatalog_v1beta1_table_spec_proto_depIdxs = []int32{ 0, // 0: google.cloud.datacatalog.v1beta1.BigQueryTableSpec.table_source_type:type_name -> google.cloud.datacatalog.v1beta1.TableSourceType 2, // 1: google.cloud.datacatalog.v1beta1.BigQueryTableSpec.view_spec:type_name -> google.cloud.datacatalog.v1beta1.ViewSpec 3, // 2: google.cloud.datacatalog.v1beta1.BigQueryTableSpec.table_spec:type_name -> google.cloud.datacatalog.v1beta1.TableSpec 3, // [3:3] is the sub-list for method output_type 3, // [3:3] is the sub-list for method input_type 3, // [3:3] is the sub-list for extension type_name 3, // [3:3] is the sub-list for extension extendee 0, // [0:3] is the sub-list for field type_name } func init() { file_google_cloud_datacatalog_v1beta1_table_spec_proto_init() } func file_google_cloud_datacatalog_v1beta1_table_spec_proto_init() { if File_google_cloud_datacatalog_v1beta1_table_spec_proto != nil { return } if !protoimpl.UnsafeEnabled { file_google_cloud_datacatalog_v1beta1_table_spec_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*BigQueryTableSpec); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_google_cloud_datacatalog_v1beta1_table_spec_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ViewSpec); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } 
file_google_cloud_datacatalog_v1beta1_table_spec_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*TableSpec); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_google_cloud_datacatalog_v1beta1_table_spec_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*BigQueryDateShardedSpec); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } } file_google_cloud_datacatalog_v1beta1_table_spec_proto_msgTypes[0].OneofWrappers = []interface{}{ (*BigQueryTableSpec_ViewSpec)(nil), (*BigQueryTableSpec_TableSpec)(nil), } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_google_cloud_datacatalog_v1beta1_table_spec_proto_rawDesc, NumEnums: 1, NumMessages: 4, NumExtensions: 0, NumServices: 0, }, GoTypes: file_google_cloud_datacatalog_v1beta1_table_spec_proto_goTypes, DependencyIndexes: file_google_cloud_datacatalog_v1beta1_table_spec_proto_depIdxs, EnumInfos: file_google_cloud_datacatalog_v1beta1_table_spec_proto_enumTypes, MessageInfos: file_google_cloud_datacatalog_v1beta1_table_spec_proto_msgTypes, }.Build() File_google_cloud_datacatalog_v1beta1_table_spec_proto = out.File file_google_cloud_datacatalog_v1beta1_table_spec_proto_rawDesc = nil file_google_cloud_datacatalog_v1beta1_table_spec_proto_goTypes = nil file_google_cloud_datacatalog_v1beta1_table_spec_proto_depIdxs = nil }
{ "pile_set_name": "Github" }
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !gccgo

#include "textflag.h"

//
// System call support for ARM, NetBSD
//

// Just jump to package syscall's implementation for all these functions.
// The runtime may know about them.

// FIX: the Go symbol-name separator U+00B7 "·" had been mangled by an
// encoding round-trip ("ยท" is its UTF-8 bytes mis-decoded as cp874).
// Restored to "·" throughout so the file assembles again; no instruction
// changes.

TEXT ·Syscall(SB),NOSPLIT,$0-28
	B	syscall·Syscall(SB)

TEXT ·Syscall6(SB),NOSPLIT,$0-40
	B	syscall·Syscall6(SB)

TEXT ·Syscall9(SB),NOSPLIT,$0-52
	B	syscall·Syscall9(SB)

TEXT ·RawSyscall(SB),NOSPLIT,$0-28
	B	syscall·RawSyscall(SB)

TEXT ·RawSyscall6(SB),NOSPLIT,$0-40
	B	syscall·RawSyscall6(SB)
{ "pile_set_name": "Github" }
# Homebrew formula for Jenkins Job Builder: installs the PyPI package and
# vendors all of its Python dependencies under libexec so nothing leaks
# into the system site-packages.
class JenkinsJobBuilder < Formula
  desc "Configure Jenkins jobs with YAML files stored in Git"
  homepage "http://ci.openstack.org/jjb.html"
  url "https://pypi.python.org/packages/source/j/jenkins-job-builder/jenkins-job-builder-1.4.0.tar.gz"
  sha256 "0b3bfdb53a2771d510142f72a8a2d60eac03fcfce04ccff2e6d079df06bef183"

  # Pre-built binary bottles; one checksum per macOS release.
  bottle do
    cellar :any_skip_relocation
    sha256 "7e0499eac469059136549b7023d138d905251f3c9ea7aa2b6433b20a4c8428b7" => :el_capitan
    sha256 "d539a2a47a6fff54dfe6ebd5bb71a3baa98565440e6193478267b2e9073ec2a3" => :yosemite
    sha256 "fb4e9595407e97956ec5630477464a7a3909e1c5f161164b3ecd762642bf1c2d" => :mavericks
  end

  # The stock Python on Snow Leopard and earlier is too old for this package.
  depends_on :python if MacOS.version <= :snow_leopard

  # --- Vendored Python dependencies (staged into libexec/"vendor") ---

  resource "pyyaml" do
    url "https://pypi.python.org/packages/source/P/PyYAML/PyYAML-3.11.tar.gz"
    sha256 "c36c938a872e5ff494938b33b14aaa156cb439ec67548fcab3535bb78b0846e8"
  end

  resource "ordereddict" do
    url "https://pypi.python.org/packages/source/o/ordereddict/ordereddict-1.1.tar.gz"
    sha256 "1c35b4ac206cef2d24816c89f89cf289dd3d38cf7c449bb3fab7bf6d43f01b1f"
  end

  resource "python-jenkins" do
    url "https://pypi.python.org/packages/source/p/python-jenkins/python-jenkins-0.4.12.tar.gz"
    sha256 "673868980f4b2312447843a86b61e18777a16a1adf5eb9cdfd56cbbfa3e50ee4"
  end

  resource "pbr" do
    url "https://pypi.python.org/packages/source/p/pbr/pbr-0.11.1.tar.gz"
    sha256 "701ab2922c29ca6004e3a4aab968728f33224968de9b51e432be2ee3340c2309"
  end

  resource "six" do
    url "https://pypi.python.org/packages/source/s/six/six-1.10.0.tar.gz"
    sha256 "105f8d68616f8248e24bf0e9372ef04d3cc10104f1980f54d57b2ce73a5ad56a"
  end

  resource "pip" do
    url "https://pypi.python.org/packages/source/p/pip/pip-7.1.2.tar.gz"
    sha256 "ca047986f0528cfa975a14fb9f7f106271d4e0c3fe1ddced6c1db2e7ae57a477"
  end

  resource "multi_key_dict" do
    url "https://pypi.python.org/packages/source/m/multi_key_dict/multi_key_dict-2.0.3.tar.gz"
    sha256 "deebdec17aa30a1c432cb3f437e81f8621e1c0542a0c0617a74f71e232e9939e"
  end

  def install
    # Stage every vendored dependency into libexec/"vendor" and make that
    # path importable while the main package builds.
    ENV.prepend_create_path "PYTHONPATH", libexec/"vendor/lib/python2.7/site-packages"
    %w[pyyaml ordereddict python-jenkins pbr six pip multi_key_dict].each do |r|
      resource(r).stage do
        system "python", *Language::Python.setup_install_args(libexec/"vendor")
      end
    end

    # Install jenkins-job-builder itself into libexec.
    ENV.prepend_create_path "PYTHONPATH", libexec/"lib/python2.7/site-packages"
    system "python", *Language::Python.setup_install_args(libexec)

    bin.install Dir[libexec/"bin/*"]
    # Rewrite the installed scripts so they carry the vendored PYTHONPATH
    # at runtime.
    bin.env_script_all_files(libexec/"bin", :PYTHONPATH => ENV["PYTHONPATH"])
  end

  test do
    # Feed a minimal job definition on stdin and check the generated XML
    # contains the tool's marker comment.
    assert_match(/Managed by Jenkins Job Builder/,
                 pipe_output("#{bin}/jenkins-jobs test /dev/stdin",
                             "- job:\n name: test-job\n\n", 0))
  end
end
{ "pile_set_name": "Github" }
<?xml version="1.0" encoding="utf-8"?>
<!-- Shape drawable: a 12dp x 12dp solid white circle
     (presumably a dot/indicator — confirm against usages). -->
<shape xmlns:android="http://schemas.android.com/apk/res/android"
    android:shape="oval">

    <!-- Intrinsic size: 12dp in both dimensions. -->
    <size
        android:width="12dp"
        android:height="12dp" />

    <!-- Fill: opaque white. -->
    <solid android:color="#FFF" />
</shape>
{ "pile_set_name": "Github" }
'use strict';

// Stub module: exports an empty list (no entries configured here).
// NOTE(review): this is a single shared, mutable array instance across every
// require() of this module — callers presumably treat it as read-only; confirm.
module.exports = [];
{ "pile_set_name": "Github" }
๏ปฟ/* Copyright (c) 2003-2015, CKSource - Frederico Knabben. All rights reserved. For licensing, see LICENSE.md or http://ckeditor.com/license */ CKEDITOR.plugins.setLang( 'link', 'km', { acccessKey: 'แžŸแŸ„แžšโ€‹แž…แžผแž›', advanced: 'แž€แž˜แŸ’แžšแžทแžโ€‹แžแŸ’แž–แžŸแŸ‹', advisoryContentType: 'แž”แŸ’แžšแž—แŸแž‘แžขแžแŸ’แžแž”แž‘โ€‹แž”แŸ’แžšแžนแž€แŸ’แžŸแžถ', advisoryTitle: 'แž…แŸ†แžŽแž„แž‡แžพแž„โ€‹แž”แŸ’แžšแžนแž€แŸ’แžŸแžถ', anchor: { toolbar: 'แž™แžปแžแŸ’แž€แžถ', menu: 'แž€แŸ‚โ€‹แž™แžปแžแŸ’แž€แžถ', title: 'แž›แž€แŸ’แžแžŽแŸˆโ€‹แž™แžปแžแŸ’แž€แžถ', name: 'แžˆแŸ’แž˜แŸ„แŸ‡โ€‹แž™แžปแžแŸ’แž€แžถ', errorName: 'แžŸแžผแž˜โ€‹แž”แž‰แŸ’แž…แžผแž›โ€‹แžˆแŸ’แž˜แŸ„แŸ‡โ€‹แž™แžปแžแŸ’แž€แžถ', remove: 'แžŠแž€โ€‹แž™แžปแžแŸ’แž€แžถโ€‹แž…แŸแž‰' }, anchorId: 'แžแžถแž˜ ID แž’แžถแžแžป', anchorName: 'แžแžถแž˜โ€‹แžˆแŸ’แž˜แŸ„แŸ‡โ€‹แž™แžปแžแŸ’แž€แžถ', charset: 'แž›แŸแžแž€แžผแžแžขแž€แŸ’แžŸแžšแžšแž”แžŸแŸ‹แžˆแŸ’แž“แžถแž”แŸ‹', cssClasses: 'Stylesheet Classes', emailAddress: 'แžขแžถแžŸแž™แžŠแŸ’แž‹แžถแž“โ€‹แžขแŸŠแžธแž˜แŸ‚แž›', emailBody: 'แžแžฝโ€‹แžขแžแŸ’แžแž”แž‘', emailSubject: 'แž”แŸ’แžšแž’แžถแž“แž”แž‘โ€‹แžŸแžถแžš', id: 'Id', info: 'แž–แŸแžแŸŒแž˜แžถแž“โ€‹แž–แžธโ€‹แžแŸ†แžŽ', langCode: 'แž€แžผแžŠโ€‹แž—แžถแžŸแžถ', langDir: 'แž‘แžทแžŸแžŠแŸ…แž—แžถแžŸแžถ', langDirLTR: 'แž–แžธแž†แŸ’แžœแŸแž„แž‘แŸ…แžŸแŸ’แžแžถแŸ†(LTR)', langDirRTL: 'แž–แžธแžŸแŸ’แžแžถแŸ†แž‘แŸ…แž†แŸ’แžœแŸแž„(RTL)', menu: 'แž€แŸ‚โ€‹แžแŸ†แžŽ', name: 'แžˆแŸ’แž˜แŸ„แŸ‡', noAnchors: '(แž˜แžทแž“โ€‹แž˜แžถแž“โ€‹แž™แžปแžแŸ’แž€แžถโ€‹แž“แŸ…โ€‹แž€แŸ’แž“แžปแž„โ€‹แžฏแž€แžŸแžถแžšโ€‹แžขแžแŸ’แžแžแž”แž‘โ€‹แž‘แŸ)', noEmail: 'แžŸแžผแž˜โ€‹แž”แž‰แŸ’แž…แžผแž›โ€‹แžขแžถแžŸแž™แžŠแŸ’แž‹แžถแž“โ€‹แžขแŸŠแžธแž˜แŸ‚แž›', noUrl: 'แžŸแžผแž˜โ€‹แž”แž‰แŸ’แž…แžผแž›โ€‹แžแŸ†แžŽ URL', other: '<แž•แŸ’แžŸแŸแž„โ€‹แž‘แŸ€แž>', popupDependent: 'Dependent (Netscape)', popupFeatures: 'แž˜แžปแžโ€‹แž„แžถแžšโ€‹แž•แžปแžŸโ€‹แž•แŸ’แž‘แžถแŸ†แž„โ€‹แžœแžธแž“แžŠแžผโ€‹แžกแžพแž„', popupFullScreen: 'แž–แŸแž‰โ€‹แžขแŸแž€แŸ’แžšแž„แŸ‹ (IE)', popupLeft: 
'แž‘แžธแžแžถแŸ†แž„แžแžถแž„แž†แŸ’แžœแŸแž„', popupLocationBar: 'แžšแž”แžถแžšโ€‹แž‘แžธแžแžถแŸ†แž„', popupMenuBar: 'แžšแž”แžถแžšโ€‹แž˜แŸ‰แžบแž“แžปแž™', popupResizable: 'แžขแžถแž…โ€‹แž”แŸ’แžŠแžผแžšโ€‹แž‘แŸ†แž แŸ†', popupScrollBars: 'แžšแž”แžถแžšโ€‹แžšแŸ†แž€แžทแž›', popupStatusBar: 'แžšแž”แžถแžšโ€‹แžŸแŸ’แžแžถแž“แž—แžถแž–', popupToolbar: 'แžšแž”แžถแžšโ€‹แžงแž”แž€แžšแžŽแŸ', popupTop: 'แž‘แžธแžแžถแŸ†แž„โ€‹แž€แŸ†แž–แžผแž›', rel: 'แžŸแž˜แŸ’แž–แž“แŸ’แž’โ€‹แž—แžถแž–', selectAnchor: 'แžšแžพแžŸโ€‹แž™แž€โ€‹แž™แžปแžแŸ’แž€แžถโ€‹แž˜แžฝแž™', styles: 'แžŸแŸ’แž‘แžธแž›', tabIndex: 'แž›แŸแž Tab', target: 'แž‚แŸ„แž›แžŠแŸ…', targetFrame: '<แžŸแŸŠแžปแž˜>', targetFrameName: 'แžˆแŸ’แž˜แŸ„แŸ‡โ€‹แžŸแŸŠแžปแž˜โ€‹แž‡แžถโ€‹แž‚แŸ„แž›โ€‹แžŠแŸ…', targetPopup: '<แžœแžธแž“แžŠแžผโ€‹แž•แžปแžŸโ€‹แžกแžพแž„>', targetPopupName: 'แžˆแŸ’แž˜แŸ„แŸ‡โ€‹แžœแžธแž“แžŠแžผแžโ€‹แž•แžปแžŸโ€‹แžกแžพแž„', title: 'แžแŸ†แžŽ', toAnchor: 'แžโ€‹แž—แŸ’แž‡แžถแž”แŸ‹โ€‹แž‘แŸ…โ€‹แž™แžปแžแŸ’แž€แžถโ€‹แž€แŸ’แž“แžปแž„โ€‹แžขแžแŸ’แžแž”แž‘', toEmail: 'แžขแŸŠแžธแž˜แŸ‚แž›', toUrl: 'URL', toolbar: 'แžแŸ†แžŽ', type: 'แž”แŸ’แžšแž—แŸแž‘โ€‹แžแŸ†แžŽ', unlink: 'แž•แŸ’แžŠแžถแž…แŸ‹โ€‹แžแŸ†แžŽ', upload: 'แž•แŸ’แž‘แžปแž€โ€‹แžกแžพแž„' } );
{ "pile_set_name": "Github" }
//===-- LiveIntervalAnalysis.h - Live Interval Analysis ---------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the LiveInterval analysis pass. Given some numbering of
// each the machine instructions (in this implemention depth-first order) an
// interval [i, j) is said to be a live interval for register v if there is no
// instruction with number j' > j such that v is live at j' and there is no
// instruction with number i' < i such that v is live at i'. In this
// implementation intervals can have holes, i.e. an interval might look like
// [1,20), [50,65), [1000,1001).
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_LIVEINTERVAL_ANALYSIS_H
#define LLVM_CODEGEN_LIVEINTERVAL_ANALYSIS_H

#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/LiveInterval.h"
#include "llvm/CodeGen/SlotIndexes.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/IndexedMap.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Allocator.h"
#include <cmath>
#include <iterator>

namespace llvm {

  class AliasAnalysis;
  class LiveRangeCalc;
  class LiveVariables;
  class MachineDominatorTree;
  class MachineLoopInfo;
  class TargetRegisterInfo;
  class MachineRegisterInfo;
  class TargetInstrInfo;
  class TargetRegisterClass;
  class VirtRegMap;

  class LiveIntervals : public MachineFunctionPass {
    // Cached pointers to the function under analysis and the analyses /
    // target hooks this pass consumes.
    MachineFunction* MF;
    MachineRegisterInfo* MRI;
    const TargetMachine* TM;
    const TargetRegisterInfo* TRI;
    const TargetInstrInfo* TII;
    AliasAnalysis *AA;
    LiveVariables* LV;
    SlotIndexes* Indexes;
    MachineDominatorTree *DomTree;
    LiveRangeCalc *LRCalc;

    /// Special pool allocator for VNInfo's (LiveInterval val#).
    ///
    VNInfo::Allocator VNInfoAllocator;

    /// Live interval pointers for all the virtual registers.
    IndexedMap<LiveInterval*, VirtReg2IndexFunctor> VirtRegIntervals;

    /// AllocatableRegs - A bit vector of allocatable registers.
    BitVector AllocatableRegs;

    /// ReservedRegs - A bit vector of reserved registers.
    BitVector ReservedRegs;

    /// RegMaskSlots - Sorted list of instructions with register mask operands.
    /// Always use the 'r' slot, RegMasks are normal clobbers, not early
    /// clobbers.
    SmallVector<SlotIndex, 8> RegMaskSlots;

    /// RegMaskBits - This vector is parallel to RegMaskSlots, it holds a
    /// pointer to the corresponding register mask. This pointer can be
    /// recomputed as:
    ///
    ///   MI = Indexes->getInstructionFromIndex(RegMaskSlot[N]);
    ///   unsigned OpNum = findRegMaskOperand(MI);
    ///   RegMaskBits[N] = MI->getOperand(OpNum).getRegMask();
    ///
    /// This is kept in a separate vector partly because some standard
    /// libraries don't support lower_bound() with mixed objects, partly to
    /// improve locality when searching in RegMaskSlots.
    /// Also see the comment in LiveInterval::find().
    SmallVector<const uint32_t*, 8> RegMaskBits;

    /// For each basic block number, keep (begin, size) pairs indexing into the
    /// RegMaskSlots and RegMaskBits arrays.
    /// Note that basic block numbers may not be layout contiguous, that's why
    /// we can't just keep track of the first register mask in each basic
    /// block.
    SmallVector<std::pair<unsigned, unsigned>, 8> RegMaskBlocks;

    /// RegUnitIntervals - Keep a live interval for each register unit as a way
    /// of tracking fixed physreg interference.
    SmallVector<LiveInterval*, 0> RegUnitIntervals;

  public:
    static char ID; // Pass identification, replacement for typeid
    LiveIntervals();
    virtual ~LiveIntervals();

    // Calculate the spill weight to assign to a single instruction.
    static float getSpillWeight(bool isDef, bool isUse, unsigned loopDepth);

    /// getInterval - Return the live interval for Reg; the interval must
    /// already exist (asserts otherwise).
    LiveInterval &getInterval(unsigned Reg) {
      LiveInterval *LI = VirtRegIntervals[Reg];
      assert(LI && "Interval does not exist for virtual register");
      return *LI;
    }

    const LiveInterval &getInterval(unsigned Reg) const {
      return const_cast<LiveIntervals*>(this)->getInterval(Reg);
    }

    /// hasInterval - Return true if an interval has been computed for Reg.
    bool hasInterval(unsigned Reg) const {
      return VirtRegIntervals.inBounds(Reg) && VirtRegIntervals[Reg];
    }

    /// isAllocatable - is the physical register reg allocatable in the current
    /// function?
    bool isAllocatable(unsigned reg) const {
      return AllocatableRegs.test(reg);
    }

    /// isReserved - is the physical register reg reserved in the current
    /// function
    bool isReserved(unsigned reg) const {
      return ReservedRegs.test(reg);
    }

    // Interval creation.
    LiveInterval &getOrCreateInterval(unsigned Reg) {
      if (!hasInterval(Reg)) {
        VirtRegIntervals.grow(Reg);
        VirtRegIntervals[Reg] = createInterval(Reg);
      }
      return getInterval(Reg);
    }

    // Interval removal.
    void removeInterval(unsigned Reg) {
      delete VirtRegIntervals[Reg];
      VirtRegIntervals[Reg] = 0;
    }

    /// addLiveRangeToEndOfBlock - Given a register and an instruction,
    /// adds a live range from that instruction to the end of its MBB.
    LiveRange addLiveRangeToEndOfBlock(unsigned reg,
                                       MachineInstr* startInst);

    /// shrinkToUses - After removing some uses of a register, shrink its live
    /// range to just the remaining uses. This method does not compute reaching
    /// defs for new uses, and it doesn't remove dead defs.
    /// Dead PHIDef values are marked as unused.
    /// New dead machine instructions are added to the dead vector.
    /// Return true if the interval may have been separated into multiple
    /// connected components.
    bool shrinkToUses(LiveInterval *li,
                      SmallVectorImpl<MachineInstr*> *dead = 0);

    SlotIndexes *getSlotIndexes() const {
      return Indexes;
    }

    AliasAnalysis *getAliasAnalysis() const {
      return AA;
    }

    /// isNotInMIMap - returns true if the specified machine instr has been
    /// removed or was never entered in the map.
    bool isNotInMIMap(const MachineInstr* Instr) const {
      return !Indexes->hasIndex(Instr);
    }

    /// Returns the base index of the given instruction.
    SlotIndex getInstructionIndex(const MachineInstr *instr) const {
      return Indexes->getInstructionIndex(instr);
    }

    /// Returns the instruction associated with the given index.
    MachineInstr* getInstructionFromIndex(SlotIndex index) const {
      return Indexes->getInstructionFromIndex(index);
    }

    /// Return the first index in the given basic block.
    SlotIndex getMBBStartIdx(const MachineBasicBlock *mbb) const {
      return Indexes->getMBBStartIdx(mbb);
    }

    /// Return the last index in the given basic block.
    SlotIndex getMBBEndIdx(const MachineBasicBlock *mbb) const {
      return Indexes->getMBBEndIdx(mbb);
    }

    bool isLiveInToMBB(const LiveInterval &li,
                       const MachineBasicBlock *mbb) const {
      return li.liveAt(getMBBStartIdx(mbb));
    }

    bool isLiveOutOfMBB(const LiveInterval &li,
                        const MachineBasicBlock *mbb) const {
      return li.liveAt(getMBBEndIdx(mbb).getPrevSlot());
    }

    MachineBasicBlock* getMBBFromIndex(SlotIndex index) const {
      return Indexes->getMBBFromIndex(index);
    }

    // Thin forwarding wrappers around the SlotIndexes instruction <-> index
    // maps; see SlotIndexes for the underlying semantics.
    SlotIndex InsertMachineInstrInMaps(MachineInstr *MI) {
      return Indexes->insertMachineInstrInMaps(MI);
    }

    void RemoveMachineInstrFromMaps(MachineInstr *MI) {
      Indexes->removeMachineInstrFromMaps(MI);
    }

    void ReplaceMachineInstrInMaps(MachineInstr *MI, MachineInstr *NewMI) {
      Indexes->replaceMachineInstrInMaps(MI, NewMI);
    }

    bool findLiveInMBBs(SlotIndex Start, SlotIndex End,
                        SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
      return Indexes->findLiveInMBBs(Start, End, MBBs);
    }

    VNInfo::Allocator& getVNInfoAllocator() { return VNInfoAllocator; }

    virtual void getAnalysisUsage(AnalysisUsage &AU) const;
    virtual void releaseMemory();

    /// runOnMachineFunction - pass entry point
    virtual bool runOnMachineFunction(MachineFunction&);

    /// print - Implement the dump method.
    virtual void print(raw_ostream &O, const Module* = 0) const;

    /// isReMaterializable - Returns true if every definition of MI of every
    /// val# of the specified interval is re-materializable. Also returns true
    /// by reference if all of the defs are load instructions.
    bool isReMaterializable(const LiveInterval &li,
                            const SmallVectorImpl<LiveInterval*> *SpillIs,
                            bool &isLoad);

    /// intervalIsInOneMBB - If LI is confined to a single basic block, return
    /// a pointer to that block. If LI is live in to or out of any block,
    /// return NULL.
    MachineBasicBlock *intervalIsInOneMBB(const LiveInterval &LI) const;

    /// addKillFlags - Add kill flags to any instruction that kills a virtual
    /// register.
    void addKillFlags();

    /// handleMove - call this method to notify LiveIntervals that
    /// instruction 'mi' has been moved within a basic block. This will update
    /// the live intervals for all operands of mi. Moves between basic blocks
    /// are not supported.
    void handleMove(MachineInstr* MI);

    /// moveIntoBundle - Update intervals for operands of MI so that they
    /// begin/end on the SlotIndex for BundleStart.
    ///
    /// Requires MI and BundleStart to have SlotIndexes, and assumes
    /// existing liveness is accurate. BundleStart should be the first
    /// instruction in the Bundle.
    void handleMoveIntoBundle(MachineInstr* MI, MachineInstr* BundleStart);

    // Register mask functions.
    //
    // Machine instructions may use a register mask operand to indicate that a
    // large number of registers are clobbered by the instruction. This is
    // typically used for calls.
    //
    // For compile time performance reasons, these clobbers are not recorded in
    // the live intervals for individual physical registers. Instead,
    // LiveIntervalAnalysis maintains a sorted list of instructions with
    // register mask operands.

    /// getRegMaskSlots - Returns a sorted array of slot indices of all
    /// instructions with register mask operands.
    ArrayRef<SlotIndex> getRegMaskSlots() const { return RegMaskSlots; }

    /// getRegMaskSlotsInBlock - Returns a sorted array of slot indices of all
    /// instructions with register mask operands in the basic block numbered
    /// MBBNum.
    ArrayRef<SlotIndex> getRegMaskSlotsInBlock(unsigned MBBNum) const {
      std::pair<unsigned, unsigned> P = RegMaskBlocks[MBBNum];
      return getRegMaskSlots().slice(P.first, P.second);
    }

    /// getRegMaskBits() - Returns an array of register mask pointers
    /// corresponding to getRegMaskSlots().
    ArrayRef<const uint32_t*> getRegMaskBits() const { return RegMaskBits; }

    /// getRegMaskBitsInBlock - Returns an array of mask pointers corresponding
    /// to getRegMaskSlotsInBlock(MBBNum).
    ArrayRef<const uint32_t*> getRegMaskBitsInBlock(unsigned MBBNum) const {
      std::pair<unsigned, unsigned> P = RegMaskBlocks[MBBNum];
      return getRegMaskBits().slice(P.first, P.second);
    }

    /// checkRegMaskInterference - Test if LI is live across any register mask
    /// instructions, and compute a bit mask of physical registers that are not
    /// clobbered by any of them.
    ///
    /// Returns false if LI doesn't cross any register mask instructions. In
    /// that case, the bit vector is not filled in.
    bool checkRegMaskInterference(LiveInterval &LI,
                                  BitVector &UsableRegs);

    // Register unit functions.
    //
    // Fixed interference occurs when MachineInstrs use physregs directly
    // instead of virtual registers. This typically happens when passing
    // arguments to a function call, or when instructions require operands in
    // fixed registers.
    //
    // Each physreg has one or more register units, see MCRegisterInfo. We
    // track liveness per register unit to handle aliasing registers more
    // efficiently.

    /// getRegUnit - Return the live range for Unit.
    /// It will be computed if it doesn't exist.
    LiveInterval &getRegUnit(unsigned Unit) {
      LiveInterval *LI = RegUnitIntervals[Unit];
      if (!LI) {
        // Compute missing ranges on demand.
        RegUnitIntervals[Unit] = LI = new LiveInterval(Unit, HUGE_VALF);
        computeRegUnitInterval(LI);
      }
      return *LI;
    }

    /// getCachedRegUnit - Return the live range for Unit if it has already
    /// been computed, or NULL if it hasn't been computed yet.
    LiveInterval *getCachedRegUnit(unsigned Unit) {
      return RegUnitIntervals[Unit];
    }

  private:
    /// computeIntervals - Compute live intervals.
    void computeIntervals();

    /// handleRegisterDef - update intervals for a register def
    /// (calls handleVirtualRegisterDef)
    void handleRegisterDef(MachineBasicBlock *MBB,
                           MachineBasicBlock::iterator MI,
                           SlotIndex MIIdx,
                           MachineOperand& MO, unsigned MOIdx);

    /// isPartialRedef - Return true if the specified def at the specific index
    /// is partially re-defining the specified live interval. A common case of
    /// this is a definition of the sub-register.
    bool isPartialRedef(SlotIndex MIIdx, MachineOperand &MO,
                        LiveInterval &interval);

    /// handleVirtualRegisterDef - update intervals for a virtual
    /// register def
    void handleVirtualRegisterDef(MachineBasicBlock *MBB,
                                  MachineBasicBlock::iterator MI,
                                  SlotIndex MIIdx, MachineOperand& MO,
                                  unsigned MOIdx,
                                  LiveInterval& interval);

    static LiveInterval* createInterval(unsigned Reg);

    void printInstrs(raw_ostream &O) const;
    void dumpInstrs() const;

    void computeLiveInRegUnits();
    void computeRegUnitInterval(LiveInterval*);

    class HMEditor;
  };
} // End llvm namespace

#endif
{ "pile_set_name": "Github" }
#!/sbin/openrc-run
# OpenRC service script for bees (a btrfs deduplication daemon).
# start_pre() makes sure the hash-table file exists with the configured
# size before the daemon is launched in the background.

WORK_DIR="/"   # btrfs filesystem root directory
DB_SIZE=8      # hash table size in 16MiB units; this default uses about 140M of memory

BEESSTATUS=/run/$SVCNAME/$SVCNAME.status
BEESHOME="$WORK_DIR/.beeshome"
DB_PATH="$BEESHOME/beeshash.dat"
# Shell arithmetic instead of forking expr(1); size is DB_SIZE * 16MiB.
NEW_SIZE=$((DB_SIZE * 16777216))

name=$SVCNAME
command=/bin/$SVCNAME
command_args="$WORK_DIR"
command_background=yes
pidfile=/run/$SVCNAME/$SVCNAME.pid

export BEESSTATUS

start_pre() {
	checkpath -d "/run/$SVCNAME"
	# BEESHOME is expected to be a btrfs subvolume; create it on first run.
	if [ ! -d "$BEESHOME" ]; then
		btrfs sub cre "$BEESHOME"
	fi
	touch "$DB_PATH"
	# Resize the hash table only when its size actually differs.
	# Compare numerically: `wc -c` may pad its output with leading
	# whitespace on some implementations, which the previous
	# sed/cut string comparison mishandled.  Expansions are quoted so a
	# DB_PATH containing spaces cannot be word-split.
	OLD_SIZE=$(wc -c < "$DB_PATH")
	if [ "$OLD_SIZE" -ne "$NEW_SIZE" ]; then
		truncate -s "$NEW_SIZE" "$DB_PATH"
	fi
}
{ "pile_set_name": "Github" }
/*
Implementation by the Keccak, Keyak and Ketje Teams, namely, Guido Bertoni,
Joan Daemen, Michaël Peeters, Gilles Van Assche and Ronny Van Keer, hereby
denoted as "the implementer".

For more information, feedback or questions, please refer to our websites:
http://keccak.noekeon.org/
http://keyak.noekeon.org/
http://ketje.noekeon.org/

To the extent possible under law, the implementer has waived all copyright
and related or neighboring rights to the source code in this file.
http://creativecommons.org/publicdomain/zero/1.0/
*/

/*
This header defines the round macros `rounds24` (all 24 Keccak-p[1600]
rounds) and `rounds12` (the last 12 rounds only), unrolled by the
compile-time factor selected through `FullUnrolling` or `Unrolling`
(12, 6, 4, 3, 2 or 1).  Each round macro alternates its (A, E) state
arguments so consecutive rounds ping-pong between the two state copies;
for odd unrolling factors (3 and 1) `copyStateVariables(A, E)` restores
the invariant at the end of each loop iteration.  The including file is
expected to define `prepareTheta`, `thetaRhoPiChiIotaPrepareTheta`,
`thetaRhoPiChiIota`, `copyStateVariables` and (for looped variants) a
loop counter `i`.  NOTE: comments must stay outside the macro bodies —
a comment line between `\`-continued lines would break the macro.
*/

/* Fully unrolled: every round emitted inline, no loop counter needed. */
#if (defined(FullUnrolling))
#define rounds24 \
    prepareTheta \
    thetaRhoPiChiIotaPrepareTheta( 0, A, E) \
    thetaRhoPiChiIotaPrepareTheta( 1, E, A) \
    thetaRhoPiChiIotaPrepareTheta( 2, A, E) \
    thetaRhoPiChiIotaPrepareTheta( 3, E, A) \
    thetaRhoPiChiIotaPrepareTheta( 4, A, E) \
    thetaRhoPiChiIotaPrepareTheta( 5, E, A) \
    thetaRhoPiChiIotaPrepareTheta( 6, A, E) \
    thetaRhoPiChiIotaPrepareTheta( 7, E, A) \
    thetaRhoPiChiIotaPrepareTheta( 8, A, E) \
    thetaRhoPiChiIotaPrepareTheta( 9, E, A) \
    thetaRhoPiChiIotaPrepareTheta(10, A, E) \
    thetaRhoPiChiIotaPrepareTheta(11, E, A) \
    thetaRhoPiChiIotaPrepareTheta(12, A, E) \
    thetaRhoPiChiIotaPrepareTheta(13, E, A) \
    thetaRhoPiChiIotaPrepareTheta(14, A, E) \
    thetaRhoPiChiIotaPrepareTheta(15, E, A) \
    thetaRhoPiChiIotaPrepareTheta(16, A, E) \
    thetaRhoPiChiIotaPrepareTheta(17, E, A) \
    thetaRhoPiChiIotaPrepareTheta(18, A, E) \
    thetaRhoPiChiIotaPrepareTheta(19, E, A) \
    thetaRhoPiChiIotaPrepareTheta(20, A, E) \
    thetaRhoPiChiIotaPrepareTheta(21, E, A) \
    thetaRhoPiChiIotaPrepareTheta(22, A, E) \
    thetaRhoPiChiIota(23, E, A) \

#define rounds12 \
    prepareTheta \
    thetaRhoPiChiIotaPrepareTheta(12, A, E) \
    thetaRhoPiChiIotaPrepareTheta(13, E, A) \
    thetaRhoPiChiIotaPrepareTheta(14, A, E) \
    thetaRhoPiChiIotaPrepareTheta(15, E, A) \
    thetaRhoPiChiIotaPrepareTheta(16, A, E) \
    thetaRhoPiChiIotaPrepareTheta(17, E, A) \
    thetaRhoPiChiIotaPrepareTheta(18, A, E) \
    thetaRhoPiChiIotaPrepareTheta(19, E, A) \
    thetaRhoPiChiIotaPrepareTheta(20, A, E) \
    thetaRhoPiChiIotaPrepareTheta(21, E, A) \
    thetaRhoPiChiIotaPrepareTheta(22, A, E) \
    thetaRhoPiChiIota(23, E, A) \

/* Unroll by 12: rounds24 loops twice; rounds12 is fully inline. */
#elif (Unrolling == 12)
#define rounds24 \
    prepareTheta \
    for(i=0; i<24; i+=12) { \
        thetaRhoPiChiIotaPrepareTheta(i   , A, E) \
        thetaRhoPiChiIotaPrepareTheta(i+ 1, E, A) \
        thetaRhoPiChiIotaPrepareTheta(i+ 2, A, E) \
        thetaRhoPiChiIotaPrepareTheta(i+ 3, E, A) \
        thetaRhoPiChiIotaPrepareTheta(i+ 4, A, E) \
        thetaRhoPiChiIotaPrepareTheta(i+ 5, E, A) \
        thetaRhoPiChiIotaPrepareTheta(i+ 6, A, E) \
        thetaRhoPiChiIotaPrepareTheta(i+ 7, E, A) \
        thetaRhoPiChiIotaPrepareTheta(i+ 8, A, E) \
        thetaRhoPiChiIotaPrepareTheta(i+ 9, E, A) \
        thetaRhoPiChiIotaPrepareTheta(i+10, A, E) \
        thetaRhoPiChiIotaPrepareTheta(i+11, E, A) \
    } \

#define rounds12 \
    prepareTheta \
    thetaRhoPiChiIotaPrepareTheta(12, A, E) \
    thetaRhoPiChiIotaPrepareTheta(13, E, A) \
    thetaRhoPiChiIotaPrepareTheta(14, A, E) \
    thetaRhoPiChiIotaPrepareTheta(15, E, A) \
    thetaRhoPiChiIotaPrepareTheta(16, A, E) \
    thetaRhoPiChiIotaPrepareTheta(17, E, A) \
    thetaRhoPiChiIotaPrepareTheta(18, A, E) \
    thetaRhoPiChiIotaPrepareTheta(19, E, A) \
    thetaRhoPiChiIotaPrepareTheta(20, A, E) \
    thetaRhoPiChiIotaPrepareTheta(21, E, A) \
    thetaRhoPiChiIotaPrepareTheta(22, A, E) \
    thetaRhoPiChiIota(23, E, A) \

/* Unroll by 6. */
#elif (Unrolling == 6)
#define rounds24 \
    prepareTheta \
    for(i=0; i<24; i+=6) { \
        thetaRhoPiChiIotaPrepareTheta(i  , A, E) \
        thetaRhoPiChiIotaPrepareTheta(i+1, E, A) \
        thetaRhoPiChiIotaPrepareTheta(i+2, A, E) \
        thetaRhoPiChiIotaPrepareTheta(i+3, E, A) \
        thetaRhoPiChiIotaPrepareTheta(i+4, A, E) \
        thetaRhoPiChiIotaPrepareTheta(i+5, E, A) \
    } \

#define rounds12 \
    prepareTheta \
    for(i=12; i<24; i+=6) { \
        thetaRhoPiChiIotaPrepareTheta(i  , A, E) \
        thetaRhoPiChiIotaPrepareTheta(i+1, E, A) \
        thetaRhoPiChiIotaPrepareTheta(i+2, A, E) \
        thetaRhoPiChiIotaPrepareTheta(i+3, E, A) \
        thetaRhoPiChiIotaPrepareTheta(i+4, A, E) \
        thetaRhoPiChiIotaPrepareTheta(i+5, E, A) \
    } \

/* Unroll by 4. */
#elif (Unrolling == 4)
#define rounds24 \
    prepareTheta \
    for(i=0; i<24; i+=4) { \
        thetaRhoPiChiIotaPrepareTheta(i  , A, E) \
        thetaRhoPiChiIotaPrepareTheta(i+1, E, A) \
        thetaRhoPiChiIotaPrepareTheta(i+2, A, E) \
        thetaRhoPiChiIotaPrepareTheta(i+3, E, A) \
    } \

#define rounds12 \
    prepareTheta \
    for(i=12; i<24; i+=4) { \
        thetaRhoPiChiIotaPrepareTheta(i  , A, E) \
        thetaRhoPiChiIotaPrepareTheta(i+1, E, A) \
        thetaRhoPiChiIotaPrepareTheta(i+2, A, E) \
        thetaRhoPiChiIotaPrepareTheta(i+3, E, A) \
    } \

/* Unroll by 3 (odd factor: copy E back into A each iteration). */
#elif (Unrolling == 3)
#define rounds24 \
    prepareTheta \
    for(i=0; i<24; i+=3) { \
        thetaRhoPiChiIotaPrepareTheta(i  , A, E) \
        thetaRhoPiChiIotaPrepareTheta(i+1, E, A) \
        thetaRhoPiChiIotaPrepareTheta(i+2, A, E) \
        copyStateVariables(A, E) \
    } \

#define rounds12 \
    prepareTheta \
    for(i=12; i<24; i+=3) { \
        thetaRhoPiChiIotaPrepareTheta(i  , A, E) \
        thetaRhoPiChiIotaPrepareTheta(i+1, E, A) \
        thetaRhoPiChiIotaPrepareTheta(i+2, A, E) \
        copyStateVariables(A, E) \
    } \

/* Unroll by 2. */
#elif (Unrolling == 2)
#define rounds24 \
    prepareTheta \
    for(i=0; i<24; i+=2) { \
        thetaRhoPiChiIotaPrepareTheta(i  , A, E) \
        thetaRhoPiChiIotaPrepareTheta(i+1, E, A) \
    } \

#define rounds12 \
    prepareTheta \
    for(i=12; i<24; i+=2) { \
        thetaRhoPiChiIotaPrepareTheta(i  , A, E) \
        thetaRhoPiChiIotaPrepareTheta(i+1, E, A) \
    } \

/* No unrolling (odd factor: copy E back into A each iteration). */
#elif (Unrolling == 1)
#define rounds24 \
    prepareTheta \
    for(i=0; i<24; i++) { \
        thetaRhoPiChiIotaPrepareTheta(i  , A, E) \
        copyStateVariables(A, E) \
    } \

#define rounds12 \
    prepareTheta \
    for(i=12; i<24; i++) { \
        thetaRhoPiChiIotaPrepareTheta(i  , A, E) \
        copyStateVariables(A, E) \
    } \

#else
#error "Unrolling is not correctly specified!"
#endif
{ "pile_set_name": "Github" }
--- redirect: ember-inspector/index ---
{ "pile_set_name": "Github" }
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/* Tab strips: the page-level tab list and section tabs share one visual
   treatment — a flat list with a faint background and bottom rule. */
.page-tabs .page-list ul, .section-tabs ul {
  margin: 0;
  padding: 0;
  background: rgba(0, 0, 0, 0.0125);
  border-bottom: 1px solid rgba(0, 0, 0, 0.05);
}

/* A second, adjacent row of tabs renders smaller than the first. */
.page-tabs .page-list ul + ul, .section-tabs ul + ul {
  font-size: 0.75em;
}

/* Lay tabs out horizontally without list bullets. */
.page-tabs .page-list li, .section-tabs li {
  display: inline-block;
  list-style: none;
}

/* Tab labels: block-level hit area, plain black text.
   Note: page-list styling only applies to anchors that have an href. */
.page-tabs .page-list li a[href], .section-tabs li a {
  display: block;
  color: black;
  text-decoration: none;
  padding: 0.75em 1em;
}

/* Keep visited page links the same color as unvisited ones. */
.page-tabs .page-list li a[href]:visited {
  color: black;
}

/* Hover feedback. */
.page-tabs .page-list li a[href]:hover, .section-tabs li a:hover {
  background-color: #CDA;
  cursor: pointer;
}

/* The active tab: darker background, no pointer cursor, and hover is
   suppressed (the :hover selectors here keep the same styling). */
.page-tabs .page-list li a[href].current, .page-tabs .page-list li a[href].current:hover, .section-tabs li a.current, .section-tabs li a.current:hover {
  background: rgba(0,0,0,0.3);
  cursor: default;
}
{ "pile_set_name": "Github" }
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT License.
// See the LICENSE file in the project root for more information.

using System;
using System.Reactive.Concurrency;
using System.Reactive.Subjects;
using System.Threading;
using System.Threading.Tasks;
using Microsoft.Reactive.Testing;
using Xunit;

namespace ReactiveTests.Tests
{
    /// <summary>
    /// Tests for <see cref="AsyncSubject{T}"/>, the subject that remembers only the
    /// final OnNext value and replays it (then OnCompleted) to subscribers once the
    /// source terminates. Virtual-time tests use <see cref="TestScheduler"/>: all
    /// numeric arguments to OnNext/OnCompleted/OnError/ScheduleAbsolute are virtual
    /// clock ticks, and recorded observer messages are compared against expected
    /// (tick, notification) pairs.
    /// </summary>
    public partial class AsyncSubjectTest : ReactiveTest
    {
        [Fact]
        public void Subscribe_ArgumentChecking()
        {
            // A null observer must be rejected eagerly.
            ReactiveAssert.Throws<ArgumentNullException>(() => new AsyncSubject<int>().Subscribe(null));
        }

        [Fact]
        public void OnError_ArgumentChecking()
        {
            // A null exception must be rejected eagerly.
            ReactiveAssert.Throws<ArgumentNullException>(() => new AsyncSubject<int>().OnError(null));
        }

        [Fact]
        public void Infinite()
        {
            // Source never terminates within the subscription window, so an
            // AsyncSubject must emit nothing to any subscriber.
            var scheduler = new TestScheduler();

            var xs = scheduler.CreateHotObservable(
                OnNext(70, 1),
                OnNext(110, 2),
                OnNext(220, 3),
                OnNext(270, 4),
                OnNext(340, 5),
                OnNext(410, 6),
                OnNext(520, 7),
                OnNext(630, 8),
                OnNext(710, 9),
                OnNext(870, 10),
                OnNext(940, 11),
                OnNext(1020, 12)
            );

            var subject = default(AsyncSubject<int>);
            var subscription = default(IDisposable);

            var results1 = scheduler.CreateObserver<int>();
            var subscription1 = default(IDisposable);

            var results2 = scheduler.CreateObserver<int>();
            var subscription2 = default(IDisposable);

            var results3 = scheduler.CreateObserver<int>();
            var subscription3 = default(IDisposable);

            // Subject is created at 100, fed by the source from 200 to 1000.
            scheduler.ScheduleAbsolute(100, () => subject = new AsyncSubject<int>());
            scheduler.ScheduleAbsolute(200, () => subscription = xs.Subscribe(subject));
            scheduler.ScheduleAbsolute(1000, () => subscription.Dispose());

            // Three observers subscribe at different points in virtual time.
            scheduler.ScheduleAbsolute(300, () => subscription1 = subject.Subscribe(results1));
            scheduler.ScheduleAbsolute(400, () => subscription2 = subject.Subscribe(results2));
            scheduler.ScheduleAbsolute(900, () => subscription3 = subject.Subscribe(results3));

            scheduler.ScheduleAbsolute(600, () => subscription1.Dispose());
            scheduler.ScheduleAbsolute(700, () => subscription2.Dispose());

            // NOTE(review): this disposes subscription1 a second time (first at 600);
            // presumably a copy/paste slip for subscription2 — harmless, as disposing
            // an already-disposed subscription is a no-op. Same pattern recurs in the
            // Finite/Error/Canceled tests below.
            scheduler.ScheduleAbsolute(800, () => subscription1.Dispose());
            scheduler.ScheduleAbsolute(950, () => subscription3.Dispose());

            scheduler.Start();

            // No terminal event ever reached the subject, so no observer saw anything.
            results1.Messages.AssertEqual(
            );

            results2.Messages.AssertEqual(
            );

            results3.Messages.AssertEqual(
            );
        }

        [Fact]
        public void Finite()
        {
            // Source completes at 630; the last value before completion (7 at 520)
            // must be replayed. Events after the first termination are ignored.
            var scheduler = new TestScheduler();

            var xs = scheduler.CreateHotObservable(
                OnNext(70, 1),
                OnNext(110, 2),
                OnNext(220, 3),
                OnNext(270, 4),
                OnNext(340, 5),
                OnNext(410, 6),
                OnNext(520, 7),
                OnCompleted<int>(630),
                OnNext(640, 9),
                OnCompleted<int>(650),
                OnError<int>(660, new Exception())
            );

            var subject = default(AsyncSubject<int>);
            var subscription = default(IDisposable);

            var results1 = scheduler.CreateObserver<int>();
            var subscription1 = default(IDisposable);

            var results2 = scheduler.CreateObserver<int>();
            var subscription2 = default(IDisposable);

            var results3 = scheduler.CreateObserver<int>();
            var subscription3 = default(IDisposable);

            scheduler.ScheduleAbsolute(100, () => subject = new AsyncSubject<int>());
            scheduler.ScheduleAbsolute(200, () => subscription = xs.Subscribe(subject));
            scheduler.ScheduleAbsolute(1000, () => subscription.Dispose());

            scheduler.ScheduleAbsolute(300, () => subscription1 = subject.Subscribe(results1));
            scheduler.ScheduleAbsolute(400, () => subscription2 = subject.Subscribe(results2));
            scheduler.ScheduleAbsolute(900, () => subscription3 = subject.Subscribe(results3));

            scheduler.ScheduleAbsolute(600, () => subscription1.Dispose());
            scheduler.ScheduleAbsolute(700, () => subscription2.Dispose());

            // NOTE(review): second dispose of subscription1 (see Infinite) — no-op.
            scheduler.ScheduleAbsolute(800, () => subscription1.Dispose());
            scheduler.ScheduleAbsolute(950, () => subscription3.Dispose());

            scheduler.Start();

            // results1 unsubscribed (600) before completion (630): sees nothing.
            results1.Messages.AssertEqual(
            );

            // results2 was subscribed at completion time: gets the cached value then done.
            results2.Messages.AssertEqual(
                OnNext(630, 7),
                OnCompleted<int>(630)
            );

            // results3 subscribed after completion (900): value is replayed at
            // subscription time, not at the original completion tick.
            results3.Messages.AssertEqual(
                OnNext(900, 7),
                OnCompleted<int>(900)
            );
        }

        [Fact]
        public void Error()
        {
            // Source fails at 630: the error (not the last value) is delivered, and
            // replayed to late subscribers at their subscription time.
            var scheduler = new TestScheduler();

            var ex = new Exception();

            var xs = scheduler.CreateHotObservable(
                OnNext(70, 1),
                OnNext(110, 2),
                OnNext(220, 3),
                OnNext(270, 4),
                OnNext(340, 5),
                OnNext(410, 6),
                OnNext(520, 7),
                OnError<int>(630, ex),
                OnNext(640, 9),
                OnCompleted<int>(650),
                OnError<int>(660, new Exception())
            );

            var subject = default(AsyncSubject<int>);
            var subscription = default(IDisposable);

            var results1 = scheduler.CreateObserver<int>();
            var subscription1 = default(IDisposable);

            var results2 = scheduler.CreateObserver<int>();
            var subscription2 = default(IDisposable);

            var results3 = scheduler.CreateObserver<int>();
            var subscription3 = default(IDisposable);

            scheduler.ScheduleAbsolute(100, () => subject = new AsyncSubject<int>());
            scheduler.ScheduleAbsolute(200, () => subscription = xs.Subscribe(subject));
            scheduler.ScheduleAbsolute(1000, () => subscription.Dispose());

            scheduler.ScheduleAbsolute(300, () => subscription1 = subject.Subscribe(results1));
            scheduler.ScheduleAbsolute(400, () => subscription2 = subject.Subscribe(results2));
            scheduler.ScheduleAbsolute(900, () => subscription3 = subject.Subscribe(results3));

            scheduler.ScheduleAbsolute(600, () => subscription1.Dispose());
            scheduler.ScheduleAbsolute(700, () => subscription2.Dispose());

            // NOTE(review): second dispose of subscription1 (see Infinite) — no-op.
            scheduler.ScheduleAbsolute(800, () => subscription1.Dispose());
            scheduler.ScheduleAbsolute(950, () => subscription3.Dispose());

            scheduler.Start();

            results1.Messages.AssertEqual(
            );

            results2.Messages.AssertEqual(
                OnError<int>(630, ex)
            );

            results3.Messages.AssertEqual(
                OnError<int>(900, ex)
            );
        }

        [Fact]
        public void Canceled()
        {
            // Source completes at 630 without ever producing a value: subscribers get
            // only OnCompleted (no OnNext), replayed for late subscribers.
            var scheduler = new TestScheduler();

            var xs = scheduler.CreateHotObservable(
                OnCompleted<int>(630),
                OnNext(640, 9),
                OnCompleted<int>(650),
                OnError<int>(660, new Exception())
            );

            var subject = default(AsyncSubject<int>);
            var subscription = default(IDisposable);

            var results1 = scheduler.CreateObserver<int>();
            var subscription1 = default(IDisposable);

            var results2 = scheduler.CreateObserver<int>();
            var subscription2 = default(IDisposable);

            var results3 = scheduler.CreateObserver<int>();
            var subscription3 = default(IDisposable);

            scheduler.ScheduleAbsolute(100, () => subject = new AsyncSubject<int>());
            scheduler.ScheduleAbsolute(200, () => subscription = xs.Subscribe(subject));
            scheduler.ScheduleAbsolute(1000, () => subscription.Dispose());

            scheduler.ScheduleAbsolute(300, () => subscription1 = subject.Subscribe(results1));
            scheduler.ScheduleAbsolute(400, () => subscription2 = subject.Subscribe(results2));
            scheduler.ScheduleAbsolute(900, () => subscription3 = subject.Subscribe(results3));

            scheduler.ScheduleAbsolute(600, () => subscription1.Dispose());
            scheduler.ScheduleAbsolute(700, () => subscription2.Dispose());

            // NOTE(review): second dispose of subscription1 (see Infinite) — no-op.
            scheduler.ScheduleAbsolute(800, () => subscription1.Dispose());
            scheduler.ScheduleAbsolute(950, () => subscription3.Dispose());

            scheduler.Start();

            results1.Messages.AssertEqual(
            );

            results2.Messages.AssertEqual(
                OnCompleted<int>(630)
            );

            results3.Messages.AssertEqual(
                OnCompleted<int>(900)
            );
        }

        [Fact]
        public void SubjectDisposed()
        {
            // After Dispose (at 600), every member — OnNext/OnCompleted/OnError and
            // Subscribe — must throw ObjectDisposedException, and no observer ever
            // receives anything (the subject never terminated before disposal).
            var scheduler = new TestScheduler();

            var subject = default(AsyncSubject<int>);

            var results1 = scheduler.CreateObserver<int>();
            var subscription1 = default(IDisposable);

            var results2 = scheduler.CreateObserver<int>();
            var subscription2 = default(IDisposable);

            var results3 = scheduler.CreateObserver<int>();
            var subscription3 = default(IDisposable);

            scheduler.ScheduleAbsolute(100, () => subject = new AsyncSubject<int>());
            scheduler.ScheduleAbsolute(200, () => subscription1 = subject.Subscribe(results1));
            scheduler.ScheduleAbsolute(300, () => subscription2 = subject.Subscribe(results2));
            scheduler.ScheduleAbsolute(400, () => subscription3 = subject.Subscribe(results3));
            scheduler.ScheduleAbsolute(500, () => subscription1.Dispose());
            scheduler.ScheduleAbsolute(600, () => subject.Dispose());
            scheduler.ScheduleAbsolute(700, () => subscription2.Dispose());
            scheduler.ScheduleAbsolute(800, () => subscription3.Dispose());

            // OnNext calls before disposal are fine (values are just cached) …
            scheduler.ScheduleAbsolute(150, () => subject.OnNext(1));
            scheduler.ScheduleAbsolute(250, () => subject.OnNext(2));
            scheduler.ScheduleAbsolute(350, () => subject.OnNext(3));
            scheduler.ScheduleAbsolute(450, () => subject.OnNext(4));
            scheduler.ScheduleAbsolute(550, () => subject.OnNext(5));

            // … but any use after Dispose (600) must throw.
            scheduler.ScheduleAbsolute(650, () => ReactiveAssert.Throws<ObjectDisposedException>(() => subject.OnNext(6)));
            scheduler.ScheduleAbsolute(750, () => ReactiveAssert.Throws<ObjectDisposedException>(() => subject.OnCompleted()));
            scheduler.ScheduleAbsolute(850, () => ReactiveAssert.Throws<ObjectDisposedException>(() => subject.OnError(new Exception())));
            scheduler.ScheduleAbsolute(950, () => ReactiveAssert.Throws<ObjectDisposedException>(() => subject.Subscribe()));

            scheduler.Start();

            results1.Messages.AssertEqual(
            );

            results2.Messages.AssertEqual(
            );

            results3.Messages.AssertEqual(
            );
        }

#if !NO_THREAD
        [Fact]
        public void Await_Blocking()
        {
            // Awaiter path: GetAwaiter() on an AsyncSubject hands back the subject
            // itself, so the blocking GetResult behavior is shared with the subject.
            var s = new AsyncSubject<int>();
            GetResult_BlockingImpl(s.GetAwaiter());
        }

        [Fact]
        public void Await_Throw()
        {
            // Awaiter path for the error case.
            var s = new AsyncSubject<int>();
            GetResult_Blocking_ThrowImpl(s.GetAwaiter());
        }
#endif

        [Fact]
        public void GetResult_Empty()
        {
            // Completing without a value means there is no result to get.
            var s = new AsyncSubject<int>();
            s.OnCompleted();
            ReactiveAssert.Throws<InvalidOperationException>(() => s.GetResult());
        }

#if !NO_THREAD
        [Fact]
        public void GetResult_Blocking()
        {
            GetResult_BlockingImpl(new AsyncSubject<int>());
        }

        // Shared helper: GetResult() must block the calling thread until the subject
        // completes, then return the final value.
        private void GetResult_BlockingImpl(AsyncSubject<int> s)
        {
            Assert.False(s.IsCompleted);

            var e = new ManualResetEvent(false);

            new Thread(() => { e.WaitOne(); s.OnNext(42); s.OnCompleted(); }).Start();

            var y = default(int);

            var t = new Thread(() => { y = s.GetResult(); });
            t.Start();

            // Busy-wait until the reader thread is parked inside GetResult(), proving
            // the call actually blocked before we release the producer.
            while (t.ThreadState != ThreadState.WaitSleepJoin)
            {
                ;
            }

            e.Set();

            t.Join();

            Assert.Equal(42, y);
            Assert.True(s.IsCompleted);
        }

        [Fact]
        public void GetResult_Blocking_Throw()
        {
            GetResult_Blocking_ThrowImpl(new AsyncSubject<int>());
        }

        // Shared helper: a blocked GetResult() must rethrow the exact exception
        // instance delivered via OnError.
        private void GetResult_Blocking_ThrowImpl(AsyncSubject<int> s)
        {
            Assert.False(s.IsCompleted);

            var e = new ManualResetEvent(false);

            var ex = new Exception();

            new Thread(() => { e.WaitOne(); s.OnError(ex); }).Start();

            var y = default(Exception);

            var t = new Thread(() =>
            {
                try
                {
                    s.GetResult();
                }
                catch (Exception ex_)
                {
                    y = ex_;
                }
            });
            t.Start();

            // See GetResult_BlockingImpl: ensure the reader is blocked first.
            while (t.ThreadState != ThreadState.WaitSleepJoin)
            {
                ;
            }

            e.Set();

            t.Join();

            Assert.Same(ex, y);
            Assert.True(s.IsCompleted);
        }
#endif

        [Fact]
        public void GetResult_Context()
        {
            // An awaiter continuation must be posted through the SynchronizationContext
            // that was current when OnCompleted(Action) was registered.
            var x = new AsyncSubject<int>();

            var ctx = new MyContext();

            var e = new ManualResetEvent(false);

            Task.Run(() =>
            {
                SynchronizationContext.SetSynchronizationContext(ctx);

                var a = x.GetAwaiter();
                a.OnCompleted(() =>
                {
                    e.Set();
                });
            });

            x.OnNext(42);
            x.OnCompleted();

            e.WaitOne();

            Assert.True(ctx.Ran);
        }

        // Minimal SynchronizationContext that records whether Post was used.
        private class MyContext : SynchronizationContext
        {
            public bool Ran;

            public override void Post(SendOrPostCallback d, object state)
            {
                Ran = true;
                d(state);
            }
        }

        [Fact]
        public void HasObservers()
        {
            // HasObservers tracks live subscriptions exactly: true while at least one
            // subscription is undisposed, false otherwise.
            var s = new AsyncSubject<int>();
            Assert.False(s.HasObservers);

            var d1 = s.Subscribe(_ => { });
            Assert.True(s.HasObservers);

            d1.Dispose();
            Assert.False(s.HasObservers);

            var d2 = s.Subscribe(_ => { });
            Assert.True(s.HasObservers);

            var d3 = s.Subscribe(_ => { });
            Assert.True(s.HasObservers);

            d2.Dispose();
            Assert.True(s.HasObservers);

            d3.Dispose();
            Assert.False(s.HasObservers);
        }

        [Fact]
        public void HasObservers_Dispose1()
        {
            // Disposing the subject while observed clears observers immediately.
            var s = new AsyncSubject<int>();
            Assert.False(s.HasObservers);
            Assert.False(s.IsDisposed);

            var d = s.Subscribe(_ => { });
            Assert.True(s.HasObservers);
            Assert.False(s.IsDisposed);

            s.Dispose();
            Assert.False(s.HasObservers);
            Assert.True(s.IsDisposed);

            // Disposing the stale subscription afterwards changes nothing.
            d.Dispose();
            Assert.False(s.HasObservers);
            Assert.True(s.IsDisposed);
        }

        [Fact]
        public void HasObservers_Dispose2()
        {
            // Unsubscribing first, then disposing the subject: same end state.
            var s = new AsyncSubject<int>();
            Assert.False(s.HasObservers);
            Assert.False(s.IsDisposed);

            var d = s.Subscribe(_ => { });
            Assert.True(s.HasObservers);
            Assert.False(s.IsDisposed);

            d.Dispose();
            Assert.False(s.HasObservers);
            Assert.False(s.IsDisposed);

            s.Dispose();
            Assert.False(s.HasObservers);
            Assert.True(s.IsDisposed);
        }

        [Fact]
        public void HasObservers_Dispose3()
        {
            // Disposing a never-observed subject.
            var s = new AsyncSubject<int>();
            Assert.False(s.HasObservers);
            Assert.False(s.IsDisposed);

            s.Dispose();
            Assert.False(s.HasObservers);
            Assert.True(s.IsDisposed);
        }

        [Fact]
        public void HasObservers_OnCompleted()
        {
            // Completion detaches all observers (after delivering the cached value).
            var s = new AsyncSubject<int>();
            Assert.False(s.HasObservers);

            var d = s.Subscribe(_ => { });
            Assert.True(s.HasObservers);

            s.OnNext(42);
            Assert.True(s.HasObservers);

            s.OnCompleted();
            Assert.False(s.HasObservers);
        }

        [Fact]
        public void HasObservers_OnError()
        {
            // An error likewise detaches all observers.
            var s = new AsyncSubject<int>();
            Assert.False(s.HasObservers);

            var d = s.Subscribe(_ => { }, ex => { });
            Assert.True(s.HasObservers);

            s.OnNext(42);
            Assert.True(s.HasObservers);

            s.OnError(new Exception());
            Assert.False(s.HasObservers);
        }
    }
}
{ "pile_set_name": "Github" }