id stringlengths 27 29 | content stringlengths 226 3.24k |
|---|---|
codereview_new_cpp_data_5962 | WriterProxy::WriterProxy(
, locators_entry_(loc_alloc.max_unicast_locators, loc_alloc.max_multicast_locators)
, is_datasharing_writer_(false)
, received_at_least_one_heartbeat_(false)
- , state_(StateCode::IDLE)
{
//Create Events
ResourceEvent& event_manager = reader_->getRTPSParticipant()->getEventResource();
```suggestion
, state_(StateCode::STOPPED)
```
WriterProxy::WriterProxy(
, locators_entry_(loc_alloc.max_unicast_locators, loc_alloc.max_multicast_locators)
, is_datasharing_writer_(false)
, received_at_least_one_heartbeat_(false)
+ , state_(StateCode::STOPPED)
{
//Create Events
ResourceEvent& event_manager = reader_->getRTPSParticipant()->getEventResource(); |
codereview_new_cpp_data_5963 | TEST_F(XMLParserTests, regressions)
std::unique_ptr<BaseNode> root;
EXPECT_EQ(XMLP_ret::XML_ERROR, XMLParser::loadXML("regressions/12736.xml", root));
- EXPECT_EQ(XMLP_ret::XML_ERROR, XMLParser::loadXML("regressions/13418.xml", root));
- EXPECT_EQ(XMLP_ret::XML_ERROR, XMLParser::loadXML("regressions/13454.xml", root));
EXPECT_EQ(XMLP_ret::XML_ERROR, XMLParser::loadXML("regressions/13513.xml", root));
EXPECT_EQ(XMLP_ret::XML_ERROR, XMLParser::loadXML("regressions/14456.xml", root));
EXPECT_EQ(XMLP_ret::XML_ERROR, XMLParser::loadXML("regressions/15344.xml", root));
It seems we didn't backport these ones, so these two lines should be removed
```suggestion
```
TEST_F(XMLParserTests, regressions)
std::unique_ptr<BaseNode> root;
EXPECT_EQ(XMLP_ret::XML_ERROR, XMLParser::loadXML("regressions/12736.xml", root));
EXPECT_EQ(XMLP_ret::XML_ERROR, XMLParser::loadXML("regressions/13513.xml", root));
EXPECT_EQ(XMLP_ret::XML_ERROR, XMLParser::loadXML("regressions/14456.xml", root));
EXPECT_EQ(XMLP_ret::XML_ERROR, XMLParser::loadXML("regressions/15344.xml", root)); |
codereview_new_cpp_data_5964 | class BarType
std::array<char, 256> message_;
};
int process_id()
{
#if defined(__cplusplus_winrt)
I'd add a brief comment here on why `SystemInfo.cpp` cannot be included here.
class BarType
std::array<char, 256> message_;
};
+// NOTE: This function is duplicated from SystemInfo because it is not in the API and could not be added to test
+// compilation as that file is already compiled and linked, and doing such thing is wrong and would make a kitten cry.
+// (it duplicates an instantiated variable 'environment_file_' and so provoke a double free).
int process_id()
{
#if defined(__cplusplus_winrt) |
codereview_new_cpp_data_5965 | TEST(DDSBasic, DeleteDisabledEntities)
}
/**
- * This test checks a race condition on when calling DomainParticipantImpl::create_instance_handle()
* from different threads simultaneously. This was resulting in a `free(): invalid pointer` crash
* when deleting publishers created this way, as there was a clash in their respective instance
* handles. Not only did the crash occur, but it was also reported by TSan.
*
- * The tests spawns 200 thread, each creating a publisher and then waiting on a command from the
* main thread to delete them (so all of them at deleted at the same time).
*/
TEST(DDSBasic, MultithreadedPublisherCreation)
```suggestion
/**
* This test checks a race condition when calling DomainParticipantImpl::create_instance_handle()
* from different threads simultaneously. This was resulting in a `free(): invalid pointer` crash
* when deleting publishers created this way, as there was a clash in their respective instance
* handles. Not only did the crash occur, but it was also reported by TSan.
*
* The test spawns 200 threads, each creating a publisher and then waiting on a command from the
* main thread to delete them (so all of them at deleted at the same time).
*/
```
TEST(DDSBasic, DeleteDisabledEntities)
}
/**
+ * This test checks a race condition when calling DomainParticipantImpl::create_instance_handle()
* from different threads simultaneously. This was resulting in a `free(): invalid pointer` crash
* when deleting publishers created this way, as there was a clash in their respective instance
* handles. Not only did the crash occur, but it was also reported by TSan.
*
+ * The test spawns 200 threads, each creating a publisher and then waiting on a command from the
* main thread to delete them (so all of them at deleted at the same time).
*/
TEST(DDSBasic, MultithreadedPublisherCreation) |
codereview_new_cpp_data_5966 | TEST(RTPS, MultithreadedWriterCreation)
writer_attr.flow_controller_name = flow_controller_name;
eprosima::fastrtps::rtps::RTPSWriter* writer = eprosima::fastrtps::rtps::RTPSDomain::createRTPSWriter(
rtps_participant, writer_attr, history, nullptr);
- eprosima::fastrtps::WriterQos writer_qos;
/* Register writer in participant */
ASSERT_EQ(rtps_participant->registerWriter(writer, topic_attr, writer_qos), true);
{
I think this step is not necessary, but I'm fine leaving it. In that case, I would change it like this:
```suggestion
/* Register writer in participant */
eprosima::fastrtps::WriterQos writer_qos;
ASSERT_EQ(rtps_participant->registerWriter(writer, topic_attr, writer_qos), true);
```
TEST(RTPS, MultithreadedWriterCreation)
writer_attr.flow_controller_name = flow_controller_name;
eprosima::fastrtps::rtps::RTPSWriter* writer = eprosima::fastrtps::rtps::RTPSDomain::createRTPSWriter(
rtps_participant, writer_attr, history, nullptr);
/* Register writer in participant */
+ eprosima::fastrtps::WriterQos writer_qos;
ASSERT_EQ(rtps_participant->registerWriter(writer, topic_attr, writer_qos), true);
{ |
codereview_new_cpp_data_5967 | TEST_F(XMLParserTests, getXMLOwnershipStrengthQos)
titleElement = xml_doc.RootElement();
EXPECT_EQ(XMLP_ret::XML_OK,
XMLParserTest::propertiesPolicy_wrapper(titleElement, ownership_strength_policy, ident));
- EXPECT_EQ(ownership_strength_policy.value, 0);
sprintf(xml, xml_p, "100");
ASSERT_EQ(tinyxml2::XMLError::XML_SUCCESS, xml_doc.Parse(xml));
```suggestion
EXPECT_EQ(ownership_strength_policy.value, 0u);
```
TEST_F(XMLParserTests, getXMLOwnershipStrengthQos)
titleElement = xml_doc.RootElement();
EXPECT_EQ(XMLP_ret::XML_OK,
XMLParserTest::propertiesPolicy_wrapper(titleElement, ownership_strength_policy, ident));
+ EXPECT_EQ(ownership_strength_policy.value, 0u);
sprintf(xml, xml_p, "100");
ASSERT_EQ(tinyxml2::XMLError::XML_SUCCESS, xml_doc.Parse(xml)); |
codereview_new_cpp_data_5968 | bool HelloWorldSubscriber::init(
{
rqos.durability().kind = TRANSIENT_LOCAL_DURABILITY_QOS;
// mimic publisher behaviour. Assure the user see all samples are received
rqos.history().kind = KEEP_ALL_HISTORY_QOS;
}
else
If set with both reliability and durability, please add it also to Publisher
bool HelloWorldSubscriber::init(
{
rqos.durability().kind = TRANSIENT_LOCAL_DURABILITY_QOS;
// mimic publisher behaviour. Assure the user see all samples are received
+ rqos.reliability().kind = RELIABLE_RELIABILITY_QOS;
rqos.history().kind = KEEP_ALL_HISTORY_QOS;
}
else |
codereview_new_cpp_data_5969 |
-// Copyright 2019 Proyectos y Sistemas de Mantenimiento SL (eProsima).
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
Being a new file the copyright should be the current year
```suggestion
// Copyright 2022 Proyectos y Sistemas de Mantenimiento SL (eProsima).
```
+// Copyright 2022 Proyectos y Sistemas de Mantenimiento SL (eProsima).
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. |
codereview_new_cpp_data_5970 |
*
*/
#include <fastdds/rtps/common/Property.h>
#include <fastdds/utils/QosConverters.hpp>
-#include <string>
namespace eprosima {
namespace fastdds {
In case that [eProsima's code style](https://github.com/eProsima/cpp-style#code-style-for-eprosima-open-source-c-projects) does not specify a rule, the [Google C++ style guide](https://google.github.io/styleguide/cppguide.html#Names_and_Order_of_Includes) is followed. C++ standard library headers should be included before the project's headers.
*
*/
+#include <string>
#include <fastdds/rtps/common/Property.h>
#include <fastdds/utils/QosConverters.hpp>
namespace eprosima {
namespace fastdds { |
codereview_new_cpp_data_5971 |
*/
#include <string>
#include <fastdds/rtps/common/Property.h>
#include <fastdds/utils/QosConverters.hpp>
A blank line should separate the C++ standard headers from the project's ones. Sorry for the inconvenience :grimacing:
*/
#include <string>
+
#include <fastdds/rtps/common/Property.h>
#include <fastdds/utils/QosConverters.hpp>
|
codereview_new_cpp_data_5972 | fastrtps::TopicAttributes DataReaderImpl::topic_attributes() const
DataReaderListener* DataReaderImpl::get_listener_for(
const StatusMask& status)
{
- std::lock_guard<std::mutex> _(listener_mutex_);
-
- if (listener_ != nullptr &&
- user_datareader_->get_status_mask().is_active(status))
{
- return listener_;
}
return subscriber_->get_listener_for(status);
}
Maybe it would be best to keep the final return clause outside the scope of this mutex.
fastrtps::TopicAttributes DataReaderImpl::topic_attributes() const
DataReaderListener* DataReaderImpl::get_listener_for(
const StatusMask& status)
{
{
+ std::lock_guard<std::mutex> _(listener_mutex_);
+
+ if (listener_ != nullptr &&
+ user_datareader_->get_status_mask().is_active(status))
+ {
+ return listener_;
+ }
}
+
return subscriber_->get_listener_for(status);
}
|
codereview_new_cpp_data_5973 | ReceiverResource::ReceiverResource(
rValueResource.mValid = false;
max_message_size_ = rValueResource.max_message_size_;
active_callbacks_ = rValueResource.active_callbacks_;
}
bool ReceiverResource::SupportsLocator(
Should we set `rValueResource.active_callbacks_` to 0 ?
ReceiverResource::ReceiverResource(
rValueResource.mValid = false;
max_message_size_ = rValueResource.max_message_size_;
active_callbacks_ = rValueResource.active_callbacks_;
+ rValueResource.active_callbacks_ = 0;
}
bool ReceiverResource::SupportsLocator( |
codereview_new_cpp_data_5974 | void RTPSParticipantImpl::enable()
void RTPSParticipantImpl::disable()
{
- // Disabling even thread disables participant announcement
mp_event_thr.stop_thread();
// Disable Retries on Transports
```suggestion
// Disabling event thread disables participant announcement
```
void RTPSParticipantImpl::enable()
void RTPSParticipantImpl::disable()
{
+ // Disabling event thread also disables participant announcement, so there is no need to call
+ // stopRTPSParticipantAnnouncement()
mp_event_thr.stop_thread();
// Disable Retries on Transports |
codereview_new_cpp_data_5975 | DataReaderImpl::~DataReaderImpl()
bool DataReaderImpl::can_be_deleted() const
{
- {
- std::lock_guard<std::recursive_mutex> _(get_conditions_mutex());
- if (!read_conditions_.empty())
- {
- logWarning(DATA_READER, "DataReader " << guid() << " has ReadConditions not yet deleted");
- return false;
- }
}
if (reader_ != nullptr)
{
- std::lock_guard<RecursiveTimedMutex> lock(reader_->getMutex());
return !loan_manager_.has_outstanding_loans();
}
This mutex's scope should cover the whole operation.
DataReaderImpl::~DataReaderImpl()
bool DataReaderImpl::can_be_deleted() const
{
+ std::lock_guard<RecursiveTimedMutex> _(reader_->getMutex());
+ std::lock_guard<std::recursive_mutex> __(get_conditions_mutex());
+ if (!read_conditions_.empty())
+ {
+ logWarning(DATA_READER, "DataReader " << guid() << " has ReadConditions not yet deleted");
+ return false;
}
if (reader_ != nullptr)
{
return !loan_manager_.has_outstanding_loans();
}
|
codereview_new_cpp_data_5976 | ReturnCode_t DataReaderImpl::get_first_untaken_info(
uint64_t DataReaderImpl::get_unread_count(
bool mark_as_read)
{
- bool ret_val = reader_ ? history_.get_unread_count(mark_as_read) : 0;
if (mark_as_read)
{
try_notify_read_conditions();
```suggestion
uint64_t ret_val = reader_ ? history_.get_unread_count(mark_as_read) : 0;
```
ReturnCode_t DataReaderImpl::get_first_untaken_info(
uint64_t DataReaderImpl::get_unread_count(
bool mark_as_read)
{
+ uint64_t ret_val = reader_ ? history_.get_unread_count(mark_as_read) : 0;
if (mark_as_read)
{
try_notify_read_conditions(); |
codereview_new_cpp_data_5977 | void DataReaderHistory::check_and_remove_instance(
instance->alive_writers.empty() &&
instance_info->first.isDefined())
{
- if ((InstanceStateKind::ALIVE_INSTANCE_STATE != instance_info->second->instance_state) &&
- instance_info->first.isDefined())
- {
- instances_.erase(instance_info->first);
- }
-
instance_info = data_available_instances_.erase(instance_info);
}
}
Aren't you checking the same thing twice? The innermost if will always evaluate to true.
void DataReaderHistory::check_and_remove_instance(
instance->alive_writers.empty() &&
instance_info->first.isDefined())
{
+ instances_.erase(instance_info->first);
instance_info = data_available_instances_.erase(instance_info);
}
} |
codereview_new_cpp_data_5978 | void DataReaderHistory::check_and_remove_instance(
instance->alive_writers.empty() &&
instance_info->first.isDefined())
{
- if ((InstanceStateKind::ALIVE_INSTANCE_STATE != instance_info->second->instance_state) &&
- instance_info->first.isDefined())
- {
- instances_.erase(instance_info->first);
- }
-
instance_info = data_available_instances_.erase(instance_info);
}
}
Looks like you are evaluating the same things twice.
void DataReaderHistory::check_and_remove_instance(
instance->alive_writers.empty() &&
instance_info->first.isDefined())
{
+ instances_.erase(instance_info->first);
instance_info = data_available_instances_.erase(instance_info);
}
} |
codereview_new_cpp_data_5979 | bool StatefulReader::processDataMsg(
if (!change_pool_->reserve_cache(change_to_add))
{
logWarning(RTPS_MSG_IN,
- IDSTRING "Reached the specified maximum number samples allowed by this DataReader QoS. Rejecting change for reader: " <<
m_guid );
return false;
}
```suggestion
IDSTRING "Reached the maximum number of samples allowed by this reader's QoS. Rejecting change for reader " <<
```
bool StatefulReader::processDataMsg(
if (!change_pool_->reserve_cache(change_to_add))
{
logWarning(RTPS_MSG_IN,
+ IDSTRING "Reached the maximum number of samples allowed by this reader's QoS. Rejecting change for reader: " <<
m_guid );
return false;
} |
codereview_new_cpp_data_5980 | bool StatelessReader::processDataMsg(
if (!change_pool_->reserve_cache(change_to_add))
{
logWarning(RTPS_MSG_IN,
- IDSTRING "Reached the specified maximum number samples allowed by this DataReader QoS. Rejecting change for reader: " <<
m_guid );
return false;
}
```suggestion
IDSTRING "Reached the maximum number of samples allowed by this reader's QoS. Rejecting change for reader " <<
```
bool StatelessReader::processDataMsg(
if (!change_pool_->reserve_cache(change_to_add))
{
logWarning(RTPS_MSG_IN,
+ IDSTRING "Reached the maximum number of samples allowed by this reader's QoS. Rejecting change for reader: " <<
m_guid );
return false;
} |
codereview_new_cpp_data_5981 | TEST(DataWriterTests, InstancePolicyAllocationConsistency)
DataWriter* data_writer1 = publisher->create_datawriter(topic, qos);
ASSERT_EQ(data_writer1, nullptr);
// Next QoS config checks that if user sets max_samples < ( max_instances * max_samples_per_instance ) ,
// create_datareader() should return nullptr
qos.resource_limits().max_samples = 4999;
qos.resource_limits().max_instances = 10;
qos.resource_limits().max_samples_per_instance = 500;
- DataWriter* data_writer2 = publisher->create_datawriter(topic, qos);
- ASSERT_EQ(data_writer2, nullptr);
}
```suggestion
// create_datawriter() should return nullptr
```
TEST(DataWriterTests, InstancePolicyAllocationConsistency)
DataWriter* data_writer1 = publisher->create_datawriter(topic, qos);
ASSERT_EQ(data_writer1, nullptr);
+ // Below an ampliation of the last comprobation, for which it is proved the case of < 0 (-1),
+ // which also means infinite value
+ DataWriterQos qos = DATAWRITER_QOS_DEFAULT;
+ qos.resource_limits().max_instances = -1;
+
+ DataWriter* data_writer2 = publisher->create_datawriter(topic, qos);
+ ASSERT_EQ(data_writer2, nullptr);
+
// Next QoS config checks that if user sets max_samples < ( max_instances * max_samples_per_instance ) ,
// create_datareader() should return nullptr
qos.resource_limits().max_samples = 4999;
qos.resource_limits().max_instances = 10;
qos.resource_limits().max_samples_per_instance = 500;
+ DataWriter* data_writer3 = publisher->create_datawriter(topic, qos);
+ ASSERT_EQ(data_writer3, nullptr);
}
|
codereview_new_cpp_data_5982 | TEST(TopicTests, InstancePolicyAllocationConsistency)
Topic* topic1 = participant->create_topic("footopic1", type.get_type_name(), qos);
ASSERT_EQ(topic1, nullptr);
// Next QoS config checks that if user sets max_samples < ( max_instances * max_samples_per_instance ) ,
// create_datareader() should return nullptr
qos.resource_limits().max_samples = 4999;
qos.resource_limits().max_instances = 10;
qos.resource_limits().max_samples_per_instance = 500;
- Topic* topic2 = participant->create_topic("footopic2", type.get_type_name(), qos);
- ASSERT_EQ(topic2, nullptr);
}
} // namespace dds
```suggestion
// create_topic() should return nullptr
```
TEST(TopicTests, InstancePolicyAllocationConsistency)
Topic* topic1 = participant->create_topic("footopic1", type.get_type_name(), qos);
ASSERT_EQ(topic1, nullptr);
+ // Below an ampliation of the last comprobation, for which it is proved the case of < 0 (-1),
+ // which also means infinite value
+ TopicQos qos = TOPIC_QOS_DEFAULT;
+ qos.resource_limits().max_instances = -1;
+
+ Topic* topic2 = participant->create_topic("footopic1", type.get_type_name(), qos);
+ ASSERT_EQ(topic2, nullptr);
+
// Next QoS config checks that if user sets max_samples < ( max_instances * max_samples_per_instance ) ,
// create_datareader() should return nullptr
qos.resource_limits().max_samples = 4999;
qos.resource_limits().max_instances = 10;
qos.resource_limits().max_samples_per_instance = 500;
+ Topic* topic3 = participant->create_topic("footopic2", type.get_type_name(), qos);
+ ASSERT_EQ(topic3, nullptr);
}
} // namespace dds |
codereview_new_cpp_data_5983 | ReturnCode_t DataWriterImpl::check_qos_including_resource_limits(
const DataWriterQos& qos,
const TypeSupport& type)
{
- ReturnCode_t check_qos_return;
- check_qos_return = check_qos(qos);
- if (!check_qos_return)
{
- return check_qos_return;
}
- if (type->m_isGetKeyDefined)
- {
- check_qos_return = check_allocation_consistency(qos);
- if (!check_qos_return)
- {
- return check_qos_return;
- }
- }
- return ReturnCode_t::RETCODE_OK;
}
ReturnCode_t DataWriterImpl::check_qos(
```suggestion
ReturnCode_t check_qos_return = check_qos(qos);
if (ReturnCode_t::RETCODE_OK == check_qos_return &&
type->m_isGetKeyDefined)
{
check_qos_return = check_allocation_consistency(qos);
}
return check_qos_return;
```
ReturnCode_t DataWriterImpl::check_qos_including_resource_limits(
const DataWriterQos& qos,
const TypeSupport& type)
{
+ ReturnCode_t check_qos_return = check_qos(qos);
+ if (ReturnCode_t::RETCODE_OK == check_qos_return &&
+ type->m_isGetKeyDefined)
{
+ check_qos_return = check_allocation_consistency(qos);
}
+ return check_qos_return;
}
ReturnCode_t DataWriterImpl::check_qos( |
codereview_new_cpp_data_5984 | bool DiscoveryDataBase::server_acked_by_my_servers()
// Find the server's participant and check whether all its servers have ACKed the server's DATA(p)
auto this_server = participants_.find(server_guid_prefix_);
- // check is always there
assert(this_server != participants_.end());
for (auto prefix : servers_)
```suggestion
// check it is always there
```
bool DiscoveryDataBase::server_acked_by_my_servers()
// Find the server's participant and check whether all its servers have ACKed the server's DATA(p)
auto this_server = participants_.find(server_guid_prefix_);
+ // check it is always there
assert(this_server != participants_.end());
for (auto prefix : servers_) |
codereview_new_cpp_data_5985 | void PDPServer::announceParticipantState(
return;
}
}
-
-
}
else
{
We can remove these empty lines.
void PDPServer::announceParticipantState(
return;
}
}
}
else
{ |
codereview_new_cpp_data_5986 | int main(
if (participant != nullptr)
{
- if (subscriber != nullptr)
{
- if (reader != nullptr)
- {
- subscriber->delete_datareader(reader);
- }
-
- participant->delete_subscriber(subscriber);
- }
-
- if (topic != nullptr)
- {
- participant->delete_topic(topic);
}
DomainParticipantFactory::get_instance()->delete_participant(participant);
This could probably be simplified with a call to `delete_contained_entities`
int main(
if (participant != nullptr)
{
+ if (!participant->delete_contained_entities() && !result)
{
+ std::cout << "ERROR: precondition not met on participant entities removal" << std::endl;
+ result = 1;
}
DomainParticipantFactory::get_instance()->delete_participant(participant); |
codereview_new_cpp_data_5987 | void PDPServer::announceParticipantState(
std::vector<GUID_t> remote_readers;
LocatorList locators;
{
- std::vector<GuidPrefix_t> direct_clients_and_servers = discovery_db_.direct_clients_and_servers();
- for (GuidPrefix_t participant_prefix: direct_clients_and_servers)
- {
- // Add remote reader
- GUID_t remote_guid(participant_prefix, c_EntityId_SPDPReader);
- remote_readers.push_back(remote_guid);
- locators.push_back(discovery_db_.participant_metatraffic_locators(participant_prefix));
- }
- send_announcement(change, remote_readers, locators, dispose);
}
}
}
What is the purpose of this scope block?
void PDPServer::announceParticipantState(
std::vector<GUID_t> remote_readers;
LocatorList locators;
+ std::vector<GuidPrefix_t> direct_clients_and_servers = discovery_db_.direct_clients_and_servers();
+ for (GuidPrefix_t participant_prefix: direct_clients_and_servers)
{
+ // Add remote reader
+ GUID_t remote_guid(participant_prefix, c_EntityId_SPDPReader);
+ remote_readers.push_back(remote_guid);
+ locators.push_back(discovery_db_.participant_metatraffic_locators(participant_prefix));
}
+ send_announcement(change, remote_readers, locators, dispose);
}
}
|
codereview_new_cpp_data_5988 | bool DiscoveryDataBase::server_acked_by_my_servers()
// Find the server's participant and check whether all its servers have ACKed the server's DATA(p)
auto this_server = participants_.find(server_guid_prefix_);
- if (this_server == participants_.end())
- {
- return false;
- }
for (auto prefix : servers_)
{
Can this even happen considering the DiscoveryDataBase is created with the GUID of an existing participant? If it can, why return false? this will trigger a ping that will do some useless work since the change to send will be `nullptr`. Wouldn't it be better to treat it like the empty list and return true?
bool DiscoveryDataBase::server_acked_by_my_servers()
// Find the server's participant and check whether all its servers have ACKed the server's DATA(p)
auto this_server = participants_.find(server_guid_prefix_);
+ // check is always there
+ assert(this_server != participants_.end());
for (auto prefix : servers_)
{ |
codereview_new_cpp_data_5989 | void set_and_check_with_environment_file(
assert(res);
std::smatch::iterator it = mr.cbegin();
- // Check whether the address is IPv4
auto address = (++it)->str();
- assert(fastrtps::rtps::IPLocator::isIPv4(address));
fastrtps::rtps::IPLocator::setIPv4(locator, address);
assert(it != mr.cend());
Redundant with the isIPv4 call done later
void set_and_check_with_environment_file(
assert(res);
std::smatch::iterator it = mr.cbegin();
auto address = (++it)->str();
fastrtps::rtps::IPLocator::setIPv4(locator, address);
assert(it != mr.cend()); |
codereview_new_cpp_data_5990 | ReturnCode_t DomainParticipantImpl::delete_topic(
assert(it != topics_.end() && "Topic found by handle but factory not found");
TopicProxy* proxy = dynamic_cast<TopicProxy*>(topic->get_impl());
auto ret_code = it->second->delete_topic(proxy);
if (ReturnCode_t::RETCODE_OK == ret_code)
{
```suggestion
TopicProxy* proxy = reinterpret_cast<TopicProxy*>(topic->get_impl());
```
ReturnCode_t DomainParticipantImpl::delete_topic(
assert(it != topics_.end() && "Topic found by handle but factory not found");
TopicProxy* proxy = dynamic_cast<TopicProxy*>(topic->get_impl());
+ assert(nullptr != proxy);
auto ret_code = it->second->delete_topic(proxy);
if (ReturnCode_t::RETCODE_OK == ret_code)
{ |
codereview_new_cpp_data_6114 | void GetDeviceType(const std::unordered_map<std::string, std::string>& params, s
*device_type = "gpu";
} else if (value == std::string("cuda")) {
*device_type = "cuda";
- } else if (value == std::string("cuda_exp")) {
- Log::Warning(
- "Found device_type='cuda_exp' passed through params. "
- "That is an alias for device_type='cuda'. "
- "Use device_type='cuda' to suppress this warning. "
- "In the future, this warning will become an error. ");
- *device_type = "cuda";
} else {
Log::Fatal("Unknown device type %s", value.c_str());
}
I see. Now that we keep `cuda_exp` as an valid option with warning here, maybe it is reasonable to keep `USE_CUDA_EXP` in the make file.
void GetDeviceType(const std::unordered_map<std::string, std::string>& params, s
*device_type = "gpu";
} else if (value == std::string("cuda")) {
*device_type = "cuda";
} else {
Log::Fatal("Unknown device type %s", value.c_str());
} |
codereview_new_cpp_data_6115 | TreeLearner* TreeLearner::CreateTreeLearner(const std::string& learner_type, con
if (config->num_gpu == 1) {
return new CUDASingleGPUTreeLearner(config, boosting_on_cuda);
} else {
- Log::Fatal("cuda only supports training on a single GPU.");
}
} else {
- Log::Fatal("cuda only supports training on a single machine.");
}
}
return nullptr;
```suggestion
Log::Fatal("Currently cuda version only supports training on a single machine.");
```
TreeLearner* TreeLearner::CreateTreeLearner(const std::string& learner_type, con
if (config->num_gpu == 1) {
return new CUDASingleGPUTreeLearner(config, boosting_on_cuda);
} else {
+ Log::Fatal("Currently cuda version only supports training on a single GPU.");
}
} else {
+ Log::Fatal("Currently cuda version only supports training on a single machine.");
}
}
return nullptr; |
codereview_new_cpp_data_6130 | void Metadata::SetInitScore(const double* init_score, data_size_t len) {
void Metadata::InsertInitScores(const double* init_scores, data_size_t start_index, data_size_t len, data_size_t source_size) {
if (num_init_score_ <= 0) {
- Log::Fatal("Inserting intiial score data into dataset with no initial scores");
}
if (start_index + len > num_data_) {
// Note that len here is row count, not num_init_score, so we compare against num_data
```suggestion
Log::Fatal("Inserting initial score data into dataset with no initial scores");
```
void Metadata::SetInitScore(const double* init_score, data_size_t len) {
void Metadata::InsertInitScores(const double* init_scores, data_size_t start_index, data_size_t len, data_size_t source_size) {
if (num_init_score_ <= 0) {
+ Log::Fatal("Inserting initial score data into dataset with no initial scores");
}
if (start_index + len > num_data_) {
// Note that len here is row count, not num_init_score, so we compare against num_data |
codereview_new_cpp_data_6131 | void Metadata::Init(data_size_t num_data, int32_t has_weights, int32_t has_init_
num_data_ = num_data;
label_ = std::vector<label_t>(num_data_);
if (has_weights) {
- weights_ = std::vector<label_t>(num_data_, 0.0f);
num_weights_ = num_data_;
weight_load_from_file_ = false;
}
if (has_init_scores) {
num_init_score_ = static_cast<int64_t>(num_data) * nclasses;
- init_score_ = std::vector<double>(num_init_score_, 0);
}
if (has_queries) {
- if (!query_weights_.empty()) { query_weights_.clear(); }
- queries_ = std::vector<data_size_t>(num_data_, 0);
query_load_from_file_ = false;
}
}
Same as the initialization of `queries_`. Shall we use `clear` and `resize`?
void Metadata::Init(data_size_t num_data, int32_t has_weights, int32_t has_init_
num_data_ = num_data;
label_ = std::vector<label_t>(num_data_);
if (has_weights) {
+ if (!weights_.empty()) {
+ Log::Fatal("Calling Init() on Metadata weights that have already been initialized");
+ }
+ weights_.resize(num_data_, 0.0f);
num_weights_ = num_data_;
weight_load_from_file_ = false;
}
if (has_init_scores) {
+ if (!init_score_.empty()) {
+ Log::Fatal("Calling Init() on Metadata initial scores that have already been initialized");
+ }
num_init_score_ = static_cast<int64_t>(num_data) * nclasses;
+ init_score_.resize(num_init_score_, 0);
}
if (has_queries) {
+ if (!query_weights_.empty()) {
+ Log::Fatal("Calling Init() on Metadata queries that have already been initialized");
+ }
+ queries_.resize(num_data_, 0);
query_load_from_file_ = false;
}
} |
codereview_new_cpp_data_6233 | auto PythonServer::FillListPlayers(std::vector<PlayerSetupData>& players) const
const py::extract<py::list> py_players(r);
if (py_players.check()) {
py::stl_input_iterator<PlayerSetupData> players_begin(py_players), players_end;
- players.reserve(std::distance(players_begin, players_end));
players.insert(players.end(), players_begin, players_end);
} else {
DebugLogger() << "Wrong players list data: check returns "
if the `std::distance` was the problem, the leave the `.insert` ?
auto PythonServer::FillListPlayers(std::vector<PlayerSetupData>& players) const
const py::extract<py::list> py_players(r);
if (py_players.check()) {
py::stl_input_iterator<PlayerSetupData> players_begin(py_players), players_end;
+ players.reserve(py::len(py_players));
players.insert(players.end(), players_begin, players_end);
} else {
DebugLogger() << "Wrong players list data: check returns " |
codereview_new_cpp_data_6238 | namespace {
for (MeterType meter = MeterType(0); meter <= MeterType::METER_SPEED; // the meter(s) after MeterType::METER_SPEED are part-specific
meter = MeterType(int(meter) + 1))
{
- col_types[{std::string{to_string(meter)}, UserStringNop("METERS_SUBMENU")}] = StringCastedImmediateValueRef(std::string().append(ValueRef::MeterToName(meter)));
}
}
return col_types;
```suggestion
col_types[{std::string{to_string(meter)}, UserStringNop("METERS_SUBMENU")}] = StringCastedImmediateValueRef(std::string{ValueRef::MeterToName(meter)});
```
namespace {
for (MeterType meter = MeterType(0); meter <= MeterType::METER_SPEED; // the meter(s) after MeterType::METER_SPEED are part-specific
meter = MeterType(int(meter) + 1))
{
+ col_types[{std::string{to_string(meter)}, UserStringNop("METERS_SUBMENU")}] = StringCastedImmediateValueRef(std::string{ValueRef::MeterToName(meter)});
}
}
return col_types; |
codereview_new_cpp_data_6239 |
namespace parse {
std::string MeterToNameWrapper(MeterType meter) {
- return std::string().append(ValueRef::MeterToName(meter));
}
BOOST_PHOENIX_ADAPT_FUNCTION(std::string, MeterToName_, MeterToNameWrapper, 1)
```suggestion
std::string MeterToNameWrapper(MeterType meter)
{ return std::string{ValueRef::MeterToName(meter)}; }
```
namespace parse {
std::string MeterToNameWrapper(MeterType meter) {
+ return std::string{ValueRef::MeterToName(meter)};
}
BOOST_PHOENIX_ADAPT_FUNCTION(std::string, MeterToName_, MeterToNameWrapper, 1) |
codereview_new_cpp_data_6240 |
#include "../util/i18n.h"
namespace ValueRef {
- const std::string_view MeterToName(const MeterType meter);
}
UniverseObject::UniverseObject(UniverseObjectType type, std::string name,
```suggestion
std::string_view MeterToName(const MeterType meter);
```
#include "../util/i18n.h"
namespace ValueRef {
+ std::string_view MeterToName(const MeterType meter);
}
UniverseObject::UniverseObject(UniverseObjectType type, std::string name, |
codereview_new_cpp_data_6241 | void ShipDesign::BuildStatCaches() {
{ m_num_part_classes[part_class]++; }
}
- // ensure tags are unique
std::sort(tags.begin(), tags.end());
auto last = std::unique(tags.begin(), tags.end());
- tags.erase(last, tags.end());
// compile concatenated tags into contiguous storage
// TODO: transform_reduce when available on all platforms...
std::size_t tags_sz = 0;
- for (const auto& t : tags)
- tags_sz += t.size();
- m_tags_concatenated.reserve(tags_sz);
m_tags.clear();
m_tags.reserve(tags.size());
- std::for_each(tags.begin(), tags.end(), [this](auto& str) {
auto next_start = m_tags_concatenated.size();
m_tags_concatenated.append(str);
m_tags.push_back(std::string_view{m_tags_concatenated}.substr(next_start));
The erase is unnecessary, or at least not the best solution, I think. `tags` is a `vector<string_view>` assembled from the tags of the hull and parts. Some of those could be duplicates. `sort` and `unique` arrange all the unique ones at the start. Later, they are copied into `m_tags_concatenated`, but instead of erasing, it can just iterate from `begin` to `last` since that should be one-past the last unique tag in the vector.
void ShipDesign::BuildStatCaches() {
{ m_num_part_classes[part_class]++; }
}
+ // collect unique tags
std::sort(tags.begin(), tags.end());
auto last = std::unique(tags.begin(), tags.end());
// compile concatenated tags into contiguous storage
// TODO: transform_reduce when available on all platforms...
std::size_t tags_sz = 0;
+ std::for_each(tags.begin(), last, [&tags_sz](auto str) { tags_sz += str.size(); });
+ m_tags_concatenated.reserve(tags_sz);
m_tags.clear();
m_tags.reserve(tags.size());
+ std::for_each(tags.begin(), last, [this](auto str) {
auto next_start = m_tags_concatenated.size();
m_tags_concatenated.append(str);
m_tags.push_back(std::string_view{m_tags_concatenated}.substr(next_start)); |
codereview_new_cpp_data_6244 | namespace {
return 0;
}
- if (!empire->ProducibleItem(BuildType::BT_BUILDING, item_name, location_id)) {
- ErrorLogger() << "IssueEnqueueBuildingProductionOrder : specified item_name and location_id that don't indicate an item that can be built at that location";
- return 0;
- }
-
if (!empire->EnqueuableItem(BuildType::BT_BUILDING, item_name, location_id)) {
ErrorLogger() << "IssueEnqueueBuildingProductionOrder : specified item_name and location_id that don't indicate an item that can be enqueued at that location";
return 0;
The human client allows it; I see no reason why the AI should not be allowed to do it.
namespace {
return 0;
}
if (!empire->EnqueuableItem(BuildType::BT_BUILDING, item_name, location_id)) {
ErrorLogger() << "IssueEnqueueBuildingProductionOrder : specified item_name and location_id that don't indicate an item that can be enqueued at that location";
return 0; |
codereview_new_cpp_data_6257 | static S2N_RESULT s2n_generate_client_session_id(struct s2n_connection *conn)
/* Only generate the session id for TLS1.3 if in middlebox compatibility mode
*
- * actual_protocol_version is used here so the session id is generated if TLS1.3
- * was not negotiated, even if the client supports it */
- if (conn->actual_protocol_version >= S2N_TLS13 && !s2n_is_middlebox_compat_enabled(conn)) {
return S2N_RESULT_OK;
}
Isn't this triggered [when generating the ClientHello](https://github.com/aws/s2n-tls/blob/32c2c0391065f1380176049466fd05fe5950f18c/tls/s2n_client_hello.c#L590)? At that point, we haven't negotiated a version with the server yet, so I think client_protocol_version and actual_protocol_version should be the same. actual_protocol_version is just set early to the same value as client_protocol_version for historic reasons.
static S2N_RESULT s2n_generate_client_session_id(struct s2n_connection *conn)
/* Only generate the session id for TLS1.3 if in middlebox compatibility mode
*
+ * s2n_connection_get_protocol_version, which returns conn->actual_protocol_version, is used here because
+ * s2n_tls12_client_deserialize_session_state sets actual_protocol_version based on the protocol the
+ * server that issued the session ticket indicated. If we are attempting to resume a session for that
+ * session ticket, we should base the decision of whether to generate a session ID on the protocol version
+ * we are attempting to resume with. */
+ if (s2n_connection_get_protocol_version(conn) >= S2N_TLS13 && !s2n_is_middlebox_compat_enabled(conn)) {
return S2N_RESULT_OK;
}
|
codereview_new_cpp_data_6258 | static S2N_RESULT s2n_generate_client_session_id(struct s2n_connection *conn)
/* Only generate the session id for TLS1.3 if in middlebox compatibility mode
*
- * actual_protocol_version is used here so the session id is generated if TLS1.3
- * was not negotiated, even if the client supports it */
- if (conn->actual_protocol_version >= S2N_TLS13 && !s2n_is_middlebox_compat_enabled(conn)) {
return S2N_RESULT_OK;
}
It's probably worth noting [why](https://github.com/aws/s2n-tls/pull/3845/files#r1113730266) we have to use actual_protocol_version here. "was not negotiated" doesn't make a lot of sense on its own since negotiation hasn't happened yet :)
static S2N_RESULT s2n_generate_client_session_id(struct s2n_connection *conn)
/* Only generate the session id for TLS1.3 if in middlebox compatibility mode
*
+ * s2n_connection_get_protocol_version, which returns conn->actual_protocol_version, is used here because
+ * s2n_tls12_client_deserialize_session_state sets actual_protocol_version based on the protocol the
+ * server that issued the session ticket indicated. If we are attempting to resume a session for that
+ * session ticket, we should base the decision of whether to generate a session ID on the protocol version
+ * we are attempting to resume with. */
+ if (s2n_connection_get_protocol_version(conn) >= S2N_TLS13 && !s2n_is_middlebox_compat_enabled(conn)) {
return S2N_RESULT_OK;
}
|
codereview_new_cpp_data_6259 | int main(int argc, char **argv)
EXPECT_NOT_NULL(conn = s2n_connection_new(S2N_CLIENT));
struct s2n_stuffer *hello_stuffer = &conn->handshake.io;
conn->actual_protocol_version = S2N_TLS12;
EXPECT_SUCCESS(s2n_client_hello_send(conn));
EXPECT_SUCCESS(s2n_stuffer_skip_read(hello_stuffer, LENGTH_TO_SESSION_ID));
Nit: we can avoid needing the s2n_connection_free at the end of the test with:
```suggestion
DEFER_CLEANUP(struct s2n_connection *conn = s2n_connection_new(S2N_CLIENT),
s2n_connection_ptr_free) ;
EXPECT_NOT_NULL(conn);
```
int main(int argc, char **argv)
EXPECT_NOT_NULL(conn = s2n_connection_new(S2N_CLIENT));
struct s2n_stuffer *hello_stuffer = &conn->handshake.io;
conn->actual_protocol_version = S2N_TLS12;
+ EXPECT_TRUE(conn->client_protocol_version >= S2N_TLS13);
EXPECT_SUCCESS(s2n_client_hello_send(conn));
EXPECT_SUCCESS(s2n_stuffer_skip_read(hello_stuffer, LENGTH_TO_SESSION_ID)); |
codereview_new_cpp_data_6260 | int main(int argc, char **argv)
EXPECT_NOT_NULL(conn = s2n_connection_new(S2N_CLIENT));
struct s2n_stuffer *hello_stuffer = &conn->handshake.io;
conn->actual_protocol_version = S2N_TLS12;
EXPECT_SUCCESS(s2n_client_hello_send(conn));
EXPECT_SUCCESS(s2n_stuffer_skip_read(hello_stuffer, LENGTH_TO_SESSION_ID));
Should you either also set client_protocol_version or verify that it's >=S2N_TLS13, to make sure you're covering this bug?
int main(int argc, char **argv)
EXPECT_NOT_NULL(conn = s2n_connection_new(S2N_CLIENT));
struct s2n_stuffer *hello_stuffer = &conn->handshake.io;
conn->actual_protocol_version = S2N_TLS12;
+ EXPECT_TRUE(conn->client_protocol_version >= S2N_TLS13);
EXPECT_SUCCESS(s2n_client_hello_send(conn));
EXPECT_SUCCESS(s2n_stuffer_skip_read(hello_stuffer, LENGTH_TO_SESSION_ID)); |
codereview_new_cpp_data_6262 | static int s2n_parse_x509_extension(struct s2n_cert *cert, const uint8_t *oid,
*/
int len = ASN1_STRING_length(asn1_str);
if (ext_value != NULL) {
POSIX_ENSURE(*ext_value_len >= (uint32_t) len, S2N_ERR_INSUFFICIENT_MEM_SIZE);
/* ASN1_STRING_data() returns an internal pointer to the data.
* Since this is an internal pointer it should not be freed or modified in any way.
```suggestion
POSIX_ENSURE_GTE(len, 0);
POSIX_ENSURE(*ext_value_len >= (uint32_t) len, S2N_ERR_INSUFFICIENT_MEM_SIZE);
```
static int s2n_parse_x509_extension(struct s2n_cert *cert, const uint8_t *oid,
*/
int len = ASN1_STRING_length(asn1_str);
if (ext_value != NULL) {
+ POSIX_ENSURE_GTE(len, 0);
POSIX_ENSURE(*ext_value_len >= (uint32_t) len, S2N_ERR_INSUFFICIENT_MEM_SIZE);
/* ASN1_STRING_data() returns an internal pointer to the data.
* Since this is an internal pointer it should not be freed or modified in any way. |
codereview_new_cpp_data_6263 | int s2n_connection_set_config(struct s2n_connection *conn, struct s2n_config *co
}
/* Users can enable OCSP status requests via s2n_config_set_status_request_type.
- * However, s2n_config_set_verification_ca_location can also enable OCSP status
- * requests. To ensure backwards compatibility, this function is allowed to enable
- * OCSP status requests for clients. For servers, however, OCSP status requests
- * are only sent if the user intentionally opted in via
- * s2n_config_set_status_request_type.
*/
conn->request_ocsp_status = config->ocsp_status_requested_by_user;
if (config->ocsp_status_requested_by_s2n && conn->mode == S2N_CLIENT) {
I already understand the problem, but still found this comment confusing. I think it was mostly the "this function"-- I read that as s2n_connection_set_config, not s2n_config_set_verification_ca_location. You also don't explicitly say that we can "change" the behavior for servers bc mutual auth didn't originally support ocsp stapling, which I think is a key part of the problem.
int s2n_connection_set_config(struct s2n_connection *conn, struct s2n_config *co
}
/* Users can enable OCSP status requests via s2n_config_set_status_request_type.
+ * To ensure backwards compatibility, s2n_config_set_verification_ca_location can
+ * also enable OCSP status requests if called on a client. This behavior can be
+ * avoided if s2n_config_set_verification_ca_location is called on a server, since
+ * s2n-tls did not initially support sending an OCSP status request from a server.
*/
conn->request_ocsp_status = config->ocsp_status_requested_by_user;
if (config->ocsp_status_requested_by_s2n && conn->mode == S2N_CLIENT) { |
codereview_new_cpp_data_6264 | int s2n_connection_set_config(struct s2n_connection *conn, struct s2n_config *co
conn->multirecord_send = true;
}
- /* Users can enable OCSP status requests via s2n_config_set_status_request_type.
- * To ensure backwards compatibility, s2n_config_set_verification_ca_location can
- * also enable OCSP status requests if called on a client. This behavior can be
- * avoided if s2n_config_set_verification_ca_location is called on a server, since
- * s2n-tls did not initially support sending an OCSP status request from a server.
*/
conn->request_ocsp_status = config->ocsp_status_requested_by_user;
if (config->ocsp_status_requested_by_s2n && conn->mode == S2N_CLIENT) {
As I was writing the above suggested documentation, I realized that we might have a problem: are we maintaining the old ordering behavior?
Like what about this sequence:
1. s2n_config_set_verification_ca_location: old=S2N_STATUS_REQUEST_OCSP, new=(ocsp_status_requested_by_user=false, ocsp_status_requested_by_s2n=true)
2. s2n_config_set_status_request_type(S2N_STATUS_REQUEST_NONE): old=S2N_STATUS_REQUEST_NONE, new=(ocsp_status_requested_by_user=false, ocsp_status_requested_by_s2n=true)
So with the old logic, ocsp would be disabled. But with the new logic, ocsp would be enabled.
int s2n_connection_set_config(struct s2n_connection *conn, struct s2n_config *co
conn->multirecord_send = true;
}
+ /* Historically, calling s2n_config_set_verification_ca_location enabled OCSP stapling
+ * regardless of the value set by an application calling s2n_config_set_status_request_type.
+ * We maintain this behavior for backwards compatibility.
+ *
+ * However, the s2n_config_set_verification_ca_location behavior predates client authentication
+ * support for OCSP stapling, so could only affect whether clients requested OCSP stapling. We
+ * therefore only have to maintain the legacy behavior for clients, not servers.
*/
conn->request_ocsp_status = config->ocsp_status_requested_by_user;
if (config->ocsp_status_requested_by_s2n && conn->mode == S2N_CLIENT) { |
codereview_new_cpp_data_6265 | int s2n_set_cipher_as_client(struct s2n_connection *conn, uint8_t wire[S2N_TLS_C
static int s2n_wire_ciphers_contain(const uint8_t *match, const uint8_t *wire, uint32_t count, uint32_t cipher_suite_len)
{
- for (size_t i = 0; i < count; i++) {
const uint8_t *theirs = wire + (i * cipher_suite_len) + (cipher_suite_len - S2N_TLS_CIPHER_SUITE_LEN);
if (!memcmp(match, theirs, S2N_TLS_CIPHER_SUITE_LEN)) {
This should probably be a `uint32_t`. I'm concerned about the `i < count` comparison: before the change the types matched, now they don't.
Is this broken by `-Wsign-compare-check`? What line does the compiler warn about if so?
int s2n_set_cipher_as_client(struct s2n_connection *conn, uint8_t wire[S2N_TLS_C
static int s2n_wire_ciphers_contain(const uint8_t *match, const uint8_t *wire, uint32_t count, uint32_t cipher_suite_len)
{
+ for (size_t i = 0; i < (size_t) count; i++) {
const uint8_t *theirs = wire + (i * cipher_suite_len) + (cipher_suite_len - S2N_TLS_CIPHER_SUITE_LEN);
if (!memcmp(match, theirs, S2N_TLS_CIPHER_SUITE_LEN)) { |
codereview_new_cpp_data_6266 | S2N_RESULT s2n_early_data_record_bytes(struct s2n_connection *conn, ssize_t data
if (data_len < 0 || !s2n_is_early_data_io(conn)) {
return S2N_RESULT_OK;
}
/* Ensure the bytes read are within the bounds of what we can actually record. */
if ((size_t) data_len > (UINT64_MAX - conn->early_data_bytes)) {
`ssize_t` can be negative so we should check before casting to an unsigned value
```suggestion
RESULT_ENSURE_GTE(data_len, 0);
if ((size_t) data_len > (UINT64_MAX - conn->early_data_bytes)) {
```
S2N_RESULT s2n_early_data_record_bytes(struct s2n_connection *conn, ssize_t data
if (data_len < 0 || !s2n_is_early_data_io(conn)) {
return S2N_RESULT_OK;
}
+ /* Check to ensure data_len is non-negative */
+ RESULT_ENSURE_GTE(data_len, 0);
/* Ensure the bytes read are within the bounds of what we can actually record. */
if ((size_t) data_len > (UINT64_MAX - conn->early_data_bytes)) { |
codereview_new_cpp_data_6267 | S2N_RESULT s2n_protocol_preferences_set(struct s2n_blob *application_protocols,
*/
RESULT_GUARD_POSIX(s2n_realloc(&new_protocols, 0));
for (size_t i = 0; i < (size_t) protocol_count; i++) {
const uint8_t *protocol = (const uint8_t *) protocols[i];
size_t length = strlen(protocols[i]);
```suggestion
RESULT_ENSURE_GTE(protocol_count, 0);
for (size_t i = 0; i < (size_t) protocol_count; i++) {
```
S2N_RESULT s2n_protocol_preferences_set(struct s2n_blob *application_protocols,
*/
RESULT_GUARD_POSIX(s2n_realloc(&new_protocols, 0));
+ /* Check to ensure protocol_count is non-negative */
+ RESULT_ENSURE_GTE(protocol_count, 0);
+
for (size_t i = 0; i < (size_t) protocol_count; i++) {
const uint8_t *protocol = (const uint8_t *) protocols[i];
size_t length = strlen(protocols[i]); |
codereview_new_cpp_data_6268 | S2N_RESULT s2n_record_write(struct s2n_connection *conn, uint8_t content_type, s
iov.iov_len = in->size;
int written = s2n_record_writev(conn, content_type, &iov, 1, 0, in->size);
RESULT_GUARD_POSIX(written);
- RESULT_ENSURE(written == (int) in->size, S2N_ERR_FRAGMENT_LENGTH_TOO_LARGE);
return S2N_RESULT_OK;
}
better to cast to the unsigned value since we've already validated that it's non-negative from the line above
```suggestion
RESULT_ENSURE((uint32_t) written == in->size, S2N_ERR_FRAGMENT_LENGTH_TOO_LARGE);
```
S2N_RESULT s2n_record_write(struct s2n_connection *conn, uint8_t content_type, s
iov.iov_len = in->size;
int written = s2n_record_writev(conn, content_type, &iov, 1, 0, in->size);
RESULT_GUARD_POSIX(written);
+ RESULT_ENSURE((uint32_t) written == in->size, S2N_ERR_FRAGMENT_LENGTH_TOO_LARGE);
return S2N_RESULT_OK;
} |
codereview_new_cpp_data_6269 | int s2n_connection_get_session(struct s2n_connection *conn, uint8_t *session, si
POSIX_ENSURE_REF(session);
const int len = s2n_connection_get_session_length(conn);
const size_t size = len;
- if (len == 0) {
return 0;
}
- POSIX_ENSURE(size < max_length, S2N_ERR_SERIALIZED_SESSION_STATE_TOO_LONG);
struct s2n_blob serialized_data = { 0 };
POSIX_GUARD(s2n_blob_init(&serialized_data, session, len));
```suggestion
POSIX_GUARD(len);
const size_t size = len;
```
int s2n_connection_get_session(struct s2n_connection *conn, uint8_t *session, si
POSIX_ENSURE_REF(session);
const int len = s2n_connection_get_session_length(conn);
+ POSIX_GUARD(len);
const size_t size = len;
+ if (size == 0) {
return 0;
}
+ POSIX_ENSURE(size <= max_length, S2N_ERR_SERIALIZED_SESSION_STATE_TOO_LONG);
struct s2n_blob serialized_data = { 0 };
POSIX_GUARD(s2n_blob_init(&serialized_data, session, len)); |
codereview_new_cpp_data_6270 | int s2n_connection_get_session(struct s2n_connection *conn, uint8_t *session, si
POSIX_ENSURE_REF(session);
const int len = s2n_connection_get_session_length(conn);
const size_t size = len;
- if (len == 0) {
return 0;
}
- POSIX_ENSURE(size < max_length, S2N_ERR_SERIALIZED_SESSION_STATE_TOO_LONG);
struct s2n_blob serialized_data = { 0 };
POSIX_GUARD(s2n_blob_init(&serialized_data, session, len));
```suggestion
if (size == 0) {
```
int s2n_connection_get_session(struct s2n_connection *conn, uint8_t *session, si
POSIX_ENSURE_REF(session);
const int len = s2n_connection_get_session_length(conn);
+ POSIX_GUARD(len);
const size_t size = len;
+ if (size == 0) {
return 0;
}
+ POSIX_ENSURE(size <= max_length, S2N_ERR_SERIALIZED_SESSION_STATE_TOO_LONG);
struct s2n_blob serialized_data = { 0 };
POSIX_GUARD(s2n_blob_init(&serialized_data, session, len)); |
codereview_new_cpp_data_6271 | static S2N_RESULT s2n_verify_host_information_common_name(struct s2n_connection
*cn_found = true;
char peer_cn[255] = { 0 };
- uint32_t len = ASN1_STRING_length(common_name);
-
- RESULT_ENSURE_GT(len, 0);
RESULT_ENSURE_LTE(len, s2n_array_len(peer_cn) - 1);
RESULT_CHECKED_MEMCPY(peer_cn, ASN1_STRING_data(common_name), len);
RESULT_ENSURE(conn->verify_host_fn(peer_cn, len, conn->data_for_verify_host), S2N_ERR_CERT_UNTRUSTED);
cast after you check
```suggestion
int cn_len = ASN1_STRING_length(common_name);
RESULT_ENSURE_GT(cn_len, 0);
uint32_t len = (uint32_t) cn_len;
```
static S2N_RESULT s2n_verify_host_information_common_name(struct s2n_connection
*cn_found = true;
char peer_cn[255] = { 0 };
+ int cn_len = ASN1_STRING_length(common_name);
+ RESULT_ENSURE_GT(cn_len, 0);
+ uint32_t len = (uint32_t) cn_len;
RESULT_ENSURE_LTE(len, s2n_array_len(peer_cn) - 1);
RESULT_CHECKED_MEMCPY(peer_cn, ASN1_STRING_data(common_name), len);
RESULT_ENSURE(conn->verify_host_fn(peer_cn, len, conn->data_for_verify_host), S2N_ERR_CERT_UNTRUSTED); |
codereview_new_cpp_data_6272 | int s2n_set_cipher_as_client(struct s2n_connection *conn, uint8_t wire[S2N_TLS_C
static int s2n_wire_ciphers_contain(const uint8_t *match, const uint8_t *wire, uint32_t count, uint32_t cipher_suite_len)
{
- for (size_t i = 0; i < (size_t) count; i++) {
const uint8_t *theirs = wire + (i * cipher_suite_len) + (cipher_suite_len - S2N_TLS_CIPHER_SUITE_LEN);
if (!memcmp(match, theirs, S2N_TLS_CIPHER_SUITE_LEN)) {
Doesn't the sign-compare check only apply if the signs are different? It doesn't seem like this cast is needed, but I could be wrong. If there is a reason to make this cast, wouldn't a lot of the other for loops also have to be updated?
int s2n_set_cipher_as_client(struct s2n_connection *conn, uint8_t wire[S2N_TLS_C
static int s2n_wire_ciphers_contain(const uint8_t *match, const uint8_t *wire, uint32_t count, uint32_t cipher_suite_len)
{
+ for (size_t i = 0; i < count; i++) {
const uint8_t *theirs = wire + (i * cipher_suite_len) + (cipher_suite_len - S2N_TLS_CIPHER_SUITE_LEN);
if (!memcmp(match, theirs, S2N_TLS_CIPHER_SUITE_LEN)) { |
codereview_new_cpp_data_6273 | int s2n_connection_get_peer_cert_chain(const struct s2n_connection *conn, struct
int cert_count = sk_X509_num(cert_chain_validated);
- /* Check to ensure cert_count is non-negative before casting */
POSIX_ENSURE_GTE(cert_count, 0);
for (size_t cert_idx = 0; cert_idx < (size_t) cert_count; cert_idx++) {
nit: I'm not really sure if these comments add very much. If the check comes right after the call, it might be clear enough what the check is for.
```suggestion
POSIX_ENSURE_GTE(cert_count, 0);
```
int s2n_connection_get_peer_cert_chain(const struct s2n_connection *conn, struct
int cert_count = sk_X509_num(cert_chain_validated);
POSIX_ENSURE_GTE(cert_count, 0);
for (size_t cert_idx = 0; cert_idx < (size_t) cert_count; cert_idx++) { |
codereview_new_cpp_data_6274 | S2N_RESULT s2n_protocol_preferences_set(struct s2n_blob *application_protocols,
*/
RESULT_GUARD_POSIX(s2n_realloc(&new_protocols, 0));
- /* Check to ensure protocol_count is non-negative */
RESULT_ENSURE_GTE(protocol_count, 0);
for (size_t i = 0; i < (size_t) protocol_count; i++) {
nit: Same comment on the comment. If the check comes right before where you cast it, it might be clear enough without the comment.
```suggestion
RESULT_ENSURE_GTE(protocol_count, 0);
```
S2N_RESULT s2n_protocol_preferences_set(struct s2n_blob *application_protocols,
*/
RESULT_GUARD_POSIX(s2n_realloc(&new_protocols, 0));
RESULT_ENSURE_GTE(protocol_count, 0);
for (size_t i = 0; i < (size_t) protocol_count; i++) { |
codereview_new_cpp_data_6275 | S2N_RESULT s2n_protocol_preferences_set(struct s2n_blob *application_protocols,
* s2n_realloc will just update the size field here
*/
RESULT_GUARD_POSIX(s2n_realloc(&new_protocols, 0));
-
RESULT_ENSURE_GTE(protocol_count, 0);
for (size_t i = 0; i < (size_t) protocol_count; i++) {
nit
```suggestion
RESULT_ENSURE_GTE(protocol_count, 0);
for (size_t i = 0; i < (size_t) protocol_count; i++) {
```
S2N_RESULT s2n_protocol_preferences_set(struct s2n_blob *application_protocols,
* s2n_realloc will just update the size field here
*/
RESULT_GUARD_POSIX(s2n_realloc(&new_protocols, 0));
RESULT_ENSURE_GTE(protocol_count, 0);
for (size_t i = 0; i < (size_t) protocol_count; i++) { |
codereview_new_cpp_data_6276 | int main(int argc, char **argv)
}
uint16_t prime = 257;
- for (uint32_t i = 0; i < (size_t) 0xFFFFFF - prime; i += prime) {
EXPECT_SUCCESS(s2n_stuffer_write_network_order(&stuffer, i, byte_length));
EXPECT_SUCCESS(s2n_stuffer_read_uint24(&stuffer, &actual_value));
EXPECT_EQUAL(i, actual_value);
Should this be `(uint32_t) 0xFF`...?
int main(int argc, char **argv)
}
uint16_t prime = 257;
+ for (uint32_t i = 0; i < (uint32_t) 0xFFFFFF - prime; i += prime) {
EXPECT_SUCCESS(s2n_stuffer_write_network_order(&stuffer, i, byte_length));
EXPECT_SUCCESS(s2n_stuffer_read_uint24(&stuffer, &actual_value));
EXPECT_EQUAL(i, actual_value); |
codereview_new_cpp_data_6277 | int s2n_verify_cbc(struct s2n_connection *conn, struct s2n_hmac_state *hmac, str
/* Check the maximum amount that could theoretically be padding */
uint32_t check = MIN(255, (payload_and_padding_size - 1));
uint32_t cutoff = check - padding_length;
for (size_t i = 0, j = decrypted->size - 1 - check; i < check && j < decrypted->size; i++, j++) {
uint8_t mask = ~(0xff << ((i >= cutoff) * 8));
Should these be `int32_t`? Logically the amount of padding should be non-negative, but nonetheless the subtraction here scares me. Is there any way for cutoff or check to go negative?
int s2n_verify_cbc(struct s2n_connection *conn, struct s2n_hmac_state *hmac, str
/* Check the maximum amount that could theoretically be padding */
uint32_t check = MIN(255, (payload_and_padding_size - 1));
+ /*Check to ensure check >= padding_length*/
+ POSIX_ENSURE_GTE(check, padding_length);
+
uint32_t cutoff = check - padding_length;
for (size_t i = 0, j = decrypted->size - 1 - check; i < check && j < decrypted->size; i++, j++) {
uint8_t mask = ~(0xff << ((i >= cutoff) * 8)); |
codereview_new_cpp_data_6278 | int main(int argc, char **argv)
EXPECT_BYTEARRAY_EQUAL(client_hello->extensions.raw.data, client_extensions, client_extensions_len);
/* Verify s2n_client_hello_get_extensions_length correct */
- EXPECT_EQUAL(s2n_client_hello_get_extensions_length(client_hello), (ssize_t) client_extensions_len);
/* Verify s2n_client_hello_get_extensions correct */
uint8_t *extensions_out;
/* Verify s2n_client_hello_get_extensions retrieves the full cipher_suites when its len <= max_len */
EXPECT_TRUE(client_hello->extensions.raw.size < S2N_LARGE_RECORD_LENGTH);
EXPECT_NOT_NULL(extensions_out = malloc(S2N_LARGE_RECORD_LENGTH));
- EXPECT_EQUAL((ssize_t) client_extensions_len, s2n_client_hello_get_extensions(client_hello, extensions_out, S2N_LARGE_RECORD_LENGTH));
EXPECT_BYTEARRAY_EQUAL(extensions_out, client_extensions, client_extensions_len);
free(extensions_out);
extensions_out = NULL;
Is the cast here required? Looks like you changed `client_extensions_len` to be an `ssize_t` and `s2n_client_hello_get_extensions_length` returns an `ssize_t`. Same on line 1015.
int main(int argc, char **argv)
EXPECT_BYTEARRAY_EQUAL(client_hello->extensions.raw.data, client_extensions, client_extensions_len);
/* Verify s2n_client_hello_get_extensions_length correct */
+ EXPECT_EQUAL(s2n_client_hello_get_extensions_length(client_hello), client_extensions_len);
/* Verify s2n_client_hello_get_extensions correct */
uint8_t *extensions_out;
/* Verify s2n_client_hello_get_extensions retrieves the full cipher_suites when its len <= max_len */
EXPECT_TRUE(client_hello->extensions.raw.size < S2N_LARGE_RECORD_LENGTH);
EXPECT_NOT_NULL(extensions_out = malloc(S2N_LARGE_RECORD_LENGTH));
+ EXPECT_EQUAL(client_extensions_len, s2n_client_hello_get_extensions(client_hello, extensions_out, S2N_LARGE_RECORD_LENGTH));
EXPECT_BYTEARRAY_EQUAL(extensions_out, client_extensions, client_extensions_len);
free(extensions_out);
extensions_out = NULL; |
codereview_new_cpp_data_6279 | int s2n_fd_set_non_blocking(int fd)
static int buffer_read(void *io_context, uint8_t *buf, uint32_t len)
{
struct s2n_stuffer *in_buf;
- uint32_t n_read, n_avail;
if (buf == NULL) {
return 0;
Do we have concerns about masking internal errors here?
It should never be the case that read_cursor > write_cursor, but by declaring these variables uint32_t we invoke undefined behavior in that error case.
```
#define s2n_stuffer_data_available(s) ((s)->write_cursor - (s)->read_cursor)
```
int s2n_fd_set_non_blocking(int fd)
static int buffer_read(void *io_context, uint8_t *buf, uint32_t len)
{
struct s2n_stuffer *in_buf;
+ int n_read, n_avail;
if (buf == NULL) {
return 0; |
codereview_new_cpp_data_6280 | int s2n_io_pair_shutdown_one_end(struct s2n_test_io_pair *io_pair, int mode_to_c
void s2n_print_connection(struct s2n_connection *conn, const char *marker)
{
- size_t i;
printf("marker: %s\n", marker);
printf("HEADER IN Stuffer (write: %d, read: %d, size: %d)\n", conn->header_in.write_cursor, conn->header_in.read_cursor, conn->header_in.blob.size);
initialize this while you're here 😄
```suggestion
size_t i = 0;
```
int s2n_io_pair_shutdown_one_end(struct s2n_test_io_pair *io_pair, int mode_to_c
void s2n_print_connection(struct s2n_connection *conn, const char *marker)
{
+ size_t i = 0;
printf("marker: %s\n", marker);
printf("HEADER IN Stuffer (write: %d, read: %d, size: %d)\n", conn->header_in.write_cursor, conn->header_in.read_cursor, conn->header_in.blob.size); |
codereview_new_cpp_data_6281 | int s2n_verify_cbc(struct s2n_connection *conn, struct s2n_hmac_state *hmac, str
/* Check the maximum amount that could theoretically be padding */
uint32_t check = MIN(255, (payload_and_padding_size - 1));
- /*Check to ensure check >= padding_length*/
POSIX_ENSURE_GTE(check, padding_length);
uint32_t cutoff = check - padding_length;
Nit spaces.
int s2n_verify_cbc(struct s2n_connection *conn, struct s2n_hmac_state *hmac, str
/* Check the maximum amount that could theoretically be padding */
uint32_t check = MIN(255, (payload_and_padding_size - 1));
+ /* Check to ensure check >= padding_length */
POSIX_ENSURE_GTE(check, padding_length);
uint32_t cutoff = check - padding_length; |
codereview_new_cpp_data_6282 | static int s2n_cbc_cipher_3des_encrypt(struct s2n_session_key *key, struct s2n_b
/* len is set by EVP_EncryptUpdate and checked post operation */
int len = 0;
POSIX_GUARD_OSSL(EVP_EncryptUpdate(key->evp_cipher_ctx, out->data, &len, in->data, in->size), S2N_ERR_ENCRYPT);
- S2N_ERROR_IF(len != (int) in->size, S2N_ERR_ENCRYPT);
return 0;
}
We should probably upcast to an `int64_t` here, in case things wrap.
```suggestion
POSIX_ENSURE((int64_t) len == (int64_t) in->size, S2N_ERR_ENCRYPT);
```
static int s2n_cbc_cipher_3des_encrypt(struct s2n_session_key *key, struct s2n_b
/* len is set by EVP_EncryptUpdate and checked post operation */
int len = 0;
POSIX_GUARD_OSSL(EVP_EncryptUpdate(key->evp_cipher_ctx, out->data, &len, in->data, in->size), S2N_ERR_ENCRYPT);
+ POSIX_ENSURE((int64_t)len == (int64_t) in->size, S2N_ERR_ENCRYPT);
return 0;
} |
codereview_new_cpp_data_6283 | static int s2n_cbc_cipher_aes_encrypt(struct s2n_session_key *key, struct s2n_bl
/* len is set by EVP_EncryptUpdate and checked post operation */
int len = 0;
POSIX_GUARD_OSSL(EVP_EncryptUpdate(key->evp_cipher_ctx, out->data, &len, in->data, in->size), S2N_ERR_ENCRYPT);
- S2N_ERROR_IF(len != (int) in->size, S2N_ERR_ENCRYPT);
return 0;
}
```suggestion
POSIX_ENSURE((int64_t) len == (int64_t) in->size, S2N_ERR_ENCRYPT);
```
static int s2n_cbc_cipher_aes_encrypt(struct s2n_session_key *key, struct s2n_bl
/* len is set by EVP_EncryptUpdate and checked post operation */
int len = 0;
POSIX_GUARD_OSSL(EVP_EncryptUpdate(key->evp_cipher_ctx, out->data, &len, in->data, in->size), S2N_ERR_ENCRYPT);
+ POSIX_ENSURE((int64_t) len == (int64_t) in->size, S2N_ERR_ENCRYPT);
return 0;
} |
codereview_new_cpp_data_6284 | static int s2n_parse_x509_extension(struct s2n_cert *cert, const uint8_t *oid,
/* ASN1_STRING_length() returns the length of the content of `asn1_str`.
* Ref: https://www.openssl.org/docs/man1.1.0/man3/ASN1_STRING_length.html.
*/
- uint32_t len = ASN1_STRING_length(asn1_str);
if (ext_value != NULL) {
POSIX_ENSURE(*ext_value_len >= len, S2N_ERR_INSUFFICIENT_MEM_SIZE);
/* ASN1_STRING_data() returns an internal pointer to the data.
This returns a `int` so it's best to keep it signed, or at least make sure it's non-negative before casting it.
static int s2n_parse_x509_extension(struct s2n_cert *cert, const uint8_t *oid,
/* ASN1_STRING_length() returns the length of the content of `asn1_str`.
* Ref: https://www.openssl.org/docs/man1.1.0/man3/ASN1_STRING_length.html.
*/
+ int len = ASN1_STRING_length(asn1_str);
if (ext_value != NULL) {
POSIX_ENSURE(*ext_value_len >= len, S2N_ERR_INSUFFICIENT_MEM_SIZE);
/* ASN1_STRING_data() returns an internal pointer to the data. |
codereview_new_cpp_data_6285 | static int s2n_rsa_encrypt(const struct s2n_pkey *pub, struct s2n_blob *in, stru
/* Safety: RSA_public_encrypt does not mutate the key */
int r = RSA_public_encrypt(in->size, (unsigned char *) in->data, (unsigned char *) out->data,
s2n_unsafe_rsa_get_non_const(pub_key), RSA_PKCS1_PADDING);
- POSIX_ENSURE((int64_t) r == out->size, S2N_ERR_SIZE_MISMATCH);
return 0;
}
Sorry if I'm missing something here, but if `r` is cast to `int64` but `out->size` is unsigned, won't `r` still be cast to unsigned when comparing? And shouldn't the compiler still complain about this with the sign-compare check?
static int s2n_rsa_encrypt(const struct s2n_pkey *pub, struct s2n_blob *in, stru
/* Safety: RSA_public_encrypt does not mutate the key */
int r = RSA_public_encrypt(in->size, (unsigned char *) in->data, (unsigned char *) out->data,
s2n_unsafe_rsa_get_non_const(pub_key), RSA_PKCS1_PADDING);
+ POSIX_ENSURE((int64_t) r == (int64_t) out->size, S2N_ERR_SIZE_MISMATCH);
return 0;
} |
codereview_new_cpp_data_6286 | static int s2n_parse_x509_extension(struct s2n_cert *cert, const uint8_t *oid,
* X509_get_ext_count returns the number of extensions in the x509 certificate.
* Ref: https://www.openssl.org/docs/man1.1.0/man3/X509_get_ext_count.html.
*/
- size_t ext_count = X509_get_ext_count(x509_cert);
- POSIX_ENSURE_GT(ext_count, 0);
/* OBJ_txt2obj() converts the input text string into an ASN1_OBJECT structure.
* If no_name is 0 then long names and short names will be interpreted as well as numerical forms.
check before you cast
```suggestion
int ext_count_value = X509_get_ext_count(x509_cert);
POSIX_ENSURE_GT(ext_count_value, 0);
size_t ext_count = (size_t) ext_count_value;
```
static int s2n_parse_x509_extension(struct s2n_cert *cert, const uint8_t *oid,
* X509_get_ext_count returns the number of extensions in the x509 certificate.
* Ref: https://www.openssl.org/docs/man1.1.0/man3/X509_get_ext_count.html.
*/
+ int ext_count_value = X509_get_ext_count(x509_cert);
+ POSIX_ENSURE_GT(ext_count_value, 0);
+ size_t ext_count = (size_t) ext_count_value;
/* OBJ_txt2obj() converts the input text string into an ASN1_OBJECT structure.
* If no_name is 0 then long names and short names will be interpreted as well as numerical forms. |
codereview_new_cpp_data_6287 |
/* The valid_public_key in the corpus directory was generated by taking the first public
* key (count = 0) from kyber_r3.kat and prepending KYBER_512_R3_PUBLIC_KEY_BYTES as two
* hex-encoded bytes. This is how we would expect it to appear on the wire. */
-static struct s2n_kem_params kem_params = { .kem = &s2n_kyber_512_r3, .len_prefixed = true };
int s2n_fuzz_test(const uint8_t *buf, size_t len) {
- POSIX_GUARD(s2n_kem_recv_public_key_fuzz_test(buf, len, &kem_params));
return S2N_SUCCESS;
}
For all of the fuzz tests, does it make sense to have versions for `len_prefixed` both `true` and `false` ?
/* The valid_public_key in the corpus directory was generated by taking the first public
* key (count = 0) from kyber_r3.kat and prepending KYBER_512_R3_PUBLIC_KEY_BYTES as two
* hex-encoded bytes. This is how we would expect it to appear on the wire. */
+static struct s2n_kem_params kyber_r3_draft0_params = { .kem = &s2n_kyber_512_r3, .len_prefixed = true };
+static struct s2n_kem_params kyber_r3_draft5_params = { .kem = &s2n_kyber_512_r3, .len_prefixed = false };
int s2n_fuzz_test(const uint8_t *buf, size_t len) {
+ POSIX_GUARD(s2n_kem_recv_public_key_fuzz_test(buf, len, &kyber_r3_draft0_params));
+ POSIX_GUARD(s2n_kem_recv_public_key_fuzz_test(buf, len, &kyber_r3_draft5_params));
return S2N_SUCCESS;
}
|
codereview_new_cpp_data_6288 |
* A valid ciphertext to provide to s2n_kem_recv_ciphertext (as it would have appeared on
* the wire) was generated by taking the corresponding KAT ciphertext (count = 0) and
* prepending KYBER_512_R3_CIPHERTEXT_BYTES as two hex-encoded bytes. */
-static struct s2n_kem_params kem_params = { .kem = &s2n_kyber_512_r3, .len_prefixed = true };
int s2n_fuzz_init(int *argc, char **argv[])
{
- POSIX_GUARD(s2n_kem_recv_ciphertext_fuzz_test_init(KAT_FILE_NAME, &kem_params));
return S2N_SUCCESS;
}
int s2n_fuzz_test(const uint8_t *buf, size_t len)
{
- POSIX_GUARD(s2n_kem_recv_ciphertext_fuzz_test(buf, len, &kem_params));
return S2N_SUCCESS;
}
static void s2n_fuzz_cleanup()
{
- s2n_kem_free(&kem_params);
}
S2N_FUZZ_TARGET(s2n_fuzz_init, s2n_fuzz_test, s2n_fuzz_cleanup)
This test is Kyber specific and TLS version agnostic. So, I think it probably makes sense to test both `true` and `false`.
* A valid ciphertext to provide to s2n_kem_recv_ciphertext (as it would have appeared on
* the wire) was generated by taking the corresponding KAT ciphertext (count = 0) and
* prepending KYBER_512_R3_CIPHERTEXT_BYTES as two hex-encoded bytes. */
+static struct s2n_kem_params kyber_r3_draft0_params = { .kem = &s2n_kyber_512_r3, .len_prefixed = true };
+static struct s2n_kem_params kyber_r3_draft5_params = { .kem = &s2n_kyber_512_r3, .len_prefixed = false };
int s2n_fuzz_init(int *argc, char **argv[])
{
+ POSIX_GUARD(s2n_kem_recv_ciphertext_fuzz_test_init(KAT_FILE_NAME, &kyber_r3_draft0_params));
+ POSIX_GUARD(s2n_kem_recv_ciphertext_fuzz_test_init(KAT_FILE_NAME, &kyber_r3_draft5_params));
return S2N_SUCCESS;
}
int s2n_fuzz_test(const uint8_t *buf, size_t len)
{
+ POSIX_GUARD(s2n_kem_recv_ciphertext_fuzz_test(buf, len, &kyber_r3_draft0_params));
+ POSIX_GUARD(s2n_kem_recv_ciphertext_fuzz_test(buf, len, &kyber_r3_draft5_params));
return S2N_SUCCESS;
}
static void s2n_fuzz_cleanup()
{
+ s2n_kem_free(&kyber_r3_draft0_params);
+ s2n_kem_free(&kyber_r3_draft5_params);
}
S2N_FUZZ_TARGET(s2n_fuzz_init, s2n_fuzz_test, s2n_fuzz_cleanup)
+ |
codereview_new_cpp_data_6289 | bool s2n_kem_preferences_includes_tls13_kem_group(const struct s2n_kem_preferenc
return false;
}
-/* Whether the client should include the length prefix in the PQ TLS 1.3 KEM KeyShares that it sends. Earlier drafts of
- * the PQ TLS 1.3 standard required length prefixing, and later drafts removed this length prefix. To not break
* backwards compatibility, we check what revision of the draft standard is configured to determine whether to send it. */
-bool s2n_tls13_client_prefers_hybrid_kem_length_prefix(const struct s2n_kem_preferences *kem_pref)
{
return kem_pref && (kem_pref->tls13_pq_hybrid_draft_revision == 0);
}
Suggest being specific about referencing the draft versions; `... draft 0 ...` and `... drafts 1-5 ...`. "Earlier drafts" and "later drafts" don't answer the possible question of why we're checking explicitly for `kem_pref->tls13_pq_hybrid_draft_revision == 0`.
bool s2n_kem_preferences_includes_tls13_kem_group(const struct s2n_kem_preferenc
return false;
}
+/* Whether the client must include the length prefix in the PQ TLS 1.3 KEM KeyShares that it sends. Draft 0 of
+ * the PQ TLS 1.3 standard required length prefixing, and drafts 1-5 removed this length prefix. To not break
* backwards compatibility, we check what revision of the draft standard is configured to determine whether to send it. */
+bool s2n_tls13_client_must_use_hybrid_kem_length_prefix(const struct s2n_kem_preferences *kem_pref)
{
return kem_pref && (kem_pref->tls13_pq_hybrid_draft_revision == 0);
} |
codereview_new_cpp_data_6290 | static int s2n_server_key_share_recv_pq_hybrid(struct s2n_connection *conn, uint
POSIX_ENSURE(s2n_stuffer_data_available(extension) == actual_hybrid_share_size, S2N_ERR_BAD_KEY_SHARE);
struct s2n_kem_params *client_kem_params = &conn->kex_params.client_kem_group_params.kem_params;
- POSIX_GUARD(s2n_is_tls13_hybrid_kem_length_prefixed(S2N_SERVER, actual_hybrid_share_size, server_kem_group_params->kem_group, &client_kem_params->len_prefixed));
/* Parse ECC key share */
uint16_t expected_ecc_share_size = server_kem_group_params->kem_group->curve->share_size;
See other comments about the `mode` argument. Here is the client calling this function with `mode = S2N_SERVER`. At the very least, I think the argument needs a more descriptive name than simply `mode`. But I don't see why the client can't just check it's own local `kem_params` to determine whether it sent a length prefixed key share in the first place.
static int s2n_server_key_share_recv_pq_hybrid(struct s2n_connection *conn, uint
POSIX_ENSURE(s2n_stuffer_data_available(extension) == actual_hybrid_share_size, S2N_ERR_BAD_KEY_SHARE);
struct s2n_kem_params *client_kem_params = &conn->kex_params.client_kem_group_params.kem_params;
+
+ /* Don't need to call s2n_is_tls13_hybrid_kem_length_prefixed() to set client_kem_params->len_prefixed since we are
+ * the client, and server-side should auto-detect hybrid share size and match our behavior. */
/* Parse ECC key share */
uint16_t expected_ecc_share_size = server_kem_group_params->kem_group->curve->share_size; |
codereview_new_cpp_data_6291 | const struct s2n_kem_group *ALL_SUPPORTED_KEM_GROUPS[S2N_SUPPORTED_KEM_GROUPS_CO
* The old format is used by draft 0 of the Hybrid PQ TLS 1.3 specification, and all revisions of the Hybrid PQ TLS 1.2
* draft specification. Only draft revisions 1-5 of the Hybrid PQ TLS 1.3 specification use the new format.
*/
-int s2n_is_tls13_hybrid_kem_length_prefixed(uint16_t actual_hybrid_share_size, const struct s2n_kem_group *kem_group, bool *is_length_prefixed)
{
- POSIX_ENSURE_REF(kem_group);
- POSIX_ENSURE_REF(kem_group->curve);
- POSIX_ENSURE_REF(kem_group->kem);
- POSIX_ENSURE_REF(is_length_prefixed);
uint16_t unprefixed_hybrid_share_size = kem_group->curve->share_size + kem_group->kem->public_key_length;
uint16_t prefixed_hybrid_share_size = (2 * S2N_SIZE_OF_KEY_SHARE_SIZE) + unprefixed_hybrid_share_size;
- POSIX_ENSURE((actual_hybrid_share_size == unprefixed_hybrid_share_size)
|| (actual_hybrid_share_size == prefixed_hybrid_share_size),
S2N_ERR_BAD_KEY_SHARE);
- if (actual_hybrid_share_size == prefixed_hybrid_share_size) {
- *is_length_prefixed = true;
- } else {
- *is_length_prefixed = false;
- }
- return S2N_SUCCESS;
}
S2N_RESULT s2n_kem_generate_keypair(struct s2n_kem_params *kem_params)
This entire function is much more readable :)
const struct s2n_kem_group *ALL_SUPPORTED_KEM_GROUPS[S2N_SUPPORTED_KEM_GROUPS_CO
* The old format is used by draft 0 of the Hybrid PQ TLS 1.3 specification, and all revisions of the Hybrid PQ TLS 1.2
* draft specification. Only draft revisions 1-5 of the Hybrid PQ TLS 1.3 specification use the new format.
*/
+S2N_RESULT s2n_is_tls13_hybrid_kem_length_prefixed(uint16_t actual_hybrid_share_size, const struct s2n_kem_group *kem_group, bool *is_length_prefixed)
{
+ RESULT_ENSURE_REF(kem_group);
+ RESULT_ENSURE_REF(kem_group->curve);
+ RESULT_ENSURE_REF(kem_group->kem);
+ RESULT_ENSURE_REF(is_length_prefixed);
uint16_t unprefixed_hybrid_share_size = kem_group->curve->share_size + kem_group->kem->public_key_length;
uint16_t prefixed_hybrid_share_size = (2 * S2N_SIZE_OF_KEY_SHARE_SIZE) + unprefixed_hybrid_share_size;
+ RESULT_ENSURE((actual_hybrid_share_size == unprefixed_hybrid_share_size)
|| (actual_hybrid_share_size == prefixed_hybrid_share_size),
S2N_ERR_BAD_KEY_SHARE);
+ *is_length_prefixed = (actual_hybrid_share_size == prefixed_hybrid_share_size);
+ return S2N_RESULT_OK;
}
S2N_RESULT s2n_kem_generate_keypair(struct s2n_kem_params *kem_params) |
codereview_new_cpp_data_6292 | const struct s2n_kem_group *ALL_SUPPORTED_KEM_GROUPS[S2N_SUPPORTED_KEM_GROUPS_CO
* The old format is used by draft 0 of the Hybrid PQ TLS 1.3 specification, and all revisions of the Hybrid PQ TLS 1.2
* draft specification. Only draft revisions 1-5 of the Hybrid PQ TLS 1.3 specification use the new format.
*/
-int s2n_is_tls13_hybrid_kem_length_prefixed(uint16_t actual_hybrid_share_size, const struct s2n_kem_group *kem_group, bool *is_length_prefixed)
{
- POSIX_ENSURE_REF(kem_group);
- POSIX_ENSURE_REF(kem_group->curve);
- POSIX_ENSURE_REF(kem_group->kem);
- POSIX_ENSURE_REF(is_length_prefixed);
uint16_t unprefixed_hybrid_share_size = kem_group->curve->share_size + kem_group->kem->public_key_length;
uint16_t prefixed_hybrid_share_size = (2 * S2N_SIZE_OF_KEY_SHARE_SIZE) + unprefixed_hybrid_share_size;
- POSIX_ENSURE((actual_hybrid_share_size == unprefixed_hybrid_share_size)
|| (actual_hybrid_share_size == prefixed_hybrid_share_size),
S2N_ERR_BAD_KEY_SHARE);
- if (actual_hybrid_share_size == prefixed_hybrid_share_size) {
- *is_length_prefixed = true;
- } else {
- *is_length_prefixed = false;
- }
- return S2N_SUCCESS;
}
S2N_RESULT s2n_kem_generate_keypair(struct s2n_kem_params *kem_params)
nit(?): not sure if it's just my browser/line wrap being weird, but the formatting and newlines on these three lines looks a bit strange and inconsistent. What I see:
```
POSIX_ENSURE((actual_hybrid_share_size == unprefixed_hybrid_share_size)
|| (actual_hybrid_share_size == prefixed_hybrid_share_size),
S2N_ERR_BAD_KEY_SHARE);
```
I think it should look something like:
```
POSIX_ENSURE(
(actual_hybrid_share_size == unprefixed_hybrid_share_size) ||
(actual_hybrid_share_size == prefixed_hybrid_share_size),
S2N_ERR_BAD_KEY_SHARE
);
```
Feel free to disagree or tell me that my browser is displaying the formatting incorrectly.
const struct s2n_kem_group *ALL_SUPPORTED_KEM_GROUPS[S2N_SUPPORTED_KEM_GROUPS_CO
* The old format is used by draft 0 of the Hybrid PQ TLS 1.3 specification, and all revisions of the Hybrid PQ TLS 1.2
* draft specification. Only draft revisions 1-5 of the Hybrid PQ TLS 1.3 specification use the new format.
*/
+S2N_RESULT s2n_is_tls13_hybrid_kem_length_prefixed(uint16_t actual_hybrid_share_size, const struct s2n_kem_group *kem_group, bool *is_length_prefixed)
{
+ RESULT_ENSURE_REF(kem_group);
+ RESULT_ENSURE_REF(kem_group->curve);
+ RESULT_ENSURE_REF(kem_group->kem);
+ RESULT_ENSURE_REF(is_length_prefixed);
uint16_t unprefixed_hybrid_share_size = kem_group->curve->share_size + kem_group->kem->public_key_length;
uint16_t prefixed_hybrid_share_size = (2 * S2N_SIZE_OF_KEY_SHARE_SIZE) + unprefixed_hybrid_share_size;
+ RESULT_ENSURE((actual_hybrid_share_size == unprefixed_hybrid_share_size)
|| (actual_hybrid_share_size == prefixed_hybrid_share_size),
S2N_ERR_BAD_KEY_SHARE);
+ *is_length_prefixed = (actual_hybrid_share_size == prefixed_hybrid_share_size);
+ return S2N_RESULT_OK;
}
S2N_RESULT s2n_kem_generate_keypair(struct s2n_kem_params *kem_params) |
codereview_new_cpp_data_6293 | static int s2n_client_key_share_recv_pq_hybrid(struct s2n_connection *conn, stru
bool is_hybrid_share_length_prefixed = 0;
uint16_t actual_hybrid_share_size = key_share->blob.size;
- POSIX_GUARD(s2n_is_tls13_hybrid_kem_length_prefixed(actual_hybrid_share_size, kem_group, &is_hybrid_share_length_prefixed));
if (is_hybrid_share_length_prefixed) {
/* Ignore KEM groups with unexpected ECC share sizes */
nit: leave a comment here noting that the key share length must be one of two values (length prefixed or not).
Not nit: It looks like we were just ignoring and returning success if the key share size was unexpected. Why are we now `GUARD`ing against it?
static int s2n_client_key_share_recv_pq_hybrid(struct s2n_connection *conn, stru
bool is_hybrid_share_length_prefixed = 0;
uint16_t actual_hybrid_share_size = key_share->blob.size;
+
+ /* The length of the hybrid key share must be one of two possible lengths. It's internal values are either length
+ * prefixed, or they are not. If actual_hybrid_share_size is not one of these two lengths, then
+ * s2n_is_tls13_hybrid_kem_length_prefixed() will return an error. */
+ POSIX_GUARD_RESULT(s2n_is_tls13_hybrid_kem_length_prefixed(actual_hybrid_share_size, kem_group, &is_hybrid_share_length_prefixed));
if (is_hybrid_share_length_prefixed) {
/* Ignore KEM groups with unexpected ECC share sizes */ |
codereview_new_cpp_data_6294 |
* A valid ciphertext to provide to s2n_kem_recv_ciphertext (as it would have appeared on
* the wire) was generated by taking the corresponding KAT ciphertext (count = 0) and
* prepending KYBER_512_R3_CIPHERTEXT_BYTES as two hex-encoded bytes. */
-static struct s2n_kem_params kyber_r3_draft0_params = { .kem = &s2n_kyber_512_r3, .len_prefixed = true };
-static struct s2n_kem_params kyber_r3_draft5_params = { .kem = &s2n_kyber_512_r3, .len_prefixed = false };
int s2n_fuzz_init(int *argc, char **argv[])
{
- POSIX_GUARD(s2n_kem_recv_ciphertext_fuzz_test_init(KAT_FILE_NAME, &kyber_r3_draft0_params));
- POSIX_GUARD(s2n_kem_recv_ciphertext_fuzz_test_init(KAT_FILE_NAME, &kyber_r3_draft5_params));
return S2N_SUCCESS;
}
int s2n_fuzz_test(const uint8_t *buf, size_t len)
{
- POSIX_GUARD(s2n_kem_recv_ciphertext_fuzz_test(buf, len, &kyber_r3_draft0_params));
- POSIX_GUARD(s2n_kem_recv_ciphertext_fuzz_test(buf, len, &kyber_r3_draft5_params));
return S2N_SUCCESS;
}
static void s2n_fuzz_cleanup()
{
- s2n_kem_free(&kyber_r3_draft0_params);
- s2n_kem_free(&kyber_r3_draft5_params);
}
S2N_FUZZ_TARGET(s2n_fuzz_init, s2n_fuzz_test, s2n_fuzz_cleanup)
Same nit as above
* A valid ciphertext to provide to s2n_kem_recv_ciphertext (as it would have appeared on
* the wire) was generated by taking the corresponding KAT ciphertext (count = 0) and
* prepending KYBER_512_R3_CIPHERTEXT_BYTES as two hex-encoded bytes. */
+static struct s2n_kem_params kyber512_r3_draft0_params = { .kem = &s2n_kyber_512_r3, .len_prefixed = true };
+static struct s2n_kem_params kyber512_r3_draft5_params = { .kem = &s2n_kyber_512_r3, .len_prefixed = false };
int s2n_fuzz_init(int *argc, char **argv[])
{
+ POSIX_GUARD(s2n_kem_recv_ciphertext_fuzz_test_init(KAT_FILE_NAME, &kyber512_r3_draft0_params));
+ POSIX_GUARD(s2n_kem_recv_ciphertext_fuzz_test_init(KAT_FILE_NAME, &kyber512_r3_draft5_params));
return S2N_SUCCESS;
}
int s2n_fuzz_test(const uint8_t *buf, size_t len)
{
+ POSIX_GUARD(s2n_kem_recv_ciphertext_fuzz_test(buf, len, &kyber512_r3_draft0_params));
+ POSIX_GUARD(s2n_kem_recv_ciphertext_fuzz_test(buf, len, &kyber512_r3_draft5_params));
return S2N_SUCCESS;
}
static void s2n_fuzz_cleanup()
{
+ s2n_kem_free(&kyber512_r3_draft0_params);
+ s2n_kem_free(&kyber512_r3_draft5_params);
}
S2N_FUZZ_TARGET(s2n_fuzz_init, s2n_fuzz_test, s2n_fuzz_cleanup) |
codereview_new_cpp_data_6295 | static int s2n_client_key_share_recv_pq_hybrid(struct s2n_connection *conn, stru
}
/* The length of the hybrid key share must be one of two possible lengths. Its internal values are either length
- * prefixed, or they are not. If actual_hybrid_share_size is not one of these two lengths, then
- * s2n_is_tls13_hybrid_kem_length_prefixed() will return an error. */
uint16_t actual_hybrid_share_size = key_share->blob.size;
uint16_t unprefixed_hybrid_share_size = kem_group->curve->share_size + kem_group->kem->public_key_length;
uint16_t prefixed_hybrid_share_size = (2 * S2N_SIZE_OF_KEY_SHARE_SIZE) + unprefixed_hybrid_share_size;
Comment is now out of date
static int s2n_client_key_share_recv_pq_hybrid(struct s2n_connection *conn, stru
}
/* The length of the hybrid key share must be one of two possible lengths. Its internal values are either length
+ * prefixed, or they are not. */
uint16_t actual_hybrid_share_size = key_share->blob.size;
uint16_t unprefixed_hybrid_share_size = kem_group->curve->share_size + kem_group->kem->public_key_length;
uint16_t prefixed_hybrid_share_size = (2 * S2N_SIZE_OF_KEY_SHARE_SIZE) + unprefixed_hybrid_share_size; |
codereview_new_cpp_data_6296 | static int s2n_client_key_share_recv_pq_hybrid(struct s2n_connection *conn, stru
}
/* The length of the hybrid key share must be one of two possible lengths. Its internal values are either length
- * prefixed, or they are not. If actual_hybrid_share_size is not one of these two lengths, then
- * s2n_is_tls13_hybrid_kem_length_prefixed() will return an error. */
uint16_t actual_hybrid_share_size = key_share->blob.size;
uint16_t unprefixed_hybrid_share_size = kem_group->curve->share_size + kem_group->kem->public_key_length;
uint16_t prefixed_hybrid_share_size = (2 * S2N_SIZE_OF_KEY_SHARE_SIZE) + unprefixed_hybrid_share_size;
Looking at the old code, could you just do prefixed_hybrid_share_size=kem_group->client_share_size? Or, if we don't use client_share_size anywhere anymore, can we remove it? Removing it might be the clearer option, since that's no longer always the size of the share.
static int s2n_client_key_share_recv_pq_hybrid(struct s2n_connection *conn, stru
}
/* The length of the hybrid key share must be one of two possible lengths. Its internal values are either length
+ * prefixed, or they are not. */
uint16_t actual_hybrid_share_size = key_share->blob.size;
uint16_t unprefixed_hybrid_share_size = kem_group->curve->share_size + kem_group->kem->public_key_length;
uint16_t prefixed_hybrid_share_size = (2 * S2N_SIZE_OF_KEY_SHARE_SIZE) + unprefixed_hybrid_share_size; |
codereview_new_cpp_data_6297 | int s2n_config_set_ktls_mode(struct s2n_config *config, s2n_ktls_mode ktls_mode)
config->ktls_send_requested = true;
break;
case S2N_KTLS_MODE_SEND:
config->ktls_send_requested = true;
break;
case S2N_KTLS_MODE_RECV:
config->ktls_recv_requested = true;
break;
case S2N_KTLS_MODE_DISABLED:
config->ktls_recv_requested = false;
I understand why you would need to check the connection to see if KTLS has actually been enabled, but why would someone need to query the config to determine if they've requested it? Or is this not a public API?
int s2n_config_set_ktls_mode(struct s2n_config *config, s2n_ktls_mode ktls_mode)
config->ktls_send_requested = true;
break;
case S2N_KTLS_MODE_SEND:
+ config->ktls_recv_requested = false;
config->ktls_send_requested = true;
break;
case S2N_KTLS_MODE_RECV:
config->ktls_recv_requested = true;
+ config->ktls_send_requested = false;
break;
case S2N_KTLS_MODE_DISABLED:
config->ktls_recv_requested = false; |
codereview_new_cpp_data_6298 | int s2n_config_set_ktls_mode(struct s2n_config *config, s2n_ktls_mode ktls_mode)
config->ktls_send_requested = true;
break;
case S2N_KTLS_MODE_SEND:
config->ktls_send_requested = true;
break;
case S2N_KTLS_MODE_RECV:
config->ktls_recv_requested = true;
break;
case S2N_KTLS_MODE_DISABLED:
config->ktls_recv_requested = false;
https://github.com/aws/s2n-tls/pull/3797/files#r1094771980 I agree that this seems like kind of odd behavior. Why do we need S2N_KTLS_MODE_DUPLEX if we can just call the method twice to enable both send and receive? Maybe these should be "S2N_KTLS_MODE_SEND_ONLY" and "S2N_KTLS_MODE_RECV_ONLY" and set the other direction to false?
int s2n_config_set_ktls_mode(struct s2n_config *config, s2n_ktls_mode ktls_mode)
config->ktls_send_requested = true;
break;
case S2N_KTLS_MODE_SEND:
+ config->ktls_recv_requested = false;
config->ktls_send_requested = true;
break;
case S2N_KTLS_MODE_RECV:
config->ktls_recv_requested = true;
+ config->ktls_send_requested = false;
break;
case S2N_KTLS_MODE_DISABLED:
config->ktls_recv_requested = false; |
codereview_new_cpp_data_6299 | int main(int argc, char **argv)
DEFER_CLEANUP(struct s2n_test_io_pair io_pair = { 0 }, s2n_io_pair_close);
EXPECT_SUCCESS(s2n_io_pair_init_non_blocking(&io_pair));
EXPECT_SUCCESS(s2n_connection_set_io_pair(conn, &io_pair));
- EXPECT_TRUE(conn->managed_send_io);
- EXPECT_TRUE(conn->managed_recv_io);
EXPECT_FALSE(conn->ktls_recv_enabled);
EXPECT_FALSE(conn->ktls_send_enabled);
I think this part isn't needed anymore.
int main(int argc, char **argv)
DEFER_CLEANUP(struct s2n_test_io_pair io_pair = { 0 }, s2n_io_pair_close);
EXPECT_SUCCESS(s2n_io_pair_init_non_blocking(&io_pair));
EXPECT_SUCCESS(s2n_connection_set_io_pair(conn, &io_pair));
EXPECT_FALSE(conn->ktls_recv_enabled);
EXPECT_FALSE(conn->ktls_send_enabled); |
codereview_new_cpp_data_6300 | static S2N_RESULT s2n_verify_host_information_san(struct s2n_connection *conn, X
int n = sk_GENERAL_NAME_num(names_list);
RESULT_ENSURE(n > 0, S2N_ERR_CERT_UNTRUSTED);
for (int i = 0; i < n; i++) {
GENERAL_NAME *current_name = sk_GENERAL_NAME_value(names_list, i);
/* return success on the first entry that passes verification */
- s2n_result result = s2n_verify_host_information_san_entry(conn, current_name, san_found);
if (s2n_result_is_ok(result)) {
return S2N_RESULT_OK;
}
}
/* if an error was set by one of the entries, then just propagate the error from the last SAN entry call */
- RESULT_DEBUG_ENSURE(s2n_errno, S2N_ERR_SAFETY); /* make sure we actually set an error on the last call */
- return S2N_RESULT_ERROR;
}
static S2N_RESULT s2n_verify_host_information_common_name(struct s2n_connection *conn, X509 *public_cert, bool *cn_found)
I'm not a fan of making any decisions on s2n_errno not immediately preceded by verifying that an error happened.
At this point, are we sure that the last call to s2n_verify_host_information_san_entry failed?
* If it could have passed (but I don't think this is possible?) then we could be propagating an s2n_errno set elsewhere and unrelated to our code.
* If it must have failed, then can we increase the scope of result and do RESULT_GUARD(result)? That way it's very clear that we're only taking this action for a known failure.
static S2N_RESULT s2n_verify_host_information_san(struct s2n_connection *conn, X
int n = sk_GENERAL_NAME_num(names_list);
RESULT_ENSURE(n > 0, S2N_ERR_CERT_UNTRUSTED);
+ s2n_result result = S2N_RESULT_OK;
for (int i = 0; i < n; i++) {
GENERAL_NAME *current_name = sk_GENERAL_NAME_value(names_list, i);
/* return success on the first entry that passes verification */
+ result = s2n_verify_host_information_san_entry(conn, current_name, san_found);
if (s2n_result_is_ok(result)) {
return S2N_RESULT_OK;
}
}
/* if an error was set by one of the entries, then just propagate the error from the last SAN entry call */
+ RESULT_GUARD(result);
+
+ RESULT_BAIL(S2N_ERR_CERT_UNTRUSTED);
}
static S2N_RESULT s2n_verify_host_information_common_name(struct s2n_connection *conn, X509 *public_cert, bool *cn_found) |
codereview_new_cpp_data_6301 | int s2n_fuzz_test(const uint8_t *buf, size_t len)
POSIX_GUARD(s2n_connection_set_blinding(server_conn, S2N_SELF_SERVICE_BLINDING));
uint8_t test_data[] = "test psk identity";
- struct s2n_blob test_blob = { 0 };
POSIX_GUARD(s2n_blob_init(&test_blob, test_data, sizeof(test_data)));
/* Ignore the result of this function */
nit
```suggestion
struct s2n_blob test_blob = { 0 };
```
int s2n_fuzz_test(const uint8_t *buf, size_t len)
POSIX_GUARD(s2n_connection_set_blinding(server_conn, S2N_SELF_SERVICE_BLINDING));
uint8_t test_data[] = "test psk identity";
+ struct s2n_blob test_blob = { 0 };
POSIX_GUARD(s2n_blob_init(&test_blob, test_data, sizeof(test_data)));
/* Ignore the result of this function */ |
codereview_new_cpp_data_6302 | int main(int argc, char *const *argv)
GUARD_EXIT(renegotiate(conn, sockfd, reneg_ctx.wait), "Renegotiation failed");
}
- wait_for_shutdown(conn, sockfd);
GUARD_EXIT(s2n_connection_free(conn), "Error freeing connection");
Since `wait_for_event` can return an error, should this `wait_for_shutdown` have a GUARD_EXIT? Or should it still continue here if that happens?
int main(int argc, char *const *argv)
GUARD_EXIT(renegotiate(conn, sockfd, reneg_ctx.wait), "Renegotiation failed");
}
+ GUARD_EXIT(wait_for_shutdown(conn, sockfd), "Error closing connection");
GUARD_EXIT(s2n_connection_free(conn), "Error freeing connection");
|
codereview_new_cpp_data_6303 | int s2n_server_key_recv(struct s2n_connection *conn)
struct s2n_blob signature = { 0 };
POSIX_GUARD(s2n_blob_init(&signature, s2n_stuffer_raw_read(in, signature_length), signature_length));
POSIX_ENSURE_REF(signature.data);
POSIX_ENSURE_GT(signature_length, 0);
If you're not altering this file I wouldn't take out this newline.
int s2n_server_key_recv(struct s2n_connection *conn)
struct s2n_blob signature = { 0 };
POSIX_GUARD(s2n_blob_init(&signature, s2n_stuffer_raw_read(in, signature_length), signature_length));
+
POSIX_ENSURE_REF(signature.data);
POSIX_ENSURE_GT(signature_length, 0);
|
codereview_new_cpp_data_6304 | void usage()
fprintf(stderr, " Display this message and quit.\n");
fprintf(stderr, " --buffered-send <buffer size>\n");
fprintf(stderr, " Set s2n_send to buffer up to <buffer size> bytes before sending records over the wire.\n");
- fprintf(stderr, " -X, --max-conns \n");
fprintf(stderr, " Sets the max number of connections s2nd will accept.\n");
/* clang-format on */
exit(1);
It might be helpful to show that this flag takes an argument:
```suggestion
fprintf(stderr, " -X, --max-conns <max connections>\n");
```
void usage()
fprintf(stderr, " Display this message and quit.\n");
fprintf(stderr, " --buffered-send <buffer size>\n");
fprintf(stderr, " Set s2n_send to buffer up to <buffer size> bytes before sending records over the wire.\n");
+ fprintf(stderr, " -X, --max-conns <max connections>\n");
fprintf(stderr, " Sets the max number of connections s2nd will accept.\n");
/* clang-format on */
exit(1); |
codereview_new_cpp_data_6305 | void usage()
fprintf(stderr, " --buffered-send <buffer size>\n");
fprintf(stderr, " Set s2n_send to buffer up to <buffer size> bytes before sending records over the wire.\n");
fprintf(stderr, " -X, --max-conns <max connections>\n");
- fprintf(stderr, " Sets the max number of connections s2nd will accept.\n");
/* clang-format on */
exit(1);
}
People might assume it's max simultaneous connections.
```suggestion
fprintf(stderr, " -X, --max-conns <max connections>\n");
fprintf(stderr, " Sets the max number of connections s2nd will accept before shutting down.\n");
```
void usage()
fprintf(stderr, " --buffered-send <buffer size>\n");
fprintf(stderr, " Set s2n_send to buffer up to <buffer size> bytes before sending records over the wire.\n");
fprintf(stderr, " -X, --max-conns <max connections>\n");
+ fprintf(stderr, " Sets the max number of connections s2nd will accept before shutting down.\n");
/* clang-format on */
exit(1);
} |
codereview_new_cpp_data_6306 |
S2N_RESULT s2n_handshake_transcript_update(struct s2n_connection *conn)
{
struct s2n_stuffer message = conn->handshake.io;
RESULT_GUARD_POSIX(s2n_stuffer_reread(&message));
nit: probably should RESULT_ENSURE_REF(conn)
S2N_RESULT s2n_handshake_transcript_update(struct s2n_connection *conn)
{
+ RESULT_ENSURE_REF(conn);
+
struct s2n_stuffer message = conn->handshake.io;
RESULT_GUARD_POSIX(s2n_stuffer_reread(&message));
|
codereview_new_cpp_data_6307 | const char *s2n_connection_get_handshake_type_name(struct s2n_connection *conn)
S2N_RESULT s2n_handshake_message_send(struct s2n_connection *conn, uint8_t content_type, s2n_blocked_status *blocked)
{
struct s2n_stuffer *in = &conn->handshake.io;
uint32_t size = s2n_stuffer_data_available(in);
Nit: probably should ENSURE_REF(conn) here
const char *s2n_connection_get_handshake_type_name(struct s2n_connection *conn)
S2N_RESULT s2n_handshake_message_send(struct s2n_connection *conn, uint8_t content_type, s2n_blocked_status *blocked)
{
+ RESULT_ENSURE_REF(conn);
struct s2n_stuffer *in = &conn->handshake.io;
uint32_t size = s2n_stuffer_data_available(in); |
codereview_new_cpp_data_6308 | int s2n_queue_writer_close_alert_warning(struct s2n_connection *conn)
alert[1] = S2N_TLS_ALERT_CLOSE_NOTIFY;
struct s2n_blob out = { 0 };
- s2n_blob_init(&out, alert, sizeof(alert));
/* If there is an alert pending or we've already sent a close_notify, do nothing */
if (s2n_stuffer_data_available(&conn->writer_alert_out) || conn->close_notify_queued) {
s2n_blob_init returns a posix code. We should probably posix guard all of these calls.
int s2n_queue_writer_close_alert_warning(struct s2n_connection *conn)
alert[1] = S2N_TLS_ALERT_CLOSE_NOTIFY;
struct s2n_blob out = { 0 };
+ POSIX_GUARD(s2n_blob_init(&out, alert, sizeof(alert)));
/* If there is an alert pending or we've already sent a close_notify, do nothing */
if (s2n_stuffer_data_available(&conn->writer_alert_out) || conn->close_notify_queued) { |
codereview_new_cpp_data_6309 | static int s2n_queue_reader_alert(struct s2n_connection *conn, uint8_t level, ui
alert[1] = error_code;
struct s2n_blob out = { 0 };
- s2n_blob_init(&out, alert, sizeof(alert));
/* If there is an alert pending, do nothing */
if (s2n_stuffer_data_available(&conn->reader_alert_out)) {
A guard is missing here.
static int s2n_queue_reader_alert(struct s2n_connection *conn, uint8_t level, ui
alert[1] = error_code;
struct s2n_blob out = { 0 };
+ POSIX_GUARD(s2n_blob_init(&out, alert, sizeof(alert)));
/* If there is an alert pending, do nothing */
if (s2n_stuffer_data_available(&conn->reader_alert_out)) { |
codereview_new_cpp_data_6310 | static int s2n_signature_scheme_valid_to_accept(struct s2n_connection *conn, con
POSIX_ENSURE_LTE(conn->actual_protocol_version, scheme->maximum_protocol_version);
}
- if (conn->actual_protocol_version >= S2N_TLS13
- || conn->actual_protocol_version == S2N_UNKNOWN_PROTOCOL_VERSION) {
POSIX_ENSURE_NE(scheme->hash_alg, S2N_HASH_SHA1);
POSIX_ENSURE_NE(scheme->sig_alg, S2N_SIGNATURE_RSA);
- if (scheme->sig_alg == S2N_SIGNATURE_ECDSA) {
- POSIX_ENSURE_REF(scheme->signature_curve);
- }
}
if (conn->actual_protocol_version < S2N_TLS13) {
- POSIX_ENSURE_EQ(scheme->signature_curve, NULL);
POSIX_ENSURE_NE(scheme->sig_alg, S2N_SIGNATURE_RSA_PSS_PSS);
}
I'm not sure we need the signature_curve checks. Those have more to do with properly constructing signature schemes than with properly choosing them, so are probably fine as unit tests. I also assume we ENSURE_REF the curve before we use it later.
static int s2n_signature_scheme_valid_to_accept(struct s2n_connection *conn, con
POSIX_ENSURE_LTE(conn->actual_protocol_version, scheme->maximum_protocol_version);
}
+ POSIX_ENSURE_NE(conn->actual_protocol_version, S2N_UNKNOWN_PROTOCOL_VERSION);
+
+ if (conn->actual_protocol_version >= S2N_TLS13) {
POSIX_ENSURE_NE(scheme->hash_alg, S2N_HASH_SHA1);
POSIX_ENSURE_NE(scheme->sig_alg, S2N_SIGNATURE_RSA);
}
if (conn->actual_protocol_version < S2N_TLS13) {
POSIX_ENSURE_NE(scheme->sig_alg, S2N_SIGNATURE_RSA_PSS_PSS);
}
|
codereview_new_cpp_data_6311 | static int s2n_signature_scheme_valid_to_accept(struct s2n_connection *conn, con
if (conn->actual_protocol_version >= S2N_TLS13) {
POSIX_ENSURE_NE(scheme->hash_alg, S2N_HASH_SHA1);
POSIX_ENSURE_NE(scheme->sig_alg, S2N_SIGNATURE_RSA);
- }
-
- if (conn->actual_protocol_version < S2N_TLS13) {
POSIX_ENSURE_NE(scheme->sig_alg, S2N_SIGNATURE_RSA_PSS_PSS);
}
Can't this now just be an else?
static int s2n_signature_scheme_valid_to_accept(struct s2n_connection *conn, con
if (conn->actual_protocol_version >= S2N_TLS13) {
POSIX_ENSURE_NE(scheme->hash_alg, S2N_HASH_SHA1);
POSIX_ENSURE_NE(scheme->sig_alg, S2N_SIGNATURE_RSA);
+ } else {
POSIX_ENSURE_NE(scheme->sig_alg, S2N_SIGNATURE_RSA_PSS_PSS);
}
|
codereview_new_cpp_data_6312 | static int s2n_signature_scheme_valid_to_accept(struct s2n_connection *conn, con
}
POSIX_ENSURE_NE(conn->actual_protocol_version, S2N_UNKNOWN_PROTOCOL_VERSION);
-
if (conn->actual_protocol_version >= S2N_TLS13) {
POSIX_ENSURE_NE(scheme->hash_alg, S2N_HASH_SHA1);
POSIX_ENSURE_NE(scheme->sig_alg, S2N_SIGNATURE_RSA);
Nit, but it'll be clearer that this ENSURE_NE is for these checks if you group them together. Otherwise, you'd probably want a comment on the ENSURE, as suggested by you feeling the need to leave a PR comment :) But with them grouped together, I don't think the comment is necessary.
```suggestion
POSIX_ENSURE_NE(conn->actual_protocol_version, S2N_UNKNOWN_PROTOCOL_VERSION);
if (conn->actual_protocol_version >= S2N_TLS13) {
```
static int s2n_signature_scheme_valid_to_accept(struct s2n_connection *conn, con
}
POSIX_ENSURE_NE(conn->actual_protocol_version, S2N_UNKNOWN_PROTOCOL_VERSION);
if (conn->actual_protocol_version >= S2N_TLS13) {
POSIX_ENSURE_NE(scheme->hash_alg, S2N_HASH_SHA1);
POSIX_ENSURE_NE(scheme->sig_alg, S2N_SIGNATURE_RSA); |
codereview_new_cpp_data_6313 | struct s2n_cipher_suite *cipher_suites_cloudfront_tls_1_2_2017[] = {
&s2n_ecdhe_rsa_with_chacha20_poly1305_sha256,
&s2n_ecdhe_ecdsa_with_aes_256_cbc_sha384,
&s2n_ecdhe_rsa_with_aes_256_cbc_sha384,
&s2n_rsa_with_aes_128_gcm_sha256,
&s2n_rsa_with_aes_256_gcm_sha384,
- &s2n_rsa_with_aes_128_cbc_sha256,
- &s2n_ecdhe_rsa_with_aes_256_cbc_sha,
- &s2n_ecdhe_ecdsa_with_aes_256_cbc_sha
};
const struct s2n_cipher_preferences cipher_preferences_cloudfront_tls_1_2_2017 = {
These 2 need to go before `s2n_rsa_with_aes_128_gcm_sha256`, as they provide better security and should be preferred over non-ecdhe ciphers.
struct s2n_cipher_suite *cipher_suites_cloudfront_tls_1_2_2017[] = {
&s2n_ecdhe_rsa_with_chacha20_poly1305_sha256,
&s2n_ecdhe_ecdsa_with_aes_256_cbc_sha384,
&s2n_ecdhe_rsa_with_aes_256_cbc_sha384,
+ &s2n_ecdhe_rsa_with_aes_256_cbc_sha,
+ &s2n_ecdhe_ecdsa_with_aes_256_cbc_sha,
&s2n_rsa_with_aes_128_gcm_sha256,
&s2n_rsa_with_aes_256_gcm_sha384,
+ &s2n_rsa_with_aes_128_cbc_sha256
};
const struct s2n_cipher_preferences cipher_preferences_cloudfront_tls_1_2_2017 = { |
codereview_new_cpp_data_6314 | struct s2n_cipher_suite *cipher_suites_cloudfront_tls_1_2_2017[] = {
&s2n_ecdhe_rsa_with_chacha20_poly1305_sha256,
&s2n_ecdhe_ecdsa_with_aes_256_cbc_sha384,
&s2n_ecdhe_rsa_with_aes_256_cbc_sha384,
- &s2n_ecdhe_rsa_with_aes_256_cbc_sha,
&s2n_ecdhe_ecdsa_with_aes_256_cbc_sha,
&s2n_rsa_with_aes_128_gcm_sha256,
&s2n_rsa_with_aes_256_gcm_sha384,
&s2n_rsa_with_aes_128_cbc_sha256
really minor, but everything before orders ECDSA before RSA variant, this one inverses it. In our case it does not matter as we only support 1 cert type, but eventually when we provide dual support, we'd prefer ECDSA to be picked over RSA.
struct s2n_cipher_suite *cipher_suites_cloudfront_tls_1_2_2017[] = {
&s2n_ecdhe_rsa_with_chacha20_poly1305_sha256,
&s2n_ecdhe_ecdsa_with_aes_256_cbc_sha384,
&s2n_ecdhe_rsa_with_aes_256_cbc_sha384,
&s2n_ecdhe_ecdsa_with_aes_256_cbc_sha,
+ &s2n_ecdhe_rsa_with_aes_256_cbc_sha,
&s2n_rsa_with_aes_128_gcm_sha256,
&s2n_rsa_with_aes_256_gcm_sha384,
&s2n_rsa_with_aes_128_cbc_sha256 |
codereview_new_cpp_data_6324 | int main(int argc, char **argv)
EXPECT_EQUAL(record_type, 0x16);
/** test: padding without record type should fail
*= https://tools.ietf.org/rfc/rfc8446#section-5.4
*= type=test
*# If a receiving implementation does not
Nit, but might help readability
```suggestion
/** test: padding without record type should fail
*
*= https://tools.ietf.org/rfc/rfc8446#section-5.4
*= type=test
```
Same for the other compliance comments
int main(int argc, char **argv)
EXPECT_EQUAL(record_type, 0x16);
/** test: padding without record type should fail
+ *
*= https://tools.ietf.org/rfc/rfc8446#section-5.4
*= type=test
*# If a receiving implementation does not |
codereview_new_cpp_data_6509 | void P002_data_struct::setTwoPointCalibration(
/*****************************************************
- * plugin_write
****************************************************/
bool P002_data_struct::plugin_set_config(struct EventStruct *event,
String & string) {
This comment doesn't match the method.
void P002_data_struct::setTwoPointCalibration(
/*****************************************************
+ * plugin_set_config
****************************************************/
bool P002_data_struct::plugin_set_config(struct EventStruct *event,
String & string) { |
codereview_new_cpp_data_6510 | int16_t P025_data_struct::read() const {
return (int16_t)readConversionRegister025();
}
-uint8_t P025_data_struct::getMux() const
-{
- return mux;
-}
-
uint16_t P025_data_struct::readConversionRegister025() const {
bool is_ok = false;
const uint16_t wConversionRegister = I2C_read16_reg(i2cAddress, P025_CONVERSION_REGISTER, &is_ok);
In an earlier comment I suggested to remove this access method, as it is only used once in a log statement, where the actual setting could have also been used. It would scrape off a few bytes from the bin size 😉
int16_t P025_data_struct::read() const {
return (int16_t)readConversionRegister025();
}
uint16_t P025_data_struct::readConversionRegister025() const {
bool is_ok = false;
const uint16_t wConversionRegister = I2C_read16_reg(i2cAddress, P025_CONVERSION_REGISTER, &is_ok); |
codereview_new_cpp_data_6511 | bool do_process_c009_delay_queue(int controller_number, const Queue_element_base
jsonString += F("\"ESP\":{");
{
// Create nested objects in "ESP":
- jsonString += to_json_object_value(F("name"), Settings.getUnitname());
jsonString += ',';
jsonString += to_json_object_value(F("unit"), String(Settings.Unit));
jsonString += ',';
Not sure what side-effects this may have, since the unit nr is also present.
bool do_process_c009_delay_queue(int controller_number, const Queue_element_base
jsonString += F("\"ESP\":{");
{
// Create nested objects in "ESP":
+ jsonString += to_json_object_value(F("name"), Settings.getName());
jsonString += ',';
jsonString += to_json_object_value(F("unit"), String(Settings.Unit));
jsonString += ','; |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.