id stringlengths 27 29 | content stringlengths 226 3.24k |
|---|---|
codereview_new_cpp_data_12008 | TEST_F(ModelFixture, 4678_SurfaceGlassUFactorSqlError) {
ASSERT_TRUE(surface->uFactor());
double uFactor = surface->uFactor().get();
- EXPECT_TRUE(openstudio::equal(0.310, uFactor, 1.0E-3));
ASSERT_TRUE(surface->thermalConductance());
double thermalConductance = surface->thermalConductance().get();
- EXPECT_TRUE(openstudio::equal(0.325, thermalConductance, 1.0E-3));
}
```suggestion
EXPECT_DOUBLE_EQ(0.310, uFactor);
ASSERT_TRUE(surface->thermalConductance());
double thermalConductance = surface->thermalConductance().get();
EXPECT_DOUBLE_EQ(0.325, thermalConductance);
```
TEST_F(ModelFixture, 4678_SurfaceGlassUFactorSqlError) {
ASSERT_TRUE(surface->uFactor());
double uFactor = surface->uFactor().get();
+ EXPECT_NEAR(0.310, uFactor, 1E-03);
ASSERT_TRUE(surface->thermalConductance());
double thermalConductance = surface->thermalConductance().get();
+ EXPECT_NEAR(0.325, thermalConductance, 1E-03);
} |
codereview_new_cpp_data_12009 | namespace model {
OptionalDouble()},
{"SurfacePropertySurroundingSurfaces", "Surrounding Surface Temperature Schedule", "surroundingSurfaceTemperatureSchedule", true, "Temperature",
OptionalDouble(), OptionalDouble()},
- {"SurfacePropertyGroundSurfaces", "Ground Surface Temperature Schedule", "groundSurfaceTemperatureSchedule", true, "Temperature",
- OptionalDouble(), OptionalDouble()},
- {"SurfacePropertyGroundSurfaces", "Ground Surface Reflectance Schedule", "groundSurfaceReflectanceSchedule", true, "", 0.0, 1.0},
{"SurfacePropertyIncidentSolarMultiplier", "Incident Solar Multiplier", "incidentSolarMultiplierSchedule", true, "", 0.0, 1.0},
{"SurfacePropertyLocalEnvironment", "External Shading Fraction", "externalShadingFractionSchedule", true, "", 0.0, 1.0},
{"SwimmingPoolIndoor", "Activity Factor", "activityFactorSchedule", true, "", 0.0, OptionalDouble()}, // can exceed 1.0, for wave pools for eg
"Incident Solar Multiplier Schedule"?
namespace model {
OptionalDouble()},
{"SurfacePropertySurroundingSurfaces", "Surrounding Surface Temperature Schedule", "surroundingSurfaceTemperatureSchedule", true, "Temperature",
OptionalDouble(), OptionalDouble()},
+ {"SurfacePropertyGroundSurfaces", "Ground Surface Temperature", "groundSurfaceTemperatureSchedule", true, "Temperature", OptionalDouble(),
+ OptionalDouble()},
+ {"SurfacePropertyGroundSurfaces", "Ground Surface Reflectance", "groundSurfaceReflectanceSchedule", true, "", 0.0, 1.0},
{"SurfacePropertyIncidentSolarMultiplier", "Incident Solar Multiplier", "incidentSolarMultiplierSchedule", true, "", 0.0, 1.0},
{"SurfacePropertyLocalEnvironment", "External Shading Fraction", "externalShadingFractionSchedule", true, "", 0.0, 1.0},
{"SwimmingPoolIndoor", "Activity Factor", "activityFactorSchedule", true, "", 0.0, OptionalDouble()}, // can exceed 1.0, for wave pools for eg |
codereview_new_cpp_data_12010 | namespace model {
OptionalDouble()},
{"SurfacePropertySurroundingSurfaces", "Surrounding Surface Temperature Schedule", "surroundingSurfaceTemperatureSchedule", true, "Temperature",
OptionalDouble(), OptionalDouble()},
- {"SurfacePropertyGroundSurfaces", "Ground Surface Temperature Schedule", "groundSurfaceTemperatureSchedule", true, "Temperature",
- OptionalDouble(), OptionalDouble()},
- {"SurfacePropertyGroundSurfaces", "Ground Surface Reflectance Schedule", "groundSurfaceReflectanceSchedule", true, "", 0.0, 1.0},
{"SurfacePropertyIncidentSolarMultiplier", "Incident Solar Multiplier", "incidentSolarMultiplierSchedule", true, "", 0.0, 1.0},
{"SurfacePropertyLocalEnvironment", "External Shading Fraction", "externalShadingFractionSchedule", true, "", 0.0, 1.0},
{"SwimmingPoolIndoor", "Activity Factor", "activityFactorSchedule", true, "", 0.0, OptionalDouble()}, // can exceed 1.0, for wave pools for eg
This one was just missing from before? Nice catch.
namespace model {
OptionalDouble()},
{"SurfacePropertySurroundingSurfaces", "Surrounding Surface Temperature Schedule", "surroundingSurfaceTemperatureSchedule", true, "Temperature",
OptionalDouble(), OptionalDouble()},
+ {"SurfacePropertyGroundSurfaces", "Ground Surface Temperature", "groundSurfaceTemperatureSchedule", true, "Temperature", OptionalDouble(),
+ OptionalDouble()},
+ {"SurfacePropertyGroundSurfaces", "Ground Surface Reflectance", "groundSurfaceReflectanceSchedule", true, "", 0.0, 1.0},
{"SurfacePropertyIncidentSolarMultiplier", "Incident Solar Multiplier", "incidentSolarMultiplierSchedule", true, "", 0.0, 1.0},
{"SurfacePropertyLocalEnvironment", "External Shading Fraction", "externalShadingFractionSchedule", true, "", 0.0, 1.0},
{"SwimmingPoolIndoor", "Activity Factor", "activityFactorSchedule", true, "", 0.0, OptionalDouble()}, // can exceed 1.0, for wave pools for eg |
codereview_new_cpp_data_12011 | namespace energyplus {
}
// Rated Cooling Coefficient of Performance
- if ((value = modelObject.ratedCoolingCoefficientofPerformance())) {
- idfObject.setDouble(Coil_Cooling_WaterToAirHeatPump_EquationFitFields::GrossRatedCoolingCOP, *value);
- }
// Rated Entering Water Temperature
- if ((value = modelObject.ratedEnteringWaterTemperature())) {
- idfObject.setDouble(Coil_Cooling_WaterToAirHeatPump_EquationFitFields::RatedEnteringWaterTemperature, *value);
- }
// Rated Entering Air Dry-Bulb Temperature
- if ((value = modelObject.ratedEnteringAirDryBulbTemperature())) {
- idfObject.setDouble(Coil_Cooling_WaterToAirHeatPump_EquationFitFields::RatedEnteringAirDryBulbTemperature, *value);
- }
// Rated Entering Air Wet-Bulb Temperature
- if ((value = modelObject.ratedEnteringAirWetBulbTemperature())) {
- idfObject.setDouble(Coil_Cooling_WaterToAirHeatPump_EquationFitFields::RatedEnteringAirWetBulbTemperature, *value);
- }
// Total Cooling Capacity Curve Name
{
I know you're following the file, but the optional isn't necessary at all. I would take the opportunity to clean up
```suggestion
// Rated Entering Water Temperature
idfObject.setDouble(Coil_Cooling_WaterToAirHeatPump_EquationFitFields::RatedEnteringWaterTemperature, modelObject.ratedEnteringWaterTemperature());
```
namespace energyplus {
}
// Rated Cooling Coefficient of Performance
+ idfObject.setDouble(Coil_Cooling_WaterToAirHeatPump_EquationFitFields::GrossRatedCoolingCOP, modelObject.ratedCoolingCoefficientofPerformance());
// Rated Entering Water Temperature
+ idfObject.setDouble(Coil_Cooling_WaterToAirHeatPump_EquationFitFields::RatedEnteringWaterTemperature,
+ modelObject.ratedEnteringWaterTemperature());
// Rated Entering Air Dry-Bulb Temperature
+ idfObject.setDouble(Coil_Cooling_WaterToAirHeatPump_EquationFitFields::RatedEnteringAirDryBulbTemperature,
+ modelObject.ratedEnteringAirDryBulbTemperature());
// Rated Entering Air Wet-Bulb Temperature
+ idfObject.setDouble(Coil_Cooling_WaterToAirHeatPump_EquationFitFields::RatedEnteringAirWetBulbTemperature,
+ modelObject.ratedEnteringAirWetBulbTemperature());
// Total Cooling Capacity Curve Name
{ |
codereview_new_cpp_data_12012 | namespace model {
}
double CoilHeatingDXSingleSpeed::ratedSupplyFanPowerPerVolumeFlowRate() const {
return getImpl<detail::CoilHeatingDXSingleSpeed_Impl>()->ratedSupplyFanPowerPerVolumeFlowRate2017();
}
bool CoilHeatingDXSingleSpeed::setRatedSupplyFanPowerPerVolumeFlowRate(double ratedSupplyFanPowerPerVolumeFlowRate) {
return getImpl<detail::CoilHeatingDXSingleSpeed_Impl>()->setRatedSupplyFanPowerPerVolumeFlowRate2017(ratedSupplyFanPowerPerVolumeFlowRate);
}
I would add a Warning message here (and on aall deprecated methods)
```
LOG(Warn, "As of 3.5.0, this method is deprecated. Use CoilHeatingDXSingleSpeed::ratedSupplyFanPowerPerVolumeFlowRate2017() instead. It will be removed in three releases.")
```
or something like that. We also want to mention the deprecation in the Release Notes
namespace model {
}
double CoilHeatingDXSingleSpeed::ratedSupplyFanPowerPerVolumeFlowRate() const {
+ LOG(Warn, "As of 3.5.0, ratedSupplyFanPowerPerVolumeFlowRate is deprecated. Use ratedSupplyFanPowerPerVolumeFlowRate2017 instead. It will be removed within three releases.");
return getImpl<detail::CoilHeatingDXSingleSpeed_Impl>()->ratedSupplyFanPowerPerVolumeFlowRate2017();
}
bool CoilHeatingDXSingleSpeed::setRatedSupplyFanPowerPerVolumeFlowRate(double ratedSupplyFanPowerPerVolumeFlowRate) {
+ LOG(Warn, "As of 3.5.0, setRatedSupplyFanPowerPerVolumeFlowRate is deprecated. Use setRatedSupplyFanPowerPerVolumeFlowRate2017 instead. It will be removed within three releases.");
return getImpl<detail::CoilHeatingDXSingleSpeed_Impl>()->setRatedSupplyFanPowerPerVolumeFlowRate2017(ratedSupplyFanPowerPerVolumeFlowRate);
}
|
codereview_new_cpp_data_12013 | namespace energyplus {
TableLookup tableLookup(model);
// Name
- if (s = tableMulti.nameString()) {
- tableLookup.setName(*s);
- }
for (WorkspaceObject source : tableMulti.sources()) {
for (unsigned index : source.getSourceIndices(tableMulti.handle())) {
```suggestion
// Name
tableLookup.setName(tableMulti.nameString());
```
namespace energyplus {
TableLookup tableLookup(model);
// Name
+ tableLookup.setName(tableMulti.nameString());
for (WorkspaceObject source : tableMulti.sources()) {
for (unsigned index : source.getSourceIndices(tableMulti.handle())) { |
codereview_new_cpp_data_12014 | namespace openstudio {
namespace energyplus {
boost::optional<IdfObject> ForwardTranslator::translateOutputControlTableStyle(OutputControlTableStyle& modelObject) {
- IdfObject idfObject(openstudio::IddObjectType::OutputControl_Table_Style);
- m_idfObjects.push_back(idfObject);
- OptionalString s = modelObject.name();
- if (s) {
- idfObject.setName(*s);
- }
- std::string columnSeparator = modelObject.columnSeparator();
- idfObject.setString(OutputControl_Table_StyleFields::ColumnSeparator, columnSeparator);
- std::string unitConversion = modelObject.unitConversion();
- idfObject.setString(OutputControl_Table_StyleFields::UnitConversion, unitConversion);
-
- return boost::optional<IdfObject>(idfObject);
}
} // namespace energyplus
object does not have a name.
namespace openstudio {
namespace energyplus {
boost::optional<IdfObject> ForwardTranslator::translateOutputControlTableStyle(OutputControlTableStyle& modelObject) {
+ IdfObject idfObject = createAndRegisterIdfObject(openstudio::IddObjectType::OutputControl_Table_Style);
+ idfObject.setString(OutputControl_Table_StyleFields::ColumnSeparator, modelObject.columnSeparator());
+ idfObject.setString(OutputControl_Table_StyleFields::UnitConversion, modelObject.unitConversion());
+ return idfObject;
}
} // namespace energyplus |
codereview_new_cpp_data_12015 | namespace openstudio {
namespace energyplus {
boost::optional<IdfObject> ForwardTranslator::translateOutputControlTableStyle(OutputControlTableStyle& modelObject) {
- IdfObject idfObject(openstudio::IddObjectType::OutputControl_Table_Style);
- m_idfObjects.push_back(idfObject);
- OptionalString s = modelObject.name();
- if (s) {
- idfObject.setName(*s);
- }
- std::string columnSeparator = modelObject.columnSeparator();
- idfObject.setString(OutputControl_Table_StyleFields::ColumnSeparator, columnSeparator);
- std::string unitConversion = modelObject.unitConversion();
- idfObject.setString(OutputControl_Table_StyleFields::UnitConversion, unitConversion);
-
- return boost::optional<IdfObject>(idfObject);
}
} // namespace energyplus
Use createAndRegisterIdfObject (or an emplace_back)
namespace openstudio {
namespace energyplus {
boost::optional<IdfObject> ForwardTranslator::translateOutputControlTableStyle(OutputControlTableStyle& modelObject) {
+ IdfObject idfObject = createAndRegisterIdfObject(openstudio::IddObjectType::OutputControl_Table_Style);
+ idfObject.setString(OutputControl_Table_StyleFields::ColumnSeparator, modelObject.columnSeparator());
+ idfObject.setString(OutputControl_Table_StyleFields::UnitConversion, modelObject.unitConversion());
+ return idfObject;
}
} // namespace energyplus |
codereview_new_cpp_data_12016 | namespace openstudio {
namespace energyplus {
boost::optional<IdfObject> ForwardTranslator::translateOutputControlTableStyle(OutputControlTableStyle& modelObject) {
- IdfObject idfObject(openstudio::IddObjectType::OutputControl_Table_Style);
- m_idfObjects.push_back(idfObject);
- OptionalString s = modelObject.name();
- if (s) {
- idfObject.setName(*s);
- }
- std::string columnSeparator = modelObject.columnSeparator();
- idfObject.setString(OutputControl_Table_StyleFields::ColumnSeparator, columnSeparator);
- std::string unitConversion = modelObject.unitConversion();
- idfObject.setString(OutputControl_Table_StyleFields::UnitConversion, unitConversion);
-
- return boost::optional<IdfObject>(idfObject);
}
} // namespace energyplus
```suggestion
idfObject.setString(OutputControl_Table_StyleFields::ColumnSeparator, modelObject.columnSeparator());
```
namespace openstudio {
namespace energyplus {
boost::optional<IdfObject> ForwardTranslator::translateOutputControlTableStyle(OutputControlTableStyle& modelObject) {
+ IdfObject idfObject = createAndRegisterIdfObject(openstudio::IddObjectType::OutputControl_Table_Style);
+ idfObject.setString(OutputControl_Table_StyleFields::ColumnSeparator, modelObject.columnSeparator());
+ idfObject.setString(OutputControl_Table_StyleFields::UnitConversion, modelObject.unitConversion());
+ return idfObject;
}
} // namespace energyplus |
codereview_new_cpp_data_12017 | namespace openstudio {
namespace energyplus {
boost::optional<IdfObject> ForwardTranslator::translateOutputControlTableStyle(OutputControlTableStyle& modelObject) {
- IdfObject idfObject(openstudio::IddObjectType::OutputControl_Table_Style);
- m_idfObjects.push_back(idfObject);
- OptionalString s = modelObject.name();
- if (s) {
- idfObject.setName(*s);
- }
- std::string columnSeparator = modelObject.columnSeparator();
- idfObject.setString(OutputControl_Table_StyleFields::ColumnSeparator, columnSeparator);
- std::string unitConversion = modelObject.unitConversion();
- idfObject.setString(OutputControl_Table_StyleFields::UnitConversion, unitConversion);
-
- return boost::optional<IdfObject>(idfObject);
}
} // namespace energyplus
```suggestion
idfObject.setString(OutputControl_Table_StyleFields::UnitConversion, modelObject.unitConversion());
return idfObject;
```
namespace openstudio {
namespace energyplus {
boost::optional<IdfObject> ForwardTranslator::translateOutputControlTableStyle(OutputControlTableStyle& modelObject) {
+ IdfObject idfObject = createAndRegisterIdfObject(openstudio::IddObjectType::OutputControl_Table_Style);
+ idfObject.setString(OutputControl_Table_StyleFields::ColumnSeparator, modelObject.columnSeparator());
+ idfObject.setString(OutputControl_Table_StyleFields::UnitConversion, modelObject.unitConversion());
+ return idfObject;
}
} // namespace energyplus |
codereview_new_cpp_data_12018 | namespace energyplus {
boost::optional<IdfObject> ForwardTranslator::translateOutputSQLite(model::OutputSQLite& modelObject) {
- IdfObject idfObject(openstudio::IddObjectType::Output_SQLite);
- m_idfObjects.push_back(idfObject);
- std::string optionType = modelObject.optionType();
- idfObject.setString(Output_SQLiteFields::OptionType, optionType);
- std::string unitConversionforTabularData = modelObject.unitConversionforTabularData();
- idfObject.setString(Output_SQLiteFields::UnitConversionforTabularData, unitConversionforTabularData);
return idfObject;
} // End of translate function
same suggestions as above
namespace energyplus {
boost::optional<IdfObject> ForwardTranslator::translateOutputSQLite(model::OutputSQLite& modelObject) {
+ IdfObject idfObject = createAndRegisterIdfObject(openstudio::IddObjectType::Output_SQLite);
+ idfObject.setString(Output_SQLiteFields::OptionType, modelObject.optionType());
+ idfObject.setString(Output_SQLiteFields::UnitConversionforTabularData, modelObject.unitConversionforTabularData());
return idfObject;
} // End of translate function |
codereview_new_cpp_data_12019 | namespace model {
/// @cond
OutputSQLite::OutputSQLite(std::shared_ptr<detail::OutputSQLite_Impl> impl) : ModelObject(impl) {}
OutputSQLite::OutputSQLite(Model& model) : ModelObject(OutputSQLite::iddObjectType(), model) {
- setOptionType("SimpleAndTabular");
- setUnitConversionforTabularData("UseOutputControlTableStyle");
}
/// @endcond
:+1: for the ctor.
But nitpick: move it right after `} // namespace detail`. the `/// @cond` is there to tell doxygen to ignore that block, so that's relevant for the shared_ptr constructor, but not for this one.
(I'm not sure it has any effect in the .cpp file to be honest, but it's more "correct" to have it outside of the cond, and I typically go look for the model ctors at the start of the non `detail` namespace.)
namespace model {
/// @cond
OutputSQLite::OutputSQLite(std::shared_ptr<detail::OutputSQLite_Impl> impl) : ModelObject(impl) {}
OutputSQLite::OutputSQLite(Model& model) : ModelObject(OutputSQLite::iddObjectType(), model) {
+ bool ok = setOptionType("SimpleAndTabular");
+ OS_ASSERT(ok);
+ ok = setUnitConversionforTabularData("UseOutputControlTableStyle");
+ OS_ASSERT(ok);
}
/// @endcond |
codereview_new_cpp_data_12020 |
#include "../core/Assert.hpp"
#include "../core/Json.hpp"
-#include "../geometry/point3d.hpp"
-#include "../geometry/vector3d.hpp"
#include "../geometry/Geometry.hpp"
-#include "../geometry/intersection.hpp"
#include <iomanip>
Ubuntu (and optionally Mac) are using case-sensitive filesystems.
```suggestion
#include "../geometry/Point3d.hpp"
#include "../geometry/Vector3d.hpp"
#include "../geometry/Geometry.hpp"
#include "../geometry/Intersection.hpp"
```
#include "../core/Assert.hpp"
#include "../core/Json.hpp"
+#include "../geometry/Point3d.hpp"
+#include "../geometry/Vector3d.hpp"
#include "../geometry/Geometry.hpp"
+#include "../geometry/Intersection.hpp"
#include <iomanip>
|
codereview_new_cpp_data_12021 | void CompareTwoModels(Model& model, Model& baseline) {
// Compare stories
// Match by name,compare properties
// Compare shading count
- double tol = 0.001; // Same tolerance as defaulyts for circularEqual
auto thermalZones1 = model.getModelObjects<ThermalZone>();
auto thermalZones2 = baseline.getModelObjects<ThermalZone>();
EXPECT_EQ(thermalZones1.size(), thermalZones2.size());
```suggestion
double tol = 0.001; // Same tolerance as defaults for circularEqual
```
void CompareTwoModels(Model& model, Model& baseline) {
// Compare stories
// Match by name,compare properties
// Compare shading count
+ double tol = 0.001; // Same tolerance as defaults for circularEqual
auto thermalZones1 = model.getModelObjects<ThermalZone>();
auto thermalZones2 = baseline.getModelObjects<ThermalZone>();
EXPECT_EQ(thermalZones1.size(), thermalZones2.size()); |
codereview_new_cpp_data_12022 | namespace energyplus {
idfObject.setDouble(Material_AirGapFields::ThermalResistance, modelObject.thermalResistance());
- // Call the translation of these objects, which has two advantages:
- // * will not translate them if they are orphaned (=not referencing a material), and,
- // * makes the order of these objects in the IDF deterministic
- if (boost::optional<MaterialPropertyMoisturePenetrationDepthSettings> _empd = modelObject.materialPropertyMoisturePenetrationDepthSettings()) {
- translateAndMapModelObject(_empd.get());
- }
-
- if (boost::optional<MaterialPropertyPhaseChange> _phaseChange = modelObject.materialPropertyPhaseChange()) {
- translateAndMapModelObject(_phaseChange.get());
- }
-
- if (boost::optional<MaterialPropertyPhaseChangeHysteresis> _phaseChangeHysteresis = modelObject.materialPropertyPhaseChangeHysteresis()) {
- translateAndMapModelObject(_phaseChangeHysteresis.get());
- }
-
return boost::optional<IdfObject>(idfObject);
}
is PCM / Moisture allowed for a MaterialAirGap?
namespace energyplus {
idfObject.setDouble(Material_AirGapFields::ThermalResistance, modelObject.thermalResistance());
return boost::optional<IdfObject>(idfObject);
}
|
codereview_new_cpp_data_12023 | namespace energyplus {
idfObject.setDouble(WindowMaterial_BlindFields::MaximumSlatAngle, *d);
}
- // Call the translation of these objects, which has two advantages:
- // * will not translate them if they are orphaned (=not referencing a material), and,
- // * makes the order of these objects in the IDF deterministic
- if (boost::optional<MaterialPropertyMoisturePenetrationDepthSettings> _empd = modelObject.materialPropertyMoisturePenetrationDepthSettings()) {
- translateAndMapModelObject(_empd.get());
- }
-
- if (boost::optional<MaterialPropertyPhaseChange> _phaseChange = modelObject.materialPropertyPhaseChange()) {
- translateAndMapModelObject(_phaseChange.get());
- }
-
- if (boost::optional<MaterialPropertyPhaseChangeHysteresis> _phaseChangeHysteresis = modelObject.materialPropertyPhaseChangeHysteresis()) {
- translateAndMapModelObject(_phaseChangeHysteresis.get());
- }
-
return boost::optional<IdfObject>(idfObject);
}
PCM not accepted for blind.
https://github.com/NREL/EnergyPlus/blob/0c9b8c4944b5f06edb000bf0d766f067830af0ae/src/EnergyPlus/HeatBalanceManager.cc#L3421
Neither is MoisturePenetration, which also only accepted a RegularMaterial.
https://github.com/NREL/EnergyPlus/blob/0c9b8c4944b5f06edb000bf0d766f067830af0ae/src/EnergyPlus/MoistureBalanceEMPDManager.cc#L209-L229
namespace energyplus {
idfObject.setDouble(WindowMaterial_BlindFields::MaximumSlatAngle, *d);
}
return boost::optional<IdfObject>(idfObject);
}
|
codereview_new_cpp_data_12024 | namespace energyplus {
result->setDouble(WindowMaterial_BlindFields::MaximumSlatAngle, 60);
}
- // Call the translation of these objects, which has two advantages:
- // * will not translate them if they are orphaned (=not referencing a material), and,
- // * makes the order of these objects in the IDF deterministic
- if (boost::optional<MaterialPropertyMoisturePenetrationDepthSettings> _empd = modelObject.materialPropertyMoisturePenetrationDepthSettings()) {
- translateAndMapModelObject(_empd.get());
- }
-
- if (boost::optional<MaterialPropertyPhaseChange> _phaseChange = modelObject.materialPropertyPhaseChange()) {
- translateAndMapModelObject(_phaseChange.get());
- }
-
- if (boost::optional<MaterialPropertyPhaseChangeHysteresis> _phaseChangeHysteresis = modelObject.materialPropertyPhaseChangeHysteresis()) {
- translateAndMapModelObject(_phaseChangeHysteresis.get());
- }
-
return result;
}
Same idea. I'm not going to comment on all files.
namespace energyplus {
result->setDouble(WindowMaterial_BlindFields::MaximumSlatAngle, 60);
}
return result;
}
|
codereview_new_cpp_data_12025 | namespace energyplus {
idfObject.setString(Material_RoofVegetationFields::MoistureDiffusionCalculationMethod, *s);
}
- // Call the translation of these objects, which has two advantages:
- // * will not translate them if they are orphaned (=not referencing a material), and,
- // * makes the order of these objects in the IDF deterministic
- if (boost::optional<MaterialPropertyMoisturePenetrationDepthSettings> _empd = modelObject.materialPropertyMoisturePenetrationDepthSettings()) {
- translateAndMapModelObject(_empd.get());
- }
-
- if (boost::optional<MaterialPropertyPhaseChange> _phaseChange = modelObject.materialPropertyPhaseChange()) {
- translateAndMapModelObject(_phaseChange.get());
- }
-
- if (boost::optional<MaterialPropertyPhaseChangeHysteresis> _phaseChangeHysteresis = modelObject.materialPropertyPhaseChangeHysteresis()) {
- translateAndMapModelObject(_phaseChangeHysteresis.get());
- }
-
return boost::optional<IdfObject>(idfObject);
}
Wrong group too. https://github.com/NREL/EnergyPlus/blob/0c9b8c4944b5f06edb000bf0d766f067830af0ae/src/EnergyPlus/HeatBalanceManager.cc#L3899
Only:
* Material
* Material:NoMass
(* Apparently the inner materials of the CFactor / FFactor constructions or something)
namespace energyplus {
idfObject.setString(Material_RoofVegetationFields::MoistureDiffusionCalculationMethod, *s);
}
return boost::optional<IdfObject>(idfObject);
}
|
codereview_new_cpp_data_12026 | TEST_F(ModelFixture, UniqueModelObjectCachedGetters) {
EXPECT_TRUE(m.getOptionalUniqueModelObject<RunPeriodControlDaylightSavingTime>());
EXPECT_EQ(i++, m.getModelObjects<ModelObject>().size());
- EXPECT_FALSE(m.getOptionalUniqueModelObject<YearDescription>());
EXPECT_EQ(i, m.getModelObjects<ModelObject>().size());
- YearDescription yearDescription = m.getUniqueModelObject<YearDescription>();
- EXPECT_TRUE(m.getOptionalUniqueModelObject<YearDescription>());
EXPECT_EQ(i++, m.getModelObjects<ModelObject>().size());
EXPECT_FALSE(m.getOptionalUniqueModelObject<Site>());
```suggestion
EXPECT_FALSE(m.getOptionalUniqueModelObject<model::YearDescription>());
EXPECT_EQ(i, m.getModelObjects<ModelObject>().size());
auto yearDescription = m.getUniqueModelObject<model::YearDescription>();
EXPECT_TRUE(m.getOptionalUniqueModelObject<YearDescription>());
EXPECT_EQ(i++, m.getModelObjects<ModelObject>().size());
```
TEST_F(ModelFixture, UniqueModelObjectCachedGetters) {
EXPECT_TRUE(m.getOptionalUniqueModelObject<RunPeriodControlDaylightSavingTime>());
EXPECT_EQ(i++, m.getModelObjects<ModelObject>().size());
+ EXPECT_FALSE(m.getOptionalUniqueModelObject<model::YearDescription>());
EXPECT_EQ(i, m.getModelObjects<ModelObject>().size());
+ auto yearDescription = m.getUniqueModelObject<model::YearDescription>();
+ EXPECT_TRUE(m.getOptionalUniqueModelObject<model::YearDescription>());
EXPECT_EQ(i++, m.getModelObjects<ModelObject>().size());
EXPECT_FALSE(m.getOptionalUniqueModelObject<Site>()); |
codereview_new_cpp_data_12027 | namespace model {
if (resultingObjects.empty()) {
return boost::none;
}
- OS_ASSERT(resultingObjects.size() <= component.numObjects()); // we may not be adding unique object(s) to the workspace
for (const WorkspaceObject& wo : resultingObjects) {
- LOG(Warn, wo.nameString());
OptionalComponentData ocd = wo.optionalCast<ComponentData>();
if (ocd) {
ComponentData componentDataObject = *ocd;
This is leftover from the other PR, and should not be here.
namespace model {
if (resultingObjects.empty()) {
return boost::none;
}
+ OS_ASSERT(resultingObjects.size() == component.numObjects());
for (const WorkspaceObject& wo : resultingObjects) {
OptionalComponentData ocd = wo.optionalCast<ComponentData>();
if (ocd) {
ComponentData componentDataObject = *ocd; |
codereview_new_cpp_data_12030 |
#ifdef AMREX_TINY_PROFILING
#include <AMReX_TinyProfiler.H>
#else
-struct MemStat {};
#endif
#include <utility>
Could we add `namespace amrex` here?
#ifdef AMREX_TINY_PROFILING
#include <AMReX_TinyProfiler.H>
#else
+namespace amrex {
+ struct MemStat {};
+}
#endif
#include <utility> |
codereview_new_cpp_data_12031 | MFIter::Finalize ()
#pragma omp single
#endif
m_fa->clearThisBD();
m_fa.reset(nullptr);
}
}
```suggestion
}
if (m_fa) {
m_fa.reset(nullptr);
}
```
MFIter::Finalize ()
#pragma omp single
#endif
m_fa->clearThisBD();
+ }
+ if (m_fa) {
m_fa.reset(nullptr);
}
} |
codereview_new_cpp_data_12032 | Arena::Initialize ()
// see reason on allowed reuse of the default CPU BArena in Arena::Finalize
BL_ASSERT(the_arena == nullptr || the_arena == The_BArena());
BL_ASSERT(the_async_arena == nullptr);
- BL_ASSERT(the_device_arena == nullptr || the_arena == The_BArena());
- BL_ASSERT(the_managed_arena == nullptr || the_arena == The_BArena());
BL_ASSERT(the_pinned_arena == nullptr);
- BL_ASSERT(the_cpu_arena == nullptr || the_arena == The_BArena());
#ifdef AMREX_USE_GPU
#ifdef AMREX_USE_DPCPP
```suggestion
BL_ASSERT(the_device_arena == nullptr || the_device_arena == The_BArena());
```
Arena::Initialize ()
// see reason on allowed reuse of the default CPU BArena in Arena::Finalize
BL_ASSERT(the_arena == nullptr || the_arena == The_BArena());
BL_ASSERT(the_async_arena == nullptr);
+ BL_ASSERT(the_device_arena == nullptr || the_device_arena == The_BArena());
+ BL_ASSERT(the_managed_arena == nullptr || the_managed_arena == The_BArena());
BL_ASSERT(the_pinned_arena == nullptr);
+ BL_ASSERT(the_cpu_arena == nullptr || the_cpu_arena == The_BArena());
#ifdef AMREX_USE_GPU
#ifdef AMREX_USE_DPCPP |
codereview_new_cpp_data_12033 | Arena::Initialize ()
// see reason on allowed reuse of the default CPU BArena in Arena::Finalize
BL_ASSERT(the_arena == nullptr || the_arena == The_BArena());
BL_ASSERT(the_async_arena == nullptr);
- BL_ASSERT(the_device_arena == nullptr || the_arena == The_BArena());
- BL_ASSERT(the_managed_arena == nullptr || the_arena == The_BArena());
BL_ASSERT(the_pinned_arena == nullptr);
- BL_ASSERT(the_cpu_arena == nullptr || the_arena == The_BArena());
#ifdef AMREX_USE_GPU
#ifdef AMREX_USE_DPCPP
```suggestion
BL_ASSERT(the_managed_arena == nullptr || the_managed_arena == The_BArena());
```
Arena::Initialize ()
// see reason on allowed reuse of the default CPU BArena in Arena::Finalize
BL_ASSERT(the_arena == nullptr || the_arena == The_BArena());
BL_ASSERT(the_async_arena == nullptr);
+ BL_ASSERT(the_device_arena == nullptr || the_device_arena == The_BArena());
+ BL_ASSERT(the_managed_arena == nullptr || the_managed_arena == The_BArena());
BL_ASSERT(the_pinned_arena == nullptr);
+ BL_ASSERT(the_cpu_arena == nullptr || the_cpu_arena == The_BArena());
#ifdef AMREX_USE_GPU
#ifdef AMREX_USE_DPCPP |
codereview_new_cpp_data_12034 | Arena::Initialize ()
// see reason on allowed reuse of the default CPU BArena in Arena::Finalize
BL_ASSERT(the_arena == nullptr || the_arena == The_BArena());
BL_ASSERT(the_async_arena == nullptr);
- BL_ASSERT(the_device_arena == nullptr || the_arena == The_BArena());
- BL_ASSERT(the_managed_arena == nullptr || the_arena == The_BArena());
BL_ASSERT(the_pinned_arena == nullptr);
- BL_ASSERT(the_cpu_arena == nullptr || the_arena == The_BArena());
#ifdef AMREX_USE_GPU
#ifdef AMREX_USE_DPCPP
```suggestion
BL_ASSERT(the_cpu_arena == nullptr || the_cpu_arena == The_BArena());
```
Arena::Initialize ()
// see reason on allowed reuse of the default CPU BArena in Arena::Finalize
BL_ASSERT(the_arena == nullptr || the_arena == The_BArena());
BL_ASSERT(the_async_arena == nullptr);
+ BL_ASSERT(the_device_arena == nullptr || the_device_arena == The_BArena());
+ BL_ASSERT(the_managed_arena == nullptr || the_managed_arena == The_BArena());
BL_ASSERT(the_pinned_arena == nullptr);
+ BL_ASSERT(the_cpu_arena == nullptr || the_cpu_arena == The_BArena());
#ifdef AMREX_USE_GPU
#ifdef AMREX_USE_DPCPP |
codereview_new_cpp_data_12035 | MLTensorOp::compVelGrad (int amrlev, const Array<MultiFab*,AMREX_SPACEDIM>& flux
Box const zbx = mfi.nodaltilebox(2);)
AMREX_D_TERM(Array4<Real> const fxfab = fluxes[0]->array(mfi);,
Array4<Real> const fyfab = fluxes[1]->array(mfi);,
- Array4<Real> const fzfab = fluxes[1]->array(mfi);)
// The derivatives are put in the array with the following order:
// component: 0 , 1 , 2 , 3 , 4 , 5 , 6 , 7 , 8
This should be `Array4<Real> const fzfab = fluxes[2]->array(mfi);` right
MLTensorOp::compVelGrad (int amrlev, const Array<MultiFab*,AMREX_SPACEDIM>& flux
Box const zbx = mfi.nodaltilebox(2);)
AMREX_D_TERM(Array4<Real> const fxfab = fluxes[0]->array(mfi);,
Array4<Real> const fyfab = fluxes[1]->array(mfi);,
+ Array4<Real> const fzfab = fluxes[2]->array(mfi);)
// The derivatives are put in the array with the following order:
// component: 0 , 1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 |
codereview_new_cpp_data_12036 | MLTensorOp::compVelGrad (int amrlev, const Array<MultiFab*,AMREX_SPACEDIM>& flux
Box const zbx = mfi.nodaltilebox(2);)
AMREX_D_TERM(Array4<Real> const fxfab = fluxes[0]->array(mfi);,
Array4<Real> const fyfab = fluxes[1]->array(mfi);,
- Array4<Real> const fzfab = fluxes[1]->array(mfi);)
// The derivatives are put in the array with the following order:
// component: 0 , 1 , 2 , 3 , 4 , 5 , 6 , 7 , 8
```suggestion
Array4<Real> const fzfab = fluxes[2]->array(mfi);)
```
MLTensorOp::compVelGrad (int amrlev, const Array<MultiFab*,AMREX_SPACEDIM>& flux
Box const zbx = mfi.nodaltilebox(2);)
AMREX_D_TERM(Array4<Real> const fxfab = fluxes[0]->array(mfi);,
Array4<Real> const fyfab = fluxes[1]->array(mfi);,
+ Array4<Real> const fzfab = fluxes[2]->array(mfi);)
// The derivatives are put in the array with the following order:
// component: 0 , 1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 |
codereview_new_cpp_data_12037 | BuildFromChkptFile (std::string const& fname,
int max_coarsening_level, int ngrow, bool build_coarse_level_by_coarsening,
bool a_extend_domain_face, int a_num_coarsen_opt)
{
- AMREX_ALWAYS_ASSERT_WITH_MESSAGE(a_num_coarsen_opt == 0,
- "Check point file restart doesn't work with coarse optimization");
ChkptFile chkpt_file(fname);
IndexSpace::push(new IndexSpaceChkptFile(chkpt_file,
geom, required_coarsening_level,
I think this assertion is not needed. `a_num_coarsen_opt` is used in building the finest level with the implicit function. In the case of restarting from checkpoint, this parameter can be simply ignored.
BuildFromChkptFile (std::string const& fname,
int max_coarsening_level, int ngrow, bool build_coarse_level_by_coarsening,
bool a_extend_domain_face, int a_num_coarsen_opt)
{
+ amrex::ignore_unused(a_num_coarsen_opt);
+
ChkptFile chkpt_file(fname);
IndexSpace::push(new IndexSpaceChkptFile(chkpt_file,
geom, required_coarsening_level, |
codereview_new_cpp_data_12038 | IndexSpaceChkptFile::IndexSpaceChkptFile (const ChkptFile& chkpt_file,
if (build_coarse_level_by_coarsening) {
amrex::Abort("Failed to build required coarse EB level "+std::to_string(ilev));
} else {
- m_chkpt_file_level.emplace_back(this, chkpt_file, cgeom, EB2::max_grid_size, ng,
- extend_domain_face);
}
} else {
break;
Since only the finest level is saved in a checkpoint file, is this line correct?
IndexSpaceChkptFile::IndexSpaceChkptFile (const ChkptFile& chkpt_file,
if (build_coarse_level_by_coarsening) {
amrex::Abort("Failed to build required coarse EB level "+std::to_string(ilev));
} else {
+ amrex::Abort("Chkptfile only stored for finest level. Failed to build "+std::to_string(ilev));
}
} else {
break; |
codereview_new_cpp_data_12039 | ChkptFileLevel::set_invalid_ghost_data_covered ()
for (int idim = 0; idim < AMREX_SPACEDIM; ++idim)
{
auto& edgecent = m_edgecent[idim];
#ifdef AMREX_USE_OMP
#pragma omp parallel if (Gpu::notInLaunchRegion())
#endif
{
- BoxArray const& covered_edge_grids = amrex::convert(m_covered_grids,
- edgecent.ixType());
std::vector<std::pair<int,Box> > isects;
for (MFIter mfi(edgecent); mfi.isValid(); ++mfi)
{
`BoxArray::intersections` is thread-safe. We should move this outside the omp parallel region.
ChkptFileLevel::set_invalid_ghost_data_covered ()
for (int idim = 0; idim < AMREX_SPACEDIM; ++idim)
{
auto& edgecent = m_edgecent[idim];
+ BoxArray const& covered_edge_grids = amrex::convert(m_covered_grids,
+ edgecent.ixType());
#ifdef AMREX_USE_OMP
#pragma omp parallel if (Gpu::notInLaunchRegion())
#endif
{
std::vector<std::pair<int,Box> > isects;
for (MFIter mfi(edgecent); mfi.isValid(); ++mfi)
{ |
codereview_new_cpp_data_12040 | StateData::FillBoundary (Box const& bx,
{
BL_PROFILE("StateData::FillBoundary(geom)");
- if (domain.contains(enclosedCells(bx))) return;
Vector<BCRec> bcr(num_comp);
What if the domain is nodal type like Press_Type in IAMR?
StateData::FillBoundary (Box const& bx,
{
BL_PROFILE("StateData::FillBoundary(geom)");
+ if (domain.contains(convert(bx,domain.ixType()))) return;
Vector<BCRec> bcr(num_comp);
|
codereview_new_cpp_data_12054 | int procinfo_setup(PROC_MAP& pm) {
// };
//
double total_cpu_time() {
- static natural_t processorCount = 0;
processor_cpu_load_info_t cpuLoad;
mach_msg_type_number_t processorMsgCount;
static double scale;
uint64_t totalUserTime = 0;
- if (processorCount == 0) {
long hz = sysconf(_SC_CLK_TCK);
scale = 1./hz;
}
There is no default value for `scale`.
In case `processorCount` is not equal to zero, this value will remain `0` by default, so later this function will always return `0` because the last statement is a multiplication by `scale`.
If this is intended - then fine, but I believe this is a mistake
int procinfo_setup(PROC_MAP& pm) {
// };
//
double total_cpu_time() {
+ static bool first = true;
+ natural_t processorCount = 0;
processor_cpu_load_info_t cpuLoad;
mach_msg_type_number_t processorMsgCount;
static double scale;
uint64_t totalUserTime = 0;
+ if (!first) {
+ first = false;
long hz = sysconf(_SC_CLK_TCK);
scale = 1./hz;
} |
codereview_new_cpp_data_12055 | wxPanel* CDlgAdvPreferencesBase::createProcessorTab(wxNotebook* notebook) {
m_txtMaxLoad = new wxTextCtrl(
box, ID_TXTMAXLOAD, wxEmptyString, wxDefaultPosition, getTextCtrlSize(wxT("100.00")), wxTE_RIGHT
);
addNewRowToSizer(box_sizer,
- wxString (_("Suspend computing when your computer is busy running other programs.")),
m_chkMaxLoad, m_txtMaxLoad,
new wxStaticText(box, ID_DEFAULT, wxT("%"), wxDefaultPosition, wxDefaultSize, 0)
);
To eliminate the compiler syntax error, you must create a separate variable with this string thus:
` wxString SuspendOnBusyTT(_("Suspend computing when your computer is busy running other programs."));`
and pass `SuspendOnBusyTT ` in the call.
See `MaxCPUTimeTT ` on line 315 for an example.
wxPanel* CDlgAdvPreferencesBase::createProcessorTab(wxNotebook* notebook) {
m_txtMaxLoad = new wxTextCtrl(
box, ID_TXTMAXLOAD, wxEmptyString, wxDefaultPosition, getTextCtrlSize(wxT("100.00")), wxTE_RIGHT
);
+ wxString load_tt(_("Suspend computing when your computer is busy running other programs."));
addNewRowToSizer(box_sizer,
+ load_tt,
m_chkMaxLoad, m_txtMaxLoad,
new wxStaticText(box, ID_DEFAULT, wxT("%"), wxDefaultPosition, wxDefaultSize, 0)
); |
codereview_new_cpp_data_12056 | wxPanel* CDlgAdvPreferencesBase::createProcessorTab(wxNotebook* notebook) {
box, ID_DEFAULT, _("Use at most"), wxDefaultPosition, wxDefaultSize, 0
);
m_txtProcUseProcessors = new wxTextCtrl(box, ID_TXTPROCUSEPROCESSORS, wxEmptyString, wxDefaultPosition, textCtrlSize, wxTE_RIGHT );
wxStaticText* staticText22 = new wxStaticText(
box, ID_DEFAULT, _("% of the CPUs and at most"), wxDefaultPosition, wxDefaultSize, 0
);
Precede line 336 with `/*xgettext:no-c-format*/`. For an explanation of why this is needed, see [here](https://stackoverflow.com/questions/70490036/xgettext-incorrectly-infers-c-format-in-cpp-files) and [here](https://www.mit.edu/afs.new/athena/astaff/source/src-9.4/third/gettext2/gettext-tools/doc/gettext_3.html#SEC18).
wxPanel* CDlgAdvPreferencesBase::createProcessorTab(wxNotebook* notebook) {
box, ID_DEFAULT, _("Use at most"), wxDefaultPosition, wxDefaultSize, 0
);
m_txtProcUseProcessors = new wxTextCtrl(box, ID_TXTPROCUSEPROCESSORS, wxEmptyString, wxDefaultPosition, textCtrlSize, wxTE_RIGHT );
+ /*xgettext:no-c-format*/
wxStaticText* staticText22 = new wxStaticText(
box, ID_DEFAULT, _("% of the CPUs and at most"), wxDefaultPosition, wxDefaultSize, 0
); |
codereview_new_cpp_data_12057 | void CDlgAdvPreferences::ReadPreferenceSettings() {
// 0 means "no restriction" but we don't use a checkbox here
if (prefs.max_ncpus_pct == 0.0) prefs.max_ncpus_pct = 100.0;
DisplayValue(prefs.max_ncpus_pct, m_txtProcUseProcessors);
DisplayValue(prefs.niu_max_ncpus_pct, m_txtProcUseProcessorsNotInUse);
// cpu limit
// 0 means "no restriction" but we don't use a checkbox here
if (prefs.cpu_usage_limit == 0.0) prefs.cpu_usage_limit = 100.0;
DisplayValue(prefs.cpu_usage_limit, m_txtProcUseCPUTime);
DisplayValue(prefs.niu_cpu_usage_limit, m_txtProcUseCPUTimeNotInUse);
// on batteries
missing: `if (prefs.niu_max_ncpus_pct == 0.0) prefs.niu_max_ncpus_pct = 100.0;`
void CDlgAdvPreferences::ReadPreferenceSettings() {
// 0 means "no restriction" but we don't use a checkbox here
if (prefs.max_ncpus_pct == 0.0) prefs.max_ncpus_pct = 100.0;
DisplayValue(prefs.max_ncpus_pct, m_txtProcUseProcessors);
+ if (prefs.niu_max_ncpus_pct == 0.0) prefs.niu_max_ncpus_pct = 100.0;
DisplayValue(prefs.niu_max_ncpus_pct, m_txtProcUseProcessorsNotInUse);
// cpu limit
// 0 means "no restriction" but we don't use a checkbox here
if (prefs.cpu_usage_limit == 0.0) prefs.cpu_usage_limit = 100.0;
DisplayValue(prefs.cpu_usage_limit, m_txtProcUseCPUTime);
+ if (prefs.niu_cpu_usage_limit == 0.0) prefs.niu_cpu_usage_limit = 100.0;
DisplayValue(prefs.niu_cpu_usage_limit, m_txtProcUseCPUTimeNotInUse);
// on batteries |
codereview_new_cpp_data_12058 | void CDlgAdvPreferences::ReadPreferenceSettings() {
// 0 means "no restriction" but we don't use a checkbox here
if (prefs.max_ncpus_pct == 0.0) prefs.max_ncpus_pct = 100.0;
DisplayValue(prefs.max_ncpus_pct, m_txtProcUseProcessors);
DisplayValue(prefs.niu_max_ncpus_pct, m_txtProcUseProcessorsNotInUse);
// cpu limit
// 0 means "no restriction" but we don't use a checkbox here
if (prefs.cpu_usage_limit == 0.0) prefs.cpu_usage_limit = 100.0;
DisplayValue(prefs.cpu_usage_limit, m_txtProcUseCPUTime);
DisplayValue(prefs.niu_cpu_usage_limit, m_txtProcUseCPUTimeNotInUse);
// on batteries
Missing: `if (prefs.niu_cpu_usage_limit == 0.0) prefs.niu_cpu_usage_limit = 100.0;`
void CDlgAdvPreferences::ReadPreferenceSettings() {
// 0 means "no restriction" but we don't use a checkbox here
if (prefs.max_ncpus_pct == 0.0) prefs.max_ncpus_pct = 100.0;
DisplayValue(prefs.max_ncpus_pct, m_txtProcUseProcessors);
+ if (prefs.niu_max_ncpus_pct == 0.0) prefs.niu_max_ncpus_pct = 100.0;
DisplayValue(prefs.niu_max_ncpus_pct, m_txtProcUseProcessorsNotInUse);
// cpu limit
// 0 means "no restriction" but we don't use a checkbox here
if (prefs.cpu_usage_limit == 0.0) prefs.cpu_usage_limit = 100.0;
DisplayValue(prefs.cpu_usage_limit, m_txtProcUseCPUTime);
+ if (prefs.niu_cpu_usage_limit == 0.0) prefs.niu_cpu_usage_limit = 100.0;
DisplayValue(prefs.niu_cpu_usage_limit, m_txtProcUseCPUTimeNotInUse);
// on batteries |
codereview_new_cpp_data_12059 | int VBOX_BASE::get_scratch_directory(string& dir) {
int VBOX_BASE::get_slot_directory(string& dir) {
char slot_dir[256];
- if (getcwd(slot_dir, sizeof(slot_dir))) {;}
dir = slot_dir;
if (!dir.empty()) {
Maybe instead of this weird-looking code it is better to check for an error? In case of function failure the content of `slot_dir` might contain garbage and could later crash the whole application.
int VBOX_BASE::get_scratch_directory(string& dir) {
int VBOX_BASE::get_slot_directory(string& dir) {
char slot_dir[256];
+ getcwd(slot_dir, sizeof(slot_dir));
dir = slot_dir;
if (!dir.empty()) { |
codereview_new_cpp_data_12061 |
#define COLUMN_PROGRESS 2
#define COLUMN_SIZE 3
#define COLUMN_TIME 4
-#define COLUMN_TOCOMPLETION 5
-#define COLUMN_SPEED 6
-#define COLUMN_STATUS 7
// DefaultShownColumns is an array containing the
// columnIDs of the columns to be shown by default,
Please never change IDs; if you add a new value, always add it to the end.
#define COLUMN_PROGRESS 2
#define COLUMN_SIZE 3
#define COLUMN_TIME 4
+#define COLUMN_SPEED 5
+#define COLUMN_STATUS 6
+#define COLUMN_TOCOMPLETION 7
// DefaultShownColumns is an array containing the
// columnIDs of the columns to be shown by default, |
codereview_new_cpp_data_12063 | static void filename_hash(const char* filename, int fanout, char* dir) {
//
int check_download_file(const char* file_path, const char* dl_hier_path) {
bool md5_file_exists = false;
- char md5_file_path[256];
- char md5_hash_src[33], md5_hash_dst[33];
double nbytes;
std::string file_content, file_hash;
int file_size;
Please use `MAXPATHLEN` instead of 256
static void filename_hash(const char* filename, int fanout, char* dir) {
//
int check_download_file(const char* file_path, const char* dl_hier_path) {
bool md5_file_exists = false;
+ char md5_file_path[MAXPATHLEN];
+ char md5_hash_src[MD5_LEN], md5_hash_dst[MD5_LEN];
double nbytes;
std::string file_content, file_hash;
int file_size; |
codereview_new_cpp_data_12064 | static void filename_hash(const char* filename, int fanout, char* dir) {
//
int check_download_file(const char* file_path, const char* dl_hier_path) {
bool md5_file_exists = false;
- char md5_file_path[256];
- char md5_hash_src[33], md5_hash_dst[33];
double nbytes;
std::string file_content, file_hash;
int file_size;
Please move `33` to a constant and use it to initialize arrays' sizes
static void filename_hash(const char* filename, int fanout, char* dir) {
//
int check_download_file(const char* file_path, const char* dl_hier_path) {
bool md5_file_exists = false;
+ char md5_file_path[MAXPATHLEN];
+ char md5_hash_src[MD5_LEN], md5_hash_dst[MD5_LEN];
double nbytes;
std::string file_content, file_hash;
int file_size; |
codereview_new_cpp_data_12335 | void initONNXBindings(PyObject* module) {
ONNXShapeTypeInference(graph, params_dict, opset_version);
}),
py::arg("graph"),
- py::arg("params_dict") = true,
py::arg("opset_version"))
.def(
"_jit_pass_onnx_set_dynamic_input_shape",
1. Should this change be applied on main branch?
2. why `= true`?
void initONNXBindings(PyObject* module) {
ONNXShapeTypeInference(graph, params_dict, opset_version);
}),
py::arg("graph"),
+ py::arg("params_dict"),
py::arg("opset_version"))
.def(
"_jit_pass_onnx_set_dynamic_input_shape", |
codereview_new_cpp_data_12336 | static bool cb_dirsrc(void *user, void *data) {
static bool cb_cfgsanbox_grain(void *user, void *data) {
RConfigNode *node = (RConfigNode*) data;
if (strstr (node->value, "?")) {
- eprintf ("comma separated grain types to be masked out by the sandbox.\n");
- eprintf ("all, none, disk, files, exec, socket, exec\n");
return false;
}
int gt = R_SANDBOX_GRAIN_NONE;
User is an instance of core. Use rcorehelp here
static bool cb_dirsrc(void *user, void *data) {
static bool cb_cfgsanbox_grain(void *user, void *data) {
RConfigNode *node = (RConfigNode*) data;
if (strstr (node->value, "?")) {
+ static RCoreHelpMessage help_msg_grain = {
+ "Usage:", "e cfg.sandbox.grain=arg[,arg...]", "set grain types to mask out",
+ "Grain types:", "", "",
+ "", "all", "",
+ "", "none", "",
+ "", "disk", "",
+ "", "files", "",
+ "", "exec", "",
+ "", "socket", "",
+ NULL
+ };
+ r_core_cmd_help ((RCore *)user, help_msg_grain);
return false;
}
int gt = R_SANDBOX_GRAIN_NONE; |
codereview_new_cpp_data_12337 | static void anop_esil(RAnal *a, RAnalOp *op, ut64 addr, const ut8 *buf, int len,
R_LOG_ERROR ("unknown operand size: %d", gop.insn->detail->x86.operands[0].size);
val = 256;
}
- // experimental simplified expression
esilprintf (op,
"%s,0x%"PFMT64x",&,POP,$z,cf,:=,"
"%s,%s,<<=,"
```suggestion
// OLD: esilprintf (op, "0,%s,!,!,?{,1,%s,-,%s,<<,0x%"PFMT64x",&,!,!,^,},%s,%s,$z,zf,:=,$p,pf,:=,%d,$s,sf,:=,cf,=", src, src, dst, val, src, dst2, bitsize - 1);
```
static void anop_esil(RAnal *a, RAnalOp *op, ut64 addr, const ut8 *buf, int len,
R_LOG_ERROR ("unknown operand size: %d", gop.insn->detail->x86.operands[0].size);
val = 256;
}
+ // OLD: esilprintf (op, "0,%s,!,!,?{,1,%s,-,%s,<<,0x%"PFMT64x",&,!,!,^,},%s,%s,$z,zf,:=,$p,pf,:=,%d,$s,sf,:=,cf,=", src, src, dst, val, src, dst2, bitsize - 1);
esilprintf (op,
"%s,0x%"PFMT64x",&,POP,$z,cf,:=,"
"%s,%s,<<=," |
codereview_new_cpp_data_12338 | R_API bool r_project_rename(RProject *p, const char *newname) {
return true;
}
}
return false;
}
same here it will leak
R_API bool r_project_rename(RProject *p, const char *newname) {
return true;
}
}
+ free (newprjdir);
return false;
}
|
codereview_new_cpp_data_12339 | static bool esil_float_ceil(REsil *esil) {
ret = esil_pushnum_float (esil, ceil(s));
}
} else {
- ERR ("esil_float_ceil: invalid parameters.");
}
free (src);
} else {
```suggestion
ERR ("esil_float_ceil: invalid parameters");
```
static bool esil_float_ceil(REsil *esil) {
ret = esil_pushnum_float (esil, ceil(s));
}
} else {
+ ERR ("esil_float_ceil: invalid parameters");
}
free (src);
} else { |
codereview_new_cpp_data_12340 | static int cmd_print(void *data, const char *input) {
cmd_pCx (core, input + 2, "pc");
break;
default:
- R_LOG_ERROR ("Usage: pC[dDaAxwc] - column output for pxa, pxA, pxw, .. ");
break;
}
break;
```suggestion
eprintf ("Usage: pC[dDaAxwc] - column output for pxa, pxA, pxw, ..\n");
```
static int cmd_print(void *data, const char *input) {
cmd_pCx (core, input + 2, "pc");
break;
default:
+ eprintf ("Usage: pC[dDaAxwc] - column output for pxa, pxA, pxw, ..\n");
break;
}
break; |
codereview_new_cpp_data_12341 | R_API int r_main_ravc2(int argc, const char **argv) {
printf ("Branch: %s\n", current_branch);
RList *uncommitted = rvc->get_uncommitted (rvc);
if (r_list_empty (uncommitted)) {
- printf ("All files are committed\n");
} else {
- printf ("The following files were NOT committed:\n");
RListIter *iter;
char *file;
r_list_foreach (uncommitted, iter, file) {
```suggestion
R_LOG_INFO ("All files are committed");
```
R_API int r_main_ravc2(int argc, const char **argv) {
printf ("Branch: %s\n", current_branch);
RList *uncommitted = rvc->get_uncommitted (rvc);
if (r_list_empty (uncommitted)) {
+ R_LOG_INFO ("All files are committed");
} else {
+ R_LOG_INFO ("The following files were NOT committed");
RListIter *iter;
char *file;
r_list_foreach (uncommitted, iter, file) { |
codereview_new_cpp_data_12342 | int file_reset(RMagic *ms) {
ms->haderr = 0;
ms->error = -1;
if (!ms->mlist) {
- eprintf ("no magic files loaded, nothing to scan\n");
return -1;
}
return 0;
```suggestion
// eprintf ("no magic files loaded, nothing to scan\n");
```
int file_reset(RMagic *ms) {
ms->haderr = 0;
ms->error = -1;
if (!ms->mlist) {
+ // eprintf ("no magic files loaded, nothing to scan\n");
return -1;
}
return 0; |
codereview_new_cpp_data_12343 | R_API void r_core_diff_show_json(RCore *c, RCore *c2) {
fcns = r_anal_get_fcns (c->anal);
if (r_list_empty (fcns)) {
- R_LOG_INFO ("No functions found, try running with -A or load a project");
return;
}
Yes, error seems more correct indeed.
R_API void r_core_diff_show_json(RCore *c, RCore *c2) {
fcns = r_anal_get_fcns (c->anal);
if (r_list_empty (fcns)) {
+ R_LOG_ERROR ("No functions found, try running with -A or load a project");
return;
}
|
codereview_new_cpp_data_12344 | static inline bool write_num_sz(ut64 n, int byte_sz, ut8 *outbuf, int outsz) {
int bits = r_num_to_bits (NULL, n);
// TODO: signedness prbly wrong...
if (bits > byte_sz * 8) {
- R_LOG_ERROR ("Arg 0x" PFMT64x " more then %d bits\n", n, bits);
false;
}
switch (byte_sz) {
```suggestion
R_LOG_ERROR ("Arg 0x" PFMT64x " more then %d bits", n, bits);
```
static inline bool write_num_sz(ut64 n, int byte_sz, ut8 *outbuf, int outsz) {
int bits = r_num_to_bits (NULL, n);
// TODO: signedness prbly wrong...
if (bits > byte_sz * 8) {
+ R_LOG_ERROR ("Arg 0x" PFMT64x " more then %d bits", n, bits);
false;
}
switch (byte_sz) { |
codereview_new_cpp_data_12345 | static bool subvar(RParse *p, RAnalFunction *f, ut64 addr, int oplen, char *data
ripend = "]";
}
char * maybe_num = neg? neg+1 : rip;
- if( r_is_valid_input_num_value(NULL, maybe_num)){
if (neg) {
- repl_num -= r_num_get(NULL, maybe_num);
} else {
- repl_num += r_num_get(NULL, maybe_num);
}
rip -= 3;
*rip = 0;
```suggestion
repl_num -= r_num_get (NULL, maybe_num);
```
static bool subvar(RParse *p, RAnalFunction *f, ut64 addr, int oplen, char *data
ripend = "]";
}
char * maybe_num = neg? neg+1 : rip;
+ if (r_is_valid_input_num_value (NULL, maybe_num)) {
if (neg) {
+ repl_num -= r_num_get (NULL, maybe_num);
} else {
+ repl_num += r_num_get (NULL, maybe_num);
}
rip -= 3;
*rip = 0; |
codereview_new_cpp_data_12346 | static bool subvar(RParse *p, RAnalFunction *f, ut64 addr, int oplen, char *data
ripend = "]";
}
char * maybe_num = neg? neg+1 : rip;
- if( r_is_valid_input_num_value(NULL, maybe_num)){
if (neg) {
- repl_num -= r_num_get(NULL, maybe_num);
} else {
- repl_num += r_num_get(NULL, maybe_num);
}
rip -= 3;
*rip = 0;
```suggestion
repl_num += r_num_get (NULL, maybe_num);
```
static bool subvar(RParse *p, RAnalFunction *f, ut64 addr, int oplen, char *data
ripend = "]";
}
char * maybe_num = neg? neg+1 : rip;
+ if (r_is_valid_input_num_value (NULL, maybe_num)) {
if (neg) {
+ repl_num -= r_num_get (NULL, maybe_num);
} else {
+ repl_num += r_num_get (NULL, maybe_num);
}
rip -= 3;
*rip = 0; |
codereview_new_cpp_data_12347 | static bool cmd_onn(RCore *core, const char* input) {
*arg_perm++ = 0;
perms = r_str_rwx (arg_perm);
}
- Onn on = {arg0, core, ptr};
ut64 addr = 0LL;
// check if file is opened already
if (r_str_startswith (input, "nnu")) {
```suggestion
Onn on = {arg0, NULL, core};
```
static bool cmd_onn(RCore *core, const char* input) {
*arg_perm++ = 0;
perms = r_str_rwx (arg_perm);
}
+ Onn on = {arg0, NULL, core};
ut64 addr = 0LL;
// check if file is opened already
if (r_str_startswith (input, "nnu")) { |
codereview_new_cpp_data_12348 | static inline RBinWasmSection *sections_first_custom_name(RBinWasmObj *bin) {
RBinWasmSection *sec;
r_list_foreach (bin->g_sections, iter, sec) {
if (sec->id == R_BIN_WASM_SECTION_CUSTOM && sec->size > 6) {
- ut8 _tmp[CUST_NAME_START_LEN];
r_buf_read_at (buf, sec->offset, _tmp, CUST_NAME_START_LEN);
if (!memcmp (CUST_NAME_START, _tmp, CUST_NAME_START_LEN)) {
return sec;
Check the return value; if the read cannot happen, there's no need to memcmp.
static inline RBinWasmSection *sections_first_custom_name(RBinWasmObj *bin) {
RBinWasmSection *sec;
r_list_foreach (bin->g_sections, iter, sec) {
if (sec->id == R_BIN_WASM_SECTION_CUSTOM && sec->size > 6) {
+ ut8 _tmp[CUST_NAME_START_LEN] = {0};
r_buf_read_at (buf, sec->offset, _tmp, CUST_NAME_START_LEN);
if (!memcmp (CUST_NAME_START, _tmp, CUST_NAME_START_LEN)) {
return sec; |
codereview_new_cpp_data_12349 | RBinAddr *r_bin_mz_get_main_vaddr (struct r_bin_mz_obj_t *bin) {
}
ZERO_FILL (b);
if (r_buf_read_at (bin->b, entry->paddr, b, sizeof (b)) < 0) {
- R_LOG_WARN ("Warning: Cannot read entry at 0x%16" PFMT64x "", (ut64)entry->paddr);
free (entry);
return NULL;
}
```suggestion
R_LOG_WARN ("Warning: Cannot read entry at 0x%16" PFMT64x, (ut64)entry->paddr);
```
RBinAddr *r_bin_mz_get_main_vaddr (struct r_bin_mz_obj_t *bin) {
}
ZERO_FILL (b);
if (r_buf_read_at (bin->b, entry->paddr, b, sizeof (b)) < 0) {
+ R_LOG_WARN ("Warning: Cannot read entry at 0x%16" PFMT64x, (ut64)entry->paddr);
free (entry);
return NULL;
} |
codereview_new_cpp_data_12350 | static st32 parse_type(Context *ctx, const ut64 offset, RStrBuf *strbuf, ut64 *s
*visited = su;
}
if (visited && set_u_contains (*visited, offset)) {
- R_LOG_WARN ("Warning: anal.dwarf.parse_type: infinite recursion detected.");
return -1;
}
set_u_add (*visited, offset);
Remove the redundant `Warning:` prefix too.
static st32 parse_type(Context *ctx, const ut64 offset, RStrBuf *strbuf, ut64 *s
*visited = su;
}
if (visited && set_u_contains (*visited, offset)) {
+ R_LOG_WARN ("anal.dwarf.parse_type: infinite recursion detected.");
return -1;
}
set_u_add (*visited, offset); |
codereview_new_cpp_data_12351 | R_API bool r_core_anal_fcn(RCore *core, ut64 at, ut64 from, int reftype, int dep
if (core->io->va) {
if (!r_io_is_valid_offset (core->io, at, !core->anal->opt.noncode)) {
if (core->anal->verbose) {
- R_LOG_WARN ("Warning: Address not mapped or not executable at 0x%08"PFMT64x"", at);
}
return false;
}
```suggestion
R_LOG_WARN ("Warning: Address not mapped or not executable at 0x%08"PFMT64x, at);
```
R_API bool r_core_anal_fcn(RCore *core, ut64 at, ut64 from, int reftype, int dep
if (core->io->va) {
if (!r_io_is_valid_offset (core->io, at, !core->anal->opt.noncode)) {
if (core->anal->verbose) {
+ R_LOG_WARN ("Warning: Address not mapped or not executable at 0x%08"PFMT64x, at);
}
return false;
} |
codereview_new_cpp_data_12352 | R_API bool r_anal_function_add_bb(RAnal *a, RAnalFunction *fcn, ut64 addr, ut64
return false;
}
if (size > a->opt.bb_max_size) {
- R_LOG_WARN ("can't allocate such big bb of %"PFMT64d" bytes at 0x%08"PFMT64x"", (st64)size, addr);
r_warn_if_reached ();
return false;
}
```suggestion
R_LOG_WARN ("can't allocate such big bb of %"PFMT64d" bytes at 0x%08"PFMT64x, (st64)size, addr);
```
R_API bool r_anal_function_add_bb(RAnal *a, RAnalFunction *fcn, ut64 addr, ut64
return false;
}
if (size > a->opt.bb_max_size) {
+ R_LOG_WARN ("can't allocate such big bb of %"PFMT64d" bytes at 0x%08"PFMT64x, (st64)size, addr);
r_warn_if_reached ();
return false;
} |
codereview_new_cpp_data_12353 | static const char *recovery_apply_complete_object_locator(RRTTIMSVCAnalContext *
if (!col->td) {
if (context->vt_context->anal->verbose) {
- R_LOG_WARN ("no td for col at 0x%"PFMT64x"", col->addr);
}
return NULL;
}
```suggestion
R_LOG_WARN ("no td for col at 0x%"PFMT64x, col->addr);
```
static const char *recovery_apply_complete_object_locator(RRTTIMSVCAnalContext *
if (!col->td) {
if (context->vt_context->anal->verbose) {
+ R_LOG_WARN ("no td for col at 0x%"PFMT64x, col->addr);
}
return NULL;
} |
codereview_new_cpp_data_12354 | static int i8080_op(RAnal *anal, RAnalOp *op, ut64 addr, const ut8 *data, int le
char out[32];
int ilen = i8080_disasm (data, out, len);
if (mask & R_ANAL_OP_MASK_DISASM) {
- op->mnemonic = strndup (out, sizeof(out));
}
op->addr = addr;
op->type = R_ANAL_OP_TYPE_UNK;
```suggestion
op->mnemonic = r_str_ndup (out, sizeof(out));
```
static int i8080_op(RAnal *anal, RAnalOp *op, ut64 addr, const ut8 *data, int le
char out[32];
int ilen = i8080_disasm (data, out, len);
if (mask & R_ANAL_OP_MASK_DISASM) {
+ op->mnemonic = r_str_ndup (out, sizeof(out));
}
op->addr = addr;
op->type = R_ANAL_OP_TYPE_UNK; |
codereview_new_cpp_data_12355 | static int archinfo(RAnal *anal, int q) {
case R_ANAL_ARCHINFO_MIN_OP_SIZE:
return 2;
case R_ANAL_ARCHINFO_DATA_ALIGN:
const char *cpu = anal->config->cpu;
if (strstr (cpu, "68030") || strstr (cpu, "68040") || strstr (cpu, "68060")) {
return 1;
- } else {
- return 2;
}
}
return 2;
```suggestion
{
const char *cpu = anal->config->cpu;
```
static int archinfo(RAnal *anal, int q) {
case R_ANAL_ARCHINFO_MIN_OP_SIZE:
return 2;
case R_ANAL_ARCHINFO_DATA_ALIGN:
+ {
const char *cpu = anal->config->cpu;
if (strstr (cpu, "68030") || strstr (cpu, "68040") || strstr (cpu, "68060")) {
return 1;
+ }
+ return 2;
}
}
return 2; |
codereview_new_cpp_data_12356 | static int cmd_info(void *data, const char *input) {
r_cons_printf ("agn %s\n", cls->name);
}
}
- }
- else if (fullGraph) {
r_list_foreach (obj->classes, iter, cls) {
if (cls->super) {
r_cons_printf ("agn %s\n", cls->super);
Never use naked blocks. This should be `} else ` as I mentioned in the review.
static int cmd_info(void *data, const char *input) {
r_cons_printf ("agn %s\n", cls->name);
}
}
+ } else if (fullGraph) {
r_list_foreach (obj->classes, iter, cls) {
if (cls->super) {
r_cons_printf ("agn %s\n", cls->super); |
codereview_new_cpp_data_12778 | get_signal_state(PyObject *module)
static inline int
compare_handler(PyObject *func, PyObject *dfl_ign_handler)
{
if (func == NULL || dfl_ign_handler == NULL) {
return 0;
}
It's not really clear to me _why_ this situation could arise but it is a valid check regardless.
get_signal_state(PyObject *module)
static inline int
compare_handler(PyObject *func, PyObject *dfl_ign_handler)
{
+ // See https://github.com/python/cpython/pull/102399
if (func == NULL || dfl_ign_handler == NULL) {
return 0;
} |
codereview_new_cpp_data_12779 | test_code_extra(PyObject* self, PyObject *Py_UNUSED(callable))
goto finally;
}
assert ((uintptr_t)extra == 77);
- // Need to reset code extra value.
res = PyUnstable_Code_SetExtra(test_func_code, code_extra_index, NULL);
if (res < 0) {
goto finally;
```suggestion
// Revert to initial code extra value.
```
test_code_extra(PyObject* self, PyObject *Py_UNUSED(callable))
goto finally;
}
assert ((uintptr_t)extra == 77);
+ // Revert to initial code extra value.
res = PyUnstable_Code_SetExtra(test_func_code, code_extra_index, NULL);
if (res < 0) {
goto finally; |
codereview_new_cpp_data_12780 | winreg_QueryValueEx_impl(PyObject *module, HKEY key, const Py_UNICODE *name)
while (1) {
Py_BEGIN_ALLOW_THREADS
- rc = RegQueryValueExW(key, name, NULL, &typ, pbuf,
- &size);
Py_END_ALLOW_THREADS
if (rc != ERROR_MORE_DATA) {
break;
```suggestion
rc = RegQueryValueExW(key, name, NULL, &typ, pbuf, &size);
```
winreg_QueryValueEx_impl(PyObject *module, HKEY key, const Py_UNICODE *name)
while (1) {
Py_BEGIN_ALLOW_THREADS
+ rc = RegQueryValueExW(key, name, NULL, &typ, pbuf, &size);
Py_END_ALLOW_THREADS
if (rc != ERROR_MORE_DATA) {
break; |
codereview_new_cpp_data_12781 |
# include <process.h> // getpid()
#endif
-#ifdef MS_WINDOWS_NON_DESKTOP
-# include <windows.h>
#endif
/* Period parameters -- These are all magic. Don't change. */
Any harm in making this `MS_WINDOWS`? That would be obvious, but using a more specific flag suggests that something weird is going on.
Or is it even necessary?
# include <process.h> // getpid()
#endif
+#ifdef MS_WINDOWS
+# include <processthreadsapi.h>
#endif
/* Period parameters -- These are all magic. Don't change. */ |
codereview_new_cpp_data_12782 |
/* Include symbols from _socket module */
#include "socketmodule.h"
-#ifdef MS_WINDOWS_GAMES
# include <wincrypt.h>
#endif
Similarly, any harm in just making this `MS_WINDOWS`?
/* Include symbols from _socket module */
#include "socketmodule.h"
+#ifdef MS_WINDOWS
# include <wincrypt.h>
#endif
|
codereview_new_cpp_data_12783 | _Py_abspath(const wchar_t *path, wchar_t **abspath_p)
// The Windows Games API family does not provide these functions
// so provide our own implementations. Remove them in case they get added
// to the Games API family
#if defined(MS_WINDOWS) && !defined(MS_WINDOWS_APP) && !defined(MS_WINDOWS_SYSTEM)
HRESULT
PathCchSkipRoot(const wchar_t *path, const wchar_t **rootEnd)
```suggestion
// to the Games API family
// Note that this implementation does not handle all the same cases as the real
// function, but we expect games are very unlikely to encounter the more obscure
// cases.
```
_Py_abspath(const wchar_t *path, wchar_t **abspath_p)
// The Windows Games API family does not provide these functions
// so provide our own implementations. Remove them in case they get added
// to the Games API family
+// Note that this implementation does not handle all the same cases as the real
+// function, but we expect games are very unlikely to encounter the more obscure
+// cases.
#if defined(MS_WINDOWS) && !defined(MS_WINDOWS_APP) && !defined(MS_WINDOWS_SYSTEM)
HRESULT
PathCchSkipRoot(const wchar_t *path, const wchar_t **rootEnd) |
codereview_new_cpp_data_12784 | _PyGen_FetchStopIterationValue(PyObject **pvalue)
PyObject *value = NULL;
if (PyErr_ExceptionMatches(PyExc_StopIteration)) {
PyObject *exc = PyErr_GetRaisedException();
- if (exc) {
- /* exception will usually be normalised already */
- value = Py_NewRef(((PyStopIterationObject *)exc)->value);
- Py_DECREF(exc);
- }
} else if (PyErr_Occurred()) {
return -1;
}
This comment should go
_PyGen_FetchStopIterationValue(PyObject **pvalue)
PyObject *value = NULL;
if (PyErr_ExceptionMatches(PyExc_StopIteration)) {
PyObject *exc = PyErr_GetRaisedException();
+ value = Py_NewRef(((PyStopIterationObject *)exc)->value);
+ Py_DECREF(exc);
} else if (PyErr_Occurred()) {
return -1;
} |
codereview_new_cpp_data_12787 | positional_only_passed_as_keyword(PyThreadState *tstate, PyCodeObject *co,
int posonly_conflicts = 0;
PyObject* posonly_names = PyList_New(0);
if (posonly_names == NULL)
- return 1;
for(int k=0; k < co->co_posonlyargcount; k++){
PyObject* posonly_name = PyTuple_GET_ITEM(co->co_localsplusnames, k);
```suggestion
goto fail;
```
positional_only_passed_as_keyword(PyThreadState *tstate, PyCodeObject *co,
int posonly_conflicts = 0;
PyObject* posonly_names = PyList_New(0);
if (posonly_names == NULL)
+ goto fail;
for(int k=0; k < co->co_posonlyargcount; k++){
PyObject* posonly_name = PyTuple_GET_ITEM(co->co_localsplusnames, k);
|
codereview_new_cpp_data_12788 | _io__WindowsConsoleIO___init___impl(winconsoleio *self, PyObject *nameobj,
int fd_is_own = 0;
HANDLE handle = NULL;
- assert(PyObject_TypeCheck((self), (PyTypeObject *)&PyWindowsConsoleIO_Type));
if (self->fd >= 0) {
if (self->closefd) {
/* Have to close the existing file first. */
```suggestion
assert(PyObject_TypeCheck(self, (PyTypeObject *)&PyWindowsConsoleIO_Type));
```
_io__WindowsConsoleIO___init___impl(winconsoleio *self, PyObject *nameobj,
int fd_is_own = 0;
HANDLE handle = NULL;
+ assert(PyObject_TypeCheck(self, (PyTypeObject *)&PyWindowsConsoleIO_Type));
if (self->fd >= 0) {
if (self->closefd) {
/* Have to close the existing file first. */ |
codereview_new_cpp_data_12791 | bytearrayiter_reduce(bytesiterobject *it, PyObject *Py_UNUSED(ignored))
/* _PyEval_GetBuiltin can invoke arbitrary code.
* calls must be *before* access of `it` pointers,
- * since C/C++ parameter eval order is undefined.
* see issue #101765 */
if (it->it_seq != NULL) {
```suggestion
* since C parameter eval order is undefined.
```
This is C code, so we don't care about C++.
bytearrayiter_reduce(bytesiterobject *it, PyObject *Py_UNUSED(ignored))
/* _PyEval_GetBuiltin can invoke arbitrary code.
* calls must be *before* access of `it` pointers,
+ * since C parameter eval order is undefined.
* see issue #101765 */
if (it->it_seq != NULL) { |
codereview_new_cpp_data_12792 | odict_repr(PyODictObject *self)
result = PyUnicode_FromFormat("%s(%R)",
_PyType_Name(Py_TYPE(self)),
dcopy);
Done:
Py_ReprLeave((PyObject *)self);
- Py_XDECREF(dcopy);
return result;
}
You can move it above the `Done` label and change `Py_XDECREF` to `Py_DECREF`.
odict_repr(PyODictObject *self)
result = PyUnicode_FromFormat("%s(%R)",
_PyType_Name(Py_TYPE(self)),
dcopy);
+ Py_DECREF(dcopy);
Done:
Py_ReprLeave((PyObject *)self);
return result;
}
|
codereview_new_cpp_data_12794 | typedef struct {
static inline itertools_state *
get_module_state(PyObject *mod)
{
- void *state = PyModule_GetState(mod);
assert(state != NULL);
return (itertools_state *)state;
}
```suggestion
void *state = _PyModule_GetState(mod);
```
typedef struct {
static inline itertools_state *
get_module_state(PyObject *mod)
{
+ void *state = _PyModule_GetState(mod);
assert(state != NULL);
return (itertools_state *)state;
} |
codereview_new_cpp_data_12801 | _Py_Specialize_CompareAndBranch(PyObject *lhs, PyObject *rhs, _Py_CODEUNIT *inst
{
assert(_PyOpcode_Caches[COMPARE_AND_BRANCH] == INLINE_CACHE_ENTRIES_COMPARE_OP);
_PyCompareOpCache *cache = (_PyCompareOpCache *)(instr + 1);
int next_opcode = _Py_OPCODE(instr[INLINE_CACHE_ENTRIES_COMPARE_OP + 1]);
assert(next_opcode == POP_JUMP_IF_FALSE || next_opcode == POP_JUMP_IF_TRUE);
if (Py_TYPE(lhs) != Py_TYPE(rhs)) {
SPECIALIZATION_FAIL(COMPARE_AND_BRANCH, compare_op_fail_kind(lhs, rhs));
goto failure;
There's an unused variable warning here.
_Py_Specialize_CompareAndBranch(PyObject *lhs, PyObject *rhs, _Py_CODEUNIT *inst
{
assert(_PyOpcode_Caches[COMPARE_AND_BRANCH] == INLINE_CACHE_ENTRIES_COMPARE_OP);
_PyCompareOpCache *cache = (_PyCompareOpCache *)(instr + 1);
+#ifndef NDEBUG
int next_opcode = _Py_OPCODE(instr[INLINE_CACHE_ENTRIES_COMPARE_OP + 1]);
assert(next_opcode == POP_JUMP_IF_FALSE || next_opcode == POP_JUMP_IF_TRUE);
+#endif
if (Py_TYPE(lhs) != Py_TYPE(rhs)) {
SPECIALIZATION_FAIL(COMPARE_AND_BRANCH, compare_op_fail_kind(lhs, rhs));
goto failure; |
codereview_new_cpp_data_12806 | PyCode_GetFreevars(PyCodeObject *code)
int
_PyCode_GetNumFrameSlots(PyCodeObject *code)
{
- /* This function needs to remain in sync with Tools/build/deepfreeze.py */
assert(code->co_framesize >= (int)FRAME_SPECIALS_SIZE);
return code->co_framesize - FRAME_SPECIALS_SIZE;
}
Odd that one line needs an `(int)` cast and the other doesn't. This actually makes me think that the cast belongs in the definition of `FRAME_SPECIALS_SIZE`. @markshannon ?
PyCode_GetFreevars(PyCodeObject *code)
int
_PyCode_GetNumFrameSlots(PyCodeObject *code)
{
+ /* This function needs to remain in sync with the calculation of
+ * co_framesize in Tools/build/deepfreeze.py */
assert(code->co_framesize >= (int)FRAME_SPECIALS_SIZE);
return code->co_framesize - FRAME_SPECIALS_SIZE;
} |
codereview_new_cpp_data_12807 | PyCode_GetFreevars(PyCodeObject *code)
int
_PyCode_GetNumFrameSlots(PyCodeObject *code)
{
- /* This function needs to remain in sync with Tools/build/deepfreeze.py */
assert(code->co_framesize >= (int)FRAME_SPECIALS_SIZE);
return code->co_framesize - FRAME_SPECIALS_SIZE;
}
```suggestion
/* This function needs to remain in sync with the calculation of
* co_framesize in Tools/build/deepfreeze.py */
```
PyCode_GetFreevars(PyCodeObject *code)
int
_PyCode_GetNumFrameSlots(PyCodeObject *code)
{
+ /* This function needs to remain in sync with the calculation of
+ * co_framesize in Tools/build/deepfreeze.py */
assert(code->co_framesize >= (int)FRAME_SPECIALS_SIZE);
return code->co_framesize - FRAME_SPECIALS_SIZE;
} |
codereview_new_cpp_data_12808 | int___sizeof___impl(PyObject *self)
{
Py_ssize_t res;
- res = offsetof(PyLongObject, ob_digit) + Py_MAX(Py_ABS(Py_SIZE(self)), 1)*sizeof(digit);
return res;
}
Minor suggestion: break this up to avoid the long line (we try but don't always succeed to keep line length to 79 characters), and add a comment explaining what's going on:
E.g.:
```suggestion
res = offsetof(PyLongObject, ob_digit)
/* using Py_MAX(..., 1) because we always allocate space for at least
one digit, even though the integer zero has a Py_SIZE of 0 */
+ Py_MAX(Py_ABS(Py_SIZE(self)), 1)*sizeof(digit);
```
int___sizeof___impl(PyObject *self)
{
Py_ssize_t res;
+ res = offsetof(PyLongObject, ob_digit)
+ /* using Py_MAX(..., 1) because we always allocate space for at least
+ one digit, even though the integer zero has a Py_SIZE of 0 */
+ + Py_MAX(Py_ABS(Py_SIZE(self)), 1)*sizeof(digit);
return res;
}
|
codereview_new_cpp_data_12810 | symtable_extend_namedexpr_scope(struct symtable *st, expr_ty e)
if (ste->ste_comprehension) {
long target_in_scope = _PyST_GetSymbol(ste, target_name);
if ((target_in_scope & DEF_COMP_ITER) &&
- (target_in_scope & (DEF_LOCAL | DEF_GLOBAL))) {
PyErr_Format(PyExc_SyntaxError, NAMED_EXPR_COMP_CONFLICT, target_name);
PyErr_RangedSyntaxLocationObject(st->st_filename,
e->lineno,
```suggestion
(target_in_scope & DEF_LOCAL)) {
```
symtable_extend_namedexpr_scope(struct symtable *st, expr_ty e)
if (ste->ste_comprehension) {
long target_in_scope = _PyST_GetSymbol(ste, target_name);
if ((target_in_scope & DEF_COMP_ITER) &&
+ (target_in_scope & DEF_LOCAL)) {
PyErr_Format(PyExc_SyntaxError, NAMED_EXPR_COMP_CONFLICT, target_name);
PyErr_RangedSyntaxLocationObject(st->st_filename,
e->lineno, |
codereview_new_cpp_data_12821 | void addMmt4dTilingExpertPassPipeline(OpPassManager &passManager,
{
LinalgSingleTilingExpertPassOptions options;
options.vectorize = true;
- options.vectorize = enableVectorMasking;
nestedModulePM.addNestedPass<func::FuncOp>(
createLinalgSingleTilingExpertPass(options));
}
This should be `options.enableVectorMasking = enableVectorMasking;`
void addMmt4dTilingExpertPassPipeline(OpPassManager &passManager,
{
LinalgSingleTilingExpertPassOptions options;
options.vectorize = true;
+ options.enableVectorMasking = enableVectorMasking;
nestedModulePM.addNestedPass<func::FuncOp>(
createLinalgSingleTilingExpertPass(options));
} |
codereview_new_cpp_data_12822 | struct SwitchStoreOfIfResultValue
auto resultNumber = storeOp.getValue().cast<OpResult>().getResultNumber();
auto moveStoreInsideBody = [&](Block *body) {
- OpBuilder::InsertionGuard g2(rewriter);
auto yieldOp = cast<scf::YieldOp>(body->getTerminator());
rewriter.setInsertionPoint(yieldOp);
auto yieldedVal = yieldOp.getOperand(resultNumber);
Why do we name it as `g2`? Maybe it can just be `g` or `guard`.
struct SwitchStoreOfIfResultValue
auto resultNumber = storeOp.getValue().cast<OpResult>().getResultNumber();
auto moveStoreInsideBody = [&](Block *body) {
+ OpBuilder::InsertionGuard guard(rewriter);
auto yieldOp = cast<scf::YieldOp>(body->getTerminator());
rewriter.setInsertionPoint(yieldOp);
auto yieldedVal = yieldOp.getOperand(resultNumber); |
codereview_new_cpp_data_12827 | FailureOr<TileAndFuseResult> tileAndFuseDispatchUsingSCFForOp(
return rewriter.notifyMatchFailure(sliceOp,
"fusion along slice op failed");
}
- Operation *tiledProducer = tiledProducerVal->getDefiningOp();
- if (!llvm::dyn_cast_or_null<TilingInterface>(tiledProducer)) {
return rewriter.notifyMatchFailure(
- tiledProducer,
"expected tiled implementation to implement TilingInterface as well");
}
if (tiledProducer->getNumResults() != fusableProducer->getNumResults()) {
the impl of getDefiningOp does this:
```c++
template <typename OpTy>
OpTy getDefiningOp() const {
return llvm::dyn_cast_or_null<OpTy>(getDefiningOp());
}
```
so should have the same behavior?
FailureOr<TileAndFuseResult> tileAndFuseDispatchUsingSCFForOp(
return rewriter.notifyMatchFailure(sliceOp,
"fusion along slice op failed");
}
+ auto tiledProducer = tiledProducerVal->getDefiningOp<TilingInterface>();
+ if (!tiledProducer) {
return rewriter.notifyMatchFailure(
+ tiledProducerVal->getDefiningOp(),
"expected tiled implementation to implement TilingInterface as well");
}
if (tiledProducer->getNumResults() != fusableProducer->getNumResults()) { |
codereview_new_cpp_data_12828 | static LogicalResult adaptComputeConsumerToAvoidStackAllocation(
/// of tensor.unpack op for more details.
static LogicalResult replaceUnpackEmptyWithAllocTensor(OpBuilder &b,
func::FuncOp funcOp) {
- funcOp.walk<WalkOrder::PreOrder>([&](tensor::EmptyOp emptyOp) {
- bool isUsedByNonPerfectUnpack = false;
- for (const auto &use : emptyOp->getUses()) {
- if (auto unpack = dyn_cast<IREE::LinalgExt::UnPackOp>(use.getOwner())) {
- if (unpack->hasOneUse() &&
- isa<tensor::ExtractSliceOp>(*(unpack->user_begin()))) {
- isUsedByNonPerfectUnpack = true;
- }
- }
}
- if (!isUsedByNonPerfectUnpack) return;
OpBuilder::InsertionGuard g(b);
b.setInsertionPointAfter(emptyOp);
Wouldn't this be easier if you just walk the unpack operations and look at the operand that is a `tensor.empty`? (You can delete the `tensor.empty` if it has no other uses.)
static LogicalResult adaptComputeConsumerToAvoidStackAllocation(
/// of tensor.unpack op for more details.
static LogicalResult replaceUnpackEmptyWithAllocTensor(OpBuilder &b,
func::FuncOp funcOp) {
+ funcOp.walk([&](IREE::LinalgExt::UnPackOp unpackOp) {
+ if (!unpackOp->hasOneUse() ||
+ !isa<tensor::ExtractSliceOp>(*(unpackOp->user_begin()))) {
+ return;
}
+ auto emptyOp = unpackOp.getOutput().getDefiningOp<tensor::EmptyOp>();
+ if (!emptyOp) return;
OpBuilder::InsertionGuard g(b);
b.setInsertionPointAfter(emptyOp); |
codereview_new_cpp_data_12829 | static unsigned decideFusableLinalgOps(FunctionOpInterface funcOp,
for (Operation &op : llvm::reverse(block)) {
// If it is part of a fusion group or root op, ignore it.
if (hasFusionGroupsAttribute(&op) || hasRootOpAttribute(&op)) continue;
- // Only look for Linalg ops here. Avoid moving `linalg.fill` that aren't
- // fused with anything else into their own dispatches since it is better
- // to convert them to splats.
if (!isa<linalg::LinalgOp, tensor::PackOp, tensor::UnPackOp>(op) ||
isa<linalg::FillOp>(op)) {
continue;
Update this comment?
static unsigned decideFusableLinalgOps(FunctionOpInterface funcOp,
for (Operation &op : llvm::reverse(block)) {
// If it is part of a fusion group or root op, ignore it.
if (hasFusionGroupsAttribute(&op) || hasRootOpAttribute(&op)) continue;
+ // Only look for Linalg ops here. Avoid moving `linalg.fill`,
+ // `tensor.pack`, and `tensor.unpack` that aren't fused with anything else
+ // into their own dispatches since it is better to convert them to splats.
if (!isa<linalg::LinalgOp, tensor::PackOp, tensor::UnPackOp>(op) ||
isa<linalg::FillOp>(op)) {
continue; |
codereview_new_cpp_data_12830 | static FailureOr<IREE::Codegen::MicroKernelOpInterface> matchDAGForMicroKernel(
Type outElemType = outType.getElementType();
if (lhsElemType.isSignlessInteger(8) && rhsElemType.isSignlessInteger(8) &&
outElemType.isSignlessInteger(32)) {
- fnName = "vmvx.matmul.i8i8i32";
} else if (lhsElemType.isF32() && rhsElemType.isF32() &&
outElemType.isF32()) {
fnName = "vmvx.matmul.f32.f32.f32";
So Linalg ops have a first-class StringRef attribute to encode a library call name and that [connects](https://github.com/llvm/llvm-project/blob/main/mlir/test/Integration/Dialect/Linalg/CPU/test-tensor-matmul.mlir) well at the tensor level.
Seeing the amount of post-hoc integration needed for something that is expected to be generic here makes me wonder whether we can reuse/evolve that upstream mechanism as needed so that IREE can just set the fnName at the place of definition?
I suspect we can significantly automate a lot of the current and future logic.
static FailureOr<IREE::Codegen::MicroKernelOpInterface> matchDAGForMicroKernel(
Type outElemType = outType.getElementType();
if (lhsElemType.isSignlessInteger(8) && rhsElemType.isSignlessInteger(8) &&
outElemType.isSignlessInteger(32)) {
+ fnName = "vmvx.matmul.i8.i8.i32";
} else if (lhsElemType.isF32() && rhsElemType.isF32() &&
outElemType.isF32()) {
fnName = "vmvx.matmul.f32.f32.f32"; |
codereview_new_cpp_data_12834 | static iree_status_t iree_hal_cuda_driver_create_internal(
iree_status_t status =
iree_hal_cuda_dynamic_symbols_initialize(host_allocator, &driver->syms);
- if (!iree_status_is_ok(status)) {
- iree_hal_driver_release((iree_hal_driver_t*)driver);
- return status;
- }
-
if (iree_status_is_ok(status)) {
// Initialize NCCL if NPROCS is set.
if (driver->default_params.nccl_default_count > 0) {
you can remove this early return as the status check below handles it
static iree_status_t iree_hal_cuda_driver_create_internal(
iree_status_t status =
iree_hal_cuda_dynamic_symbols_initialize(host_allocator, &driver->syms);
if (iree_status_is_ok(status)) {
// Initialize NCCL if NPROCS is set.
if (driver->default_params.nccl_default_count > 0) { |
codereview_new_cpp_data_12838 | class SPIRVAnnotateLoopsPass final
void runOnOperation() override {
func::FuncOp funcOp = getOperation();
SmallVector<scf::ForOp, 4> forOps;
- bool afterWorkgroupLoops{false};
- funcOp.walk([&](Operation *op) {
- if (isa<IREE::Flow::DispatchTensorLoadOp>(op)) {
- afterWorkgroupLoops = true;
- }
- if (isa<IREE::Flow::DispatchTensorStoreOp>(op)) {
- afterWorkgroupLoops = false;
- }
- if (afterWorkgroupLoops) {
- if (auto forOp = dyn_cast<scf::ForOp>(op)) forOps.push_back(forOp);
- }
});
MLIRContext *context = &getContext();
OpBuilder builder(context);
const char *attrName = getSPIRVDistributeAttrName();
- // Can only distribute to a maximum of 3 loops
- int maxIndex{2};
for (auto forOp : llvm::enumerate(forOps)) {
- if (forOp.index() > maxIndex) break;
forOp.value()->setAttr(attrName, builder.getIndexAttr(forOp.index()));
}
}
Hmm, it's fragile to rely on this to tell whether we are handling loops tiled and distributed to workgroups. You can actually check by using `isTiledAndDistributedLoop` in `Codegen/Utils/Utils.h`.
class SPIRVAnnotateLoopsPass final
void runOnOperation() override {
func::FuncOp funcOp = getOperation();
SmallVector<scf::ForOp, 4> forOps;
+ funcOp.walk([&](scf::ForOp forOp) {
+ if (!isTiledAndDistributedLoop(forOp)) forOps.push_back(forOp);
});
MLIRContext *context = &getContext();
OpBuilder builder(context);
const char *attrName = getSPIRVDistributeAttrName();
for (auto forOp : llvm::enumerate(forOps)) {
+ if (forOp.index() > kNumGPUDims) break;
forOp.value()->setAttr(attrName, builder.getIndexAttr(forOp.index()));
}
} |
codereview_new_cpp_data_12841 | iree_status_t iree_hal_heap_buffer_wrap(
IREE_ASSERT_ARGUMENT(out_buffer);
IREE_TRACE_ZONE_BEGIN(z0);
- if (!iree_host_size_has_alignment((uintptr_t)data.data_length,
IREE_HAL_HEAP_BUFFER_ALIGNMENT)) {
IREE_TRACE_ZONE_END(z0);
return iree_make_status(
IREE_STATUS_OUT_OF_RANGE,
- "imported heap buffer data must be aligned to %d; got %" PRIhsz,
- (int)IREE_HAL_HEAP_BUFFER_ALIGNMENT, data.data_length);
}
iree_allocator_t host_allocator =
this is incorrect - the base of the buffer pointer is what needs to be aligned, not the size
if the pointers coming back from wgpuBufferGetConstMappedRange are unaligned that's likely to be a problem (but one we could work around in the short term with a flag that disables this check)
iree_status_t iree_hal_heap_buffer_wrap(
IREE_ASSERT_ARGUMENT(out_buffer);
IREE_TRACE_ZONE_BEGIN(z0);
+ if (!iree_host_size_has_alignment((uintptr_t)data.data,
IREE_HAL_HEAP_BUFFER_ALIGNMENT)) {
IREE_TRACE_ZONE_END(z0);
return iree_make_status(
IREE_STATUS_OUT_OF_RANGE,
+ "imported heap buffer data must be aligned to %d; got %p",
+ (int)IREE_HAL_HEAP_BUFFER_ALIGNMENT, data.data);
}
iree_allocator_t host_allocator = |
codereview_new_cpp_data_12842 | static bool isFusableWithProducer(OpOperand &operand, bool aggressiveFusion) {
Operation *producer = operand.get().getDefiningOp();
Operation *consumer = operand.getOwner();
- // Fuse linalg ops with set encoding op if the operand is an `outs` value.
- if (isa<linalg::LinalgOp>(consumer) &&
- isa<IREE::LinalgExt::SetEncodingOp>(producer)) {
- return cast<DestinationStyleOpInterface>(consumer).isDpsInit(&operand);
- }
-
if (!isa<linalg::LinalgOp>(consumer) || !isa<linalg::LinalgOp>(producer)) {
return false;
}
I added for the case where you start with a `linalg.fill` -> `linalg.matmul` dependence through the `outs` operands of the latter. Then `SetEncodingPass` will create a `iree_linalg_ext.set_encoding` operation on the outs operand of the `linalg.matmul`. So you get `linalg.fill` -> `iree_linalg_ext.set_encoding` -> `linalg.matmul`. I was trying to pull those in. But maybe that is not the right approach. I can fold the `linalg.fill` -> `iree_linalg_ext.set_encoding` to a `linalg.fill` of the encoded type. That should get us to the same place.....
static bool isFusableWithProducer(OpOperand &operand, bool aggressiveFusion) {
Operation *producer = operand.get().getDefiningOp();
Operation *consumer = operand.getOwner();
if (!isa<linalg::LinalgOp>(consumer) || !isa<linalg::LinalgOp>(producer)) {
return false;
} |
codereview_new_cpp_data_12843 | static LogicalResult isEquivalentToOpImpl(PatternRewriter &rewriter,
linalg::OpOperandVector opOutputs = linalgOp.getOutputOperands();
linalg::OpOperandVector modelOutputs = linalgModelOp.getOutputOperands();
auto notEqualFn = [](std::tuple<OpOperand *, OpOperand *> in) -> bool {
- return std::get<0>(in) != std::get<1>(in);
};
if (opInputs.size() != modelInputs.size() ||
Nit: I think this function is not needed. Equality of `SmallVector<T>` already compares the values of each elements.
static LogicalResult isEquivalentToOpImpl(PatternRewriter &rewriter,
linalg::OpOperandVector opOutputs = linalgOp.getOutputOperands();
linalg::OpOperandVector modelOutputs = linalgModelOp.getOutputOperands();
auto notEqualFn = [](std::tuple<OpOperand *, OpOperand *> in) -> bool {
+ return std::get<0>(in)->get() != std::get<1>(in)->get();
};
if (opInputs.size() != modelInputs.size() || |
codereview_new_cpp_data_12844 | static SmallVector<T> interchange(ArrayRef<T> elements,
SmallVector<T> rearrangedElements = llvm::to_vector(elements);
if (interchangeVector.empty())
return rearrangedElements;
- // assert((rearrangedElements.size() - offset) == interchangeVector.size() &&
- // "number of elements must equal number of permutations");
for (int64_t idx = 0, end = interchangeVector.size(); idx < end; idx++) {
rearrangedElements[interchangeVector[idx] + offset] =
elements[idx + offset];
delete the comment?
static SmallVector<T> interchange(ArrayRef<T> elements,
SmallVector<T> rearrangedElements = llvm::to_vector(elements);
if (interchangeVector.empty())
return rearrangedElements;
for (int64_t idx = 0, end = interchangeVector.size(); idx < end; idx++) {
rearrangedElements[interchangeVector[idx] + offset] =
elements[idx + offset]; |
codereview_new_cpp_data_12846 | static bool contractOpFilter(Operation *op) {
auto linalgOp = dyn_cast<linalg::LinalgOp>(op);
if (!linalgOp) return false;
- // Can't promote dynamic shapes.
if (linalgOp.hasDynamicShape()) return false;
SmallVector<unsigned> dims;
I think it makes sense to do when doing specialization. I would add a descriptive comment though.
static bool contractOpFilter(Operation *op) {
auto linalgOp = dyn_cast<linalg::LinalgOp>(op);
if (!linalgOp) return false;
+ // The workgroup specialization already makes static shapes available for the
+ // main tile part and makes the partial tile computation small, so promoting
+ // to shared memory for the partial tile actually hurts the performance.
if (linalgOp.hasDynamicShape()) return false;
SmallVector<unsigned> dims; |
codereview_new_cpp_data_12847 | SmallVector<Value, 4> getTileSizes(OpBuilder &b, Operation *op,
}));
}
void setLoweringConfig(Operation *op,
IREE::Codegen::LoweringConfigAttr config) {
op->setAttr(kConfigAttrName, config);
nit: white line
SmallVector<Value, 4> getTileSizes(OpBuilder &b, Operation *op,
}));
}
+unsigned getNumTileLevels(Operation *op) {
+ IREE::Codegen::LoweringConfigAttr configAttr = getLoweringConfig(op);
+ if (!configAttr) return 0;
+ return configAttr.getTileSizes().size();
+}
+
void setLoweringConfig(Operation *op,
IREE::Codegen::LoweringConfigAttr config) {
op->setAttr(kConfigAttrName, config); |
codereview_new_cpp_data_12848 | void addGPUMatmulTensorCorePassPipeline(OpPassManager &pm,
}
void addGPUTransposePassPipeline(OpPassManager &pm) {
- // tileAndBufferize(pm);
tileAndDistributeToWorkgroup(pm);
-
auto &nestedModulePM = pm.nest<ModuleOp>();
- // Distribute linalg onto threads within the workgroup.
- // nestedModulePM.addNestedPass<func::FuncOp>(createLLVMGPUTileAndDistribute(
- // false, GPUPromoteSharedMemPattern::TransposeOpPattern));
nestedModulePM.addNestedPass<func::FuncOp>(
createRemoveSingleIterationLoopPass());
@ThomasRaoux Can you advise if this is the correct pipeline arrangement? I've compared the IR before/after the change. The mechanics of the shared-mem transpose are unchanged; however, the IRs do differ in their use of memref.subviews.
void addGPUMatmulTensorCorePassPipeline(OpPassManager &pm,
}
void addGPUTransposePassPipeline(OpPassManager &pm) {
tileAndDistributeToWorkgroup(pm);
auto &nestedModulePM = pm.nest<ModuleOp>();
nestedModulePM.addNestedPass<func::FuncOp>(
createRemoveSingleIterationLoopPass()); |
codereview_new_cpp_data_12849 | static SmallVector<int64_t> getMinTilingSizesForEachDim(
std::max<int64_t>(minTileSizes[fastestVaryingDim], tileSize);
}
- // Limit unroll factor. For know, we assume the rightmost non-one tiled
// dimension is for vectorization and any other non-one dimension is for
// unrolling.
auto limitUnrollFactor = [&](int64_t maxUnrollFactor) {
nit: typo `s/know/now`
static SmallVector<int64_t> getMinTilingSizesForEachDim(
std::max<int64_t>(minTileSizes[fastestVaryingDim], tileSize);
}
+ // Limit unroll factor. For now, we assume the rightmost non-one tiled
// dimension is for vectorization and any other non-one dimension is for
// unrolling.
auto limitUnrollFactor = [&](int64_t maxUnrollFactor) { |
codereview_new_cpp_data_12850 | static LogicalResult setTranslationInfoAndRootConfig(
for (auto computeOp : computeOps) {
if (IREE::Codegen::CompilationInfoAttr compilationInfo =
getCompilationInfo(computeOp)) {
- (void)setUserConfig(entryPointFn, computeOp, compilationInfo);
}
}
We need to surface errors up if it happens:
```
if (failed(setUserConfig(...)) return failure();
static LogicalResult setTranslationInfoAndRootConfig(
for (auto computeOp : computeOps) {
if (IREE::Codegen::CompilationInfoAttr compilationInfo =
getCompilationInfo(computeOp)) {
+ if (failed(setUserConfig(entryPointFn, computeOp, compilationInfo)))
+ return failure();
}
}
|
codereview_new_cpp_data_12851 | static LogicalResult setTransposeConfig(func::FuncOp entryPoint,
// Check alignment with tile size for each transpose.
if (auto genericOp = dyn_cast<linalg::GenericOp>(op)) {
- for (auto operandIndexPair :
- llvm::zip(genericOp.getOperands(), genericOp.getIndexingMapsArray())) {
- if (isSharedMemTranspose(std::get<1>(operandIndexPair))) {
- auto inputShape = std::get<0>(operandIndexPair)
- .getType()
- .cast<ShapedType>()
- .getShape();
- if (inputShape[inputShape.size() - 1] % tileM != 0 ||
- inputShape[inputShape.size() - 2] % tileN != 0) {
- return failure();
- }
}
}
} else {
All the operands' dimensions must match; instead you should just do `generic.getStaticLoopRanges()` and check that the dimensions are divisible. Since you know there are only two dimensions, this will get you M and N.
static LogicalResult setTransposeConfig(func::FuncOp entryPoint,
// Check alignment with tile size for each transpose.
if (auto genericOp = dyn_cast<linalg::GenericOp>(op)) {
+ auto loopRanges = genericOp.getStaticLoopRanges();
+ for (auto loopRange : loopRanges) {
+ if (loopRange % 32 != 0) {
+ return failure();
}
}
} else { |
codereview_new_cpp_data_12852 | static iree_status_t iree_cpu_has_required_target_features(
iree_string_view_t required_feature = iree_yaml_node_as_string(item_node);
if (iree_string_view_is_empty(required_feature)) continue;
int64_t feature_is_supported = 0;
- fprintf(stderr, "Looking up feature by key: %.*s\n",
- (int)required_feature.size, required_feature.data);
IREE_RETURN_IF_ERROR(
iree_cpu_lookup_data_by_key(required_feature, &feature_is_supported));
if (!feature_is_supported) {
Probably want to remove this `fprintf`, so it doesn't appear in test / CI output for every run?
static iree_status_t iree_cpu_has_required_target_features(
iree_string_view_t required_feature = iree_yaml_node_as_string(item_node);
if (iree_string_view_is_empty(required_feature)) continue;
int64_t feature_is_supported = 0;
IREE_RETURN_IF_ERROR(
iree_cpu_lookup_data_by_key(required_feature, &feature_is_supported));
if (!feature_is_supported) { |
codereview_new_cpp_data_12853 | class WGSLReplacePushConstantsPass
alignmentAttr = constantLoadOps[0].getAlignmentAttr();
}
- // hal.interface.binding.subspan -> !flow.dispatch.tensor<readonly:i32>
// * Group all push constants into a single tensor<Nxi32>
// * If individual data types differ, they'll be bitcast when extracted
auto dispatchTensorType = IREE::Flow::DispatchTensorType::get(
- IREE::Flow::TensorAccess::ReadOnly, {}, builder.getI32Type());
SmallVector<Value> dynamicDims;
// Note: we're ignoring all potential 'values' hints (if provided) on ops -
// InterfaceBindingSubspanOp has no matching concept and we assume that any
Drop `dynamicDims` and just use `{}` everywhere. Less confusing?
class WGSLReplacePushConstantsPass
alignmentAttr = constantLoadOps[0].getAlignmentAttr();
}
+ // hal.interface.binding.subspan -> !flow.dispatch.tensor<readonly:Nxi32>
// * Group all push constants into a single tensor<Nxi32>
// * If individual data types differ, they'll be bitcast when extracted
auto dispatchTensorType = IREE::Flow::DispatchTensorType::get(
+ IREE::Flow::TensorAccess::ReadOnly,
+ {static_cast<int64_t>(maxConstantIndex + 1)}, builder.getI32Type());
SmallVector<Value> dynamicDims;
// Note: we're ignoring all potential 'values' hints (if provided) on ops -
// InterfaceBindingSubspanOp has no matching concept and we assume that any |
codereview_new_cpp_data_12854 | static bool isTransposeOp(linalg::LinalgOp linalgOp) {
static LogicalResult setTransposeConfig(func::FuncOp entryPoint,
Operation *op) {
TileSizesListType tileSizes;
- tileSizes.push_back({32, 32});
// Check alignment with tile size
if (auto genericOp = dyn_cast<linalg::GenericOp>(op)) {
auto inputShape =
genericOp.inputs()[0].getType().cast<ShapedType>().getShape();
- if (inputShape[0] % 32 != 0 || inputShape[1] % 32 != 0) {
return failure();
}
} else {
can you use constant variables for those `32` with a good name like `tileM` `tileN`
static bool isTransposeOp(linalg::LinalgOp linalgOp) {
static LogicalResult setTransposeConfig(func::FuncOp entryPoint,
Operation *op) {
+ int32_t tileM = 32;
+ int32_t tileN = 32;
TileSizesListType tileSizes;
+ tileSizes.push_back({tileM, tileN});
// Check alignment with tile size
if (auto genericOp = dyn_cast<linalg::GenericOp>(op)) {
auto inputShape =
genericOp.inputs()[0].getType().cast<ShapedType>().getShape();
+ if (inputShape[0] % tileM != 0 || inputShape[1] % tileN != 0) {
return failure();
}
} else { |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.