idx: int64
project: string
commit_id: string
project_url: string
commit_url: string
commit_message: string
target: int64
func: string
func_hash: string
file_name: string
file_hash: string
cwe: string
cve: string
cve_desc: string
nvd_url: string
idx: 194,963
project: ImageMagick6
commit_id: dc070da861a015d3c97488fdcca6063b44d47a7b
project_url: https://github.com/ImageMagick/ImageMagick6
commit_url: https://github.com/ImageMagick/ImageMagick6/commit/dc070da861a015d3c97488fdcca6063b44d47a7b
commit_message: https://github.com/ImageMagick/ImageMagick/pull/5034
target: 1
func:
static MagickBooleanType GetEXIFProperty(const Image *image, const char *property) { #define MaxDirectoryStack 16 #define EXIF_DELIMITER "\n" #define EXIF_NUM_FORMATS 12 #define EXIF_FMT_BYTE 1 #define EXIF_FMT_STRING 2 #define EXIF_FMT_USHORT 3 #define EXIF_FMT_ULONG 4 #define EXIF_FMT_URATIONAL 5 #define EXIF_FMT_SBYTE 6 #define EXIF_FMT_UNDEFINED 7 #define EXIF_FMT_SSHORT 8 #define EXIF_FMT_SLONG 9 #define EXIF_FMT_SRATIONAL 10 #define EXIF_FMT_SINGLE 11 #define EXIF_FMT_DOUBLE 12 #define TAG_EXIF_OFFSET 0x8769 #define TAG_GPS_OFFSET 0x8825 #define TAG_INTEROP_OFFSET 0xa005 #define EXIFMultipleValues(size,format,arg) \ { \ ssize_t \ component; \ \ size_t \ length; \ \ unsigned char \ *p1; \ \ length=0; \ p1=p; \ for (component=0; component < components; component++) \ { \ length+=FormatLocaleString(buffer+length,MaxTextExtent-length, \ format", ",arg); \ if (length >= (MaxTextExtent-1)) \ length=MaxTextExtent-1; \ p1+=size; \ } \ if (length > 1) \ buffer[length-2]='\0'; \ value=AcquireString(buffer); \ } #define EXIFMultipleFractions(size,format,arg1,arg2) \ { \ ssize_t \ component; \ \ size_t \ length; \ \ unsigned char \ *p1; \ \ length=0; \ p1=p; \ for (component=0; component < components; component++) \ { \ length+=FormatLocaleString(buffer+length,MaxTextExtent-length, \ format", ",(arg1),(arg2)); \ if (length >= (MaxTextExtent-1)) \ length=MaxTextExtent-1; \ p1+=size; \ } \ if (length > 1) \ buffer[length-2]='\0'; \ value=AcquireString(buffer); \ } typedef struct _DirectoryInfo { const unsigned char *directory; size_t entry; ssize_t offset; } DirectoryInfo; typedef struct _TagInfo { size_t tag; const char description[36]; } TagInfo; static const TagInfo EXIFTag[] = { { 0x001, "exif:InteroperabilityIndex" }, { 0x002, "exif:InteroperabilityVersion" }, { 0x100, "exif:ImageWidth" }, { 0x101, "exif:ImageLength" }, { 0x102, "exif:BitsPerSample" }, { 0x103, "exif:Compression" }, { 0x106, "exif:PhotometricInterpretation" }, { 0x10a, "exif:FillOrder" }, { 0x10d, "exif:DocumentName" }, { 0x10e, "exif:ImageDescription" }, { 0x10f, "exif:Make" }, { 0x110, "exif:Model" }, { 0x111, "exif:StripOffsets" }, { 0x112, "exif:Orientation" }, { 0x115, "exif:SamplesPerPixel" }, { 0x116, "exif:RowsPerStrip" }, { 0x117, "exif:StripByteCounts" }, { 0x11a, "exif:XResolution" }, { 0x11b, "exif:YResolution" }, { 0x11c, "exif:PlanarConfiguration" }, { 0x11d, "exif:PageName" }, { 0x11e, "exif:XPosition" }, { 0x11f, "exif:YPosition" }, { 0x118, "exif:MinSampleValue" }, { 0x119, "exif:MaxSampleValue" }, { 0x120, "exif:FreeOffsets" }, { 0x121, "exif:FreeByteCounts" }, { 0x122, "exif:GrayResponseUnit" }, { 0x123, "exif:GrayResponseCurve" }, { 0x124, "exif:T4Options" }, { 0x125, "exif:T6Options" }, { 0x128, "exif:ResolutionUnit" }, { 0x12d, "exif:TransferFunction" }, { 0x131, "exif:Software" }, { 0x132, "exif:DateTime" }, { 0x13b, "exif:Artist" }, { 0x13e, "exif:WhitePoint" }, { 0x13f, "exif:PrimaryChromaticities" }, { 0x140, "exif:ColorMap" }, { 0x141, "exif:HalfToneHints" }, { 0x142, "exif:TileWidth" }, { 0x143, "exif:TileLength" }, { 0x144, "exif:TileOffsets" }, { 0x145, "exif:TileByteCounts" }, { 0x14a, "exif:SubIFD" }, { 0x14c, "exif:InkSet" }, { 0x14d, "exif:InkNames" }, { 0x14e, "exif:NumberOfInks" }, { 0x150, "exif:DotRange" }, { 0x151, "exif:TargetPrinter" }, { 0x152, "exif:ExtraSample" }, { 0x153, "exif:SampleFormat" }, { 0x154, "exif:SMinSampleValue" }, { 0x155, "exif:SMaxSampleValue" }, { 0x156, "exif:TransferRange" }, { 0x157, "exif:ClipPath" }, { 0x158, "exif:XClipPathUnits" }, { 0x159, 
"exif:YClipPathUnits" }, { 0x15a, "exif:Indexed" }, { 0x15b, "exif:JPEGTables" }, { 0x15f, "exif:OPIProxy" }, { 0x200, "exif:JPEGProc" }, { 0x201, "exif:JPEGInterchangeFormat" }, { 0x202, "exif:JPEGInterchangeFormatLength" }, { 0x203, "exif:JPEGRestartInterval" }, { 0x205, "exif:JPEGLosslessPredictors" }, { 0x206, "exif:JPEGPointTransforms" }, { 0x207, "exif:JPEGQTables" }, { 0x208, "exif:JPEGDCTables" }, { 0x209, "exif:JPEGACTables" }, { 0x211, "exif:YCbCrCoefficients" }, { 0x212, "exif:YCbCrSubSampling" }, { 0x213, "exif:YCbCrPositioning" }, { 0x214, "exif:ReferenceBlackWhite" }, { 0x2bc, "exif:ExtensibleMetadataPlatform" }, { 0x301, "exif:Gamma" }, { 0x302, "exif:ICCProfileDescriptor" }, { 0x303, "exif:SRGBRenderingIntent" }, { 0x320, "exif:ImageTitle" }, { 0x5001, "exif:ResolutionXUnit" }, { 0x5002, "exif:ResolutionYUnit" }, { 0x5003, "exif:ResolutionXLengthUnit" }, { 0x5004, "exif:ResolutionYLengthUnit" }, { 0x5005, "exif:PrintFlags" }, { 0x5006, "exif:PrintFlagsVersion" }, { 0x5007, "exif:PrintFlagsCrop" }, { 0x5008, "exif:PrintFlagsBleedWidth" }, { 0x5009, "exif:PrintFlagsBleedWidthScale" }, { 0x500A, "exif:HalftoneLPI" }, { 0x500B, "exif:HalftoneLPIUnit" }, { 0x500C, "exif:HalftoneDegree" }, { 0x500D, "exif:HalftoneShape" }, { 0x500E, "exif:HalftoneMisc" }, { 0x500F, "exif:HalftoneScreen" }, { 0x5010, "exif:JPEGQuality" }, { 0x5011, "exif:GridSize" }, { 0x5012, "exif:ThumbnailFormat" }, { 0x5013, "exif:ThumbnailWidth" }, { 0x5014, "exif:ThumbnailHeight" }, { 0x5015, "exif:ThumbnailColorDepth" }, { 0x5016, "exif:ThumbnailPlanes" }, { 0x5017, "exif:ThumbnailRawBytes" }, { 0x5018, "exif:ThumbnailSize" }, { 0x5019, "exif:ThumbnailCompressedSize" }, { 0x501a, "exif:ColorTransferFunction" }, { 0x501b, "exif:ThumbnailData" }, { 0x5020, "exif:ThumbnailImageWidth" }, { 0x5021, "exif:ThumbnailImageHeight" }, { 0x5022, "exif:ThumbnailBitsPerSample" }, { 0x5023, "exif:ThumbnailCompression" }, { 0x5024, "exif:ThumbnailPhotometricInterp" }, { 0x5025, "exif:ThumbnailImageDescription" }, { 0x5026, "exif:ThumbnailEquipMake" }, { 0x5027, "exif:ThumbnailEquipModel" }, { 0x5028, "exif:ThumbnailStripOffsets" }, { 0x5029, "exif:ThumbnailOrientation" }, { 0x502a, "exif:ThumbnailSamplesPerPixel" }, { 0x502b, "exif:ThumbnailRowsPerStrip" }, { 0x502c, "exif:ThumbnailStripBytesCount" }, { 0x502d, "exif:ThumbnailResolutionX" }, { 0x502e, "exif:ThumbnailResolutionY" }, { 0x502f, "exif:ThumbnailPlanarConfig" }, { 0x5030, "exif:ThumbnailResolutionUnit" }, { 0x5031, "exif:ThumbnailTransferFunction" }, { 0x5032, "exif:ThumbnailSoftwareUsed" }, { 0x5033, "exif:ThumbnailDateTime" }, { 0x5034, "exif:ThumbnailArtist" }, { 0x5035, "exif:ThumbnailWhitePoint" }, { 0x5036, "exif:ThumbnailPrimaryChromaticities" }, { 0x5037, "exif:ThumbnailYCbCrCoefficients" }, { 0x5038, "exif:ThumbnailYCbCrSubsampling" }, { 0x5039, "exif:ThumbnailYCbCrPositioning" }, { 0x503A, "exif:ThumbnailRefBlackWhite" }, { 0x503B, "exif:ThumbnailCopyRight" }, { 0x5090, "exif:LuminanceTable" }, { 0x5091, "exif:ChrominanceTable" }, { 0x5100, "exif:FrameDelay" }, { 0x5101, "exif:LoopCount" }, { 0x5110, "exif:PixelUnit" }, { 0x5111, "exif:PixelPerUnitX" }, { 0x5112, "exif:PixelPerUnitY" }, { 0x5113, "exif:PaletteHistogram" }, { 0x1000, "exif:RelatedImageFileFormat" }, { 0x1001, "exif:RelatedImageLength" }, { 0x1002, "exif:RelatedImageWidth" }, { 0x800d, "exif:ImageID" }, { 0x80e3, "exif:Matteing" }, { 0x80e4, "exif:DataType" }, { 0x80e5, "exif:ImageDepth" }, { 0x80e6, "exif:TileDepth" }, { 0x828d, "exif:CFARepeatPatternDim" }, { 0x828e, "exif:CFAPattern2" 
}, { 0x828f, "exif:BatteryLevel" }, { 0x8298, "exif:Copyright" }, { 0x829a, "exif:ExposureTime" }, { 0x829d, "exif:FNumber" }, { 0x83bb, "exif:IPTC/NAA" }, { 0x84e3, "exif:IT8RasterPadding" }, { 0x84e5, "exif:IT8ColorTable" }, { 0x8649, "exif:ImageResourceInformation" }, { 0x8769, "exif:ExifOffset" }, /* specs as "Exif IFD Pointer"? */ { 0x8773, "exif:InterColorProfile" }, { 0x8822, "exif:ExposureProgram" }, { 0x8824, "exif:SpectralSensitivity" }, { 0x8825, "exif:GPSInfo" }, /* specs as "GPSInfo IFD Pointer"? */ { 0x8827, "exif:PhotographicSensitivity" }, { 0x8828, "exif:OECF" }, { 0x8829, "exif:Interlace" }, { 0x882a, "exif:TimeZoneOffset" }, { 0x882b, "exif:SelfTimerMode" }, { 0x8830, "exif:SensitivityType" }, { 0x8831, "exif:StandardOutputSensitivity" }, { 0x8832, "exif:RecommendedExposureIndex" }, { 0x8833, "exif:ISOSpeed" }, { 0x8834, "exif:ISOSpeedLatitudeyyy" }, { 0x8835, "exif:ISOSpeedLatitudezzz" }, { 0x9000, "exif:ExifVersion" }, { 0x9003, "exif:DateTimeOriginal" }, { 0x9004, "exif:DateTimeDigitized" }, { 0x9010, "exif:OffsetTime" }, { 0x9011, "exif:OffsetTimeOriginal" }, { 0x9012, "exif:OffsetTimeDigitized" }, { 0x9101, "exif:ComponentsConfiguration" }, { 0x9102, "exif:CompressedBitsPerPixel" }, { 0x9201, "exif:ShutterSpeedValue" }, { 0x9202, "exif:ApertureValue" }, { 0x9203, "exif:BrightnessValue" }, { 0x9204, "exif:ExposureBiasValue" }, { 0x9205, "exif:MaxApertureValue" }, { 0x9206, "exif:SubjectDistance" }, { 0x9207, "exif:MeteringMode" }, { 0x9208, "exif:LightSource" }, { 0x9209, "exif:Flash" }, { 0x920a, "exif:FocalLength" }, { 0x920b, "exif:FlashEnergy" }, { 0x920c, "exif:SpatialFrequencyResponse" }, { 0x920d, "exif:Noise" }, { 0x9214, "exif:SubjectArea" }, { 0x9290, "exif:SubSecTime" }, { 0x9291, "exif:SubSecTimeOriginal" }, { 0x9292, "exif:SubSecTimeDigitized" }, { 0x9211, "exif:ImageNumber" }, { 0x9212, "exif:SecurityClassification" }, { 0x9213, "exif:ImageHistory" }, { 0x9214, "exif:SubjectArea" }, { 0x9215, "exif:ExposureIndex" }, { 0x9216, "exif:TIFF-EPStandardID" }, { 0x927c, "exif:MakerNote" }, { 0x9286, "exif:UserComment" }, { 0x9290, "exif:SubSecTime" }, { 0x9291, "exif:SubSecTimeOriginal" }, { 0x9292, "exif:SubSecTimeDigitized" }, { 0x9400, "exif:Temperature" }, { 0x9401, "exif:Humidity" }, { 0x9402, "exif:Pressure" }, { 0x9403, "exif:WaterDepth" }, { 0x9404, "exif:Acceleration" }, { 0x9405, "exif:CameraElevationAngle" }, { 0x9C9b, "exif:WinXP-Title" }, { 0x9C9c, "exif:WinXP-Comments" }, { 0x9C9d, "exif:WinXP-Author" }, { 0x9C9e, "exif:WinXP-Keywords" }, { 0x9C9f, "exif:WinXP-Subject" }, { 0xa000, "exif:FlashPixVersion" }, { 0xa001, "exif:ColorSpace" }, { 0xa002, "exif:PixelXDimension" }, { 0xa003, "exif:PixelYDimension" }, { 0xa004, "exif:RelatedSoundFile" }, { 0xa005, "exif:InteroperabilityOffset" }, { 0xa20b, "exif:FlashEnergy" }, { 0xa20c, "exif:SpatialFrequencyResponse" }, { 0xa20d, "exif:Noise" }, { 0xa20e, "exif:FocalPlaneXResolution" }, { 0xa20f, "exif:FocalPlaneYResolution" }, { 0xa210, "exif:FocalPlaneResolutionUnit" }, { 0xa214, "exif:SubjectLocation" }, { 0xa215, "exif:ExposureIndex" }, { 0xa216, "exif:TIFF/EPStandardID" }, { 0xa217, "exif:SensingMethod" }, { 0xa300, "exif:FileSource" }, { 0xa301, "exif:SceneType" }, { 0xa302, "exif:CFAPattern" }, { 0xa401, "exif:CustomRendered" }, { 0xa402, "exif:ExposureMode" }, { 0xa403, "exif:WhiteBalance" }, { 0xa404, "exif:DigitalZoomRatio" }, { 0xa405, "exif:FocalLengthIn35mmFilm" }, { 0xa406, "exif:SceneCaptureType" }, { 0xa407, "exif:GainControl" }, { 0xa408, "exif:Contrast" }, { 0xa409, "exif:Saturation" }, 
{ 0xa40a, "exif:Sharpness" }, { 0xa40b, "exif:DeviceSettingDescription" }, { 0xa40c, "exif:SubjectDistanceRange" }, { 0xa420, "exif:ImageUniqueID" }, { 0xa430, "exif:CameraOwnerName" }, { 0xa431, "exif:BodySerialNumber" }, { 0xa432, "exif:LensSpecification" }, { 0xa433, "exif:LensMake" }, { 0xa434, "exif:LensModel" }, { 0xa435, "exif:LensSerialNumber" }, { 0xc4a5, "exif:PrintImageMatching" }, { 0xa500, "exif:Gamma" }, { 0xc640, "exif:CR2Slice" }, { 0x10000, "exif:GPSVersionID" }, { 0x10001, "exif:GPSLatitudeRef" }, { 0x10002, "exif:GPSLatitude" }, { 0x10003, "exif:GPSLongitudeRef" }, { 0x10004, "exif:GPSLongitude" }, { 0x10005, "exif:GPSAltitudeRef" }, { 0x10006, "exif:GPSAltitude" }, { 0x10007, "exif:GPSTimeStamp" }, { 0x10008, "exif:GPSSatellites" }, { 0x10009, "exif:GPSStatus" }, { 0x1000a, "exif:GPSMeasureMode" }, { 0x1000b, "exif:GPSDop" }, { 0x1000c, "exif:GPSSpeedRef" }, { 0x1000d, "exif:GPSSpeed" }, { 0x1000e, "exif:GPSTrackRef" }, { 0x1000f, "exif:GPSTrack" }, { 0x10010, "exif:GPSImgDirectionRef" }, { 0x10011, "exif:GPSImgDirection" }, { 0x10012, "exif:GPSMapDatum" }, { 0x10013, "exif:GPSDestLatitudeRef" }, { 0x10014, "exif:GPSDestLatitude" }, { 0x10015, "exif:GPSDestLongitudeRef" }, { 0x10016, "exif:GPSDestLongitude" }, { 0x10017, "exif:GPSDestBearingRef" }, { 0x10018, "exif:GPSDestBearing" }, { 0x10019, "exif:GPSDestDistanceRef" }, { 0x1001a, "exif:GPSDestDistance" }, { 0x1001b, "exif:GPSProcessingMethod" }, { 0x1001c, "exif:GPSAreaInformation" }, { 0x1001d, "exif:GPSDateStamp" }, { 0x1001e, "exif:GPSDifferential" }, { 0x1001f, "exif:GPSHPositioningError" }, { 0x00000, "" } }; /* http://www.cipa.jp/std/documents/e/DC-008-Translation-2016-E.pdf */ const StringInfo *profile; const unsigned char *directory, *exif; DirectoryInfo directory_stack[MaxDirectoryStack]; EndianType endian; MagickBooleanType status; ssize_t i; size_t entry, length, number_entries, tag, tag_value; SplayTreeInfo *exif_resources; ssize_t all, id, level, offset, tag_offset; static int tag_bytes[] = {0, 1, 1, 2, 4, 8, 1, 1, 2, 4, 8, 4, 8}; /* If EXIF data exists, then try to parse the request for a tag. */ profile=GetImageProfile(image,"exif"); if (profile == (const StringInfo *) NULL) return(MagickFalse); if ((property == (const char *) NULL) || (*property == '\0')) return(MagickFalse); while (isspace((int) ((unsigned char) *property)) != 0) property++; if (strlen(property) <= 5) return(MagickFalse); all=0; tag=(~0UL); switch (*(property+5)) { case '*': { /* Caller has asked for all the tags in the EXIF data. */ tag=0; all=1; /* return the data in description=value format */ break; } case '!': { tag=0; all=2; /* return the data in tagid=value format */ break; } case '#': case '@': { int c; size_t n; /* Check for a hex based tag specification first. */ tag=(*(property+5) == '@') ? 1UL : 0UL; property+=6; n=strlen(property); if (n != 4) return(MagickFalse); /* Parse tag specification as a hex number. */ n/=4; do { for (i=(ssize_t) n-1L; i >= 0; i--) { c=(*property++); tag<<=4; if ((c >= '0') && (c <= '9')) tag|=(c-'0'); else if ((c >= 'A') && (c <= 'F')) tag|=(c-('A'-10)); else if ((c >= 'a') && (c <= 'f')) tag|=(c-('a'-10)); else return(MagickFalse); } } while (*property != '\0'); break; } default: { /* Try to match the text with a tag name instead. 
*/ for (i=0; ; i++) { if (EXIFTag[i].tag == 0) break; if (LocaleCompare(EXIFTag[i].description,property) == 0) { tag=(size_t) EXIFTag[i].tag; break; } } break; } } if (tag == (~0UL)) return(MagickFalse); length=GetStringInfoLength(profile); if (length < 6) return(MagickFalse); exif=GetStringInfoDatum(profile); while (length != 0) { if (ReadPropertyByte(&exif,&length) != 0x45) continue; if (ReadPropertyByte(&exif,&length) != 0x78) continue; if (ReadPropertyByte(&exif,&length) != 0x69) continue; if (ReadPropertyByte(&exif,&length) != 0x66) continue; if (ReadPropertyByte(&exif,&length) != 0x00) continue; if (ReadPropertyByte(&exif,&length) != 0x00) continue; break; } if (length < 16) return(MagickFalse); id=(ssize_t) ReadPropertySignedShort(LSBEndian,exif); endian=LSBEndian; if (id == 0x4949) endian=LSBEndian; else if (id == 0x4D4D) endian=MSBEndian; else return(MagickFalse); if (ReadPropertyUnsignedShort(endian,exif+2) != 0x002a) return(MagickFalse); /* This the offset to the first IFD. */ offset=(ssize_t) ReadPropertySignedLong(endian,exif+4); if ((offset < 0) || (size_t) offset >= length) return(MagickFalse); /* Set the pointer to the first IFD and follow it were it leads. */ status=MagickFalse; directory=exif+offset; level=0; entry=0; tag_offset=0; exif_resources=NewSplayTree((int (*)(const void *,const void *)) NULL, (void *(*)(void *)) NULL,(void *(*)(void *)) NULL); do { /* If there is anything on the stack then pop it off. */ if (level > 0) { level--; directory=directory_stack[level].directory; entry=directory_stack[level].entry; tag_offset=directory_stack[level].offset; } if ((directory < exif) || (directory > (exif+length-2))) break; /* Determine how many entries there are in the current IFD. */ number_entries=(size_t) ReadPropertyUnsignedShort(endian,directory); for ( ; entry < number_entries; entry++) { unsigned char *p, *q; size_t format; ssize_t number_bytes, components; q=(unsigned char *) (directory+(12*entry)+2); if (q > (exif+length-12)) break; /* corrupt EXIF */ if (GetValueFromSplayTree(exif_resources,q) == q) break; (void) AddValueToSplayTree(exif_resources,q,q); tag_value=(size_t) ReadPropertyUnsignedShort(endian,q)+tag_offset; format=(size_t) ReadPropertyUnsignedShort(endian,q+2); if (format >= (sizeof(tag_bytes)/sizeof(*tag_bytes))) break; if (format == 0) break; /* corrupt EXIF */ components=(ssize_t) ReadPropertySignedLong(endian,q+4); if (components < 0) break; /* corrupt EXIF */ number_bytes=(size_t) components*tag_bytes[format]; if (number_bytes < components) break; /* prevent overflow */ if (number_bytes <= 4) p=q+8; else { ssize_t dir_offset; /* The directory entry contains an offset. 
*/ dir_offset=(ssize_t) ReadPropertySignedLong(endian,q+8); if ((dir_offset < 0) || (size_t) dir_offset >= length) continue; if (((size_t) dir_offset+number_bytes) < (size_t) dir_offset) continue; /* prevent overflow */ if (((size_t) dir_offset+number_bytes) > length) continue; p=(unsigned char *) (exif+dir_offset); } if ((all != 0) || (tag == (size_t) tag_value)) { char buffer[MaxTextExtent], *value; if ((p < exif) || (p > (exif+length-tag_bytes[format]))) break; value=(char *) NULL; *buffer='\0'; switch (format) { case EXIF_FMT_BYTE: case EXIF_FMT_UNDEFINED: { value=(char *) NULL; if (~((size_t) number_bytes) >= 1) value=(char *) AcquireQuantumMemory((size_t) number_bytes+1UL, sizeof(*value)); if (value != (char *) NULL) { for (i=0; i < (ssize_t) number_bytes; i++) { value[i]='.'; if (isprint((int) p[i]) != 0) value[i]=(char) p[i]; } value[i]='\0'; } break; } case EXIF_FMT_SBYTE: { EXIFMultipleValues(1,"%.20g",(double) (*(signed char *) p1)); break; } case EXIF_FMT_SSHORT: { EXIFMultipleValues(2,"%hd",ReadPropertySignedShort(endian,p1)); break; } case EXIF_FMT_USHORT: { EXIFMultipleValues(2,"%hu",ReadPropertyUnsignedShort(endian,p1)); break; } case EXIF_FMT_ULONG: { EXIFMultipleValues(4,"%.20g",(double) ReadPropertyUnsignedLong(endian,p1)); break; } case EXIF_FMT_SLONG: { EXIFMultipleValues(4,"%.20g",(double) ReadPropertySignedLong(endian,p1)); break; } case EXIF_FMT_URATIONAL: { EXIFMultipleFractions(8,"%.20g/%.20g",(double) ReadPropertyUnsignedLong(endian,p1),(double) ReadPropertyUnsignedLong(endian,p1+4)); break; } case EXIF_FMT_SRATIONAL: { EXIFMultipleFractions(8,"%.20g/%.20g",(double) ReadPropertySignedLong(endian,p1),(double) ReadPropertySignedLong(endian,p1+4)); break; } case EXIF_FMT_SINGLE: { EXIFMultipleValues(4,"%f",(double) *(float *) p1); break; } case EXIF_FMT_DOUBLE: { EXIFMultipleValues(8,"%f",*(double *) p1); break; } case EXIF_FMT_STRING: default: { if ((p < exif) || (p > (exif+length-number_bytes))) break; value=(char *) NULL; if (~((size_t) number_bytes) >= 1) value=(char *) AcquireQuantumMemory((size_t) number_bytes+1UL, sizeof(*value)); if (value != (char *) NULL) { ssize_t i; for (i=0; i < (ssize_t) number_bytes; i++) { value[i]='.'; if ((isprint((int) p[i]) != 0) || (p[i] == '\0')) value[i]=(char) p[i]; } value[i]='\0'; } break; } } if (value != (char *) NULL) { char *key; const char *p; key=AcquireString(property); switch (all) { case 1: { const char *description; ssize_t i; description="unknown"; for (i=0; ; i++) { if (EXIFTag[i].tag == 0) break; if (EXIFTag[i].tag == tag_value) { description=EXIFTag[i].description; break; } } (void) FormatLocaleString(key,MaxTextExtent,"%s", description); if (level == 2) (void) SubstituteString(&key,"exif:","exif:thumbnail:"); break; } case 2: { if (tag_value < 0x10000) (void) FormatLocaleString(key,MaxTextExtent,"#%04lx", (unsigned long) tag_value); else if (tag_value < 0x20000) (void) FormatLocaleString(key,MaxTextExtent,"@%04lx", (unsigned long) (tag_value & 0xffff)); else (void) FormatLocaleString(key,MaxTextExtent,"unknown"); break; } default: { if (level == 2) (void) SubstituteString(&key,"exif:","exif:thumbnail:"); } } p=(const char *) NULL; if (image->properties != (void *) NULL) p=(const char *) GetValueFromSplayTree((SplayTreeInfo *) image->properties,key); if (p == (const char *) NULL) (void) SetImageProperty((Image *) image,key,value); value=DestroyString(value); key=DestroyString(key); status=MagickTrue; } } if ((tag_value == TAG_EXIF_OFFSET) || (tag_value == TAG_INTEROP_OFFSET) || (tag_value == TAG_GPS_OFFSET)) { 
ssize_t offset; offset=(ssize_t) ReadPropertySignedLong(endian,p); if (((size_t) offset < length) && (level < (MaxDirectoryStack-2))) { ssize_t tag_offset1; tag_offset1=(ssize_t) ((tag_value == TAG_GPS_OFFSET) ? 0x10000 : 0); directory_stack[level].directory=directory; entry++; directory_stack[level].entry=entry; directory_stack[level].offset=tag_offset; level++; /* Check for duplicate tag. */ for (i=0; i < level; i++) if (directory_stack[i].directory == (exif+tag_offset1)) break; if (i < level) break; /* duplicate tag */ directory_stack[level].directory=exif+offset; directory_stack[level].offset=tag_offset1; directory_stack[level].entry=0; level++; if ((directory+2+(12*number_entries)+4) > (exif+length)) break; offset=(ssize_t) ReadPropertySignedLong(endian,directory+2+(12* number_entries)); if ((offset != 0) && ((size_t) offset < length) && (level < (MaxDirectoryStack-2))) { directory_stack[level].directory=exif+offset; directory_stack[level].entry=0; directory_stack[level].offset=tag_offset1; level++; } } break; } } } while (level > 0); exif_resources=DestroySplayTree(exif_resources); return(status); }
func_hash: 292096308156704952246887123009503225331
file_name: property.c
file_hash: 122751008107964047346147343124174074065
cwe: CWE-704
cve: CVE-2022-32547
cve_desc: In ImageMagick, there is load of misaligned address for type 'double', which requires 8 byte alignment and for type 'float', which requires 4 byte alignment at MagickCore/property.c. Whenever crafted or untrusted input is processed by ImageMagick, this causes a negative impact to application availability or other problems related to undefined behavior.
nvd_url: https://nvd.nist.gov/vuln/detail/CVE-2022-32547
idx: 217,569
project: ImageMagick6
commit_id: dc070da861a015d3c97488fdcca6063b44d47a7b
project_url: https://github.com/ImageMagick/ImageMagick6
commit_url: https://github.com/ImageMagick/ImageMagick6/commit/dc070da861a015d3c97488fdcca6063b44d47a7b
commit_message: https://github.com/ImageMagick/ImageMagick/pull/5034
target: 0
func:
static MagickBooleanType GetEXIFProperty(const Image *image, const char *property) { #define MaxDirectoryStack 16 #define EXIF_DELIMITER "\n" #define EXIF_NUM_FORMATS 12 #define EXIF_FMT_BYTE 1 #define EXIF_FMT_STRING 2 #define EXIF_FMT_USHORT 3 #define EXIF_FMT_ULONG 4 #define EXIF_FMT_URATIONAL 5 #define EXIF_FMT_SBYTE 6 #define EXIF_FMT_UNDEFINED 7 #define EXIF_FMT_SSHORT 8 #define EXIF_FMT_SLONG 9 #define EXIF_FMT_SRATIONAL 10 #define EXIF_FMT_SINGLE 11 #define EXIF_FMT_DOUBLE 12 #define TAG_EXIF_OFFSET 0x8769 #define TAG_GPS_OFFSET 0x8825 #define TAG_INTEROP_OFFSET 0xa005 #define EXIFMultipleValues(size,format,arg) \ { \ ssize_t \ component; \ \ size_t \ length; \ \ unsigned char \ *p1; \ \ length=0; \ p1=p; \ for (component=0; component < components; component++) \ { \ length+=FormatLocaleString(buffer+length,MaxTextExtent-length, \ format", ",arg); \ if (length >= (MaxTextExtent-1)) \ length=MaxTextExtent-1; \ p1+=size; \ } \ if (length > 1) \ buffer[length-2]='\0'; \ value=AcquireString(buffer); \ } #define EXIFMultipleFractions(size,format,arg1,arg2) \ { \ ssize_t \ component; \ \ size_t \ length; \ \ unsigned char \ *p1; \ \ length=0; \ p1=p; \ for (component=0; component < components; component++) \ { \ length+=FormatLocaleString(buffer+length,MaxTextExtent-length, \ format", ",(arg1),(arg2)); \ if (length >= (MaxTextExtent-1)) \ length=MaxTextExtent-1; \ p1+=size; \ } \ if (length > 1) \ buffer[length-2]='\0'; \ value=AcquireString(buffer); \ } typedef struct _DirectoryInfo { const unsigned char *directory; size_t entry; ssize_t offset; } DirectoryInfo; typedef struct _TagInfo { size_t tag; const char description[36]; } TagInfo; static const TagInfo EXIFTag[] = { { 0x001, "exif:InteroperabilityIndex" }, { 0x002, "exif:InteroperabilityVersion" }, { 0x100, "exif:ImageWidth" }, { 0x101, "exif:ImageLength" }, { 0x102, "exif:BitsPerSample" }, { 0x103, "exif:Compression" }, { 0x106, "exif:PhotometricInterpretation" }, { 0x10a, "exif:FillOrder" }, { 0x10d, "exif:DocumentName" }, { 0x10e, "exif:ImageDescription" }, { 0x10f, "exif:Make" }, { 0x110, "exif:Model" }, { 0x111, "exif:StripOffsets" }, { 0x112, "exif:Orientation" }, { 0x115, "exif:SamplesPerPixel" }, { 0x116, "exif:RowsPerStrip" }, { 0x117, "exif:StripByteCounts" }, { 0x11a, "exif:XResolution" }, { 0x11b, "exif:YResolution" }, { 0x11c, "exif:PlanarConfiguration" }, { 0x11d, "exif:PageName" }, { 0x11e, "exif:XPosition" }, { 0x11f, "exif:YPosition" }, { 0x118, "exif:MinSampleValue" }, { 0x119, "exif:MaxSampleValue" }, { 0x120, "exif:FreeOffsets" }, { 0x121, "exif:FreeByteCounts" }, { 0x122, "exif:GrayResponseUnit" }, { 0x123, "exif:GrayResponseCurve" }, { 0x124, "exif:T4Options" }, { 0x125, "exif:T6Options" }, { 0x128, "exif:ResolutionUnit" }, { 0x12d, "exif:TransferFunction" }, { 0x131, "exif:Software" }, { 0x132, "exif:DateTime" }, { 0x13b, "exif:Artist" }, { 0x13e, "exif:WhitePoint" }, { 0x13f, "exif:PrimaryChromaticities" }, { 0x140, "exif:ColorMap" }, { 0x141, "exif:HalfToneHints" }, { 0x142, "exif:TileWidth" }, { 0x143, "exif:TileLength" }, { 0x144, "exif:TileOffsets" }, { 0x145, "exif:TileByteCounts" }, { 0x14a, "exif:SubIFD" }, { 0x14c, "exif:InkSet" }, { 0x14d, "exif:InkNames" }, { 0x14e, "exif:NumberOfInks" }, { 0x150, "exif:DotRange" }, { 0x151, "exif:TargetPrinter" }, { 0x152, "exif:ExtraSample" }, { 0x153, "exif:SampleFormat" }, { 0x154, "exif:SMinSampleValue" }, { 0x155, "exif:SMaxSampleValue" }, { 0x156, "exif:TransferRange" }, { 0x157, "exif:ClipPath" }, { 0x158, "exif:XClipPathUnits" }, { 0x159, 
"exif:YClipPathUnits" }, { 0x15a, "exif:Indexed" }, { 0x15b, "exif:JPEGTables" }, { 0x15f, "exif:OPIProxy" }, { 0x200, "exif:JPEGProc" }, { 0x201, "exif:JPEGInterchangeFormat" }, { 0x202, "exif:JPEGInterchangeFormatLength" }, { 0x203, "exif:JPEGRestartInterval" }, { 0x205, "exif:JPEGLosslessPredictors" }, { 0x206, "exif:JPEGPointTransforms" }, { 0x207, "exif:JPEGQTables" }, { 0x208, "exif:JPEGDCTables" }, { 0x209, "exif:JPEGACTables" }, { 0x211, "exif:YCbCrCoefficients" }, { 0x212, "exif:YCbCrSubSampling" }, { 0x213, "exif:YCbCrPositioning" }, { 0x214, "exif:ReferenceBlackWhite" }, { 0x2bc, "exif:ExtensibleMetadataPlatform" }, { 0x301, "exif:Gamma" }, { 0x302, "exif:ICCProfileDescriptor" }, { 0x303, "exif:SRGBRenderingIntent" }, { 0x320, "exif:ImageTitle" }, { 0x5001, "exif:ResolutionXUnit" }, { 0x5002, "exif:ResolutionYUnit" }, { 0x5003, "exif:ResolutionXLengthUnit" }, { 0x5004, "exif:ResolutionYLengthUnit" }, { 0x5005, "exif:PrintFlags" }, { 0x5006, "exif:PrintFlagsVersion" }, { 0x5007, "exif:PrintFlagsCrop" }, { 0x5008, "exif:PrintFlagsBleedWidth" }, { 0x5009, "exif:PrintFlagsBleedWidthScale" }, { 0x500A, "exif:HalftoneLPI" }, { 0x500B, "exif:HalftoneLPIUnit" }, { 0x500C, "exif:HalftoneDegree" }, { 0x500D, "exif:HalftoneShape" }, { 0x500E, "exif:HalftoneMisc" }, { 0x500F, "exif:HalftoneScreen" }, { 0x5010, "exif:JPEGQuality" }, { 0x5011, "exif:GridSize" }, { 0x5012, "exif:ThumbnailFormat" }, { 0x5013, "exif:ThumbnailWidth" }, { 0x5014, "exif:ThumbnailHeight" }, { 0x5015, "exif:ThumbnailColorDepth" }, { 0x5016, "exif:ThumbnailPlanes" }, { 0x5017, "exif:ThumbnailRawBytes" }, { 0x5018, "exif:ThumbnailSize" }, { 0x5019, "exif:ThumbnailCompressedSize" }, { 0x501a, "exif:ColorTransferFunction" }, { 0x501b, "exif:ThumbnailData" }, { 0x5020, "exif:ThumbnailImageWidth" }, { 0x5021, "exif:ThumbnailImageHeight" }, { 0x5022, "exif:ThumbnailBitsPerSample" }, { 0x5023, "exif:ThumbnailCompression" }, { 0x5024, "exif:ThumbnailPhotometricInterp" }, { 0x5025, "exif:ThumbnailImageDescription" }, { 0x5026, "exif:ThumbnailEquipMake" }, { 0x5027, "exif:ThumbnailEquipModel" }, { 0x5028, "exif:ThumbnailStripOffsets" }, { 0x5029, "exif:ThumbnailOrientation" }, { 0x502a, "exif:ThumbnailSamplesPerPixel" }, { 0x502b, "exif:ThumbnailRowsPerStrip" }, { 0x502c, "exif:ThumbnailStripBytesCount" }, { 0x502d, "exif:ThumbnailResolutionX" }, { 0x502e, "exif:ThumbnailResolutionY" }, { 0x502f, "exif:ThumbnailPlanarConfig" }, { 0x5030, "exif:ThumbnailResolutionUnit" }, { 0x5031, "exif:ThumbnailTransferFunction" }, { 0x5032, "exif:ThumbnailSoftwareUsed" }, { 0x5033, "exif:ThumbnailDateTime" }, { 0x5034, "exif:ThumbnailArtist" }, { 0x5035, "exif:ThumbnailWhitePoint" }, { 0x5036, "exif:ThumbnailPrimaryChromaticities" }, { 0x5037, "exif:ThumbnailYCbCrCoefficients" }, { 0x5038, "exif:ThumbnailYCbCrSubsampling" }, { 0x5039, "exif:ThumbnailYCbCrPositioning" }, { 0x503A, "exif:ThumbnailRefBlackWhite" }, { 0x503B, "exif:ThumbnailCopyRight" }, { 0x5090, "exif:LuminanceTable" }, { 0x5091, "exif:ChrominanceTable" }, { 0x5100, "exif:FrameDelay" }, { 0x5101, "exif:LoopCount" }, { 0x5110, "exif:PixelUnit" }, { 0x5111, "exif:PixelPerUnitX" }, { 0x5112, "exif:PixelPerUnitY" }, { 0x5113, "exif:PaletteHistogram" }, { 0x1000, "exif:RelatedImageFileFormat" }, { 0x1001, "exif:RelatedImageLength" }, { 0x1002, "exif:RelatedImageWidth" }, { 0x800d, "exif:ImageID" }, { 0x80e3, "exif:Matteing" }, { 0x80e4, "exif:DataType" }, { 0x80e5, "exif:ImageDepth" }, { 0x80e6, "exif:TileDepth" }, { 0x828d, "exif:CFARepeatPatternDim" }, { 0x828e, "exif:CFAPattern2" 
}, { 0x828f, "exif:BatteryLevel" }, { 0x8298, "exif:Copyright" }, { 0x829a, "exif:ExposureTime" }, { 0x829d, "exif:FNumber" }, { 0x83bb, "exif:IPTC/NAA" }, { 0x84e3, "exif:IT8RasterPadding" }, { 0x84e5, "exif:IT8ColorTable" }, { 0x8649, "exif:ImageResourceInformation" }, { 0x8769, "exif:ExifOffset" }, /* specs as "Exif IFD Pointer"? */ { 0x8773, "exif:InterColorProfile" }, { 0x8822, "exif:ExposureProgram" }, { 0x8824, "exif:SpectralSensitivity" }, { 0x8825, "exif:GPSInfo" }, /* specs as "GPSInfo IFD Pointer"? */ { 0x8827, "exif:PhotographicSensitivity" }, { 0x8828, "exif:OECF" }, { 0x8829, "exif:Interlace" }, { 0x882a, "exif:TimeZoneOffset" }, { 0x882b, "exif:SelfTimerMode" }, { 0x8830, "exif:SensitivityType" }, { 0x8831, "exif:StandardOutputSensitivity" }, { 0x8832, "exif:RecommendedExposureIndex" }, { 0x8833, "exif:ISOSpeed" }, { 0x8834, "exif:ISOSpeedLatitudeyyy" }, { 0x8835, "exif:ISOSpeedLatitudezzz" }, { 0x9000, "exif:ExifVersion" }, { 0x9003, "exif:DateTimeOriginal" }, { 0x9004, "exif:DateTimeDigitized" }, { 0x9010, "exif:OffsetTime" }, { 0x9011, "exif:OffsetTimeOriginal" }, { 0x9012, "exif:OffsetTimeDigitized" }, { 0x9101, "exif:ComponentsConfiguration" }, { 0x9102, "exif:CompressedBitsPerPixel" }, { 0x9201, "exif:ShutterSpeedValue" }, { 0x9202, "exif:ApertureValue" }, { 0x9203, "exif:BrightnessValue" }, { 0x9204, "exif:ExposureBiasValue" }, { 0x9205, "exif:MaxApertureValue" }, { 0x9206, "exif:SubjectDistance" }, { 0x9207, "exif:MeteringMode" }, { 0x9208, "exif:LightSource" }, { 0x9209, "exif:Flash" }, { 0x920a, "exif:FocalLength" }, { 0x920b, "exif:FlashEnergy" }, { 0x920c, "exif:SpatialFrequencyResponse" }, { 0x920d, "exif:Noise" }, { 0x9214, "exif:SubjectArea" }, { 0x9290, "exif:SubSecTime" }, { 0x9291, "exif:SubSecTimeOriginal" }, { 0x9292, "exif:SubSecTimeDigitized" }, { 0x9211, "exif:ImageNumber" }, { 0x9212, "exif:SecurityClassification" }, { 0x9213, "exif:ImageHistory" }, { 0x9214, "exif:SubjectArea" }, { 0x9215, "exif:ExposureIndex" }, { 0x9216, "exif:TIFF-EPStandardID" }, { 0x927c, "exif:MakerNote" }, { 0x9286, "exif:UserComment" }, { 0x9290, "exif:SubSecTime" }, { 0x9291, "exif:SubSecTimeOriginal" }, { 0x9292, "exif:SubSecTimeDigitized" }, { 0x9400, "exif:Temperature" }, { 0x9401, "exif:Humidity" }, { 0x9402, "exif:Pressure" }, { 0x9403, "exif:WaterDepth" }, { 0x9404, "exif:Acceleration" }, { 0x9405, "exif:CameraElevationAngle" }, { 0x9C9b, "exif:WinXP-Title" }, { 0x9C9c, "exif:WinXP-Comments" }, { 0x9C9d, "exif:WinXP-Author" }, { 0x9C9e, "exif:WinXP-Keywords" }, { 0x9C9f, "exif:WinXP-Subject" }, { 0xa000, "exif:FlashPixVersion" }, { 0xa001, "exif:ColorSpace" }, { 0xa002, "exif:PixelXDimension" }, { 0xa003, "exif:PixelYDimension" }, { 0xa004, "exif:RelatedSoundFile" }, { 0xa005, "exif:InteroperabilityOffset" }, { 0xa20b, "exif:FlashEnergy" }, { 0xa20c, "exif:SpatialFrequencyResponse" }, { 0xa20d, "exif:Noise" }, { 0xa20e, "exif:FocalPlaneXResolution" }, { 0xa20f, "exif:FocalPlaneYResolution" }, { 0xa210, "exif:FocalPlaneResolutionUnit" }, { 0xa214, "exif:SubjectLocation" }, { 0xa215, "exif:ExposureIndex" }, { 0xa216, "exif:TIFF/EPStandardID" }, { 0xa217, "exif:SensingMethod" }, { 0xa300, "exif:FileSource" }, { 0xa301, "exif:SceneType" }, { 0xa302, "exif:CFAPattern" }, { 0xa401, "exif:CustomRendered" }, { 0xa402, "exif:ExposureMode" }, { 0xa403, "exif:WhiteBalance" }, { 0xa404, "exif:DigitalZoomRatio" }, { 0xa405, "exif:FocalLengthIn35mmFilm" }, { 0xa406, "exif:SceneCaptureType" }, { 0xa407, "exif:GainControl" }, { 0xa408, "exif:Contrast" }, { 0xa409, "exif:Saturation" }, 
{ 0xa40a, "exif:Sharpness" }, { 0xa40b, "exif:DeviceSettingDescription" }, { 0xa40c, "exif:SubjectDistanceRange" }, { 0xa420, "exif:ImageUniqueID" }, { 0xa430, "exif:CameraOwnerName" }, { 0xa431, "exif:BodySerialNumber" }, { 0xa432, "exif:LensSpecification" }, { 0xa433, "exif:LensMake" }, { 0xa434, "exif:LensModel" }, { 0xa435, "exif:LensSerialNumber" }, { 0xc4a5, "exif:PrintImageMatching" }, { 0xa500, "exif:Gamma" }, { 0xc640, "exif:CR2Slice" }, { 0x10000, "exif:GPSVersionID" }, { 0x10001, "exif:GPSLatitudeRef" }, { 0x10002, "exif:GPSLatitude" }, { 0x10003, "exif:GPSLongitudeRef" }, { 0x10004, "exif:GPSLongitude" }, { 0x10005, "exif:GPSAltitudeRef" }, { 0x10006, "exif:GPSAltitude" }, { 0x10007, "exif:GPSTimeStamp" }, { 0x10008, "exif:GPSSatellites" }, { 0x10009, "exif:GPSStatus" }, { 0x1000a, "exif:GPSMeasureMode" }, { 0x1000b, "exif:GPSDop" }, { 0x1000c, "exif:GPSSpeedRef" }, { 0x1000d, "exif:GPSSpeed" }, { 0x1000e, "exif:GPSTrackRef" }, { 0x1000f, "exif:GPSTrack" }, { 0x10010, "exif:GPSImgDirectionRef" }, { 0x10011, "exif:GPSImgDirection" }, { 0x10012, "exif:GPSMapDatum" }, { 0x10013, "exif:GPSDestLatitudeRef" }, { 0x10014, "exif:GPSDestLatitude" }, { 0x10015, "exif:GPSDestLongitudeRef" }, { 0x10016, "exif:GPSDestLongitude" }, { 0x10017, "exif:GPSDestBearingRef" }, { 0x10018, "exif:GPSDestBearing" }, { 0x10019, "exif:GPSDestDistanceRef" }, { 0x1001a, "exif:GPSDestDistance" }, { 0x1001b, "exif:GPSProcessingMethod" }, { 0x1001c, "exif:GPSAreaInformation" }, { 0x1001d, "exif:GPSDateStamp" }, { 0x1001e, "exif:GPSDifferential" }, { 0x1001f, "exif:GPSHPositioningError" }, { 0x00000, "" } }; /* http://www.cipa.jp/std/documents/e/DC-008-Translation-2016-E.pdf */ const StringInfo *profile; const unsigned char *directory, *exif; DirectoryInfo directory_stack[MaxDirectoryStack]; EndianType endian; MagickBooleanType status; ssize_t i; size_t entry, length, number_entries, tag, tag_value; SplayTreeInfo *exif_resources; ssize_t all, id, level, offset, tag_offset; static int tag_bytes[] = {0, 1, 1, 2, 4, 8, 1, 1, 2, 4, 8, 4, 8}; /* If EXIF data exists, then try to parse the request for a tag. */ profile=GetImageProfile(image,"exif"); if (profile == (const StringInfo *) NULL) return(MagickFalse); if ((property == (const char *) NULL) || (*property == '\0')) return(MagickFalse); while (isspace((int) ((unsigned char) *property)) != 0) property++; if (strlen(property) <= 5) return(MagickFalse); all=0; tag=(~0UL); switch (*(property+5)) { case '*': { /* Caller has asked for all the tags in the EXIF data. */ tag=0; all=1; /* return the data in description=value format */ break; } case '!': { tag=0; all=2; /* return the data in tagid=value format */ break; } case '#': case '@': { int c; size_t n; /* Check for a hex based tag specification first. */ tag=(*(property+5) == '@') ? 1UL : 0UL; property+=6; n=strlen(property); if (n != 4) return(MagickFalse); /* Parse tag specification as a hex number. */ n/=4; do { for (i=(ssize_t) n-1L; i >= 0; i--) { c=(*property++); tag<<=4; if ((c >= '0') && (c <= '9')) tag|=(c-'0'); else if ((c >= 'A') && (c <= 'F')) tag|=(c-('A'-10)); else if ((c >= 'a') && (c <= 'f')) tag|=(c-('a'-10)); else return(MagickFalse); } } while (*property != '\0'); break; } default: { /* Try to match the text with a tag name instead. 
*/ for (i=0; ; i++) { if (EXIFTag[i].tag == 0) break; if (LocaleCompare(EXIFTag[i].description,property) == 0) { tag=(size_t) EXIFTag[i].tag; break; } } break; } } if (tag == (~0UL)) return(MagickFalse); length=GetStringInfoLength(profile); if (length < 6) return(MagickFalse); exif=GetStringInfoDatum(profile); while (length != 0) { if (ReadPropertyByte(&exif,&length) != 0x45) continue; if (ReadPropertyByte(&exif,&length) != 0x78) continue; if (ReadPropertyByte(&exif,&length) != 0x69) continue; if (ReadPropertyByte(&exif,&length) != 0x66) continue; if (ReadPropertyByte(&exif,&length) != 0x00) continue; if (ReadPropertyByte(&exif,&length) != 0x00) continue; break; } if (length < 16) return(MagickFalse); id=(ssize_t) ReadPropertySignedShort(LSBEndian,exif); endian=LSBEndian; if (id == 0x4949) endian=LSBEndian; else if (id == 0x4D4D) endian=MSBEndian; else return(MagickFalse); if (ReadPropertyUnsignedShort(endian,exif+2) != 0x002a) return(MagickFalse); /* This the offset to the first IFD. */ offset=(ssize_t) ReadPropertySignedLong(endian,exif+4); if ((offset < 0) || (size_t) offset >= length) return(MagickFalse); /* Set the pointer to the first IFD and follow it were it leads. */ status=MagickFalse; directory=exif+offset; level=0; entry=0; tag_offset=0; exif_resources=NewSplayTree((int (*)(const void *,const void *)) NULL, (void *(*)(void *)) NULL,(void *(*)(void *)) NULL); do { /* If there is anything on the stack then pop it off. */ if (level > 0) { level--; directory=directory_stack[level].directory; entry=directory_stack[level].entry; tag_offset=directory_stack[level].offset; } if ((directory < exif) || (directory > (exif+length-2))) break; /* Determine how many entries there are in the current IFD. */ number_entries=(size_t) ReadPropertyUnsignedShort(endian,directory); for ( ; entry < number_entries; entry++) { unsigned char *p, *q; size_t format; ssize_t number_bytes, components; q=(unsigned char *) (directory+(12*entry)+2); if (q > (exif+length-12)) break; /* corrupt EXIF */ if (GetValueFromSplayTree(exif_resources,q) == q) break; (void) AddValueToSplayTree(exif_resources,q,q); tag_value=(size_t) ReadPropertyUnsignedShort(endian,q)+tag_offset; format=(size_t) ReadPropertyUnsignedShort(endian,q+2); if (format >= (sizeof(tag_bytes)/sizeof(*tag_bytes))) break; if (format == 0) break; /* corrupt EXIF */ components=(ssize_t) ReadPropertySignedLong(endian,q+4); if (components < 0) break; /* corrupt EXIF */ number_bytes=(size_t) components*tag_bytes[format]; if (number_bytes < components) break; /* prevent overflow */ if (number_bytes <= 4) p=q+8; else { ssize_t dir_offset; /* The directory entry contains an offset. 
*/ dir_offset=(ssize_t) ReadPropertySignedLong(endian,q+8); if ((dir_offset < 0) || (size_t) dir_offset >= length) continue; if (((size_t) dir_offset+number_bytes) < (size_t) dir_offset) continue; /* prevent overflow */ if (((size_t) dir_offset+number_bytes) > length) continue; p=(unsigned char *) (exif+dir_offset); } if ((all != 0) || (tag == (size_t) tag_value)) { char buffer[MaxTextExtent], *value; if ((p < exif) || (p > (exif+length-tag_bytes[format]))) break; value=(char *) NULL; *buffer='\0'; switch (format) { case EXIF_FMT_BYTE: case EXIF_FMT_UNDEFINED: { value=(char *) NULL; if (~((size_t) number_bytes) >= 1) value=(char *) AcquireQuantumMemory((size_t) number_bytes+1UL, sizeof(*value)); if (value != (char *) NULL) { for (i=0; i < (ssize_t) number_bytes; i++) { value[i]='.'; if (isprint((int) p[i]) != 0) value[i]=(char) p[i]; } value[i]='\0'; } break; } case EXIF_FMT_SBYTE: { EXIFMultipleValues(1,"%.20g",(double) (*(signed char *) p1)); break; } case EXIF_FMT_SSHORT: { EXIFMultipleValues(2,"%hd",ReadPropertySignedShort(endian,p1)); break; } case EXIF_FMT_USHORT: { EXIFMultipleValues(2,"%hu",ReadPropertyUnsignedShort(endian,p1)); break; } case EXIF_FMT_ULONG: { EXIFMultipleValues(4,"%.20g",(double) ReadPropertyUnsignedLong(endian,p1)); break; } case EXIF_FMT_SLONG: { EXIFMultipleValues(4,"%.20g",(double) ReadPropertySignedLong(endian,p1)); break; } case EXIF_FMT_URATIONAL: { EXIFMultipleFractions(8,"%.20g/%.20g",(double) ReadPropertyUnsignedLong(endian,p1),(double) ReadPropertyUnsignedLong(endian,p1+4)); break; } case EXIF_FMT_SRATIONAL: { EXIFMultipleFractions(8,"%.20g/%.20g",(double) ReadPropertySignedLong(endian,p1),(double) ReadPropertySignedLong(endian,p1+4)); break; } case EXIF_FMT_SINGLE: { EXIFMultipleValues(4,"%.20g",(double) ReadPropertySignedLong(endian,p1)); break; } case EXIF_FMT_DOUBLE: { EXIFMultipleValues(8,"%.20g",(double) ReadPropertySignedLong(endian,p1)); break; } case EXIF_FMT_STRING: default: { if ((p < exif) || (p > (exif+length-number_bytes))) break; value=(char *) NULL; if (~((size_t) number_bytes) >= 1) value=(char *) AcquireQuantumMemory((size_t) number_bytes+1UL, sizeof(*value)); if (value != (char *) NULL) { ssize_t i; for (i=0; i < (ssize_t) number_bytes; i++) { value[i]='.'; if ((isprint((int) p[i]) != 0) || (p[i] == '\0')) value[i]=(char) p[i]; } value[i]='\0'; } break; } } if (value != (char *) NULL) { char *key; const char *p; key=AcquireString(property); switch (all) { case 1: { const char *description; ssize_t i; description="unknown"; for (i=0; ; i++) { if (EXIFTag[i].tag == 0) break; if (EXIFTag[i].tag == tag_value) { description=EXIFTag[i].description; break; } } (void) FormatLocaleString(key,MaxTextExtent,"%s", description); if (level == 2) (void) SubstituteString(&key,"exif:","exif:thumbnail:"); break; } case 2: { if (tag_value < 0x10000) (void) FormatLocaleString(key,MaxTextExtent,"#%04lx", (unsigned long) tag_value); else if (tag_value < 0x20000) (void) FormatLocaleString(key,MaxTextExtent,"@%04lx", (unsigned long) (tag_value & 0xffff)); else (void) FormatLocaleString(key,MaxTextExtent,"unknown"); break; } default: { if (level == 2) (void) SubstituteString(&key,"exif:","exif:thumbnail:"); } } p=(const char *) NULL; if (image->properties != (void *) NULL) p=(const char *) GetValueFromSplayTree((SplayTreeInfo *) image->properties,key); if (p == (const char *) NULL) (void) SetImageProperty((Image *) image,key,value); value=DestroyString(value); key=DestroyString(key); status=MagickTrue; } } if ((tag_value == TAG_EXIF_OFFSET) || (tag_value == 
TAG_INTEROP_OFFSET) || (tag_value == TAG_GPS_OFFSET)) { ssize_t offset; offset=(ssize_t) ReadPropertySignedLong(endian,p); if (((size_t) offset < length) && (level < (MaxDirectoryStack-2))) { ssize_t tag_offset1; tag_offset1=(ssize_t) ((tag_value == TAG_GPS_OFFSET) ? 0x10000 : 0); directory_stack[level].directory=directory; entry++; directory_stack[level].entry=entry; directory_stack[level].offset=tag_offset; level++; /* Check for duplicate tag. */ for (i=0; i < level; i++) if (directory_stack[i].directory == (exif+tag_offset1)) break; if (i < level) break; /* duplicate tag */ directory_stack[level].directory=exif+offset; directory_stack[level].offset=tag_offset1; directory_stack[level].entry=0; level++; if ((directory+2+(12*number_entries)+4) > (exif+length)) break; offset=(ssize_t) ReadPropertySignedLong(endian,directory+2+(12* number_entries)); if ((offset != 0) && ((size_t) offset < length) && (level < (MaxDirectoryStack-2))) { directory_stack[level].directory=exif+offset; directory_stack[level].entry=0; directory_stack[level].offset=tag_offset1; level++; } } break; } } } while (level > 0); exif_resources=DestroySplayTree(exif_resources); return(status); }
func_hash: 75422468811560646183620950160304672170
file_name: property.c
file_hash: 320426917520707901134127411021604962567
cwe: CWE-704
cve: CVE-2022-32547
cve_desc: In ImageMagick, there is load of misaligned address for type 'double', which requires 8 byte alignment and for type 'float', which requires 4 byte alignment at MagickCore/property.c. Whenever crafted or untrusted input is processed by ImageMagick, this causes a negative impact to application availability or other problems related to undefined behavior.
nvd_url: https://nvd.nist.gov/vuln/detail/CVE-2022-32547
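The two CVE-2022-32547 rows above share commit dc070da8 and differ in how GetEXIFProperty renders EXIF_FMT_SINGLE and EXIF_FMT_DOUBLE entries: the target=1 function dereferences the profile buffer directly through (float *) and (double *) casts, while the target=0 function reads the same bytes through the endian-aware ReadPropertySignedLong helper and so never performs the possibly misaligned load. Below is a minimal, self-contained C++ sketch of the alignment point only; it is not ImageMagick code, read_double_unaligned is an illustrative name, and it deliberately ignores the endianness handling the real patch also performs.

#include <cstdio>
#include <cstring>

/* Alignment-safe read: copy the raw bytes into a properly aligned local.
   Dereferencing a misaligned (const double *) cast, as the pre-patch code
   does, is undefined behavior on strict-alignment targets and under UBSan. */
static double read_double_unaligned(const unsigned char *p)
{
  double d;
  std::memcpy(&d, p, sizeof d);   /* memcpy has no alignment requirement */
  return d;
}

int main(void)
{
  unsigned char buf[1 + sizeof(double)] = {0};
  double v = 3.5;
  std::memcpy(buf + 1, &v, sizeof v);                    /* value stored at an odd offset */
  std::printf("%f\n", read_double_unaligned(buf + 1));   /* prints 3.500000 */
  return 0;
}

Copying into a local object is the usual portable way to read an unaligned scalar; on targets that permit unaligned access, compilers typically lower the memcpy to a single load.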
idx: 194,989
project: ImageMagick6
commit_id: 450949ed017f009b399c937cf362f0058eacc5fa
project_url: https://github.com/ImageMagick/ImageMagick6
commit_url: https://github.com/ImageMagick/ImageMagick6/commit/450949ed017f009b399c937cf362f0058eacc5fa
commit_message: Pull request: https://github.com/ImageMagick/ImageMagick/pull/4963
target: 1
func:
static MagickBooleanType ReadPSDChannelPixels(Image *image, const size_t channels,const ssize_t row,const ssize_t type, const unsigned char *pixels,ExceptionInfo *exception) { Quantum pixel; const unsigned char *p; IndexPacket *indexes; PixelPacket *q; ssize_t x; size_t packet_size; unsigned short nibble; p=pixels; q=GetAuthenticPixels(image,0,row,image->columns,1,exception); if (q == (PixelPacket *) NULL) return MagickFalse; indexes=GetAuthenticIndexQueue(image); packet_size=GetPSDPacketSize(image); for (x=0; x < (ssize_t) image->columns; x++) { if (packet_size == 1) pixel=ScaleCharToQuantum(*p++); else if (packet_size == 2) { p=PushShortPixel(MSBEndian,p,&nibble); pixel=ScaleShortToQuantum(nibble); } else { MagickFloatType nibble; p=PushFloatPixel(MSBEndian,p,&nibble); pixel=ClampToQuantum((MagickRealType)QuantumRange*nibble); } if (image->depth > 1) { SetPSDPixel(image,channels,type,packet_size,pixel,q,indexes,x); q++; } else { ssize_t bit, number_bits; number_bits=(ssize_t) image->columns-x; if (number_bits > 8) number_bits=8; for (bit=0; bit < number_bits; bit++) { SetPSDPixel(image,channels,type,packet_size,(((unsigned char) pixel) & (0x01 << (7-bit))) != 0 ? 0 : QuantumRange,q++,indexes,x++); } if (x != (ssize_t) image->columns) x--; continue; } } return(SyncAuthenticPixels(image,exception)); }
func_hash: 50584299779312396054491404176852470969
file_name: psd.c
file_hash: 159316916509494023086155162326374999236
cwe: CWE-190
cve: CVE-2022-32545
cve_desc: A vulnerability was found in ImageMagick, causing an outside the range of representable values of type 'unsigned char' at coders/psd.c, when crafted or untrusted input is processed. This leads to a negative impact to application availability or other problems related to undefined behavior.
nvd_url: https://nvd.nist.gov/vuln/detail/CVE-2022-32545
idx: 218,785
project: ImageMagick6
commit_id: 450949ed017f009b399c937cf362f0058eacc5fa
project_url: https://github.com/ImageMagick/ImageMagick6
commit_url: https://github.com/ImageMagick/ImageMagick6/commit/450949ed017f009b399c937cf362f0058eacc5fa
commit_message: Pull request: https://github.com/ImageMagick/ImageMagick/pull/4963
target: 0
func:
static MagickBooleanType ReadPSDChannelPixels(Image *image, const size_t channels,const ssize_t row,const ssize_t type, const unsigned char *pixels,ExceptionInfo *exception) { Quantum pixel; const unsigned char *p; IndexPacket *indexes; PixelPacket *q; ssize_t x; size_t packet_size; unsigned short nibble; p=pixels; q=GetAuthenticPixels(image,0,row,image->columns,1,exception); if (q == (PixelPacket *) NULL) return MagickFalse; indexes=GetAuthenticIndexQueue(image); packet_size=GetPSDPacketSize(image); for (x=0; x < (ssize_t) image->columns; x++) { if (packet_size == 1) pixel=ScaleCharToQuantum(*p++); else if (packet_size == 2) { p=PushShortPixel(MSBEndian,p,&nibble); pixel=ScaleShortToQuantum(nibble); } else { MagickFloatType nibble; p=PushFloatPixel(MSBEndian,p,&nibble); pixel=ClampToQuantum((MagickRealType)QuantumRange*nibble); } if (image->depth > 1) { SetPSDPixel(image,channels,type,packet_size,pixel,q,indexes,x); q++; } else { ssize_t bit, number_bits; number_bits=(ssize_t) image->columns-x; if (number_bits > 8) number_bits=8; for (bit=0; bit < number_bits; bit++) { SetPSDPixel(image,channels,type,packet_size, (((unsigned char) ((ssize_t) pixel)) & (0x01 << (7-bit))) != 0 ? 0 : QuantumRange,q++,indexes,x++); } if (x != (ssize_t) image->columns) x--; continue; } } return(SyncAuthenticPixels(image,exception)); }
func_hash: 177518249272594340059836567736761123364
file_name: psd.c
file_hash: 226732625250511916284298083592366716300
cwe: CWE-190
cve: CVE-2022-32545
cve_desc: A vulnerability was found in ImageMagick, causing an outside the range of representable values of type 'unsigned char' at coders/psd.c, when crafted or untrusted input is processed. This leads to a negative impact to application availability or other problems related to undefined behavior.
nvd_url: https://nvd.nist.gov/vuln/detail/CVE-2022-32545
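For CVE-2022-32545 the patched row changes only the cast in the 1-bit path of ReadPSDChannelPixels: (unsigned char) pixel becomes (unsigned char) ((ssize_t) pixel), so the floating-point quantum is first converted to a wide signed integer and only then narrowed. A small stand-alone C++ sketch of why that ordering matters, assuming a pixel value of 65535 (QuantumRange in a 16-bit build); the value and variable names are illustrative, not ImageMagick's:

#include <cstdio>

int main(void)
{
  float pixel = 65535.0f;   /* illustrative; e.g. QuantumRange in a Q16 build */

  /* Direct float -> unsigned char conversion is undefined behavior when the
     truncated value does not fit in unsigned char; this is the pattern UBSan
     reports as "outside the range of representable values of type
     'unsigned char'".
     unsigned char bad = (unsigned char) pixel;  */

  /* Converting through a wide signed integer first, as the patched code does
     with ((unsigned char) ((ssize_t) pixel)), is defined here: the value fits
     in the wide type, and the integer -> unsigned char step wraps modulo 256. */
  long wide = (long) pixel;                  /* 65535 */
  unsigned char ok = (unsigned char) wide;   /* 255 */
  std::printf("%d\n", (int) ok);
  return 0;
}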
idx: 194,996
project: tensorflow
commit_id: 4f38b1ac8e42727e18a2f0bde06d3bee8e77b250
project_url: https://github.com/tensorflow/tensorflow
commit_url: https://github.com/tensorflow/tensorflow/commit/4f38b1ac8e42727e18a2f0bde06d3bee8e77b250
commit_message: Prevent null dereference read in `GetInitOp`. We have a map of maps. We test that the key exists in the first map but then we don't have any validation that this also means the second map has the needed key. In the scenarios where this is not the case, we'll dereference a nullptr, if we don't have this check PiperOrigin-RevId: 408739325 Change-Id: If9bb7ed759aba1f3b56a34913f209508dbaf65ce
target: 1
func:
Status GetInitOp(const string& export_dir, const MetaGraphDef& meta_graph_def, string* init_op_name) { const auto& sig_def_map = meta_graph_def.signature_def(); const auto& init_op_sig_it = meta_graph_def.signature_def().find(kSavedModelInitOpSignatureKey); if (init_op_sig_it != sig_def_map.end()) { *init_op_name = init_op_sig_it->second.outputs() .find(kSavedModelInitOpSignatureKey) ->second.name(); return Status::OK(); } const auto& collection_def_map = meta_graph_def.collection_def(); string init_op_collection_key; if (collection_def_map.find(kSavedModelMainOpKey) != collection_def_map.end()) { init_op_collection_key = kSavedModelMainOpKey; } else { init_op_collection_key = kSavedModelLegacyInitOpKey; } const auto init_op_it = collection_def_map.find(init_op_collection_key); if (init_op_it != collection_def_map.end()) { if (init_op_it->second.node_list().value_size() != 1) { return errors::FailedPrecondition( strings::StrCat("Expected exactly one main op in : ", export_dir)); } *init_op_name = init_op_it->second.node_list().value(0); } return Status::OK(); }
func_hash: 90320046309155279319769139363770698236
file_name: loader_util.cc
file_hash: 223638670651747648145854147173893848422
cwe: CWE-476
cve: CVE-2022-23577
cve_desc: Tensorflow is an Open Source Machine Learning Framework. The implementation of `GetInitOp` is vulnerable to a crash caused by dereferencing a null pointer. The fix will be included in TensorFlow 2.8.0. We will also cherrypick this commit on TensorFlow 2.7.1, TensorFlow 2.6.3, and TensorFlow 2.5.3, as these are also affected and still in supported range.
nvd_url: https://nvd.nist.gov/vuln/detail/CVE-2022-23577
idx: 218,933
project: tensorflow
commit_id: 4f38b1ac8e42727e18a2f0bde06d3bee8e77b250
project_url: https://github.com/tensorflow/tensorflow
commit_url: https://github.com/tensorflow/tensorflow/commit/4f38b1ac8e42727e18a2f0bde06d3bee8e77b250
commit_message: Prevent null dereference read in `GetInitOp`. We have a map of maps. We test that the key exists in the first map but then we don't have any validation that this also means the second map has the needed key. In the scenarios where this is not the case, we'll dereference a nullptr, if we don't have this check PiperOrigin-RevId: 408739325 Change-Id: If9bb7ed759aba1f3b56a34913f209508dbaf65ce
target: 0
func:
Status GetInitOp(const string& export_dir, const MetaGraphDef& meta_graph_def, string* init_op_name) { const auto& sig_def_map = meta_graph_def.signature_def(); const auto& init_op_sig_it = meta_graph_def.signature_def().find(kSavedModelInitOpSignatureKey); if (init_op_sig_it != sig_def_map.end()) { const auto& sig_def_outputs = init_op_sig_it->second.outputs(); const auto& sig_def_outputs_it = sig_def_outputs.find(kSavedModelInitOpSignatureKey); if (sig_def_outputs_it == sig_def_outputs.end()) { return errors::FailedPrecondition("Could not find output ", kSavedModelInitOpSignatureKey); } *init_op_name = sig_def_outputs_it->second.name(); return Status::OK(); } const auto& collection_def_map = meta_graph_def.collection_def(); string init_op_collection_key; if (collection_def_map.find(kSavedModelMainOpKey) != collection_def_map.end()) { init_op_collection_key = kSavedModelMainOpKey; } else { init_op_collection_key = kSavedModelLegacyInitOpKey; } const auto init_op_it = collection_def_map.find(init_op_collection_key); if (init_op_it != collection_def_map.end()) { if (init_op_it->second.node_list().value_size() != 1) { return errors::FailedPrecondition( strings::StrCat("Expected exactly one main op in : ", export_dir)); } *init_op_name = init_op_it->second.node_list().value(0); } return Status::OK(); }
func_hash: 120370294428908534368713689048437773064
file_name: loader_util.cc
file_hash: 225205642200693417259460288987767726126
cwe: CWE-476
cve: CVE-2022-23577
cve_desc: Tensorflow is an Open Source Machine Learning Framework. The implementation of `GetInitOp` is vulnerable to a crash caused by dereferencing a null pointer. The fix will be included in TensorFlow 2.8.0. We will also cherrypick this commit on TensorFlow 2.7.1, TensorFlow 2.6.3, and TensorFlow 2.5.3, as these are also affected and still in supported range.
nvd_url: https://nvd.nist.gov/vuln/detail/CVE-2022-23577
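The CVE-2022-23577 commit message describes the defect precisely: GetInitOp verifies that the key exists in the outer signature_def map but then dereferences the result of the inner outputs().find() without checking it, and the patched row adds that second check and returns FailedPrecondition instead. The sketch below reproduces the same shape with plain std::map; get_init_op and the "init_op" key are illustrative stand-ins for the TensorFlow names (kSavedModelInitOpSignatureKey), not the real API. Dereferencing an end() iterator from find() is undefined behavior, which is why the guarded version fails cleanly.

#include <cstdio>
#include <map>
#include <string>

using Outer = std::map<std::string, std::map<std::string, std::string>>;

static bool get_init_op(const Outer &outer, const std::string &key,
                        std::string *out)
{
  auto oit = outer.find(key);
  if (oit == outer.end())
    return false;
  /* The pre-patch code did the moral equivalent of
       *out = oit->second.find(key)->second;
     which dereferences end() when the inner key is absent.  The patched code
     validates the inner lookup as well: */
  auto iit = oit->second.find(key);
  if (iit == oit->second.end())
    return false;                 /* missing inner key: report failure, no crash */
  *out = iit->second;
  return true;
}

int main()
{
  Outer sig_def_map;
  sig_def_map["init_op"];         /* outer key exists, inner map is empty */
  std::string name;
  std::printf("%s\n", get_init_op(sig_def_map, "init_op", &name)
                          ? name.c_str() : "not found");
  return 0;
}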
idx: 195,017
project: gpac
commit_id: ad18ece95fa064efc0995c4ab2c985f77fb166ec
project_url: https://github.com/gpac/gpac
commit_url: https://github.com/gpac/gpac/commit/ad18ece95fa064efc0995c4ab2c985f77fb166ec
commit_message: fixed #1904
target: 1
func:
u32 GetHintFormat(GF_TrackBox *trak) { GF_HintMediaHeaderBox *hmhd = (GF_HintMediaHeaderBox *)trak->Media->information->InfoHeader; if (hmhd->type != GF_ISOM_BOX_TYPE_HMHD) return 0; if (!hmhd || !hmhd->subType) { GF_Box *a = (GF_Box *)gf_list_get(trak->Media->information->sampleTable->SampleDescription->child_boxes, 0); if (!hmhd) return a ? a->type : 0; if (a) hmhd->subType = a->type; return hmhd->subType; } return hmhd->subType; }
func_hash: 91218268849686441388880855658517990203
file_name: hint_track.c
file_hash: 60176895274654779679144452624639678766
cwe: CWE-476
cve: CVE-2021-40576
cve_desc: The binary MP4Box in Gpac 1.0.1 has a null pointer dereference vulnerability in the gf_isom_get_payt_count function in hint_track.c, which allows attackers to cause a denial of service.
nvd_url: https://nvd.nist.gov/vuln/detail/CVE-2021-40576
idx: 219,912
project: gpac
commit_id: ad18ece95fa064efc0995c4ab2c985f77fb166ec
project_url: https://github.com/gpac/gpac
commit_url: https://github.com/gpac/gpac/commit/ad18ece95fa064efc0995c4ab2c985f77fb166ec
commit_message: fixed #1904
target: 0
func:
u32 GetHintFormat(GF_TrackBox *trak) { GF_HintMediaHeaderBox *hmhd = (GF_HintMediaHeaderBox *)trak->Media->information->InfoHeader; if (!hmhd || (hmhd->type != GF_ISOM_BOX_TYPE_HMHD)) return 0; if (!hmhd || !hmhd->subType) { GF_Box *a = (GF_Box *)gf_list_get(trak->Media->information->sampleTable->SampleDescription->child_boxes, 0); if (!hmhd) return a ? a->type : 0; if (a) hmhd->subType = a->type; return hmhd->subType; } return hmhd->subType; }
func_hash: 240641657114030682383886931707833033482
file_name: hint_track.c
file_hash: 28976036322661795345788739460485147148
cwe: CWE-476
cve: CVE-2021-40576
cve_desc: The binary MP4Box in Gpac 1.0.1 has a null pointer dereference vulnerability in the gf_isom_get_payt_count function in hint_track.c, which allows attackers to cause a denial of service.
nvd_url: https://nvd.nist.gov/vuln/detail/CVE-2021-40576
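The CVE-2021-40576 pair is the smallest diff in this batch: GetHintFormat read hmhd->type before testing hmhd for NULL, and the patch folds the NULL test into the same condition so short-circuit evaluation skips the dereference. A tiny illustrative C++ version of the two orderings follows; the struct and the 'hmhd' fourcc constant are stand-ins, not GPAC's types.

#include <cstdio>

struct HintMediaHeader
{
  unsigned type;
  unsigned subType;
};

/* The whole fix is the order of the tests: the short-circuit || guarantees
   the NULL check runs before hmhd->type is read. */
static unsigned get_hint_format(const HintMediaHeader *hmhd)
{
  const unsigned kHMHD = 0x686d6864;        /* illustrative 'hmhd' fourcc */
  /* pre-patch shape: if (hmhd->type != kHMHD) ... evaluated before !hmhd */
  if (!hmhd || hmhd->type != kHMHD)         /* patched shape */
    return 0;
  return hmhd->subType;
}

int main()
{
  std::printf("%u\n", get_hint_format(nullptr));   /* 0, no crash */
  return 0;
}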
idx: 195,026
project: linux
commit_id: ab0fc21bc7105b54bafd85bd8b82742f9e68898a
project_url: https://github.com/torvalds/linux
commit_url: https://github.com/torvalds/linux/commit/ab0fc21bc7105b54bafd85bd8b82742f9e68898a
commit_message: Revert "NFSv4: Handle the special Linux file open access mode" This reverts commit 44942b4e457beda00981f616402a1a791e8c616e. After secondly opening a file with O_ACCMODE|O_DIRECT flags, nfs4_valid_open_stateid() will dereference NULL nfs4_state when lseek(). Reproducer: 1. mount -t nfs -o vers=4.2 $server_ip:/ /mnt/ 2. fd = open("/mnt/file", O_ACCMODE|O_DIRECT|O_CREAT) 3. close(fd) 4. fd = open("/mnt/file", O_ACCMODE|O_DIRECT) 5. lseek(fd) Reported-by: Lyu Tao <tao.lyu@epfl.ch> Signed-off-by: ChenXiaoSong <chenxiaosong2@huawei.com> Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
target: 1
func:
nfs4_file_open(struct inode *inode, struct file *filp) { struct nfs_open_context *ctx; struct dentry *dentry = file_dentry(filp); struct dentry *parent = NULL; struct inode *dir; unsigned openflags = filp->f_flags; struct iattr attr; int err; /* * If no cached dentry exists or if it's negative, NFSv4 handled the * opens in ->lookup() or ->create(). * * We only get this far for a cached positive dentry. We skipped * revalidation, so handle it here by dropping the dentry and returning * -EOPENSTALE. The VFS will retry the lookup/create/open. */ dprintk("NFS: open file(%pd2)\n", dentry); err = nfs_check_flags(openflags); if (err) return err; if ((openflags & O_ACCMODE) == 3) return nfs_open(inode, filp); /* We can't create new files here */ openflags &= ~(O_CREAT|O_EXCL); parent = dget_parent(dentry); dir = d_inode(parent); ctx = alloc_nfs_open_context(file_dentry(filp), filp->f_mode, filp); err = PTR_ERR(ctx); if (IS_ERR(ctx)) goto out; attr.ia_valid = ATTR_OPEN; if (openflags & O_TRUNC) { attr.ia_valid |= ATTR_SIZE; attr.ia_size = 0; filemap_write_and_wait(inode->i_mapping); } inode = NFS_PROTO(dir)->open_context(dir, ctx, openflags, &attr, NULL); if (IS_ERR(inode)) { err = PTR_ERR(inode); switch (err) { default: goto out_put_ctx; case -ENOENT: case -ESTALE: case -EISDIR: case -ENOTDIR: case -ELOOP: goto out_drop; } } if (inode != d_inode(dentry)) goto out_drop; nfs_file_set_open_context(filp, ctx); nfs_fscache_open_file(inode, filp); err = 0; out_put_ctx: put_nfs_open_context(ctx); out: dput(parent); return err; out_drop: d_drop(dentry); err = -EOPENSTALE; goto out_put_ctx; }
67846125552854891508125900978071958871
nfs4file.c
109456154040292488452120321326967957719
CWE-909
CVE-2022-24448
An issue was discovered in fs/nfs/dir.c in the Linux kernel before 5.16.5. If an application sets the O_DIRECTORY flag, and tries to open a regular file, nfs_atomic_open() performs a regular lookup. If a regular file is found, ENOTDIR should occur, but the server instead returns uninitialized data in the file descriptor.
https://nvd.nist.gov/vuln/detail/CVE-2022-24448
220,100
linux
ab0fc21bc7105b54bafd85bd8b82742f9e68898a
https://github.com/torvalds/linux
https://github.com/torvalds/linux/commit/ab0fc21bc7105b54bafd85bd8b82742f9e68898a
Revert "NFSv4: Handle the special Linux file open access mode" This reverts commit 44942b4e457beda00981f616402a1a791e8c616e. After secondly opening a file with O_ACCMODE|O_DIRECT flags, nfs4_valid_open_stateid() will dereference NULL nfs4_state when lseek(). Reproducer: 1. mount -t nfs -o vers=4.2 $server_ip:/ /mnt/ 2. fd = open("/mnt/file", O_ACCMODE|O_DIRECT|O_CREAT) 3. close(fd) 4. fd = open("/mnt/file", O_ACCMODE|O_DIRECT) 5. lseek(fd) Reported-by: Lyu Tao <tao.lyu@epfl.ch> Signed-off-by: ChenXiaoSong <chenxiaosong2@huawei.com> Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
0
nfs4_file_open(struct inode *inode, struct file *filp) { struct nfs_open_context *ctx; struct dentry *dentry = file_dentry(filp); struct dentry *parent = NULL; struct inode *dir; unsigned openflags = filp->f_flags; struct iattr attr; int err; /* * If no cached dentry exists or if it's negative, NFSv4 handled the * opens in ->lookup() or ->create(). * * We only get this far for a cached positive dentry. We skipped * revalidation, so handle it here by dropping the dentry and returning * -EOPENSTALE. The VFS will retry the lookup/create/open. */ dprintk("NFS: open file(%pd2)\n", dentry); err = nfs_check_flags(openflags); if (err) return err; if ((openflags & O_ACCMODE) == 3) openflags--; /* We can't create new files here */ openflags &= ~(O_CREAT|O_EXCL); parent = dget_parent(dentry); dir = d_inode(parent); ctx = alloc_nfs_open_context(file_dentry(filp), filp->f_mode, filp); err = PTR_ERR(ctx); if (IS_ERR(ctx)) goto out; attr.ia_valid = ATTR_OPEN; if (openflags & O_TRUNC) { attr.ia_valid |= ATTR_SIZE; attr.ia_size = 0; filemap_write_and_wait(inode->i_mapping); } inode = NFS_PROTO(dir)->open_context(dir, ctx, openflags, &attr, NULL); if (IS_ERR(inode)) { err = PTR_ERR(inode); switch (err) { default: goto out_put_ctx; case -ENOENT: case -ESTALE: case -EISDIR: case -ENOTDIR: case -ELOOP: goto out_drop; } } if (inode != d_inode(dentry)) goto out_drop; nfs_file_set_open_context(filp, ctx); nfs_fscache_open_file(inode, filp); err = 0; out_put_ctx: put_nfs_open_context(ctx); out: dput(parent); return err; out_drop: d_drop(dentry); err = -EOPENSTALE; goto out_put_ctx; }
272987829557105540879962051296017178836
nfs4file.c
19160442996144037090827134285929888626
CWE-909
CVE-2022-24448
An issue was discovered in fs/nfs/dir.c in the Linux kernel before 5.16.5. If an application sets the O_DIRECTORY flag, and tries to open a regular file, nfs_atomic_open() performs a regular lookup. If a regular file is found, ENOTDIR should occur, but the server instead returns uninitialized data in the file descriptor.
https://nvd.nist.gov/vuln/detail/CVE-2022-24448
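The commit message in the pair of records above spells out a five-step reproducer for the reverted NFSv4 O_ACCMODE handling. Below is a self-contained userspace C++ sketch of those steps; it assumes an NFSv4.2 export is already mounted at /mnt (step 1), and the file name is an arbitrary placeholder.

```cpp
#include <fcntl.h>
#include <unistd.h>
#include <cstdio>

// Userspace sketch of the reproducer from the commit message. On an affected
// kernel the final lseek() could hit the NULL nfs4_state dereference the
// revert describes; on a fixed kernel it simply succeeds or fails cleanly.
int main() {
    const char* path = "/mnt/file"; // hypothetical path on the NFS mount

    // Steps 2-3: create the file with O_ACCMODE|O_DIRECT|O_CREAT, then close it.
    int fd = open(path, O_ACCMODE | O_DIRECT | O_CREAT, 0644);
    if (fd < 0) { perror("open (create)"); return 1; }
    close(fd);

    // Step 4: reopen with O_ACCMODE|O_DIRECT (the special access mode 3).
    fd = open(path, O_ACCMODE | O_DIRECT);
    if (fd < 0) { perror("open (reopen)"); return 1; }

    // Step 5: lseek() is what triggers the problematic stateid lookup.
    if (lseek(fd, 0, SEEK_END) < 0) perror("lseek");

    close(fd);
    return 0;
}
```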
195,038
mruby
27d1e0132a0804581dca28df042e7047fd27eaa8
https://github.com/mruby/mruby
https://github.com/mruby/mruby/commit/27d1e0132a0804581dca28df042e7047fd27eaa8
array.c: fix `mrb_ary_shift_m` initialization bug. The `ARY_PTR` and `ARY_LEN` may be modified in `mrb_get_args`.
1
mrb_ary_shift_m(mrb_state *mrb, mrb_value self) { struct RArray *a = mrb_ary_ptr(self); mrb_int len = ARY_LEN(a); mrb_int n; mrb_value val; if (mrb_get_args(mrb, "|i", &n) == 0) { return mrb_ary_shift(mrb, self); }; ary_modify_check(mrb, a); if (len == 0 || n == 0) return mrb_ary_new(mrb); if (n < 0) mrb_raise(mrb, E_ARGUMENT_ERROR, "negative array shift"); if (n > len) n = len; val = mrb_ary_new_from_values(mrb, n, ARY_PTR(a)); if (ARY_SHARED_P(a)) { L_SHIFT: a->as.heap.ptr+=n; a->as.heap.len-=n; return val; } if (len > ARY_SHIFT_SHARED_MIN) { ary_make_shared(mrb, a); goto L_SHIFT; } else if (len == n) { ARY_SET_LEN(a, 0); } else { mrb_value *ptr = ARY_PTR(a); mrb_int size = len-n; while (size--) { *ptr = *(ptr+n); ++ptr; } ARY_SET_LEN(a, len-n); } return val; }
88987793594626442814152795226896894437
array.c
131985777969528154957566525214352537878
CWE-476
CVE-2021-4188
mruby is vulnerable to NULL Pointer Dereference
https://nvd.nist.gov/vuln/detail/CVE-2021-4188
220,442
mruby
27d1e0132a0804581dca28df042e7047fd27eaa8
https://github.com/mruby/mruby
https://github.com/mruby/mruby/commit/27d1e0132a0804581dca28df042e7047fd27eaa8
array.c: fix `mrb_ary_shift_m` initialization bug. The `ARY_PTR` and `ARY_LEN` may be modified in `mrb_get_args`.
0
mrb_ary_shift_m(mrb_state *mrb, mrb_value self) { mrb_int n; if (mrb_get_args(mrb, "|i", &n) == 0) { return mrb_ary_shift(mrb, self); } struct RArray *a = mrb_ary_ptr(self); mrb_int len = ARY_LEN(a); mrb_value val; ary_modify_check(mrb, a); if (len == 0 || n == 0) return mrb_ary_new(mrb); if (n < 0) mrb_raise(mrb, E_ARGUMENT_ERROR, "negative array shift"); if (n > len) n = len; val = mrb_ary_new_from_values(mrb, n, ARY_PTR(a)); if (ARY_SHARED_P(a)) { L_SHIFT: a->as.heap.ptr+=n; a->as.heap.len-=n; return val; } if (len > ARY_SHIFT_SHARED_MIN) { ary_make_shared(mrb, a); goto L_SHIFT; } else if (len == n) { ARY_SET_LEN(a, 0); } else { mrb_value *ptr = ARY_PTR(a); mrb_int size = len-n; while (size--) { *ptr = *(ptr+n); ++ptr; } ARY_SET_LEN(a, len-n); } return val; }
336824346603495353101799104649854425750
array.c
295526445825727607536544634773604768998
CWE-476
CVE-2021-4188
mruby is vulnerable to NULL Pointer Dereference
https://nvd.nist.gov/vuln/detail/CVE-2021-4188
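The mruby records above fix an ordering bug: ARY_PTR/ARY_LEN were captured before mrb_get_args, which may modify the array and leave those cached values stale. The sketch below illustrates the same bug class with plain C++ containers; the helper and names are stand-ins, not mruby APIs.

```cpp
#include <vector>
#include <iostream>

// Toy illustration of the stale-pointer pattern fixed in mrb_ary_shift_m:
// values derived from a container (data pointer, length) must be read *after*
// any call that may mutate or reallocate the container.
void parse_args_may_grow(std::vector<int>& v) {
    v.push_back(42); // may reallocate, invalidating earlier data() pointers
}

int main() {
    std::vector<int> ary = {1, 2, 3};

    // Fixed ordering: run the routine that can modify the array first, then
    // fetch the pointer and length -- mirroring the patched function, which
    // now calls mrb_get_args() before reading ARY_PTR/ARY_LEN.
    parse_args_may_grow(ary);
    int* ptr = ary.data();
    std::size_t len = ary.size();

    for (std::size_t i = 0; i < len; ++i) std::cout << ptr[i] << " ";
    std::cout << "\n";
}
```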
195,039
tensorflow
e7f497570abb6b4ae5af4970620cd880e4c0c904
https://github.com/tensorflow/tensorflow
https://github.com/tensorflow/tensorflow/commit/e7f497570abb6b4ae5af4970620cd880e4c0c904
Fix segfault on OOM in Conv2D. PiperOrigin-RevId: 404655317 Change-Id: I33588dbd3f5d0fef980e3c908bf5515a9ee09ce7
1
void operator()(OpKernelContext* ctx, const Tensor& input, const Tensor& filter, int row_stride, int col_stride, int row_dilation, int col_dilation, const Padding& padding, const std::vector<int64_t>& explicit_paddings, Tensor* output, TensorFormat data_format) { DCHECK(data_format == FORMAT_NHWC) << "Grouped conv implementation only " "supports NHWC tensor format for now."; const int64_t in_depth = input.dim_size(3); const int64_t patch_depth = filter.dim_size(2); const int64_t num_groups = in_depth / patch_depth; // Shuffle input/filter tensors to have group as a leading dimension. std::array<int64_t, 5> shuffle({3, 0, 1, 2, 4}); // Compute pre shuffle dimemnsions. auto pre_shuffle = [&](const Tensor& tensor) -> std::array<int64, 5> { return {tensor.dim_size(0), tensor.dim_size(1), tensor.dim_size(2), num_groups, tensor.dim_size(3) / num_groups}; }; // Compute post shuffle dimemnsions. auto post_shuffle = [&](const Tensor& tensor) -> std::array<int64, 5> { return {num_groups, tensor.dim_size(0), tensor.dim_size(1), tensor.dim_size(2), tensor.dim_size(3) / num_groups}; }; auto& device = ctx->eigen_device<CPUDevice>(); absl::BlockingCounter shuffles_completed(2); auto on_shuffled = [&]() { shuffles_completed.DecrementCount(); }; // Shuffle input into temporary tensor. Tensor input_shuffled(input.dtype(), TensorShape(post_shuffle(input))); input_shuffled.tensor<T, 5>().device(device, on_shuffled) = input.shaped<T, 5>(pre_shuffle(input)).shuffle(shuffle); // Shuffle filter into temporary tensor. Tensor filter_shuffled(filter.dtype(), TensorShape(post_shuffle(filter))); filter_shuffled.tensor<T, 5>().device(device, on_shuffled) = filter.shaped<T, 5>(pre_shuffle(filter)).shuffle(shuffle); // Wait for the completion of input/filter shuffles. shuffles_completed.Wait(); // Write group convolution results into temporary output tensor. Tensor output_shuffled(output->dtype(), TensorShape(post_shuffle(*output))); for (int64_t i = 0; i < num_groups; ++i) { // TODO(ezhulenev): Run this loop using `parallelFor` (regular parallelFor // will lead to deadlock, SpatialConvolution has to use async Eigen // assignment). This requires small changes to Eigen to support async // exeuction for tensor chipping operation. // TODO(ezhulenev): Grouped convolution should also support 1x1 filter // optimization. auto input_slice = input_shuffled.tensor<T, 5>().template chip<0>(i); auto filter_slice = filter_shuffled.tensor<T, 5>().template chip<0>(i); auto output_slice = output_shuffled.tensor<T, 5>().template chip<0>(i); if (padding == EXPLICIT) { functor::SpatialConvolution<CPUDevice, T>()( ctx->eigen_device<CPUDevice>(), output_slice, input_slice, filter_slice, row_stride, col_stride, row_dilation, col_dilation, static_cast<int>(explicit_paddings[2]), static_cast<int>(explicit_paddings[3]), static_cast<int>(explicit_paddings[4]), static_cast<int>(explicit_paddings[5])); } else { functor::SpatialConvolution<CPUDevice, T>()( ctx->eigen_device<CPUDevice>(), output_slice, input_slice, filter_slice, row_stride, col_stride, row_dilation, col_dilation, BrainPadding2EigenPadding(padding)); } } // Shuffle temporary output back into pre-shuffled shape. std::array<int64_t, 5> rev_shuffle({1, 2, 3, 0, 4}); output->shaped<T, 5>(pre_shuffle(*output)).device(device) = output_shuffled.tensor<T, 5>().shuffle(rev_shuffle); }
257618220779157714024325768166416151732
conv_ops.cc
252300068611383622428481854806618645318
CWE-354
CVE-2021-41206
TensorFlow is an open source platform for machine learning. In affected versions several TensorFlow operations are missing validation for the shapes of the tensor arguments involved in the call. Depending on the API, this can result in undefined behavior and segfault or `CHECK`-fail related crashes but in some scenarios writes and reads from heap populated arrays are also possible. We have discovered these issues internally via tooling while working on improving/testing GPU op determinism. As such, we don't have reproducers and there will be multiple fixes for these issues. These fixes will be included in TensorFlow 2.7.0. We will also cherrypick these commits on TensorFlow 2.6.1, TensorFlow 2.5.2, and TensorFlow 2.4.4, as these are also affected and still in supported range.
https://nvd.nist.gov/vuln/detail/CVE-2021-41206
220,449
tensorflow
e7f497570abb6b4ae5af4970620cd880e4c0c904
https://github.com/tensorflow/tensorflow
https://github.com/tensorflow/tensorflow/commit/e7f497570abb6b4ae5af4970620cd880e4c0c904
Fix segfault on OOM in Conv2D. PiperOrigin-RevId: 404655317 Change-Id: I33588dbd3f5d0fef980e3c908bf5515a9ee09ce7
0
void operator()(OpKernelContext* ctx, const Tensor& input, const Tensor& filter, int row_stride, int col_stride, int row_dilation, int col_dilation, const Padding& padding, const std::vector<int64_t>& explicit_paddings, Tensor* output, TensorFormat data_format) { DCHECK(data_format == FORMAT_NHWC) << "Grouped conv implementation only " "supports NHWC tensor format for now."; const int64_t in_depth = input.dim_size(3); const int64_t patch_depth = filter.dim_size(2); const int64_t num_groups = in_depth / patch_depth; // Shuffle input/filter tensors to have group as a leading dimension. std::array<int64_t, 5> shuffle({3, 0, 1, 2, 4}); // Compute pre shuffle dimemnsions. auto pre_shuffle = [&](const Tensor& tensor) -> std::array<int64, 5> { return {tensor.dim_size(0), tensor.dim_size(1), tensor.dim_size(2), num_groups, tensor.dim_size(3) / num_groups}; }; // Compute post shuffle dimemnsions. auto post_shuffle = [&](const Tensor& tensor) -> std::array<int64, 5> { return {num_groups, tensor.dim_size(0), tensor.dim_size(1), tensor.dim_size(2), tensor.dim_size(3) / num_groups}; }; auto& device = ctx->eigen_device<CPUDevice>(); absl::BlockingCounter shuffles_completed(2); auto on_shuffled = [&]() { shuffles_completed.DecrementCount(); }; // Shuffle input into temporary tensor. Tensor input_shuffled; OP_REQUIRES_OK( ctx, ctx->allocate_temp(input.dtype(), TensorShape(post_shuffle(input)), &input_shuffled)); input_shuffled.tensor<T, 5>().device(device, on_shuffled) = input.shaped<T, 5>(pre_shuffle(input)).shuffle(shuffle); // Shuffle filter into temporary tensor. Tensor filter_shuffled; OP_REQUIRES_OK(ctx, ctx->allocate_temp(filter.dtype(), TensorShape(post_shuffle(filter)), &filter_shuffled)); filter_shuffled.tensor<T, 5>().device(device, on_shuffled) = filter.shaped<T, 5>(pre_shuffle(filter)).shuffle(shuffle); // Wait for the completion of input/filter shuffles. shuffles_completed.Wait(); // Write group convolution results into temporary output tensor. Tensor output_shuffled; OP_REQUIRES_OK(ctx, ctx->allocate_temp(output->dtype(), TensorShape(post_shuffle(*output)), &output_shuffled)); for (int64_t i = 0; i < num_groups; ++i) { // TODO(ezhulenev): Run this loop using `parallelFor` (regular parallelFor // will lead to deadlock, SpatialConvolution has to use async Eigen // assignment). This requires small changes to Eigen to support async // exeuction for tensor chipping operation. // TODO(ezhulenev): Grouped convolution should also support 1x1 filter // optimization. auto input_slice = input_shuffled.tensor<T, 5>().template chip<0>(i); auto filter_slice = filter_shuffled.tensor<T, 5>().template chip<0>(i); auto output_slice = output_shuffled.tensor<T, 5>().template chip<0>(i); if (padding == EXPLICIT) { functor::SpatialConvolution<CPUDevice, T>()( ctx->eigen_device<CPUDevice>(), output_slice, input_slice, filter_slice, row_stride, col_stride, row_dilation, col_dilation, static_cast<int>(explicit_paddings[2]), static_cast<int>(explicit_paddings[3]), static_cast<int>(explicit_paddings[4]), static_cast<int>(explicit_paddings[5])); } else { functor::SpatialConvolution<CPUDevice, T>()( ctx->eigen_device<CPUDevice>(), output_slice, input_slice, filter_slice, row_stride, col_stride, row_dilation, col_dilation, BrainPadding2EigenPadding(padding)); } } // Shuffle temporary output back into pre-shuffled shape. std::array<int64_t, 5> rev_shuffle({1, 2, 3, 0, 4}); output->shaped<T, 5>(pre_shuffle(*output)).device(device) = output_shuffled.tensor<T, 5>().shuffle(rev_shuffle); }
52476148530312265483336987277784785500
conv_ops.cc
162425470101834995272420301327894414264
CWE-354
CVE-2021-41206
TensorFlow is an open source platform for machine learning. In affected versions several TensorFlow operations are missing validation for the shapes of the tensor arguments involved in the call. Depending on the API, this can result in undefined behavior and segfault or `CHECK`-fail related crashes but in some scenarios writes and reads from heap populated arrays are also possible. We have discovered these issues internally via tooling while working on improving/testing GPU op determinism. As such, we don't have reproducers and there will be multiple fixes for these issues. These fixes will be included in TensorFlow 2.7.0. We will also cherrypick these commits on TensorFlow 2.6.1, TensorFlow 2.5.2, and TensorFlow 2.4.4, as these are also affected and still in supported range.
https://nvd.nist.gov/vuln/detail/CVE-2021-41206
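The Conv2D records above replace direct temporary-tensor construction with an allocator call whose status is checked, so an out-of-memory condition becomes a clean error instead of a segfault. The snippet below is a minimal C++ sketch of that check-the-allocation pattern; the allocator function is a stand-in, not TensorFlow's allocate_temp API.

```cpp
#include <new>
#include <cstddef>
#include <cstdio>

// Stand-in for a checked temporary allocation: report failure to the caller
// instead of handing back a buffer that was never actually allocated.
bool allocate_temp(std::size_t elements, float** out) {
    *out = new (std::nothrow) float[elements];
    return *out != nullptr;
}

int main() {
    float* scratch = nullptr;
    const std::size_t huge = static_cast<std::size_t>(1) << 40; // likely to fail

    if (!allocate_temp(huge, &scratch)) {
        std::fprintf(stderr, "allocation failed, aborting kernel cleanly\n");
        return 1; // analogous to OP_REQUIRES_OK propagating the error
    }
    scratch[0] = 1.0f; // only reached when the buffer really exists
    delete[] scratch;
    return 0;
}
```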
195,055
tensorflow
2b7100d6cdff36aa21010a82269bc05a6d1cc74a
https://github.com/tensorflow/tensorflow
https://github.com/tensorflow/tensorflow/commit/2b7100d6cdff36aa21010a82269bc05a6d1cc74a
Cleanup and remove duplicate validation in `SparseCount`. We have valdiation that is duplicated, checking different conditions, in different formats and failing to capture all cases. This should fix all the previous bugs. PiperOrigin-RevId: 414886981 Change-Id: Ibf0bba0beb057b76d505324bb9487565daf95f01
1
void Compute(OpKernelContext* context) override { const Tensor& indices = context->input(0); const Tensor& values = context->input(1); const Tensor& shape = context->input(2); const Tensor& weights = context->input(3); bool use_weights = weights.NumElements() > 0; OP_REQUIRES(context, TensorShapeUtils::IsMatrix(indices.shape()), errors::InvalidArgument( "Input indices must be a 2-dimensional tensor. Got: ", indices.shape().DebugString())); if (use_weights) { OP_REQUIRES( context, weights.shape() == values.shape(), errors::InvalidArgument( "Weights and values must have the same shape. Weight shape: ", weights.shape().DebugString(), "; values shape: ", values.shape().DebugString())); } OP_REQUIRES(context, shape.NumElements() != 0, errors::InvalidArgument( "The shape argument requires at least one element.")); bool is_1d = shape.NumElements() == 1; auto shape_vector = shape.flat<int64_t>(); int num_batches = is_1d ? 1 : shape_vector(0); int num_values = values.NumElements(); for (int b = 0; b < shape_vector.size(); b++) { OP_REQUIRES(context, shape_vector(b) >= 0, errors::InvalidArgument( "Elements in dense_shape must be >= 0. Instead got:", shape.DebugString())); } OP_REQUIRES(context, num_values == indices.shape().dim_size(0), errors::InvalidArgument( "Number of values must match first dimension of indices.", "Got ", num_values, " values, indices shape: ", indices.shape().DebugString())); const auto indices_values = indices.matrix<int64_t>(); const auto values_values = values.flat<T>(); const auto weight_values = weights.flat<W>(); auto per_batch_counts = BatchedMap<W>(num_batches); T max_value = 0; OP_REQUIRES(context, num_values <= indices.shape().dim_size(0), errors::InvalidArgument( "The first dimension of indices must be equal to or " "greather than number of values. ( ", indices.shape().dim_size(0), " vs. ", num_values, " )")); OP_REQUIRES(context, indices.shape().dim_size(1) > 0, errors::InvalidArgument("The second dimension of indices must " "be greater than 0. Received: ", indices.shape().dim_size(1))); for (int idx = 0; idx < num_values; ++idx) { int batch = is_1d ? 0 : indices_values(idx, 0); if (batch >= num_batches) { OP_REQUIRES(context, batch < num_batches, errors::InvalidArgument( "Indices value along the first dimension must be ", "lower than the first index of the shape.", "Got ", batch, " as batch and ", num_batches, " as the first dimension of the shape.")); } const auto& value = values_values(idx); if (value >= 0 && (maxlength_ <= 0 || value < maxlength_)) { if (binary_output_) { per_batch_counts[batch][value] = 1; } else if (use_weights) { per_batch_counts[batch][value] += weight_values(idx); } else { per_batch_counts[batch][value]++; } if (value > max_value) { max_value = value; } } } int num_output_values = GetOutputSize(max_value, maxlength_, minlength_); OP_REQUIRES_OK(context, OutputSparse<W>(per_batch_counts, num_output_values, is_1d, context)); }
115744370413617881150207979427400512016
count_ops.cc
290832582717285970119064032382621433475
CWE-787
CVE-2022-21740
Tensorflow is an Open Source Machine Learning Framework. The implementation of `SparseCountSparseOutput` is vulnerable to a heap overflow. The fix will be included in TensorFlow 2.8.0. We will also cherrypick this commit on TensorFlow 2.7.1, TensorFlow 2.6.3, and TensorFlow 2.5.3, as these are also affected and still in supported range.
https://nvd.nist.gov/vuln/detail/CVE-2022-21740
220,804
tensorflow
2b7100d6cdff36aa21010a82269bc05a6d1cc74a
https://github.com/tensorflow/tensorflow
https://github.com/tensorflow/tensorflow/commit/2b7100d6cdff36aa21010a82269bc05a6d1cc74a
Cleanup and remove duplicate validation in `SparseCount`. We have valdiation that is duplicated, checking different conditions, in different formats and failing to capture all cases. This should fix all the previous bugs. PiperOrigin-RevId: 414886981 Change-Id: Ibf0bba0beb057b76d505324bb9487565daf95f01
0
void Compute(OpKernelContext* context) override { const Tensor& splits = context->input(0); const Tensor& values = context->input(1); const Tensor& weights = context->input(2); bool use_weights = weights.NumElements() > 0; bool is_1d = false; if (use_weights) { OP_REQUIRES( context, weights.shape() == values.shape(), errors::InvalidArgument( "Weights and values must have the same shape. Weight shape: ", weights.shape().DebugString(), "; values shape: ", values.shape().DebugString())); } const auto splits_values = splits.flat<int64_t>(); const auto values_values = values.flat<T>(); const auto weight_values = weights.flat<W>(); int num_batches = splits.NumElements() - 1; int num_values = values.NumElements(); OP_REQUIRES( context, num_batches > 0, errors::InvalidArgument( "Must provide at least 2 elements for the splits argument")); OP_REQUIRES(context, splits_values(0) == 0, errors::InvalidArgument("Splits must start with 0, not with ", splits_values(0))); OP_REQUIRES(context, splits_values(num_batches) == num_values, errors::InvalidArgument( "Splits must end with the number of values, got ", splits_values(num_batches), " instead of ", num_values)); auto per_batch_counts = BatchedMap<W>(num_batches); T max_value = 0; int batch_idx = 0; for (int idx = 0; idx < num_values; ++idx) { while (idx >= splits_values(batch_idx)) { batch_idx++; } const auto& value = values_values(idx); if (value >= 0 && (maxlength_ <= 0 || value < maxlength_)) { if (binary_output_) { per_batch_counts[batch_idx - 1][value] = 1; } else if (use_weights) { per_batch_counts[batch_idx - 1][value] += weight_values(idx); } else { per_batch_counts[batch_idx - 1][value]++; } if (value > max_value) { max_value = value; } } } int num_output_values = GetOutputSize(max_value, maxlength_, minlength_); OP_REQUIRES_OK(context, OutputSparse<W>(per_batch_counts, num_output_values, is_1d, context)); }
321329284400462468105618833406255634390
count_ops.cc
221778566959720819887290009238961995785
CWE-787
CVE-2022-21740
Tensorflow is an Open Source Machine Learning Framework. The implementation of `SparseCountSparseOutput` is vulnerable to a heap overflow. The fix will be included in TensorFlow 2.8.0. We will also cherrypick this commit on TensorFlow 2.7.1, TensorFlow 2.6.3, and TensorFlow 2.5.3, as these are also affected and still in supported range.
https://nvd.nist.gov/vuln/detail/CVE-2022-21740
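The fixed kernel in the records above consolidates validation of the ragged "splits" input before the values are walked: there must be at least two entries (one batch), the first must be 0, and the last must equal the number of values. Below is a minimal C++ sketch of those checks; names are illustrative, not TensorFlow's.

```cpp
#include <vector>
#include <cstdint>
#include <cstdio>

// Validate a ragged-tensor splits vector against the number of values it is
// supposed to partition, mirroring the checks in the fixed Compute() above.
bool validate_splits(const std::vector<int64_t>& splits, int64_t num_values) {
    if (splits.size() < 2) return false;           // need at least one batch
    if (splits.front() != 0) return false;         // splits must start with 0
    if (splits.back() != num_values) return false; // and end at num_values
    return true;
}

int main() {
    std::printf("%d\n", validate_splits({0, 2, 5}, 5)); // 1: well formed
    std::printf("%d\n", validate_splits({1, 5}, 5));    // 0: does not start at 0
    std::printf("%d\n", validate_splits({0, 3}, 5));    // 0: does not end at 5
}
```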
195,056
tensorflow
8c6f391a2282684a25cbfec7687bd5d35261a209
https://github.com/tensorflow/tensorflow
https://github.com/tensorflow/tensorflow/commit/8c6f391a2282684a25cbfec7687bd5d35261a209
[lite] Add check for bias_size is zero to avoid division by zero. This shouldn't happen for properly converted models. Just safety check PiperOrigin-RevId: 416383645 Change-Id: If8e508bf696ae8ecfb927e69c139a8ccf7fe60cb
1
inline void BiasAndClamp(float clamp_min, float clamp_max, int bias_size, const float* bias_data, int array_size, float* array_data) { // Note: see b/132215220: in May 2019 we thought it would be OK to replace // this with the Eigen one-liner: // return (array.colwise() + bias).cwiseMin(clamp_max).cwiseMin(clamp_max). // This turned out to severely regress performance: +4ms (i.e. 8%) on // MobileNet v2 / 1.0 / 224. So we keep custom NEON code for now. TFLITE_DCHECK_EQ((array_size % bias_size), 0); #ifdef USE_NEON float* array_ptr = array_data; float* array_end_ptr = array_ptr + array_size; const auto clamp_min_vec = vdupq_n_f32(clamp_min); const auto clamp_max_vec = vdupq_n_f32(clamp_max); for (; array_ptr != array_end_ptr; array_ptr += bias_size) { int i = 0; for (; i <= bias_size - 16; i += 16) { auto b0 = vld1q_f32(bias_data + i); auto b1 = vld1q_f32(bias_data + i + 4); auto b2 = vld1q_f32(bias_data + i + 8); auto b3 = vld1q_f32(bias_data + i + 12); auto a0 = vld1q_f32(array_ptr + i); auto a1 = vld1q_f32(array_ptr + i + 4); auto a2 = vld1q_f32(array_ptr + i + 8); auto a3 = vld1q_f32(array_ptr + i + 12); auto x0 = vaddq_f32(a0, b0); auto x1 = vaddq_f32(a1, b1); auto x2 = vaddq_f32(a2, b2); auto x3 = vaddq_f32(a3, b3); x0 = vmaxq_f32(clamp_min_vec, x0); x1 = vmaxq_f32(clamp_min_vec, x1); x2 = vmaxq_f32(clamp_min_vec, x2); x3 = vmaxq_f32(clamp_min_vec, x3); x0 = vminq_f32(clamp_max_vec, x0); x1 = vminq_f32(clamp_max_vec, x1); x2 = vminq_f32(clamp_max_vec, x2); x3 = vminq_f32(clamp_max_vec, x3); vst1q_f32(array_ptr + i, x0); vst1q_f32(array_ptr + i + 4, x1); vst1q_f32(array_ptr + i + 8, x2); vst1q_f32(array_ptr + i + 12, x3); } for (; i <= bias_size - 4; i += 4) { auto b = vld1q_f32(bias_data + i); auto a = vld1q_f32(array_ptr + i); auto x = vaddq_f32(a, b); x = vmaxq_f32(clamp_min_vec, x); x = vminq_f32(clamp_max_vec, x); vst1q_f32(array_ptr + i, x); } for (; i < bias_size; i++) { array_ptr[i] = ActivationFunctionWithMinMax(array_ptr[i] + bias_data[i], clamp_min, clamp_max); } } #else // not NEON for (int array_offset = 0; array_offset < array_size; array_offset += bias_size) { for (int i = 0; i < bias_size; i++) { array_data[array_offset + i] = ActivationFunctionWithMinMax( array_data[array_offset + i] + bias_data[i], clamp_min, clamp_max); } } #endif }
154263320578941255259441922880599149557
common.h
11373796702176609664888229687660280569
CWE-369
CVE-2022-23557
Tensorflow is an Open Source Machine Learning Framework. An attacker can craft a TFLite model that would trigger a division by zero in `BiasAndClamp` implementation. There is no check that the `bias_size` is non zero. The fix will be included in TensorFlow 2.8.0. We will also cherrypick this commit on TensorFlow 2.7.1, TensorFlow 2.6.3, and TensorFlow 2.5.3, as these are also affected and still in supported range.
https://nvd.nist.gov/vuln/detail/CVE-2022-23557
220,841
tensorflow
8c6f391a2282684a25cbfec7687bd5d35261a209
https://github.com/tensorflow/tensorflow
https://github.com/tensorflow/tensorflow/commit/8c6f391a2282684a25cbfec7687bd5d35261a209
[lite] Add check for bias_size is zero to avoid division by zero. This shouldn't happen for properly converted models. Just safety check PiperOrigin-RevId: 416383645 Change-Id: If8e508bf696ae8ecfb927e69c139a8ccf7fe60cb
0
inline void BiasAndClamp(float clamp_min, float clamp_max, int bias_size, const float* bias_data, int array_size, float* array_data) { if (bias_size == 0) return; // Note: see b/132215220: in May 2019 we thought it would be OK to replace // this with the Eigen one-liner: // return (array.colwise() + bias).cwiseMin(clamp_max).cwiseMin(clamp_max). // This turned out to severely regress performance: +4ms (i.e. 8%) on // MobileNet v2 / 1.0 / 224. So we keep custom NEON code for now. TFLITE_DCHECK_EQ((array_size % bias_size), 0); #ifdef USE_NEON float* array_ptr = array_data; float* array_end_ptr = array_ptr + array_size; const auto clamp_min_vec = vdupq_n_f32(clamp_min); const auto clamp_max_vec = vdupq_n_f32(clamp_max); for (; array_ptr != array_end_ptr; array_ptr += bias_size) { int i = 0; for (; i <= bias_size - 16; i += 16) { auto b0 = vld1q_f32(bias_data + i); auto b1 = vld1q_f32(bias_data + i + 4); auto b2 = vld1q_f32(bias_data + i + 8); auto b3 = vld1q_f32(bias_data + i + 12); auto a0 = vld1q_f32(array_ptr + i); auto a1 = vld1q_f32(array_ptr + i + 4); auto a2 = vld1q_f32(array_ptr + i + 8); auto a3 = vld1q_f32(array_ptr + i + 12); auto x0 = vaddq_f32(a0, b0); auto x1 = vaddq_f32(a1, b1); auto x2 = vaddq_f32(a2, b2); auto x3 = vaddq_f32(a3, b3); x0 = vmaxq_f32(clamp_min_vec, x0); x1 = vmaxq_f32(clamp_min_vec, x1); x2 = vmaxq_f32(clamp_min_vec, x2); x3 = vmaxq_f32(clamp_min_vec, x3); x0 = vminq_f32(clamp_max_vec, x0); x1 = vminq_f32(clamp_max_vec, x1); x2 = vminq_f32(clamp_max_vec, x2); x3 = vminq_f32(clamp_max_vec, x3); vst1q_f32(array_ptr + i, x0); vst1q_f32(array_ptr + i + 4, x1); vst1q_f32(array_ptr + i + 8, x2); vst1q_f32(array_ptr + i + 12, x3); } for (; i <= bias_size - 4; i += 4) { auto b = vld1q_f32(bias_data + i); auto a = vld1q_f32(array_ptr + i); auto x = vaddq_f32(a, b); x = vmaxq_f32(clamp_min_vec, x); x = vminq_f32(clamp_max_vec, x); vst1q_f32(array_ptr + i, x); } for (; i < bias_size; i++) { array_ptr[i] = ActivationFunctionWithMinMax(array_ptr[i] + bias_data[i], clamp_min, clamp_max); } } #else // not NEON for (int array_offset = 0; array_offset < array_size; array_offset += bias_size) { for (int i = 0; i < bias_size; i++) { array_data[array_offset + i] = ActivationFunctionWithMinMax( array_data[array_offset + i] + bias_data[i], clamp_min, clamp_max); } } #endif }
163406073569204971648641083480315438791
common.h
206010119069068373550820723284960883967
CWE-369
CVE-2022-23557
Tensorflow is an Open Source Machine Learning Framework. An attacker can craft a TFLite model that would trigger a division by zero in `BiasAndClamp` implementation. There is no check that the `bias_size` is non zero. The fix will be included in TensorFlow 2.8.0. We will also cherrypick this commit on TensorFlow 2.7.1, TensorFlow 2.6.3, and TensorFlow 2.5.3, as these are also affected and still in supported range.
https://nvd.nist.gov/vuln/detail/CVE-2022-23557
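The BiasAndClamp records above add a single guard, `if (bias_size == 0) return;`, so a crafted model with an empty bias can no longer drive the `array_size % bias_size` computation into a division by zero. The snippet below is a scalar (non-NEON) C++ sketch of the same guarded loop, using standard-library containers rather than TFLite's raw pointers.

```cpp
#include <vector>
#include <algorithm>
#include <cstdio>

// Scalar sketch of BiasAndClamp with the zero-size guard: bail out when the
// bias is empty, since both the modulo check and the per-group loop would
// otherwise divide (or step) by zero.
void bias_and_clamp(float lo, float hi, const std::vector<float>& bias,
                    std::vector<float>& array) {
    const std::size_t bias_size = bias.size();
    if (bias_size == 0) return;                    // the added safety check
    if (array.size() % bias_size != 0) return;     // shapes must tile evenly

    for (std::size_t off = 0; off < array.size(); off += bias_size)
        for (std::size_t i = 0; i < bias_size; ++i)
            array[off + i] = std::clamp(array[off + i] + bias[i], lo, hi);
}

int main() {
    std::vector<float> bias;               // empty bias: previously fatal
    std::vector<float> data = {1.f, 2.f};
    bias_and_clamp(0.f, 6.f, bias, data);  // now a harmless no-op
    std::printf("%f %f\n", data[0], data[1]);
}
```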
195,074
gpac
a69b567b8c95c72f9560c873c5ab348be058f340
https://github.com/gpac/gpac
https://github.com/gpac/gpac/commit/a69b567b8c95c72f9560c873c5ab348be058f340
fixed #1895
1
GF_AV1Config *gf_odf_av1_cfg_read_bs_size(GF_BitStream *bs, u32 size) { #ifndef GPAC_DISABLE_AV_PARSERS AV1State state; u8 reserved; GF_AV1Config *cfg; if (!size) size = (u32) gf_bs_available(bs); if (!size) return NULL; cfg = gf_odf_av1_cfg_new(); gf_av1_init_state(&state); state.config = cfg; cfg->marker = gf_bs_read_int(bs, 1); cfg->version = gf_bs_read_int(bs, 7); cfg->seq_profile = gf_bs_read_int(bs, 3); cfg->seq_level_idx_0 = gf_bs_read_int(bs, 5); cfg->seq_tier_0 = gf_bs_read_int(bs, 1); cfg->high_bitdepth = gf_bs_read_int(bs, 1); cfg->twelve_bit = gf_bs_read_int(bs, 1); cfg->monochrome = gf_bs_read_int(bs, 1); cfg->chroma_subsampling_x = gf_bs_read_int(bs, 1); cfg->chroma_subsampling_y = gf_bs_read_int(bs, 1); cfg->chroma_sample_position = gf_bs_read_int(bs, 2); reserved = gf_bs_read_int(bs, 3); if (reserved != 0 || cfg->marker != 1 || cfg->version != 1) { GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[AV1] wrong avcC reserved %d / marker %d / version %d expecting 0 1 1\n", reserved, cfg->marker, cfg->version)); gf_odf_av1_cfg_del(cfg); return NULL; } cfg->initial_presentation_delay_present = gf_bs_read_int(bs, 1); if (cfg->initial_presentation_delay_present) { cfg->initial_presentation_delay_minus_one = gf_bs_read_int(bs, 4); } else { /*reserved = */gf_bs_read_int(bs, 4); cfg->initial_presentation_delay_minus_one = 0; } size -= 4; while (size) { u64 pos, obu_size; ObuType obu_type; GF_AV1_OBUArrayEntry *a; pos = gf_bs_get_position(bs); obu_size = 0; if (gf_av1_parse_obu(bs, &obu_type, &obu_size, NULL, &state) != GF_OK) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[AV1] could not parse AV1 OBU at position "LLU". Leaving parsing.\n", pos)); break; } assert(obu_size == gf_bs_get_position(bs) - pos); GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[AV1] parsed AV1 OBU type=%u size="LLU" at position "LLU".\n", obu_type, obu_size, pos)); if (!av1_is_obu_header(obu_type)) { GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[AV1] AV1 unexpected OBU type=%u size="LLU" found at position "LLU". Forwarding.\n", pos)); } GF_SAFEALLOC(a, GF_AV1_OBUArrayEntry); if (!a) break; a->obu = gf_malloc((size_t)obu_size); if (!a->obu) { gf_free(a); break; } gf_bs_seek(bs, pos); gf_bs_read_data(bs, (char *) a->obu, (u32)obu_size); a->obu_length = obu_size; a->obu_type = obu_type; gf_list_add(cfg->obu_array, a); if (size<obu_size) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[AV1] AV1 config misses %d bytes to fit the entire OBU\n", obu_size - size)); break; } size -= (u32) obu_size; } gf_av1_reset_state(& state, GF_TRUE); return cfg; #else return NULL; #endif }
270972574846681061752900592460657064315
descriptors.c
100253523943266503998746709370742625478
CWE-415
CVE-2021-40571
The binary MP4Box in Gpac 1.0.1 has a double-free vulnerability in the ilst_box_read function in box_code_apple.c, which allows attackers to cause a denial of service, even code execution and escalation of privileges.
https://nvd.nist.gov/vuln/detail/CVE-2021-40571
221,160
gpac
a69b567b8c95c72f9560c873c5ab348be058f340
https://github.com/gpac/gpac
https://github.com/gpac/gpac/commit/a69b567b8c95c72f9560c873c5ab348be058f340
fixed #1895
0
GF_AV1Config *gf_odf_av1_cfg_read_bs_size(GF_BitStream *bs, u32 size) { #ifndef GPAC_DISABLE_AV_PARSERS AV1State state; u8 reserved; GF_AV1Config *cfg; if (!size) size = (u32) gf_bs_available(bs); if (!size) return NULL; cfg = gf_odf_av1_cfg_new(); gf_av1_init_state(&state); state.config = cfg; cfg->marker = gf_bs_read_int(bs, 1); cfg->version = gf_bs_read_int(bs, 7); cfg->seq_profile = gf_bs_read_int(bs, 3); cfg->seq_level_idx_0 = gf_bs_read_int(bs, 5); cfg->seq_tier_0 = gf_bs_read_int(bs, 1); cfg->high_bitdepth = gf_bs_read_int(bs, 1); cfg->twelve_bit = gf_bs_read_int(bs, 1); cfg->monochrome = gf_bs_read_int(bs, 1); cfg->chroma_subsampling_x = gf_bs_read_int(bs, 1); cfg->chroma_subsampling_y = gf_bs_read_int(bs, 1); cfg->chroma_sample_position = gf_bs_read_int(bs, 2); reserved = gf_bs_read_int(bs, 3); if (reserved != 0 || cfg->marker != 1 || cfg->version != 1) { GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[AV1] wrong avcC reserved %d / marker %d / version %d expecting 0 1 1\n", reserved, cfg->marker, cfg->version)); gf_odf_av1_cfg_del(cfg); return NULL; } cfg->initial_presentation_delay_present = gf_bs_read_int(bs, 1); if (cfg->initial_presentation_delay_present) { cfg->initial_presentation_delay_minus_one = gf_bs_read_int(bs, 4); } else { /*reserved = */gf_bs_read_int(bs, 4); cfg->initial_presentation_delay_minus_one = 0; } size -= 4; while (size) { u64 pos, obu_size; ObuType obu_type; GF_AV1_OBUArrayEntry *a; pos = gf_bs_get_position(bs); obu_size = 0; if (gf_av1_parse_obu(bs, &obu_type, &obu_size, NULL, &state) != GF_OK) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[AV1] could not parse AV1 OBU at position "LLU". Leaving parsing.\n", pos)); break; } assert(obu_size == gf_bs_get_position(bs) - pos); GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[AV1] parsed AV1 OBU type=%u size="LLU" at position "LLU".\n", obu_type, obu_size, pos)); if (!av1_is_obu_header(obu_type)) { GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[AV1] AV1 unexpected OBU type=%u size="LLU" found at position "LLU". Forwarding.\n", pos)); } GF_SAFEALLOC(a, GF_AV1_OBUArrayEntry); if (!a) break; a->obu = gf_malloc((size_t)obu_size); if (!a->obu) { gf_free(a); break; } gf_bs_seek(bs, pos); gf_bs_read_data(bs, (char *) a->obu, (u32)obu_size); a->obu_length = obu_size; a->obu_type = obu_type; gf_list_add(cfg->obu_array, a); if (size<obu_size) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[AV1] AV1 config misses %d bytes to fit the entire OBU\n", obu_size - size)); break; } size -= (u32) obu_size; } gf_av1_reset_state(& state, GF_TRUE); gf_bs_align(bs); return cfg; #else return NULL; #endif }
161782515383812350901831460771265303089
descriptors.c
86476492964393375980272696403064975409
CWE-415
CVE-2021-40571
The binary MP4Box in Gpac 1.0.1 has a double-free vulnerability in the ilst_box_read function in box_code_apple.c, which allows attackers to cause a denial of service, even code execution and escalation of privileges.
https://nvd.nist.gov/vuln/detail/CVE-2021-40571
195,082
linux
c7dfa4009965a9b2d7b329ee970eb8da0d32f0bc
https://github.com/torvalds/linux
https://github.com/torvalds/linux/commit/c7dfa4009965a9b2d7b329ee970eb8da0d32f0bc
KVM: nSVM: always intercept VMLOAD/VMSAVE when nested (CVE-2021-3656) If L1 disables VMLOAD/VMSAVE intercepts, and doesn't enable Virtual VMLOAD/VMSAVE (currently not supported for the nested hypervisor), then VMLOAD/VMSAVE must operate on the L1 physical memory, which is only possible by making L0 intercept these instructions. Failure to do so allowed the nested guest to run VMLOAD/VMSAVE unintercepted, and thus read/write portions of the host physical memory. Fixes: 89c8a4984fc9 ("KVM: SVM: Enable Virtual VMLOAD VMSAVE feature") Suggested-by: Paolo Bonzini <pbonzini@redhat.com> Signed-off-by: Maxim Levitsky <mlevitsk@redhat.com> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
1
void recalc_intercepts(struct vcpu_svm *svm) { struct vmcb_control_area *c, *h, *g; unsigned int i; vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS); if (!is_guest_mode(&svm->vcpu)) return; c = &svm->vmcb->control; h = &svm->vmcb01.ptr->control; g = &svm->nested.ctl; for (i = 0; i < MAX_INTERCEPT; i++) c->intercepts[i] = h->intercepts[i]; if (g->int_ctl & V_INTR_MASKING_MASK) { /* We only want the cr8 intercept bits of L1 */ vmcb_clr_intercept(c, INTERCEPT_CR8_READ); vmcb_clr_intercept(c, INTERCEPT_CR8_WRITE); /* * Once running L2 with HF_VINTR_MASK, EFLAGS.IF does not * affect any interrupt we may want to inject; therefore, * interrupt window vmexits are irrelevant to L0. */ vmcb_clr_intercept(c, INTERCEPT_VINTR); } /* We don't want to see VMMCALLs from a nested guest */ vmcb_clr_intercept(c, INTERCEPT_VMMCALL); for (i = 0; i < MAX_INTERCEPT; i++) c->intercepts[i] |= g->intercepts[i]; /* If SMI is not intercepted, ignore guest SMI intercept as well */ if (!intercept_smi) vmcb_clr_intercept(c, INTERCEPT_SMI); }
308018010909685377463219146239861290533
None
CWE-862
CVE-2021-3656
A flaw was found in the KVM's AMD code for supporting SVM nested virtualization. The flaw occurs when processing the VMCB (virtual machine control block) provided by the L1 guest to spawn/handle a nested guest (L2). Due to improper validation of the "virt_ext" field, this issue could allow a malicious L1 to disable both VMLOAD/VMSAVE intercepts and VLS (Virtual VMLOAD/VMSAVE) for the L2 guest. As a result, the L2 guest would be allowed to read/write physical pages of the host, resulting in a crash of the entire system, leak of sensitive data or potential guest-to-host escape.
https://nvd.nist.gov/vuln/detail/CVE-2021-3656
221,413
linux
c7dfa4009965a9b2d7b329ee970eb8da0d32f0bc
https://github.com/torvalds/linux
https://github.com/torvalds/linux/commit/c7dfa4009965a9b2d7b329ee970eb8da0d32f0bc
KVM: nSVM: always intercept VMLOAD/VMSAVE when nested (CVE-2021-3656) If L1 disables VMLOAD/VMSAVE intercepts, and doesn't enable Virtual VMLOAD/VMSAVE (currently not supported for the nested hypervisor), then VMLOAD/VMSAVE must operate on the L1 physical memory, which is only possible by making L0 intercept these instructions. Failure to do so allowed the nested guest to run VMLOAD/VMSAVE unintercepted, and thus read/write portions of the host physical memory. Fixes: 89c8a4984fc9 ("KVM: SVM: Enable Virtual VMLOAD VMSAVE feature") Suggested-by: Paolo Bonzini <pbonzini@redhat.com> Signed-off-by: Maxim Levitsky <mlevitsk@redhat.com> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
0
void recalc_intercepts(struct vcpu_svm *svm) { struct vmcb_control_area *c, *h, *g; unsigned int i; vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS); if (!is_guest_mode(&svm->vcpu)) return; c = &svm->vmcb->control; h = &svm->vmcb01.ptr->control; g = &svm->nested.ctl; for (i = 0; i < MAX_INTERCEPT; i++) c->intercepts[i] = h->intercepts[i]; if (g->int_ctl & V_INTR_MASKING_MASK) { /* We only want the cr8 intercept bits of L1 */ vmcb_clr_intercept(c, INTERCEPT_CR8_READ); vmcb_clr_intercept(c, INTERCEPT_CR8_WRITE); /* * Once running L2 with HF_VINTR_MASK, EFLAGS.IF does not * affect any interrupt we may want to inject; therefore, * interrupt window vmexits are irrelevant to L0. */ vmcb_clr_intercept(c, INTERCEPT_VINTR); } /* We don't want to see VMMCALLs from a nested guest */ vmcb_clr_intercept(c, INTERCEPT_VMMCALL); for (i = 0; i < MAX_INTERCEPT; i++) c->intercepts[i] |= g->intercepts[i]; /* If SMI is not intercepted, ignore guest SMI intercept as well */ if (!intercept_smi) vmcb_clr_intercept(c, INTERCEPT_SMI); vmcb_set_intercept(c, INTERCEPT_VMLOAD); vmcb_set_intercept(c, INTERCEPT_VMSAVE); }
61346983903340748153155985789359366401
None
CWE-862
CVE-2021-3656
A flaw was found in the KVM's AMD code for supporting SVM nested virtualization. The flaw occurs when processing the VMCB (virtual machine control block) provided by the L1 guest to spawn/handle a nested guest (L2). Due to improper validation of the "virt_ext" field, this issue could allow a malicious L1 to disable both VMLOAD/VMSAVE intercepts and VLS (Virtual VMLOAD/VMSAVE) for the L2 guest. As a result, the L2 guest would be allowed to read/write physical pages of the host, resulting in a crash of the entire system, leak of sensitive data or potential guest-to-host escape.
https://nvd.nist.gov/vuln/detail/CVE-2021-3656
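The KVM records above describe the nested-SVM fix: after merging the host-side and nested-guest intercept bitmaps, the VMLOAD/VMSAVE intercept bits are forced on so L2 can never run those instructions unintercepted. The following is a toy C++ model of that bitmask recalculation; the bit values and function are made up for illustration and are not the kernel's definitions.

```cpp
#include <cstdint>
#include <cstdio>

// Toy intercept bits; the real VMCB layout differs.
constexpr uint32_t INTERCEPT_VMLOAD  = 1u << 0;
constexpr uint32_t INTERCEPT_VMSAVE  = 1u << 1;
constexpr uint32_t INTERCEPT_VMMCALL = 1u << 2;

// Merge the host-side and nested-guest masks, then force VMLOAD/VMSAVE on --
// the essence of the one-line addition in the fixed recalc_intercepts().
uint32_t recalc_intercepts(uint32_t host_mask, uint32_t guest_mask) {
    uint32_t c = host_mask | guest_mask;
    c |= INTERCEPT_VMLOAD | INTERCEPT_VMSAVE; // always intercept when nested
    return c;
}

int main() {
    // Even if L1 cleared VMLOAD/VMSAVE in its mask, the result keeps them set.
    uint32_t c = recalc_intercepts(INTERCEPT_VMMCALL, 0);
    std::printf("vmload intercepted: %d\n", (c & INTERCEPT_VMLOAD) != 0);
    std::printf("vmsave intercepted: %d\n", (c & INTERCEPT_VMSAVE) != 0);
}
```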
195,083
tensorflow
5b491cd5e41ad63735161cec9c2a568172c8b6a3
https://github.com/tensorflow/tensorflow
https://github.com/tensorflow/tensorflow/commit/5b491cd5e41ad63735161cec9c2a568172c8b6a3
Validate `proto.dtype()` before calling `set_dtype()`. This prevents a `DCHECK`-fail when the proto contains an invalid dtype for a tensor shape with 0 elements or for an incomplete tensor shape. PiperOrigin-RevId: 408369083 Change-Id: Ia21a3e3d62a90d642a4561f08f3b543e5ad00c46
1
bool Tensor::FromProto(Allocator* a, const TensorProto& proto) { CHECK_NOTNULL(a); TensorBuffer* p = nullptr; if (!TensorShape::IsValid(proto.tensor_shape())) return false; if (proto.dtype() == DT_INVALID) return false; TensorShape shape(proto.tensor_shape()); const int64_t N = shape.num_elements(); if (N > 0 && proto.dtype()) { bool dtype_error = false; if (!proto.tensor_content().empty()) { const auto& content = proto.tensor_content(); CASES_WITH_DEFAULT(proto.dtype(), p = Helper<T>::Decode(a, content, N), dtype_error = true, dtype_error = true); } else { CASES_WITH_DEFAULT(proto.dtype(), p = FromProtoField<T>(a, proto, N), dtype_error = true, dtype_error = true); } if (dtype_error || p == nullptr) return false; } shape_ = shape; set_dtype(proto.dtype()); UnrefIfNonNull(buf_); buf_ = p; // TODO(misard) add tracking of which kernels and steps are calling // FromProto. if (MemoryLoggingEnabled() && buf_ != nullptr && buf_->data() != nullptr) { LogMemory::RecordTensorAllocation("Unknown (from Proto)", LogMemory::UNKNOWN_STEP_ID, *this); } return true; }
112719252128622113589892906952570683457
tensor.cc
289613009517546867193769314060658742037
CWE-617
CVE-2022-23571
Tensorflow is an Open Source Machine Learning Framework. When decoding a tensor from protobuf, a TensorFlow process can encounter cases where a `CHECK` assertion is invalidated based on user controlled arguments, if the tensors have an invalid `dtype` and 0 elements or an invalid shape. This allows attackers to cause denial of services in TensorFlow processes. The fix will be included in TensorFlow 2.8.0. We will also cherrypick this commit on TensorFlow 2.7.1, TensorFlow 2.6.3, and TensorFlow 2.5.3, as these are also affected and still in supported range.
https://nvd.nist.gov/vuln/detail/CVE-2022-23571
221,428
tensorflow
5b491cd5e41ad63735161cec9c2a568172c8b6a3
https://github.com/tensorflow/tensorflow
https://github.com/tensorflow/tensorflow/commit/5b491cd5e41ad63735161cec9c2a568172c8b6a3
Validate `proto.dtype()` before calling `set_dtype()`. This prevents a `DCHECK`-fail when the proto contains an invalid dtype for a tensor shape with 0 elements or for an incomplete tensor shape. PiperOrigin-RevId: 408369083 Change-Id: Ia21a3e3d62a90d642a4561f08f3b543e5ad00c46
0
bool Tensor::FromProto(Allocator* a, const TensorProto& proto) { CHECK_NOTNULL(a); TensorBuffer* p = nullptr; if (!TensorShape::IsValid(proto.tensor_shape())) return false; if (proto.dtype() == DT_INVALID) return false; TensorShape shape(proto.tensor_shape()); const int64_t N = shape.num_elements(); if (N > 0 && proto.dtype()) { bool dtype_error = false; if (!proto.tensor_content().empty()) { const auto& content = proto.tensor_content(); CASES_WITH_DEFAULT(proto.dtype(), p = Helper<T>::Decode(a, content, N), dtype_error = true, dtype_error = true); } else { CASES_WITH_DEFAULT(proto.dtype(), p = FromProtoField<T>(a, proto, N), dtype_error = true, dtype_error = true); } if (dtype_error || p == nullptr) return false; } else { // Handle the case of empty tensors (N = 0) or tensors with incomplete shape // (N = -1). All other values of `shape.num_elements()` should be invalid by // construction. // Here, we just need to validate that the `proto.dtype()` value is valid. bool dtype_error = false; CASES_WITH_DEFAULT(proto.dtype(), break, dtype_error = true, dtype_error = true); if (dtype_error) return false; } shape_ = shape; set_dtype(proto.dtype()); UnrefIfNonNull(buf_); buf_ = p; // TODO(misard) add tracking of which kernels and steps are calling // FromProto. if (MemoryLoggingEnabled() && buf_ != nullptr && buf_->data() != nullptr) { LogMemory::RecordTensorAllocation("Unknown (from Proto)", LogMemory::UNKNOWN_STEP_ID, *this); } return true; }
12020279702191708342972381802829194549
tensor.cc
303884711858139014412460575672580480868
CWE-617
CVE-2022-23571
Tensorflow is an Open Source Machine Learning Framework. When decoding a tensor from protobuf, a TensorFlow process can encounter cases where a `CHECK` assertion is invalidated based on user controlled arguments, if the tensors have an invalid `dtype` and 0 elements or an invalid shape. This allows attackers to cause denial of services in TensorFlow processes. The fix will be included in TensorFlow 2.8.0. We will also cherrypick this commit on TensorFlow 2.7.1, TensorFlow 2.6.3, and TensorFlow 2.5.3, as these are also affected and still in supported range.
https://nvd.nist.gov/vuln/detail/CVE-2022-23571
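The Tensor::FromProto records above add a branch that validates the proto's dtype even when the tensor has zero elements or an incomplete shape, instead of passing an arbitrary value straight to set_dtype(). Below is a simplified C++ sketch of that idea; the enum and helper are stand-ins, not TensorFlow's DataType machinery.

```cpp
#include <cstdio>

// Toy dtype enum standing in for TensorFlow's DataType values.
enum DType { DT_INVALID = 0, DT_FLOAT = 1, DT_INT32 = 2, DT_MAX = 3 };

bool dtype_is_valid(int dtype) {
    return dtype > DT_INVALID && dtype < DT_MAX;
}

// Validate the dtype up front, so the empty-tensor path cannot smuggle an
// out-of-range value past the decode step -- the pattern the fix adds.
bool from_proto(int proto_dtype, long long num_elements) {
    if (!dtype_is_valid(proto_dtype)) return false; // reject early
    if (num_elements > 0) {
        // ... decode the payload for a known-good dtype ...
    }
    // Empty or incomplete shapes take this path; dtype was still checked.
    return true;
}

int main() {
    std::printf("%d\n", from_proto(DT_FLOAT, 0));  // 1: valid dtype, 0 elements
    std::printf("%d\n", from_proto(42, 0));        // 0: bogus dtype rejected
}
```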
195,091
tensorflow
35f0fabb4c178253a964d7aabdbb15c6a398b69a
https://github.com/tensorflow/tensorflow
https://github.com/tensorflow/tensorflow/commit/35f0fabb4c178253a964d7aabdbb15c6a398b69a
Avoid Segfault for scalar shapes. Calling tensor::FromElementsOp with an empty vector of elements and no type causes a segfault. We need to let the FromElementsOp know which scalar type it should have. Also add back the DynamicBroadcastInDimOp canonicalization patterns, which previously prevented this bug from happening. Add a regression test that demonstrates the bug. PiperOrigin-RevId: 417561444 Change-Id: I6d1d6cfb71aabbad6102422625a00bbe253ac95a
1
llvm::Optional<Value> simplifyBroadcast(ShapeComponentAnalysis& analysis, ValueRange shapes, Location loc, OpBuilder* builder) { // First find the input shape with the largest rank. SmallVector<ArrayRef<ShapeComponentAnalysis::SymbolicExpr>> shapes_found; size_t maxRank = 0; for (const auto &shape : llvm::enumerate(shapes)) { auto found_shape = analysis.GetValueInfo(shape.value()); if (!found_shape) return {}; shapes_found.push_back(*found_shape); maxRank = std::max(maxRank, found_shape->size()); } SmallVector<const ShapeComponentAnalysis::SymbolicExpr*> joined_dimensions( maxRank); SmallVector<std::pair<Value, int64_t>> shape_and_rank_for_dim(maxRank); for (const auto &shape : llvm::enumerate(shapes_found)) { for (const auto &dim : llvm::enumerate(llvm::reverse(shape.value()))) { // 1 dimensions don't contribute to the final result. if (dim.value().isConstant(1)) continue; // If it's not a 1 dimension it will be present in the result. Remember // where it came from. auto index = maxRank - dim.index() - 1; if (!joined_dimensions[index]) { joined_dimensions[index] = &dim.value(); shape_and_rank_for_dim[index] = std::make_pair(shapes[shape.index()], shape.value().size()); continue; } // Bail if the dimensions are neither equal nor 1. if (*joined_dimensions[index] != dim.value()) return {}; } } // If the output is the same as one of the inputs just return that. if (llvm::is_splat(shape_and_rank_for_dim) && shape_and_rank_for_dim[0].first) { return shape_and_rank_for_dim[0].first; } // Otherwise rematerialize the shape from the pieces we have. SmallVector<Value> elements; for (int i = 0; i != maxRank; ++i) { // 1 dimensions are filtered above, recreate the constant. if (!shape_and_rank_for_dim[i].first) { auto one = builder->getIntegerAttr( shapes[0].getType().cast<RankedTensorType>().getElementType(), 1); elements.push_back(builder->create<ConstantOp>(loc, one)); continue; } // Extract from one of the shapes, accounting for the reverse indexing // performed by broadcast. Value index = builder->create<ConstantIndexOp>( loc, i - maxRank + shape_and_rank_for_dim[i].second); elements.push_back(builder->create<tensor::ExtractOp>( loc, shape_and_rank_for_dim[i].first, index)); } return Value(builder->create<tensor::FromElementsOp>(loc, elements)); }
84683486121098934971147990908524528886
tf_cpurt_symbolic_shape_optimization.cc
183860206963562900623001205261417288221
CWE-754
CVE-2022-23593
Tensorflow is an Open Source Machine Learning Framework. The `simplifyBroadcast` function in the MLIR-TFRT infrastructure in TensorFlow is vulnerable to a segfault (hence, denial of service), if called with scalar shapes. If all shapes are scalar, then `maxRank` is 0, so we build an empty `SmallVector`. The fix will be included in TensorFlow 2.8.0. This is the only affected version.
https://nvd.nist.gov/vuln/detail/CVE-2022-23593
221,631
tensorflow
35f0fabb4c178253a964d7aabdbb15c6a398b69a
https://github.com/tensorflow/tensorflow
https://github.com/tensorflow/tensorflow/commit/35f0fabb4c178253a964d7aabdbb15c6a398b69a
Avoid Segfault for scalar shapes. Calling tensor::FromElementsOp with an empty vector of elements and no type causes a segfault. We need to let the FromElementsOp know which scalar type it should have. Also add back the DynamicBroadcastInDimOp canonicalization patterns, which previously prevented this bug from happening. Add a regression test that demonstrates the bug. PiperOrigin-RevId: 417561444 Change-Id: I6d1d6cfb71aabbad6102422625a00bbe253ac95a
0
llvm::Optional<Value> simplifyBroadcast(ShapeComponentAnalysis& analysis, ValueRange shapes, Location loc, OpBuilder* builder) { // First find the input shape with the largest rank. SmallVector<ArrayRef<ShapeComponentAnalysis::SymbolicExpr>> shapes_found; size_t maxRank = 0; for (const auto &shape : llvm::enumerate(shapes)) { auto found_shape = analysis.GetValueInfo(shape.value()); if (!found_shape) return {}; shapes_found.push_back(*found_shape); maxRank = std::max(maxRank, found_shape->size()); } if (maxRank == 0) { return Value(builder->create<tensor::FromElementsOp>( loc, shapes[0].getType(), SmallVector<Value>())); } SmallVector<const ShapeComponentAnalysis::SymbolicExpr*> joined_dimensions( maxRank); SmallVector<std::pair<Value, int64_t>> shape_and_rank_for_dim(maxRank); for (const auto &shape : llvm::enumerate(shapes_found)) { for (const auto &dim : llvm::enumerate(llvm::reverse(shape.value()))) { // 1 dimensions don't contribute to the final result. if (dim.value().isConstant(1)) continue; // If it's not a 1 dimension it will be present in the result. Remember // where it came from. auto index = maxRank - dim.index() - 1; if (!joined_dimensions[index]) { joined_dimensions[index] = &dim.value(); shape_and_rank_for_dim[index] = std::make_pair(shapes[shape.index()], shape.value().size()); continue; } // Bail if the dimensions are neither equal nor 1. if (*joined_dimensions[index] != dim.value()) return {}; } } // If the output is the same as one of the inputs just return that. if (llvm::is_splat(shape_and_rank_for_dim) && shape_and_rank_for_dim[0].first) { return shape_and_rank_for_dim[0].first; } // Otherwise rematerialize the shape from the pieces we have. SmallVector<Value> elements; for (int i = 0; i != maxRank; ++i) { // 1 dimensions are filtered above, recreate the constant. if (!shape_and_rank_for_dim[i].first) { auto one = builder->getIntegerAttr( shapes[0].getType().cast<RankedTensorType>().getElementType(), 1); elements.push_back(builder->create<ConstantOp>(loc, one)); continue; } // Extract from one of the shapes, accounting for the reverse indexing // performed by broadcast. Value index = builder->create<ConstantIndexOp>( loc, i - maxRank + shape_and_rank_for_dim[i].second); elements.push_back(builder->create<tensor::ExtractOp>( loc, shape_and_rank_for_dim[i].first, index)); } return Value(builder->create<tensor::FromElementsOp>(loc, elements)); }
131837408517580503230068988683463768929
tf_cpurt_symbolic_shape_optimization.cc
61127670286277963749447708912499557476
CWE-754
CVE-2022-23593
Tensorflow is an Open Source Machine Learning Framework. The `simplifyBroadcast` function in the MLIR-TFRT infrastructure in TensorFlow is vulnerable to a segfault (hence, denial of service), if called with scalar shapes. If all shapes are scalar, then `maxRank` is 0, so we build an empty `SmallVector`. The fix will be included in TensorFlow 2.8.0. This is the only affected version.
https://nvd.nist.gov/vuln/detail/CVE-2022-23593
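The simplifyBroadcast records above add an early return for the all-scalar case: when every input shape has rank 0, the function answers with an explicitly typed empty result instead of falling through and building zero-sized work vectors. The snippet below sketches the same broadcast join with toy types; it is not the MLIR/TFRT implementation.

```cpp
#include <vector>
#include <optional>
#include <algorithm>
#include <cstdio>

using Shape = std::vector<long long>; // one extent per dimension

// Join shapes under broadcasting rules, with the scalar-input guard up front.
std::optional<Shape> simplify_broadcast(const std::vector<Shape>& shapes) {
    std::size_t max_rank = 0;
    for (const Shape& s : shapes) max_rank = std::max(max_rank, s.size());

    if (max_rank == 0)
        return Shape{}; // all inputs scalar: result is the empty (rank-0) shape

    Shape joined(max_rank, 1);
    for (const Shape& s : shapes)
        for (std::size_t i = 0; i < s.size(); ++i) {
            std::size_t idx = max_rank - s.size() + i;
            if (s[i] == 1) continue;
            if (joined[idx] == 1) joined[idx] = s[i];
            else if (joined[idx] != s[i]) return std::nullopt; // incompatible
        }
    return joined;
}

int main() {
    auto r = simplify_broadcast({{}, {}});        // scalar inputs, no crash
    std::printf("rank=%zu\n", r ? r->size() : 0); // rank=0
}
```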
195,230
pjproject
f74c1fc22b760d2a24369aa72c74c4a9ab985859
https://github.com/pjsip/pjproject
https://github.com/pjsip/pjproject/commit/f74c1fc22b760d2a24369aa72c74c4a9ab985859
Merge pull request from GHSA-r374-qrwv-86hh
1
void pjmedia_rtcp_xr_rx_rtcp_xr( pjmedia_rtcp_xr_session *sess, const void *pkt, pj_size_t size) { const pjmedia_rtcp_xr_pkt *rtcp_xr = (pjmedia_rtcp_xr_pkt*) pkt; const pjmedia_rtcp_xr_rb_rr_time *rb_rr_time = NULL; const pjmedia_rtcp_xr_rb_dlrr *rb_dlrr = NULL; const pjmedia_rtcp_xr_rb_stats *rb_stats = NULL; const pjmedia_rtcp_xr_rb_voip_mtc *rb_voip_mtc = NULL; const pjmedia_rtcp_xr_rb_header *rb_hdr = (pjmedia_rtcp_xr_rb_header*) rtcp_xr->buf; unsigned pkt_len, rb_len; if (rtcp_xr->common.pt != RTCP_XR) return; pkt_len = pj_ntohs((pj_uint16_t)rtcp_xr->common.length); if ((pkt_len + 1) > (size / 4)) return; /* Parse report rpt_types */ while ((pj_int32_t*)rb_hdr < (pj_int32_t*)pkt + pkt_len) { rb_len = pj_ntohs((pj_uint16_t)rb_hdr->length); /* Just skip any block with length == 0 (no report content) */ if (rb_len) { switch (rb_hdr->bt) { case BT_RR_TIME: rb_rr_time = (pjmedia_rtcp_xr_rb_rr_time*) rb_hdr; break; case BT_DLRR: rb_dlrr = (pjmedia_rtcp_xr_rb_dlrr*) rb_hdr; break; case BT_STATS: rb_stats = (pjmedia_rtcp_xr_rb_stats*) rb_hdr; break; case BT_VOIP_METRICS: rb_voip_mtc = (pjmedia_rtcp_xr_rb_voip_mtc*) rb_hdr; break; default: break; } } rb_hdr = (pjmedia_rtcp_xr_rb_header*) ((pj_int32_t*)rb_hdr + rb_len + 1); } /* Receiving RR Time */ if (rb_rr_time) { /* Save LRR from NTP timestamp of the RR time block report */ sess->rx_lrr = ((pj_ntohl(rb_rr_time->ntp_sec) & 0x0000FFFF) << 16) | ((pj_ntohl(rb_rr_time->ntp_frac) >> 16) & 0xFFFF); /* Calculate RR arrival time for DLRR */ pj_get_timestamp(&sess->rx_lrr_time); TRACE_((sess->name, "Rx RTCP SR: ntp_ts=%p", sess->rx_lrr, (pj_uint32_t)(sess->rx_lrr_time.u64*65536/ sess->rtcp_session->ts_freq.u64))); } /* Receiving DLRR */ if (rb_dlrr) { pj_uint32_t lrr, now, dlrr; pj_uint64_t eedelay; pjmedia_rtcp_ntp_rec ntp; /* LRR is the middle 32bit of NTP. It has 1/65536 second * resolution */ lrr = pj_ntohl(rb_dlrr->item.lrr); /* DLRR is delay since LRR, also in 1/65536 resolution */ dlrr = pj_ntohl(rb_dlrr->item.dlrr); /* Get current time, and convert to 1/65536 resolution */ pjmedia_rtcp_get_ntp_time(sess->rtcp_session, &ntp); now = ((ntp.hi & 0xFFFF) << 16) + (ntp.lo >> 16); /* End-to-end delay is (now-lrr-dlrr) */ eedelay = now - lrr - dlrr; /* Convert end to end delay to usec (keeping the calculation in * 64bit space):: * sess->ee_delay = (eedelay * 1000) / 65536; */ if (eedelay < 4294) { eedelay = (eedelay * 1000000) >> 16; } else { eedelay = (eedelay * 1000) >> 16; eedelay *= 1000; } TRACE_((sess->name, "Rx RTCP XR DLRR: lrr=%p, dlrr=%p (%d:%03dms), " "now=%p, rtt=%p", lrr, dlrr, dlrr/65536, (dlrr%65536)*1000/65536, now, (pj_uint32_t)eedelay)); /* Only save calculation if "now" is greater than lrr, or * otherwise rtt will be invalid */ if (now-dlrr >= lrr) { unsigned rtt = (pj_uint32_t)eedelay; /* Check that eedelay value really makes sense. * We allow up to 30 seconds RTT! */ if (eedelay <= 30 * 1000 * 1000UL) { /* "Normalize" rtt value that is exceptionally high. * For such values, "normalize" the rtt to be three times * the average value. 
*/ if (rtt>((unsigned)sess->stat.rtt.mean*3) && sess->stat.rtt.n!=0) { unsigned orig_rtt = rtt; rtt = (unsigned)sess->stat.rtt.mean*3; PJ_LOG(5,(sess->name, "RTT value %d usec is normalized to %d usec", orig_rtt, rtt)); } TRACE_((sess->name, "RTCP RTT is set to %d usec", rtt)); pj_math_stat_update(&sess->stat.rtt, rtt); } } else { PJ_LOG(5, (sess->name, "Internal RTCP NTP clock skew detected: " "lrr=%p, now=%p, dlrr=%p (%d:%03dms), " "diff=%d", lrr, now, dlrr, dlrr/65536, (dlrr%65536)*1000/65536, dlrr-(now-lrr))); } } /* Receiving Statistics Summary */ if (rb_stats) { pj_uint8_t flags = rb_stats->header.specific; pj_bzero(&sess->stat.tx.stat_sum, sizeof(sess->stat.tx.stat_sum)); /* Range of packets sequence reported in this blocks */ sess->stat.tx.stat_sum.begin_seq = pj_ntohs(rb_stats->begin_seq); sess->stat.tx.stat_sum.end_seq = pj_ntohs(rb_stats->end_seq); /* Get flags of valid fields */ sess->stat.tx.stat_sum.l = (flags & (1 << 7)) != 0; sess->stat.tx.stat_sum.d = (flags & (1 << 6)) != 0; sess->stat.tx.stat_sum.j = (flags & (1 << 5)) != 0; sess->stat.tx.stat_sum.t = (flags & (3 << 3)) != 0; /* Fetch the reports info */ if (sess->stat.tx.stat_sum.l) { sess->stat.tx.stat_sum.lost = pj_ntohl(rb_stats->lost); } if (sess->stat.tx.stat_sum.d) { sess->stat.tx.stat_sum.dup = pj_ntohl(rb_stats->dup); } if (sess->stat.tx.stat_sum.j) { sess->stat.tx.stat_sum.jitter.min = pj_ntohl(rb_stats->jitter_min); sess->stat.tx.stat_sum.jitter.max = pj_ntohl(rb_stats->jitter_max); sess->stat.tx.stat_sum.jitter.mean= pj_ntohl(rb_stats->jitter_mean); pj_math_stat_set_stddev(&sess->stat.tx.stat_sum.jitter, pj_ntohl(rb_stats->jitter_dev)); } if (sess->stat.tx.stat_sum.t) { sess->stat.tx.stat_sum.toh.min = rb_stats->toh_min; sess->stat.tx.stat_sum.toh.max = rb_stats->toh_max; sess->stat.tx.stat_sum.toh.mean= rb_stats->toh_mean; pj_math_stat_set_stddev(&sess->stat.tx.stat_sum.toh, pj_ntohl(rb_stats->toh_dev)); } pj_gettimeofday(&sess->stat.tx.stat_sum.update); } /* Receiving VoIP Metrics */ if (rb_voip_mtc) { sess->stat.tx.voip_mtc.loss_rate = rb_voip_mtc->loss_rate; sess->stat.tx.voip_mtc.discard_rate = rb_voip_mtc->discard_rate; sess->stat.tx.voip_mtc.burst_den = rb_voip_mtc->burst_den; sess->stat.tx.voip_mtc.gap_den = rb_voip_mtc->gap_den; sess->stat.tx.voip_mtc.burst_dur = pj_ntohs(rb_voip_mtc->burst_dur); sess->stat.tx.voip_mtc.gap_dur = pj_ntohs(rb_voip_mtc->gap_dur); sess->stat.tx.voip_mtc.rnd_trip_delay = pj_ntohs(rb_voip_mtc->rnd_trip_delay); sess->stat.tx.voip_mtc.end_sys_delay = pj_ntohs(rb_voip_mtc->end_sys_delay); /* signal & noise level encoded in two's complement form */ sess->stat.tx.voip_mtc.signal_lvl = (pj_int8_t) ((rb_voip_mtc->signal_lvl > 127)? ((int)rb_voip_mtc->signal_lvl - 256) : rb_voip_mtc->signal_lvl); sess->stat.tx.voip_mtc.noise_lvl = (pj_int8_t) ((rb_voip_mtc->noise_lvl > 127)? ((int)rb_voip_mtc->noise_lvl - 256) : rb_voip_mtc->noise_lvl); sess->stat.tx.voip_mtc.rerl = rb_voip_mtc->rerl; sess->stat.tx.voip_mtc.gmin = rb_voip_mtc->gmin; sess->stat.tx.voip_mtc.r_factor = rb_voip_mtc->r_factor; sess->stat.tx.voip_mtc.ext_r_factor = rb_voip_mtc->ext_r_factor; sess->stat.tx.voip_mtc.mos_lq = rb_voip_mtc->mos_lq; sess->stat.tx.voip_mtc.mos_cq = rb_voip_mtc->mos_cq; sess->stat.tx.voip_mtc.rx_config = rb_voip_mtc->rx_config; sess->stat.tx.voip_mtc.jb_nom = pj_ntohs(rb_voip_mtc->jb_nom); sess->stat.tx.voip_mtc.jb_max = pj_ntohs(rb_voip_mtc->jb_max); sess->stat.tx.voip_mtc.jb_abs_max = pj_ntohs(rb_voip_mtc->jb_abs_max); pj_gettimeofday(&sess->stat.tx.voip_mtc.update); } }
128531615202269817130665554219664776865
rtcp_xr.c
114410540091951766279707779044798368853
CWE-125
CVE-2021-43845
PJSIP is a free and open source multimedia communication library. In version 2.11.1 and prior, if an incoming RTCP XR message contains a block, the data field is not checked against the received packet size, potentially resulting in an out-of-bound read access. This affects all users that use PJMEDIA and RTCP XR. A malicious actor can send an RTCP XR message with an invalid packet size.
https://nvd.nist.gov/vuln/detail/CVE-2021-43845
222,737
pjproject
f74c1fc22b760d2a24369aa72c74c4a9ab985859
https://github.com/pjsip/pjproject
https://github.com/pjsip/pjproject/commit/f74c1fc22b760d2a24369aa72c74c4a9ab985859
Merge pull request from GHSA-r374-qrwv-86hh
0
void pjmedia_rtcp_xr_rx_rtcp_xr( pjmedia_rtcp_xr_session *sess, const void *pkt, pj_size_t size) { const pjmedia_rtcp_xr_pkt *rtcp_xr = (pjmedia_rtcp_xr_pkt*) pkt; const pjmedia_rtcp_xr_rb_rr_time *rb_rr_time = NULL; const pjmedia_rtcp_xr_rb_dlrr *rb_dlrr = NULL; const pjmedia_rtcp_xr_rb_stats *rb_stats = NULL; const pjmedia_rtcp_xr_rb_voip_mtc *rb_voip_mtc = NULL; const pjmedia_rtcp_xr_rb_header *rb_hdr = (pjmedia_rtcp_xr_rb_header*) rtcp_xr->buf; unsigned pkt_len, rb_len; if (rtcp_xr->common.pt != RTCP_XR) return; pkt_len = pj_ntohs((pj_uint16_t)rtcp_xr->common.length); if ((pkt_len + 1) > (size / 4)) return; /* Parse report rpt_types */ while ((pj_int32_t*)rb_hdr < (pj_int32_t*)pkt + pkt_len) { rb_len = pj_ntohs((pj_uint16_t)rb_hdr->length); /* Just skip any block with length == 0 (no report content) */ if (rb_len) { switch (rb_hdr->bt) { case BT_RR_TIME: if ((char*)rb_hdr + sizeof(*rb_rr_time) <= (char*)pkt + size) { rb_rr_time = (pjmedia_rtcp_xr_rb_rr_time*)rb_hdr; } break; case BT_DLRR: if ((char*)rb_hdr + sizeof(*rb_dlrr) <= (char*)pkt + size) { rb_dlrr = (pjmedia_rtcp_xr_rb_dlrr*)rb_hdr; } break; case BT_STATS: if ((char*)rb_hdr + sizeof(*rb_stats) <= (char*)pkt + size) { rb_stats = (pjmedia_rtcp_xr_rb_stats*)rb_hdr; } break; case BT_VOIP_METRICS: if ((char*)rb_hdr + sizeof(*rb_voip_mtc) <= (char*)pkt + size) { rb_voip_mtc = (pjmedia_rtcp_xr_rb_voip_mtc*)rb_hdr; } break; default: break; } } rb_hdr = (pjmedia_rtcp_xr_rb_header*) ((pj_int32_t*)rb_hdr + rb_len + 1); } /* Receiving RR Time */ if (rb_rr_time) { /* Save LRR from NTP timestamp of the RR time block report */ sess->rx_lrr = ((pj_ntohl(rb_rr_time->ntp_sec) & 0x0000FFFF) << 16) | ((pj_ntohl(rb_rr_time->ntp_frac) >> 16) & 0xFFFF); /* Calculate RR arrival time for DLRR */ pj_get_timestamp(&sess->rx_lrr_time); TRACE_((sess->name, "Rx RTCP SR: ntp_ts=%p", sess->rx_lrr, (pj_uint32_t)(sess->rx_lrr_time.u64*65536/ sess->rtcp_session->ts_freq.u64))); } /* Receiving DLRR */ if (rb_dlrr) { pj_uint32_t lrr, now, dlrr; pj_uint64_t eedelay; pjmedia_rtcp_ntp_rec ntp; /* LRR is the middle 32bit of NTP. It has 1/65536 second * resolution */ lrr = pj_ntohl(rb_dlrr->item.lrr); /* DLRR is delay since LRR, also in 1/65536 resolution */ dlrr = pj_ntohl(rb_dlrr->item.dlrr); /* Get current time, and convert to 1/65536 resolution */ pjmedia_rtcp_get_ntp_time(sess->rtcp_session, &ntp); now = ((ntp.hi & 0xFFFF) << 16) + (ntp.lo >> 16); /* End-to-end delay is (now-lrr-dlrr) */ eedelay = now - lrr - dlrr; /* Convert end to end delay to usec (keeping the calculation in * 64bit space):: * sess->ee_delay = (eedelay * 1000) / 65536; */ if (eedelay < 4294) { eedelay = (eedelay * 1000000) >> 16; } else { eedelay = (eedelay * 1000) >> 16; eedelay *= 1000; } TRACE_((sess->name, "Rx RTCP XR DLRR: lrr=%p, dlrr=%p (%d:%03dms), " "now=%p, rtt=%p", lrr, dlrr, dlrr/65536, (dlrr%65536)*1000/65536, now, (pj_uint32_t)eedelay)); /* Only save calculation if "now" is greater than lrr, or * otherwise rtt will be invalid */ if (now-dlrr >= lrr) { unsigned rtt = (pj_uint32_t)eedelay; /* Check that eedelay value really makes sense. * We allow up to 30 seconds RTT! */ if (eedelay <= 30 * 1000 * 1000UL) { /* "Normalize" rtt value that is exceptionally high. * For such values, "normalize" the rtt to be three times * the average value. 
*/ if (rtt>((unsigned)sess->stat.rtt.mean*3) && sess->stat.rtt.n!=0) { unsigned orig_rtt = rtt; rtt = (unsigned)sess->stat.rtt.mean*3; PJ_LOG(5,(sess->name, "RTT value %d usec is normalized to %d usec", orig_rtt, rtt)); } TRACE_((sess->name, "RTCP RTT is set to %d usec", rtt)); pj_math_stat_update(&sess->stat.rtt, rtt); } } else { PJ_LOG(5, (sess->name, "Internal RTCP NTP clock skew detected: " "lrr=%p, now=%p, dlrr=%p (%d:%03dms), " "diff=%d", lrr, now, dlrr, dlrr/65536, (dlrr%65536)*1000/65536, dlrr-(now-lrr))); } } /* Receiving Statistics Summary */ if (rb_stats) { pj_uint8_t flags = rb_stats->header.specific; pj_bzero(&sess->stat.tx.stat_sum, sizeof(sess->stat.tx.stat_sum)); /* Range of packets sequence reported in this blocks */ sess->stat.tx.stat_sum.begin_seq = pj_ntohs(rb_stats->begin_seq); sess->stat.tx.stat_sum.end_seq = pj_ntohs(rb_stats->end_seq); /* Get flags of valid fields */ sess->stat.tx.stat_sum.l = (flags & (1 << 7)) != 0; sess->stat.tx.stat_sum.d = (flags & (1 << 6)) != 0; sess->stat.tx.stat_sum.j = (flags & (1 << 5)) != 0; sess->stat.tx.stat_sum.t = (flags & (3 << 3)) != 0; /* Fetch the reports info */ if (sess->stat.tx.stat_sum.l) { sess->stat.tx.stat_sum.lost = pj_ntohl(rb_stats->lost); } if (sess->stat.tx.stat_sum.d) { sess->stat.tx.stat_sum.dup = pj_ntohl(rb_stats->dup); } if (sess->stat.tx.stat_sum.j) { sess->stat.tx.stat_sum.jitter.min = pj_ntohl(rb_stats->jitter_min); sess->stat.tx.stat_sum.jitter.max = pj_ntohl(rb_stats->jitter_max); sess->stat.tx.stat_sum.jitter.mean= pj_ntohl(rb_stats->jitter_mean); pj_math_stat_set_stddev(&sess->stat.tx.stat_sum.jitter, pj_ntohl(rb_stats->jitter_dev)); } if (sess->stat.tx.stat_sum.t) { sess->stat.tx.stat_sum.toh.min = rb_stats->toh_min; sess->stat.tx.stat_sum.toh.max = rb_stats->toh_max; sess->stat.tx.stat_sum.toh.mean= rb_stats->toh_mean; pj_math_stat_set_stddev(&sess->stat.tx.stat_sum.toh, pj_ntohl(rb_stats->toh_dev)); } pj_gettimeofday(&sess->stat.tx.stat_sum.update); } /* Receiving VoIP Metrics */ if (rb_voip_mtc) { sess->stat.tx.voip_mtc.loss_rate = rb_voip_mtc->loss_rate; sess->stat.tx.voip_mtc.discard_rate = rb_voip_mtc->discard_rate; sess->stat.tx.voip_mtc.burst_den = rb_voip_mtc->burst_den; sess->stat.tx.voip_mtc.gap_den = rb_voip_mtc->gap_den; sess->stat.tx.voip_mtc.burst_dur = pj_ntohs(rb_voip_mtc->burst_dur); sess->stat.tx.voip_mtc.gap_dur = pj_ntohs(rb_voip_mtc->gap_dur); sess->stat.tx.voip_mtc.rnd_trip_delay = pj_ntohs(rb_voip_mtc->rnd_trip_delay); sess->stat.tx.voip_mtc.end_sys_delay = pj_ntohs(rb_voip_mtc->end_sys_delay); /* signal & noise level encoded in two's complement form */ sess->stat.tx.voip_mtc.signal_lvl = (pj_int8_t) ((rb_voip_mtc->signal_lvl > 127)? ((int)rb_voip_mtc->signal_lvl - 256) : rb_voip_mtc->signal_lvl); sess->stat.tx.voip_mtc.noise_lvl = (pj_int8_t) ((rb_voip_mtc->noise_lvl > 127)? ((int)rb_voip_mtc->noise_lvl - 256) : rb_voip_mtc->noise_lvl); sess->stat.tx.voip_mtc.rerl = rb_voip_mtc->rerl; sess->stat.tx.voip_mtc.gmin = rb_voip_mtc->gmin; sess->stat.tx.voip_mtc.r_factor = rb_voip_mtc->r_factor; sess->stat.tx.voip_mtc.ext_r_factor = rb_voip_mtc->ext_r_factor; sess->stat.tx.voip_mtc.mos_lq = rb_voip_mtc->mos_lq; sess->stat.tx.voip_mtc.mos_cq = rb_voip_mtc->mos_cq; sess->stat.tx.voip_mtc.rx_config = rb_voip_mtc->rx_config; sess->stat.tx.voip_mtc.jb_nom = pj_ntohs(rb_voip_mtc->jb_nom); sess->stat.tx.voip_mtc.jb_max = pj_ntohs(rb_voip_mtc->jb_max); sess->stat.tx.voip_mtc.jb_abs_max = pj_ntohs(rb_voip_mtc->jb_abs_max); pj_gettimeofday(&sess->stat.tx.voip_mtc.update); } }
134123814969944330757535232446891920936
rtcp_xr.c
238632209992792964107720594546671940595
CWE-125
CVE-2021-43845
PJSIP is a free and open source multimedia communication library. In version 2.11.1 and prior, if an incoming RTCP XR message contains a block, the data field is not checked against the received packet size, potentially resulting in an out-of-bound read access. This affects all users that use PJMEDIA and RTCP XR. A malicious actor can send an RTCP XR message with an invalid packet size.
https://nvd.nist.gov/vuln/detail/CVE-2021-43845
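The patched pjproject function above accepts a report block only after verifying that the whole structure fits inside the received packet. The sketch below isolates that bounds-check pattern; the rb_header layout, its field names, and the find_block() helper are illustrative assumptions, not the actual pjmedia API.

#include <arpa/inet.h>   /* ntohs */
#include <stddef.h>
#include <stdint.h>

/* Hypothetical report-block header; the real pjmedia_rtcp_xr_rb_header
 * differs. The length field counts 32-bit words minus one, which is why
 * the parser above advances by (rb_len + 1) words per block. */
typedef struct rb_header {
    uint8_t  bt;        /* block type */
    uint8_t  specific;
    uint16_t length;    /* block length in 32-bit words, minus one */
} rb_header;

/* Return the first block of the wanted type, but only if wanted_size bytes
 * of it fit inside the received packet -- the essence of the
 * CVE-2021-43845 fix. */
static const rb_header *find_block(const uint8_t *pkt, size_t size,
                                   uint8_t wanted_bt, size_t wanted_size)
{
    const uint8_t *end = pkt + size;
    const uint8_t *p = pkt;

    while (p + sizeof(rb_header) <= end) {
        const rb_header *hdr = (const rb_header *)p;

        if (hdr->bt == wanted_bt && p + wanted_size <= end)
            return hdr;                             /* block fits in the packet */
        p += ((size_t)ntohs(hdr->length) + 1) * 4;  /* advance to the next block */
    }
    return NULL;                                    /* not found or truncated */
}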
195,231
gpac
893fb99b606eebfae46cde151846a980e689039b
https://github.com/gpac/gpac
https://github.com/gpac/gpac/commit/893fb99b606eebfae46cde151846a980e689039b
fixed #1902
1
s32 gf_avc_parse_nalu(GF_BitStream *bs, AVCState *avc) { u8 idr_flag; s32 slice, ret; u32 nal_hdr; AVCSliceInfo n_state; gf_bs_enable_emulation_byte_removal(bs, GF_TRUE); nal_hdr = gf_bs_read_u8(bs); slice = 0; memcpy(&n_state, &avc->s_info, sizeof(AVCSliceInfo)); avc->last_nal_type_parsed = n_state.nal_unit_type = nal_hdr & 0x1F; n_state.nal_ref_idc = (nal_hdr >> 5) & 0x3; idr_flag = 0; switch (n_state.nal_unit_type) { case GF_AVC_NALU_ACCESS_UNIT: case GF_AVC_NALU_END_OF_SEQ: case GF_AVC_NALU_END_OF_STREAM: ret = 1; break; case GF_AVC_NALU_SVC_SLICE: SVC_ReadNal_header_extension(bs, &n_state.NalHeader); // slice buffer - read the info and compare. /*ret = */svc_parse_slice(bs, avc, &n_state); if (avc->s_info.nal_ref_idc) { n_state.poc_lsb_prev = avc->s_info.poc_lsb; n_state.poc_msb_prev = avc->s_info.poc_msb; } avc_compute_poc(&n_state); if (avc->s_info.poc != n_state.poc) { memcpy(&avc->s_info, &n_state, sizeof(AVCSliceInfo)); return 1; } memcpy(&avc->s_info, &n_state, sizeof(AVCSliceInfo)); return 0; case GF_AVC_NALU_SVC_PREFIX_NALU: SVC_ReadNal_header_extension(bs, &n_state.NalHeader); return 0; case GF_AVC_NALU_IDR_SLICE: case GF_AVC_NALU_NON_IDR_SLICE: case GF_AVC_NALU_DP_A_SLICE: case GF_AVC_NALU_DP_B_SLICE: case GF_AVC_NALU_DP_C_SLICE: slice = 1; /* slice buffer - read the info and compare.*/ ret = avc_parse_slice(bs, avc, idr_flag, &n_state); if (ret < 0) return ret; ret = 0; if ( ((avc->s_info.nal_unit_type > GF_AVC_NALU_IDR_SLICE) || (avc->s_info.nal_unit_type < GF_AVC_NALU_NON_IDR_SLICE)) && (avc->s_info.nal_unit_type != GF_AVC_NALU_SVC_SLICE) ) { break; } if (avc->s_info.frame_num != n_state.frame_num) { ret = 1; break; } if (avc->s_info.field_pic_flag != n_state.field_pic_flag) { ret = 1; break; } if ((avc->s_info.nal_ref_idc != n_state.nal_ref_idc) && (!avc->s_info.nal_ref_idc || !n_state.nal_ref_idc)) { ret = 1; break; } assert(avc->s_info.sps); if (avc->s_info.sps->poc_type == n_state.sps->poc_type) { if (!avc->s_info.sps->poc_type) { if (!n_state.bottom_field_flag && (avc->s_info.poc_lsb != n_state.poc_lsb)) { ret = 1; break; } if (avc->s_info.delta_poc_bottom != n_state.delta_poc_bottom) { ret = 1; break; } } else if (avc->s_info.sps->poc_type == 1) { if (avc->s_info.delta_poc[0] != n_state.delta_poc[0]) { ret = 1; break; } if (avc->s_info.delta_poc[1] != n_state.delta_poc[1]) { ret = 1; break; } } } if (n_state.nal_unit_type == GF_AVC_NALU_IDR_SLICE) { if (avc->s_info.nal_unit_type != GF_AVC_NALU_IDR_SLICE) { /*IdrPicFlag differs in value*/ ret = 1; break; } else if (avc->s_info.idr_pic_id != n_state.idr_pic_id) { /*both IDR and idr_pic_id differs*/ ret = 1; break; } } break; case GF_AVC_NALU_SEQ_PARAM: avc->last_ps_idx = gf_avc_read_sps_bs_internal(bs, avc, 0, NULL, nal_hdr); if (avc->last_ps_idx < 0) return -1; return 0; case GF_AVC_NALU_PIC_PARAM: avc->last_ps_idx = gf_avc_read_pps_bs_internal(bs, avc, nal_hdr); if (avc->last_ps_idx < 0) return -1; return 0; case GF_AVC_NALU_SVC_SUBSEQ_PARAM: avc->last_ps_idx = gf_avc_read_sps_bs_internal(bs, avc, 1, NULL, nal_hdr); if (avc->last_ps_idx < 0) return -1; return 0; case GF_AVC_NALU_SEQ_PARAM_EXT: avc->last_ps_idx = (s32) gf_bs_read_ue(bs); if (avc->last_ps_idx < 0) return -1; return 0; case GF_AVC_NALU_SEI: case GF_AVC_NALU_FILLER_DATA: return 0; default: if (avc->s_info.nal_unit_type <= GF_AVC_NALU_IDR_SLICE) ret = 1; //To detect change of AU when multiple sps and pps in stream else if ((nal_hdr & 0x1F) == GF_AVC_NALU_SEI && avc->s_info.nal_unit_type == GF_AVC_NALU_SVC_SLICE) ret = 1; else if ((nal_hdr & 0x1F) == 
GF_AVC_NALU_SEQ_PARAM && avc->s_info.nal_unit_type == GF_AVC_NALU_SVC_SLICE) ret = 1; else ret = 0; break; } /* save _prev values */ if (ret && avc->s_info.sps) { n_state.frame_num_offset_prev = avc->s_info.frame_num_offset; if ((avc->s_info.sps->poc_type != 2) || (avc->s_info.nal_ref_idc != 0)) n_state.frame_num_prev = avc->s_info.frame_num; if (avc->s_info.nal_ref_idc) { n_state.poc_lsb_prev = avc->s_info.poc_lsb; n_state.poc_msb_prev = avc->s_info.poc_msb; } } if (slice) avc_compute_poc(&n_state); memcpy(&avc->s_info, &n_state, sizeof(AVCSliceInfo)); return ret; }
99100226875075764129164909998725433232
av_parsers.c
168517587328341017594269375399465893964
CWE-476
CVE-2021-40565
A segmentation fault caused by a null pointer dereference vulnerability exists in Gpac through 1.0.1 via the gf_avc_parse_nalu function in av_parsers.c when using mp4box, resulting in a denial of service.
https://nvd.nist.gov/vuln/detail/CVE-2021-40565
222,739
gpac
893fb99b606eebfae46cde151846a980e689039b
https://github.com/gpac/gpac
https://github.com/gpac/gpac/commit/893fb99b606eebfae46cde151846a980e689039b
fixed #1902
0
s32 gf_avc_parse_nalu(GF_BitStream *bs, AVCState *avc) { u8 idr_flag; s32 slice, ret; u32 nal_hdr; AVCSliceInfo n_state; gf_bs_enable_emulation_byte_removal(bs, GF_TRUE); nal_hdr = gf_bs_read_u8(bs); slice = 0; memcpy(&n_state, &avc->s_info, sizeof(AVCSliceInfo)); avc->last_nal_type_parsed = n_state.nal_unit_type = nal_hdr & 0x1F; n_state.nal_ref_idc = (nal_hdr >> 5) & 0x3; idr_flag = 0; switch (n_state.nal_unit_type) { case GF_AVC_NALU_ACCESS_UNIT: case GF_AVC_NALU_END_OF_SEQ: case GF_AVC_NALU_END_OF_STREAM: ret = 1; break; case GF_AVC_NALU_SVC_SLICE: SVC_ReadNal_header_extension(bs, &n_state.NalHeader); // slice buffer - read the info and compare. /*ret = */svc_parse_slice(bs, avc, &n_state); if (avc->s_info.nal_ref_idc) { n_state.poc_lsb_prev = avc->s_info.poc_lsb; n_state.poc_msb_prev = avc->s_info.poc_msb; } avc_compute_poc(&n_state); if (avc->s_info.poc != n_state.poc) { memcpy(&avc->s_info, &n_state, sizeof(AVCSliceInfo)); return 1; } memcpy(&avc->s_info, &n_state, sizeof(AVCSliceInfo)); return 0; case GF_AVC_NALU_SVC_PREFIX_NALU: SVC_ReadNal_header_extension(bs, &n_state.NalHeader); return 0; case GF_AVC_NALU_IDR_SLICE: case GF_AVC_NALU_NON_IDR_SLICE: case GF_AVC_NALU_DP_A_SLICE: case GF_AVC_NALU_DP_B_SLICE: case GF_AVC_NALU_DP_C_SLICE: slice = 1; /* slice buffer - read the info and compare.*/ ret = avc_parse_slice(bs, avc, idr_flag, &n_state); if (ret < 0) return ret; ret = 0; if ( ((avc->s_info.nal_unit_type > GF_AVC_NALU_IDR_SLICE) || (avc->s_info.nal_unit_type < GF_AVC_NALU_NON_IDR_SLICE)) && (avc->s_info.nal_unit_type != GF_AVC_NALU_SVC_SLICE) ) { break; } if (avc->s_info.frame_num != n_state.frame_num) { ret = 1; break; } if (avc->s_info.field_pic_flag != n_state.field_pic_flag) { ret = 1; break; } if ((avc->s_info.nal_ref_idc != n_state.nal_ref_idc) && (!avc->s_info.nal_ref_idc || !n_state.nal_ref_idc)) { ret = 1; break; } if (!avc->s_info.sps) return -1; if (avc->s_info.sps->poc_type == n_state.sps->poc_type) { if (!avc->s_info.sps->poc_type) { if (!n_state.bottom_field_flag && (avc->s_info.poc_lsb != n_state.poc_lsb)) { ret = 1; break; } if (avc->s_info.delta_poc_bottom != n_state.delta_poc_bottom) { ret = 1; break; } } else if (avc->s_info.sps->poc_type == 1) { if (avc->s_info.delta_poc[0] != n_state.delta_poc[0]) { ret = 1; break; } if (avc->s_info.delta_poc[1] != n_state.delta_poc[1]) { ret = 1; break; } } } if (n_state.nal_unit_type == GF_AVC_NALU_IDR_SLICE) { if (avc->s_info.nal_unit_type != GF_AVC_NALU_IDR_SLICE) { /*IdrPicFlag differs in value*/ ret = 1; break; } else if (avc->s_info.idr_pic_id != n_state.idr_pic_id) { /*both IDR and idr_pic_id differs*/ ret = 1; break; } } break; case GF_AVC_NALU_SEQ_PARAM: avc->last_ps_idx = gf_avc_read_sps_bs_internal(bs, avc, 0, NULL, nal_hdr); if (avc->last_ps_idx < 0) return -1; return 0; case GF_AVC_NALU_PIC_PARAM: avc->last_ps_idx = gf_avc_read_pps_bs_internal(bs, avc, nal_hdr); if (avc->last_ps_idx < 0) return -1; return 0; case GF_AVC_NALU_SVC_SUBSEQ_PARAM: avc->last_ps_idx = gf_avc_read_sps_bs_internal(bs, avc, 1, NULL, nal_hdr); if (avc->last_ps_idx < 0) return -1; return 0; case GF_AVC_NALU_SEQ_PARAM_EXT: avc->last_ps_idx = (s32) gf_bs_read_ue(bs); if (avc->last_ps_idx < 0) return -1; return 0; case GF_AVC_NALU_SEI: case GF_AVC_NALU_FILLER_DATA: return 0; default: if (avc->s_info.nal_unit_type <= GF_AVC_NALU_IDR_SLICE) ret = 1; //To detect change of AU when multiple sps and pps in stream else if ((nal_hdr & 0x1F) == GF_AVC_NALU_SEI && avc->s_info.nal_unit_type == GF_AVC_NALU_SVC_SLICE) ret = 1; else if ((nal_hdr & 0x1F) 
== GF_AVC_NALU_SEQ_PARAM && avc->s_info.nal_unit_type == GF_AVC_NALU_SVC_SLICE) ret = 1; else ret = 0; break; } /* save _prev values */ if (ret && avc->s_info.sps) { n_state.frame_num_offset_prev = avc->s_info.frame_num_offset; if ((avc->s_info.sps->poc_type != 2) || (avc->s_info.nal_ref_idc != 0)) n_state.frame_num_prev = avc->s_info.frame_num; if (avc->s_info.nal_ref_idc) { n_state.poc_lsb_prev = avc->s_info.poc_lsb; n_state.poc_msb_prev = avc->s_info.poc_msb; } } if (slice) avc_compute_poc(&n_state); memcpy(&avc->s_info, &n_state, sizeof(AVCSliceInfo)); return ret; }
151148020238252563376772611575796474624
av_parsers.c
336095072032702615903888752582747164805
CWE-476
CVE-2021-40565
A segmentation fault caused by a null pointer dereference vulnerability exists in Gpac through 1.0.1 via the gf_avc_parse_nalu function in av_parsers.c when using mp4box, resulting in a denial of service.
https://nvd.nist.gov/vuln/detail/CVE-2021-40565
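The only functional difference between the two gpac records above is that the unconditional assert(avc->s_info.sps) becomes a runtime check that returns an error code, since assert() is compiled out in release builds with NDEBUG and a NULL sps would then reach the dereference. The sketch below is a hypothetical illustration of that pattern; the SPS and SliceInfo types are stand-ins rather than GPAC's real structures, and both sides are checked here purely for illustration.

#include <assert.h>
#include <stdio.h>

typedef struct SPS { int poc_type; } SPS;           /* stand-in type */
typedef struct SliceInfo { SPS *sps; } SliceInfo;   /* stand-in type */

/* Vulnerable pattern: assert() is a no-op under -DNDEBUG, so a NULL sps
 * reaches the dereference and crashes. */
static int compare_poc_unsafe(const SliceInfo *s, const SliceInfo *n)
{
    assert(s->sps);                 /* disappears in release builds */
    return s->sps->poc_type == n->sps->poc_type;
}

/* Fixed pattern, in the spirit of the commit: refuse to compare when the
 * sequence parameter set was never parsed. */
static int compare_poc_safe(const SliceInfo *s, const SliceInfo *n)
{
    if (s->sps == NULL || n->sps == NULL)
        return -1;                  /* slice without an SPS: reject */
    return s->sps->poc_type == n->sps->poc_type;
}

int main(void)
{
    SPS sps = { 0 };
    SliceInfo a = { &sps }, b = { &sps };
    SliceInfo cur = { NULL }, next = { NULL };
    printf("unsafe compare with SPS present: %d\n", compare_poc_unsafe(&a, &b));
    printf("safe compare on missing SPS:     %d\n", compare_poc_safe(&cur, &next));
    return 0;
}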
195,237
ImageMagick
f221ea0fa3171f0f4fdf74ac9d81b203b9534c23
https://github.com/ImageMagick/ImageMagick
https://github.com/ImageMagick/ImageMagick/commit/f221ea0fa3171f0f4fdf74ac9d81b203b9534c23
Fixes #4985: 4e+26 is outside the range of representable values of type 'unsigned long' at coders/pcl.c:299 (#4986) * fix Division by zero in XMenuWidget() of MagickCore/widget.c * Fix memory leak in AnimateImageCommand() of MagickWand/animate.c and DisplayImageCommand() of MagickWand/display.c * fix Division by zero in ReadEnhMetaFile() of coders/emf.c * Resolve conflicts * fix issue: outside the range of representable values of type 'unsigned char' at coders/psd.c:1025 * fix error: 4e+26 is outside the range of representable values of type 'unsigned long' at coders/pcl.c:299 Co-authored-by: zhailiangliang <zhailiangliang@loongson.cn>
1
static Image *ReadPCLImage(const ImageInfo *image_info,ExceptionInfo *exception) { #define CropBox "CropBox" #define DeviceCMYK "DeviceCMYK" #define MediaBox "MediaBox" #define RenderPCLText " Rendering PCL... " char command[MagickPathExtent], *density, filename[MagickPathExtent], geometry[MagickPathExtent], *options, input_filename[MagickPathExtent]; const DelegateInfo *delegate_info; Image *image, *next_image; ImageInfo *read_info; MagickBooleanType cmyk, status; PointInfo delta; RectangleInfo bounding_box, page; char *p; ssize_t c; SegmentInfo bounds; size_t height, width; ssize_t count; assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); /* Open image file. */ image=AcquireImage(image_info,exception); status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) { image=DestroyImageList(image); return((Image *) NULL); } status=AcquireUniqueSymbolicLink(image_info->filename,input_filename); if (status == MagickFalse) { ThrowFileException(exception,FileOpenError,"UnableToCreateTemporaryFile", image_info->filename); image=DestroyImageList(image); return((Image *) NULL); } /* Set the page density. */ delta.x=DefaultResolution; delta.y=DefaultResolution; if ((image->resolution.x == 0.0) || (image->resolution.y == 0.0)) { GeometryInfo geometry_info; MagickStatusType flags; flags=ParseGeometry(PSDensityGeometry,&geometry_info); if ((flags & RhoValue) != 0) image->resolution.x=geometry_info.rho; image->resolution.y=image->resolution.x; if ((flags & SigmaValue) != 0) image->resolution.y=geometry_info.sigma; } /* Determine page geometry from the PCL media box. */ cmyk=image->colorspace == CMYKColorspace ? MagickTrue : MagickFalse; count=0; (void) memset(&bounding_box,0,sizeof(bounding_box)); (void) memset(&bounds,0,sizeof(bounds)); (void) memset(&page,0,sizeof(page)); (void) memset(command,0,sizeof(command)); p=command; for (c=ReadBlobByte(image); c != EOF; c=ReadBlobByte(image)) { if (image_info->page != (char *) NULL) continue; /* Note PCL elements. */ *p++=(char) c; if ((c != (int) '/') && (c != '\n') && ((size_t) (p-command) < (MagickPathExtent-1))) continue; *p='\0'; p=command; /* Is this a CMYK document? */ if (LocaleNCompare(DeviceCMYK,command,strlen(DeviceCMYK)) == 0) cmyk=MagickTrue; if (LocaleNCompare(CropBox,command,strlen(CropBox)) == 0) { /* Note region defined by crop box. */ count=(ssize_t) sscanf(command,"CropBox [%lf %lf %lf %lf", &bounds.x1,&bounds.y1,&bounds.x2,&bounds.y2); if (count != 4) count=(ssize_t) sscanf(command,"CropBox[%lf %lf %lf %lf", &bounds.x1,&bounds.y1,&bounds.x2,&bounds.y2); } if (LocaleNCompare(MediaBox,command,strlen(MediaBox)) == 0) { /* Note region defined by media box. */ count=(ssize_t) sscanf(command,"MediaBox [%lf %lf %lf %lf", &bounds.x1,&bounds.y1,&bounds.x2,&bounds.y2); if (count != 4) count=(ssize_t) sscanf(command,"MediaBox[%lf %lf %lf %lf", &bounds.x1,&bounds.y1,&bounds.x2,&bounds.y2); } if (count != 4) continue; /* Set PCL render geometry. */ width=(size_t) floor(bounds.x2-bounds.x1+0.5); height=(size_t) floor(bounds.y2-bounds.y1+0.5); if (width > page.width) page.width=width; if (height > page.height) page.height=height; } (void) CloseBlob(image); /* Render PCL with the GhostPCL delegate. 
*/ if ((page.width == 0) || (page.height == 0)) (void) ParseAbsoluteGeometry(PSPageGeometry,&page); if (image_info->page != (char *) NULL) (void) ParseAbsoluteGeometry(image_info->page,&page); (void) FormatLocaleString(geometry,MagickPathExtent,"%.20gx%.20g",(double) page.width,(double) page.height); if (image_info->monochrome != MagickFalse) delegate_info=GetDelegateInfo("pcl:mono",(char *) NULL,exception); else if (cmyk != MagickFalse) delegate_info=GetDelegateInfo("pcl:cmyk",(char *) NULL,exception); else delegate_info=GetDelegateInfo("pcl:color",(char *) NULL,exception); if (delegate_info == (const DelegateInfo *) NULL) { image=DestroyImage(image); return((Image *) NULL); } if ((page.width == 0) || (page.height == 0)) (void) ParseAbsoluteGeometry(PSPageGeometry,&page); if (image_info->page != (char *) NULL) (void) ParseAbsoluteGeometry(image_info->page,&page); density=AcquireString(""); options=AcquireString(""); (void) FormatLocaleString(density,MagickPathExtent,"%gx%g", image->resolution.x,image->resolution.y); if (image_info->ping != MagickFalse) (void) FormatLocaleString(density,MagickPathExtent,"2.0x2.0"); page.width=(size_t) floor(page.width*image->resolution.x/delta.x+0.5); page.height=(size_t) floor(page.height*image->resolution.y/delta.y+0.5); (void) FormatLocaleString(options,MagickPathExtent,"-g%.20gx%.20g ",(double) page.width,(double) page.height); image=DestroyImage(image); read_info=CloneImageInfo(image_info); *read_info->magick='\0'; if (read_info->number_scenes != 0) { if (read_info->number_scenes != 1) (void) FormatLocaleString(options,MagickPathExtent,"-dLastPage=%.20g", (double) (read_info->scene+read_info->number_scenes)); else (void) FormatLocaleString(options,MagickPathExtent, "-dFirstPage=%.20g -dLastPage=%.20g",(double) read_info->scene+1, (double) (read_info->scene+read_info->number_scenes)); read_info->number_scenes=0; if (read_info->scenes != (char *) NULL) *read_info->scenes='\0'; } (void) CopyMagickString(filename,read_info->filename,MagickPathExtent); (void) AcquireUniqueFilename(read_info->filename); (void) FormatLocaleString(command,MagickPathExtent, GetDelegateCommands(delegate_info), read_info->antialias != MagickFalse ? 4 : 1, read_info->antialias != MagickFalse ? 4 : 1,density,options, read_info->filename,input_filename); options=DestroyString(options); density=DestroyString(density); status=ExternalDelegateCommand(MagickFalse,read_info->verbose,command, (char *) NULL,exception) != 0 ? MagickTrue : MagickFalse; image=ReadImage(read_info,exception); (void) RelinquishUniqueFileResource(read_info->filename); (void) RelinquishUniqueFileResource(input_filename); read_info=DestroyImageInfo(read_info); if (image == (Image *) NULL) ThrowReaderException(DelegateError,"PCLDelegateFailed"); if (LocaleCompare(image->magick,"BMP") == 0) { Image *cmyk_image; cmyk_image=ConsolidateCMYKImages(image,exception); if (cmyk_image != (Image *) NULL) { image=DestroyImageList(image); image=cmyk_image; } } do { (void) CopyMagickString(image->filename,filename,MagickPathExtent); image->page=page; if (image_info->ping != MagickFalse) { image->magick_columns*=image->resolution.x/2.0; image->magick_rows*=image->resolution.y/2.0; image->columns*=image->resolution.x/2.0; image->rows*=image->resolution.y/2.0; } next_image=SyncNextImageInList(image); if (next_image != (Image *) NULL) image=next_image; } while (next_image != (Image *) NULL); return(GetFirstImageInList(image)); }
164108098598115354275502589345492195560
pcl.c
226900089914426038554396055314138187051
CWE-190
CVE-2022-32546
A vulnerability was found in ImageMagick, producing a value outside the range of representable values of type 'unsigned long' at coders/pcl.c when crafted or untrusted input is processed. This leads to a negative impact on application availability or other problems related to undefined behavior.
https://nvd.nist.gov/vuln/detail/CVE-2022-32546
223,089
ImageMagick
f221ea0fa3171f0f4fdf74ac9d81b203b9534c23
https://github.com/ImageMagick/ImageMagick
https://github.com/ImageMagick/ImageMagick/commit/f221ea0fa3171f0f4fdf74ac9d81b203b9534c23
Fixes #4985: 4e+26 is outside the range of representable values of type 'unsigned long' at coders/pcl.c:299 (#4986) * fix Division by zero in XMenuWidget() of MagickCore/widget.c * Fix memory leak in AnimateImageCommand() of MagickWand/animate.c and DisplayImageCommand() of MagickWand/display.c * fix Division by zero in ReadEnhMetaFile() of coders/emf.c * Resolve conflicts * fix issue: outside the range of representable values of type 'unsigned char' at coders/psd.c:1025 * fix error: 4e+26 is outside the range of representable values of type 'unsigned long' at coders/pcl.c:299 Co-authored-by: zhailiangliang <zhailiangliang@loongson.cn>
0
static Image *ReadPCLImage(const ImageInfo *image_info,ExceptionInfo *exception) { #define CropBox "CropBox" #define DeviceCMYK "DeviceCMYK" #define MediaBox "MediaBox" #define RenderPCLText " Rendering PCL... " char command[MagickPathExtent], *density, filename[MagickPathExtent], geometry[MagickPathExtent], *options, input_filename[MagickPathExtent]; const DelegateInfo *delegate_info; Image *image, *next_image; ImageInfo *read_info; MagickBooleanType cmyk, status; PointInfo delta; RectangleInfo bounding_box, page; char *p; ssize_t c; SegmentInfo bounds; size_t height, width; ssize_t count; assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); /* Open image file. */ image=AcquireImage(image_info,exception); status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) { image=DestroyImageList(image); return((Image *) NULL); } status=AcquireUniqueSymbolicLink(image_info->filename,input_filename); if (status == MagickFalse) { ThrowFileException(exception,FileOpenError,"UnableToCreateTemporaryFile", image_info->filename); image=DestroyImageList(image); return((Image *) NULL); } /* Set the page density. */ delta.x=DefaultResolution; delta.y=DefaultResolution; if ((image->resolution.x == 0.0) || (image->resolution.y == 0.0)) { GeometryInfo geometry_info; MagickStatusType flags; flags=ParseGeometry(PSDensityGeometry,&geometry_info); if ((flags & RhoValue) != 0) image->resolution.x=geometry_info.rho; image->resolution.y=image->resolution.x; if ((flags & SigmaValue) != 0) image->resolution.y=geometry_info.sigma; } /* Determine page geometry from the PCL media box. */ cmyk=image->colorspace == CMYKColorspace ? MagickTrue : MagickFalse; count=0; (void) memset(&bounding_box,0,sizeof(bounding_box)); (void) memset(&bounds,0,sizeof(bounds)); (void) memset(&page,0,sizeof(page)); (void) memset(command,0,sizeof(command)); p=command; for (c=ReadBlobByte(image); c != EOF; c=ReadBlobByte(image)) { if (image_info->page != (char *) NULL) continue; /* Note PCL elements. */ *p++=(char) c; if ((c != (int) '/') && (c != '\n') && ((size_t) (p-command) < (MagickPathExtent-1))) continue; *p='\0'; p=command; /* Is this a CMYK document? */ if (LocaleNCompare(DeviceCMYK,command,strlen(DeviceCMYK)) == 0) cmyk=MagickTrue; if (LocaleNCompare(CropBox,command,strlen(CropBox)) == 0) { /* Note region defined by crop box. */ count=(ssize_t) sscanf(command,"CropBox [%lf %lf %lf %lf", &bounds.x1,&bounds.y1,&bounds.x2,&bounds.y2); if (count != 4) count=(ssize_t) sscanf(command,"CropBox[%lf %lf %lf %lf", &bounds.x1,&bounds.y1,&bounds.x2,&bounds.y2); } if (LocaleNCompare(MediaBox,command,strlen(MediaBox)) == 0) { /* Note region defined by media box. */ count=(ssize_t) sscanf(command,"MediaBox [%lf %lf %lf %lf", &bounds.x1,&bounds.y1,&bounds.x2,&bounds.y2); if (count != 4) count=(ssize_t) sscanf(command,"MediaBox[%lf %lf %lf %lf", &bounds.x1,&bounds.y1,&bounds.x2,&bounds.y2); } if (count != 4) continue; /* Set PCL render geometry. */ width=(size_t)CastDoubleToLong(floor(bounds.x2-bounds.x1+0.5)); height=(size_t)CastDoubleToLong(floor(bounds.y2-bounds.y1+0.5)); if (width > page.width) page.width=width; if (height > page.height) page.height=height; } (void) CloseBlob(image); /* Render PCL with the GhostPCL delegate. 
*/ if ((page.width == 0) || (page.height == 0)) (void) ParseAbsoluteGeometry(PSPageGeometry,&page); if (image_info->page != (char *) NULL) (void) ParseAbsoluteGeometry(image_info->page,&page); (void) FormatLocaleString(geometry,MagickPathExtent,"%.20gx%.20g",(double) page.width,(double) page.height); if (image_info->monochrome != MagickFalse) delegate_info=GetDelegateInfo("pcl:mono",(char *) NULL,exception); else if (cmyk != MagickFalse) delegate_info=GetDelegateInfo("pcl:cmyk",(char *) NULL,exception); else delegate_info=GetDelegateInfo("pcl:color",(char *) NULL,exception); if (delegate_info == (const DelegateInfo *) NULL) { image=DestroyImage(image); return((Image *) NULL); } if ((page.width == 0) || (page.height == 0)) (void) ParseAbsoluteGeometry(PSPageGeometry,&page); if (image_info->page != (char *) NULL) (void) ParseAbsoluteGeometry(image_info->page,&page); density=AcquireString(""); options=AcquireString(""); (void) FormatLocaleString(density,MagickPathExtent,"%gx%g", image->resolution.x,image->resolution.y); if (image_info->ping != MagickFalse) (void) FormatLocaleString(density,MagickPathExtent,"2.0x2.0"); page.width=(size_t) floor(page.width*image->resolution.x/delta.x+0.5); page.height=(size_t) floor(page.height*image->resolution.y/delta.y+0.5); (void) FormatLocaleString(options,MagickPathExtent,"-g%.20gx%.20g ",(double) page.width,(double) page.height); image=DestroyImage(image); read_info=CloneImageInfo(image_info); *read_info->magick='\0'; if (read_info->number_scenes != 0) { if (read_info->number_scenes != 1) (void) FormatLocaleString(options,MagickPathExtent,"-dLastPage=%.20g", (double) (read_info->scene+read_info->number_scenes)); else (void) FormatLocaleString(options,MagickPathExtent, "-dFirstPage=%.20g -dLastPage=%.20g",(double) read_info->scene+1, (double) (read_info->scene+read_info->number_scenes)); read_info->number_scenes=0; if (read_info->scenes != (char *) NULL) *read_info->scenes='\0'; } (void) CopyMagickString(filename,read_info->filename,MagickPathExtent); (void) AcquireUniqueFilename(read_info->filename); (void) FormatLocaleString(command,MagickPathExtent, GetDelegateCommands(delegate_info), read_info->antialias != MagickFalse ? 4 : 1, read_info->antialias != MagickFalse ? 4 : 1,density,options, read_info->filename,input_filename); options=DestroyString(options); density=DestroyString(density); status=ExternalDelegateCommand(MagickFalse,read_info->verbose,command, (char *) NULL,exception) != 0 ? MagickTrue : MagickFalse; image=ReadImage(read_info,exception); (void) RelinquishUniqueFileResource(read_info->filename); (void) RelinquishUniqueFileResource(input_filename); read_info=DestroyImageInfo(read_info); if (image == (Image *) NULL) ThrowReaderException(DelegateError,"PCLDelegateFailed"); if (LocaleCompare(image->magick,"BMP") == 0) { Image *cmyk_image; cmyk_image=ConsolidateCMYKImages(image,exception); if (cmyk_image != (Image *) NULL) { image=DestroyImageList(image); image=cmyk_image; } } do { (void) CopyMagickString(image->filename,filename,MagickPathExtent); image->page=page; if (image_info->ping != MagickFalse) { image->magick_columns*=image->resolution.x/2.0; image->magick_rows*=image->resolution.y/2.0; image->columns*=image->resolution.x/2.0; image->rows*=image->resolution.y/2.0; } next_image=SyncNextImageInList(image); if (next_image != (Image *) NULL) image=next_image; } while (next_image != (Image *) NULL); return(GetFirstImageInList(image)); }
19106273782202991773902274267597206156
pcl.c
107050694639473008713363285641232916868
CWE-190
CVE-2022-32546
A vulnerability was found in ImageMagick, producing a value outside the range of representable values of type 'unsigned long' at coders/pcl.c when crafted or untrusted input is processed. This leads to a negative impact on application availability or other problems related to undefined behavior.
https://nvd.nist.gov/vuln/detail/CVE-2022-32546
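The ImageMagick fix above routes the floating-point page extents through CastDoubleToLong() before the cast to size_t, so an extent such as 4e+26 is clamped instead of invoking undefined behavior. The helper below is only a rough stand-alone approximation of that idea, not ImageMagick's actual implementation.

#include <limits.h>
#include <math.h>
#include <stdio.h>

/* Clamp a double into the range of long before converting, so a value far
 * outside the representable range does not trigger undefined behavior.
 * This merely approximates ImageMagick's internal CastDoubleToLong(). */
static long clamp_double_to_long(double value)
{
    if (isnan(value))
        return 0;
    if (value >= (double) LONG_MAX)
        return LONG_MAX;
    if (value <= (double) LONG_MIN)
        return LONG_MIN;
    return (long) value;           /* safely representable: ordinary cast */
}

int main(void)
{
    double bogus_extent = 4e+26;   /* magnitude reported in issue #4985 */
    size_t width = (size_t) clamp_double_to_long(floor(bogus_extent + 0.5));
    printf("clamped width: %zu\n", width);   /* LONG_MAX instead of UB */
    return 0;
}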
195,264
pcre2
d4fa336fbcc388f89095b184ba6d99422cfc676c
https://github.com/PCRE2Project/pcre2
https://github.com/PCRE2Project/pcre2/commit/d4fa336fbcc388f89095b184ba6d99422cfc676c
Fix incorrect value reading in JIT.
1
static void compile_xclass_matchingpath(compiler_common *common, PCRE2_SPTR cc, jump_list **backtracks) { DEFINE_COMPILER; jump_list *found = NULL; jump_list **list = (cc[0] & XCL_NOT) == 0 ? &found : backtracks; sljit_uw c, charoffset, max = 256, min = READ_CHAR_MAX; struct sljit_jump *jump = NULL; PCRE2_SPTR ccbegin; int compares, invertcmp, numberofcmps; #if defined SUPPORT_UNICODE && (PCRE2_CODE_UNIT_WIDTH == 8 || PCRE2_CODE_UNIT_WIDTH == 16) BOOL utf = common->utf; #endif /* SUPPORT_UNICODE && PCRE2_CODE_UNIT_WIDTH == [8|16] */ #ifdef SUPPORT_UNICODE sljit_u32 unicode_status = 0; int typereg = TMP1; const sljit_u32 *other_cases; sljit_uw typeoffset; #endif /* SUPPORT_UNICODE */ /* Scanning the necessary info. */ cc++; ccbegin = cc; compares = 0; if (cc[-1] & XCL_MAP) { min = 0; cc += 32 / sizeof(PCRE2_UCHAR); } while (*cc != XCL_END) { compares++; if (*cc == XCL_SINGLE) { cc ++; GETCHARINCTEST(c, cc); if (c > max) max = c; if (c < min) min = c; #ifdef SUPPORT_UNICODE unicode_status |= XCLASS_SAVE_CHAR; #endif /* SUPPORT_UNICODE */ } else if (*cc == XCL_RANGE) { cc ++; GETCHARINCTEST(c, cc); if (c < min) min = c; GETCHARINCTEST(c, cc); if (c > max) max = c; #ifdef SUPPORT_UNICODE unicode_status |= XCLASS_SAVE_CHAR; #endif /* SUPPORT_UNICODE */ } #ifdef SUPPORT_UNICODE else { SLJIT_ASSERT(*cc == XCL_PROP || *cc == XCL_NOTPROP); cc++; if (*cc == PT_CLIST && *cc == XCL_PROP) { other_cases = PRIV(ucd_caseless_sets) + cc[1]; while (*other_cases != NOTACHAR) { if (*other_cases > max) max = *other_cases; if (*other_cases < min) min = *other_cases; other_cases++; } } else { max = READ_CHAR_MAX; min = 0; } switch(*cc) { case PT_ANY: /* Any either accepts everything or ignored. */ if (cc[-1] == XCL_PROP) { compile_char1_matchingpath(common, OP_ALLANY, cc, backtracks, FALSE); if (list == backtracks) add_jump(compiler, backtracks, JUMP(SLJIT_JUMP)); return; } break; case PT_LAMP: case PT_GC: case PT_PC: case PT_ALNUM: unicode_status |= XCLASS_HAS_TYPE; break; case PT_SCX: unicode_status |= XCLASS_HAS_SCRIPT_EXTENSION; if (cc[-1] == XCL_NOTPROP) { unicode_status |= XCLASS_SCRIPT_EXTENSION_NOTPROP; break; } compares++; /* Fall through */ case PT_SC: unicode_status |= XCLASS_HAS_SCRIPT; break; case PT_SPACE: case PT_PXSPACE: case PT_WORD: case PT_PXGRAPH: case PT_PXPRINT: case PT_PXPUNCT: unicode_status |= XCLASS_SAVE_CHAR | XCLASS_HAS_TYPE; break; case PT_CLIST: case PT_UCNC: unicode_status |= XCLASS_SAVE_CHAR; break; case PT_BOOL: unicode_status |= XCLASS_HAS_BOOL; break; case PT_BIDICL: unicode_status |= XCLASS_HAS_BIDICL; break; default: SLJIT_UNREACHABLE(); break; } cc += 2; } #endif /* SUPPORT_UNICODE */ } SLJIT_ASSERT(compares > 0); /* We are not necessary in utf mode even in 8 bit mode. */ cc = ccbegin; if ((cc[-1] & XCL_NOT) != 0) read_char(common, min, max, backtracks, READ_CHAR_UPDATE_STR_PTR); else { #ifdef SUPPORT_UNICODE read_char(common, min, max, (unicode_status & XCLASS_NEEDS_UCD) ? 
backtracks : NULL, 0); #else /* !SUPPORT_UNICODE */ read_char(common, min, max, NULL, 0); #endif /* SUPPORT_UNICODE */ } if ((cc[-1] & XCL_HASPROP) == 0) { if ((cc[-1] & XCL_MAP) != 0) { jump = CMP(SLJIT_GREATER, TMP1, 0, SLJIT_IMM, 255); if (!optimize_class(common, (const sljit_u8 *)cc, (((const sljit_u8 *)cc)[31] & 0x80) != 0, TRUE, &found)) { OP2(SLJIT_AND, TMP2, 0, TMP1, 0, SLJIT_IMM, 0x7); OP2(SLJIT_LSHR, TMP1, 0, TMP1, 0, SLJIT_IMM, 3); OP1(SLJIT_MOV_U8, TMP1, 0, SLJIT_MEM1(TMP1), (sljit_sw)cc); OP2(SLJIT_SHL, TMP2, 0, SLJIT_IMM, 1, TMP2, 0); OP2U(SLJIT_AND | SLJIT_SET_Z, TMP1, 0, TMP2, 0); add_jump(compiler, &found, JUMP(SLJIT_NOT_ZERO)); } add_jump(compiler, backtracks, JUMP(SLJIT_JUMP)); JUMPHERE(jump); cc += 32 / sizeof(PCRE2_UCHAR); } else { OP2(SLJIT_SUB, TMP2, 0, TMP1, 0, SLJIT_IMM, min); add_jump(compiler, (cc[-1] & XCL_NOT) == 0 ? backtracks : &found, CMP(SLJIT_GREATER, TMP2, 0, SLJIT_IMM, max - min)); } } else if ((cc[-1] & XCL_MAP) != 0) { OP1(SLJIT_MOV, RETURN_ADDR, 0, TMP1, 0); #ifdef SUPPORT_UNICODE unicode_status |= XCLASS_CHAR_SAVED; #endif /* SUPPORT_UNICODE */ if (!optimize_class(common, (const sljit_u8 *)cc, FALSE, TRUE, list)) { #if PCRE2_CODE_UNIT_WIDTH == 8 jump = NULL; if (common->utf) #endif /* PCRE2_CODE_UNIT_WIDTH == 8 */ jump = CMP(SLJIT_GREATER, TMP1, 0, SLJIT_IMM, 255); OP2(SLJIT_AND, TMP2, 0, TMP1, 0, SLJIT_IMM, 0x7); OP2(SLJIT_LSHR, TMP1, 0, TMP1, 0, SLJIT_IMM, 3); OP1(SLJIT_MOV_U8, TMP1, 0, SLJIT_MEM1(TMP1), (sljit_sw)cc); OP2(SLJIT_SHL, TMP2, 0, SLJIT_IMM, 1, TMP2, 0); OP2U(SLJIT_AND | SLJIT_SET_Z, TMP1, 0, TMP2, 0); add_jump(compiler, list, JUMP(SLJIT_NOT_ZERO)); #if PCRE2_CODE_UNIT_WIDTH == 8 if (common->utf) #endif /* PCRE2_CODE_UNIT_WIDTH == 8 */ JUMPHERE(jump); } OP1(SLJIT_MOV, TMP1, 0, RETURN_ADDR, 0); cc += 32 / sizeof(PCRE2_UCHAR); } #ifdef SUPPORT_UNICODE if (unicode_status & XCLASS_NEEDS_UCD) { if ((unicode_status & (XCLASS_SAVE_CHAR | XCLASS_CHAR_SAVED)) == XCLASS_SAVE_CHAR) OP1(SLJIT_MOV, RETURN_ADDR, 0, TMP1, 0); #if PCRE2_CODE_UNIT_WIDTH == 32 if (!common->utf) { jump = CMP(SLJIT_LESS, TMP1, 0, SLJIT_IMM, MAX_UTF_CODE_POINT + 1); OP1(SLJIT_MOV, TMP1, 0, SLJIT_IMM, UNASSIGNED_UTF_CHAR); JUMPHERE(jump); } #endif /* PCRE2_CODE_UNIT_WIDTH == 32 */ OP2(SLJIT_LSHR, TMP2, 0, TMP1, 0, SLJIT_IMM, UCD_BLOCK_SHIFT); OP2(SLJIT_SHL, TMP2, 0, TMP2, 0, SLJIT_IMM, 1); OP1(SLJIT_MOV_U16, TMP2, 0, SLJIT_MEM1(TMP2), (sljit_sw)PRIV(ucd_stage1)); OP2(SLJIT_AND, TMP1, 0, TMP1, 0, SLJIT_IMM, UCD_BLOCK_MASK); OP2(SLJIT_SHL, TMP2, 0, TMP2, 0, SLJIT_IMM, UCD_BLOCK_SHIFT); OP2(SLJIT_ADD, TMP1, 0, TMP1, 0, TMP2, 0); OP1(SLJIT_MOV, TMP2, 0, SLJIT_IMM, (sljit_sw)PRIV(ucd_stage2)); OP1(SLJIT_MOV_U16, TMP2, 0, SLJIT_MEM2(TMP2, TMP1), 1); OP2(SLJIT_SHL, TMP1, 0, TMP2, 0, SLJIT_IMM, 3); OP2(SLJIT_SHL, TMP2, 0, TMP2, 0, SLJIT_IMM, 2); OP2(SLJIT_ADD, TMP2, 0, TMP2, 0, TMP1, 0); ccbegin = cc; if (unicode_status & XCLASS_HAS_BIDICL) { OP1(SLJIT_MOV_U16, TMP1, 0, SLJIT_MEM1(TMP2), (sljit_sw)PRIV(ucd_records) + SLJIT_OFFSETOF(ucd_record, scriptx_bidiclass)); OP2(SLJIT_LSHR, TMP1, 0, TMP1, 0, SLJIT_IMM, UCD_BIDICLASS_SHIFT); while (*cc != XCL_END) { if (*cc == XCL_SINGLE) { cc ++; GETCHARINCTEST(c, cc); } else if (*cc == XCL_RANGE) { cc ++; GETCHARINCTEST(c, cc); GETCHARINCTEST(c, cc); } else { SLJIT_ASSERT(*cc == XCL_PROP || *cc == XCL_NOTPROP); cc++; if (*cc == PT_BIDICL) { compares--; invertcmp = (compares == 0 && list != backtracks); if (cc[-1] == XCL_NOTPROP) invertcmp ^= 0x1; jump = CMP(SLJIT_EQUAL ^ invertcmp, TMP1, 0, SLJIT_IMM, (int)cc[1]); add_jump(compiler, compares 
> 0 ? list : backtracks, jump); } cc += 2; } } cc = ccbegin; } if (unicode_status & XCLASS_HAS_BOOL) { OP1(SLJIT_MOV_U16, TMP1, 0, SLJIT_MEM1(TMP2), (sljit_sw)PRIV(ucd_records) + SLJIT_OFFSETOF(ucd_record, bprops)); OP2(SLJIT_AND, TMP1, 0, TMP1, 0, SLJIT_IMM, UCD_BPROPS_MASK); OP2(SLJIT_SHL, TMP1, 0, TMP1, 0, SLJIT_IMM, 2); while (*cc != XCL_END) { if (*cc == XCL_SINGLE) { cc ++; GETCHARINCTEST(c, cc); } else if (*cc == XCL_RANGE) { cc ++; GETCHARINCTEST(c, cc); GETCHARINCTEST(c, cc); } else { SLJIT_ASSERT(*cc == XCL_PROP || *cc == XCL_NOTPROP); cc++; if (*cc == PT_BOOL) { compares--; invertcmp = (compares == 0 && list != backtracks); if (cc[-1] == XCL_NOTPROP) invertcmp ^= 0x1; OP2U(SLJIT_AND32 | SLJIT_SET_Z, SLJIT_MEM1(TMP1), (sljit_sw)(PRIV(ucd_boolprop_sets) + (cc[1] >> 5)), SLJIT_IMM, (sljit_sw)1 << (cc[1] & 0x1f)); add_jump(compiler, compares > 0 ? list : backtracks, JUMP(SLJIT_NOT_ZERO ^ invertcmp)); } cc += 2; } } cc = ccbegin; } if (unicode_status & XCLASS_HAS_SCRIPT) { OP1(SLJIT_MOV_U8, TMP1, 0, SLJIT_MEM1(TMP2), (sljit_sw)PRIV(ucd_records) + SLJIT_OFFSETOF(ucd_record, script)); while (*cc != XCL_END) { if (*cc == XCL_SINGLE) { cc ++; GETCHARINCTEST(c, cc); } else if (*cc == XCL_RANGE) { cc ++; GETCHARINCTEST(c, cc); GETCHARINCTEST(c, cc); } else { SLJIT_ASSERT(*cc == XCL_PROP || *cc == XCL_NOTPROP); cc++; switch (*cc) { case PT_SCX: if (cc[-1] == XCL_NOTPROP) break; /* Fall through */ case PT_SC: compares--; invertcmp = (compares == 0 && list != backtracks); if (cc[-1] == XCL_NOTPROP) invertcmp ^= 0x1; add_jump(compiler, compares > 0 ? list : backtracks, CMP(SLJIT_EQUAL ^ invertcmp, TMP1, 0, SLJIT_IMM, (int)cc[1])); } cc += 2; } } cc = ccbegin; } if (unicode_status & XCLASS_HAS_SCRIPT_EXTENSION) { OP1(SLJIT_MOV_U16, TMP1, 0, SLJIT_MEM1(TMP2), (sljit_sw)PRIV(ucd_records) + SLJIT_OFFSETOF(ucd_record, scriptx_bidiclass)); OP2(SLJIT_AND, TMP1, 0, TMP1, 0, SLJIT_IMM, UCD_SCRIPTX_MASK); OP2(SLJIT_SHL, TMP1, 0, TMP1, 0, SLJIT_IMM, 2); if (unicode_status & XCLASS_SCRIPT_EXTENSION_NOTPROP) { if (unicode_status & XCLASS_HAS_TYPE) { if (unicode_status & XCLASS_SAVE_CHAR) { OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), LOCALS0, TMP2, 0); unicode_status |= XCLASS_SCRIPT_EXTENSION_RESTORE_LOCALS0; } else { OP1(SLJIT_MOV, RETURN_ADDR, 0, TMP2, 0); unicode_status |= XCLASS_SCRIPT_EXTENSION_RESTORE_RETURN_ADDR; } } OP1(SLJIT_MOV_U8, TMP2, 0, SLJIT_MEM1(TMP2), (sljit_sw)PRIV(ucd_records) + SLJIT_OFFSETOF(ucd_record, script)); } while (*cc != XCL_END) { if (*cc == XCL_SINGLE) { cc ++; GETCHARINCTEST(c, cc); } else if (*cc == XCL_RANGE) { cc ++; GETCHARINCTEST(c, cc); GETCHARINCTEST(c, cc); } else { SLJIT_ASSERT(*cc == XCL_PROP || *cc == XCL_NOTPROP); cc++; if (*cc == PT_SCX) { compares--; invertcmp = (compares == 0 && list != backtracks); jump = NULL; if (cc[-1] == XCL_NOTPROP) { jump = CMP(SLJIT_EQUAL, TMP2, 0, SLJIT_IMM, (int)cc[1]); if (invertcmp) { add_jump(compiler, backtracks, jump); jump = NULL; } invertcmp ^= 0x1; } OP2U(SLJIT_AND32 | SLJIT_SET_Z, SLJIT_MEM1(TMP1), (sljit_sw)(PRIV(ucd_script_sets) + (cc[1] >> 5)), SLJIT_IMM, (sljit_sw)1 << (cc[1] & 0x1f)); add_jump(compiler, compares > 0 ? 
list : backtracks, JUMP(SLJIT_NOT_ZERO ^ invertcmp)); if (jump != NULL) JUMPHERE(jump); } cc += 2; } } if (unicode_status & XCLASS_SCRIPT_EXTENSION_RESTORE_LOCALS0) OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(SLJIT_SP), LOCALS0); else if (unicode_status & XCLASS_SCRIPT_EXTENSION_RESTORE_RETURN_ADDR) OP1(SLJIT_MOV, TMP2, 0, RETURN_ADDR, 0); cc = ccbegin; } if (unicode_status & XCLASS_SAVE_CHAR) OP1(SLJIT_MOV, TMP1, 0, RETURN_ADDR, 0); if (unicode_status & XCLASS_HAS_TYPE) { if (unicode_status & XCLASS_SAVE_CHAR) typereg = RETURN_ADDR; OP1(SLJIT_MOV_U8, typereg, 0, SLJIT_MEM1(TMP2), (sljit_sw)PRIV(ucd_records) + SLJIT_OFFSETOF(ucd_record, chartype)); } } #endif /* SUPPORT_UNICODE */ /* Generating code. */ charoffset = 0; numberofcmps = 0; #ifdef SUPPORT_UNICODE typeoffset = 0; #endif /* SUPPORT_UNICODE */ while (*cc != XCL_END) { compares--; invertcmp = (compares == 0 && list != backtracks); jump = NULL; if (*cc == XCL_SINGLE) { cc ++; GETCHARINCTEST(c, cc); if (numberofcmps < 3 && (*cc == XCL_SINGLE || *cc == XCL_RANGE)) { OP2U(SLJIT_SUB | SLJIT_SET_Z, TMP1, 0, SLJIT_IMM, (sljit_sw)(c - charoffset)); OP_FLAGS(numberofcmps == 0 ? SLJIT_MOV : SLJIT_OR, TMP2, 0, SLJIT_EQUAL); numberofcmps++; } else if (numberofcmps > 0) { OP2U(SLJIT_SUB | SLJIT_SET_Z, TMP1, 0, SLJIT_IMM, (sljit_sw)(c - charoffset)); OP_FLAGS(SLJIT_OR | SLJIT_SET_Z, TMP2, 0, SLJIT_EQUAL); jump = JUMP(SLJIT_NOT_ZERO ^ invertcmp); numberofcmps = 0; } else { jump = CMP(SLJIT_EQUAL ^ invertcmp, TMP1, 0, SLJIT_IMM, (sljit_sw)(c - charoffset)); numberofcmps = 0; } } else if (*cc == XCL_RANGE) { cc ++; GETCHARINCTEST(c, cc); SET_CHAR_OFFSET(c); GETCHARINCTEST(c, cc); if (numberofcmps < 3 && (*cc == XCL_SINGLE || *cc == XCL_RANGE)) { OP2U(SLJIT_SUB | SLJIT_SET_LESS_EQUAL, TMP1, 0, SLJIT_IMM, (sljit_sw)(c - charoffset)); OP_FLAGS(numberofcmps == 0 ? SLJIT_MOV : SLJIT_OR, TMP2, 0, SLJIT_LESS_EQUAL); numberofcmps++; } else if (numberofcmps > 0) { OP2U(SLJIT_SUB | SLJIT_SET_LESS_EQUAL, TMP1, 0, SLJIT_IMM, (sljit_sw)(c - charoffset)); OP_FLAGS(SLJIT_OR | SLJIT_SET_Z, TMP2, 0, SLJIT_LESS_EQUAL); jump = JUMP(SLJIT_NOT_ZERO ^ invertcmp); numberofcmps = 0; } else { jump = CMP(SLJIT_LESS_EQUAL ^ invertcmp, TMP1, 0, SLJIT_IMM, (sljit_sw)(c - charoffset)); numberofcmps = 0; } } #ifdef SUPPORT_UNICODE else { SLJIT_ASSERT(*cc == XCL_PROP || *cc == XCL_NOTPROP); if (*cc == XCL_NOTPROP) invertcmp ^= 0x1; cc++; switch(*cc) { case PT_ANY: if (!invertcmp) jump = JUMP(SLJIT_JUMP); break; case PT_LAMP: OP2U(SLJIT_SUB | SLJIT_SET_Z, typereg, 0, SLJIT_IMM, ucp_Lu - typeoffset); OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_EQUAL); OP2U(SLJIT_SUB | SLJIT_SET_Z, typereg, 0, SLJIT_IMM, ucp_Ll - typeoffset); OP_FLAGS(SLJIT_OR, TMP2, 0, SLJIT_EQUAL); OP2U(SLJIT_SUB | SLJIT_SET_Z, typereg, 0, SLJIT_IMM, ucp_Lt - typeoffset); OP_FLAGS(SLJIT_OR | SLJIT_SET_Z, TMP2, 0, SLJIT_EQUAL); jump = JUMP(SLJIT_NOT_ZERO ^ invertcmp); break; case PT_GC: c = PRIV(ucp_typerange)[(int)cc[1] * 2]; SET_TYPE_OFFSET(c); jump = CMP(SLJIT_LESS_EQUAL ^ invertcmp, typereg, 0, SLJIT_IMM, PRIV(ucp_typerange)[(int)cc[1] * 2 + 1] - c); break; case PT_PC: jump = CMP(SLJIT_EQUAL ^ invertcmp, typereg, 0, SLJIT_IMM, (int)cc[1] - typeoffset); break; case PT_SC: case PT_SCX: case PT_BOOL: case PT_BIDICL: compares++; /* Do nothing. 
*/ break; case PT_SPACE: case PT_PXSPACE: SET_CHAR_OFFSET(9); OP2U(SLJIT_SUB | SLJIT_SET_LESS_EQUAL, TMP1, 0, SLJIT_IMM, 0xd - 0x9); OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_LESS_EQUAL); OP2U(SLJIT_SUB | SLJIT_SET_Z, TMP1, 0, SLJIT_IMM, 0x85 - 0x9); OP_FLAGS(SLJIT_OR, TMP2, 0, SLJIT_EQUAL); OP2U(SLJIT_SUB | SLJIT_SET_Z, TMP1, 0, SLJIT_IMM, 0x180e - 0x9); OP_FLAGS(SLJIT_OR, TMP2, 0, SLJIT_EQUAL); SET_TYPE_OFFSET(ucp_Zl); OP2U(SLJIT_SUB | SLJIT_SET_LESS_EQUAL, typereg, 0, SLJIT_IMM, ucp_Zs - ucp_Zl); OP_FLAGS(SLJIT_OR | SLJIT_SET_Z, TMP2, 0, SLJIT_LESS_EQUAL); jump = JUMP(SLJIT_NOT_ZERO ^ invertcmp); break; case PT_WORD: OP2U(SLJIT_SUB | SLJIT_SET_Z, TMP1, 0, SLJIT_IMM, (sljit_sw)(CHAR_UNDERSCORE - charoffset)); OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_EQUAL); /* Fall through. */ case PT_ALNUM: SET_TYPE_OFFSET(ucp_Ll); OP2U(SLJIT_SUB | SLJIT_SET_LESS_EQUAL, typereg, 0, SLJIT_IMM, ucp_Lu - ucp_Ll); OP_FLAGS((*cc == PT_ALNUM) ? SLJIT_MOV : SLJIT_OR, TMP2, 0, SLJIT_LESS_EQUAL); SET_TYPE_OFFSET(ucp_Nd); OP2U(SLJIT_SUB | SLJIT_SET_LESS_EQUAL, typereg, 0, SLJIT_IMM, ucp_No - ucp_Nd); OP_FLAGS(SLJIT_OR | SLJIT_SET_Z, TMP2, 0, SLJIT_LESS_EQUAL); jump = JUMP(SLJIT_NOT_ZERO ^ invertcmp); break; case PT_CLIST: other_cases = PRIV(ucd_caseless_sets) + cc[1]; /* At least three characters are required. Otherwise this case would be handled by the normal code path. */ SLJIT_ASSERT(other_cases[0] != NOTACHAR && other_cases[1] != NOTACHAR && other_cases[2] != NOTACHAR); SLJIT_ASSERT(other_cases[0] < other_cases[1] && other_cases[1] < other_cases[2]); /* Optimizing character pairs, if their difference is power of 2. */ if (is_powerof2(other_cases[1] ^ other_cases[0])) { if (charoffset == 0) OP2(SLJIT_OR, TMP2, 0, TMP1, 0, SLJIT_IMM, other_cases[1] ^ other_cases[0]); else { OP2(SLJIT_ADD, TMP2, 0, TMP1, 0, SLJIT_IMM, (sljit_sw)charoffset); OP2(SLJIT_OR, TMP2, 0, TMP2, 0, SLJIT_IMM, other_cases[1] ^ other_cases[0]); } OP2U(SLJIT_SUB | SLJIT_SET_Z, TMP2, 0, SLJIT_IMM, other_cases[1]); OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_EQUAL); other_cases += 2; } else if (is_powerof2(other_cases[2] ^ other_cases[1])) { if (charoffset == 0) OP2(SLJIT_OR, TMP2, 0, TMP1, 0, SLJIT_IMM, other_cases[2] ^ other_cases[1]); else { OP2(SLJIT_ADD, TMP2, 0, TMP1, 0, SLJIT_IMM, (sljit_sw)charoffset); OP2(SLJIT_OR, TMP2, 0, TMP2, 0, SLJIT_IMM, other_cases[1] ^ other_cases[0]); } OP2U(SLJIT_SUB | SLJIT_SET_Z, TMP2, 0, SLJIT_IMM, other_cases[2]); OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_EQUAL); OP2U(SLJIT_SUB | SLJIT_SET_Z, TMP1, 0, SLJIT_IMM, (sljit_sw)(other_cases[0] - charoffset)); OP_FLAGS(SLJIT_OR | ((other_cases[3] == NOTACHAR) ? SLJIT_SET_Z : 0), TMP2, 0, SLJIT_EQUAL); other_cases += 3; } else { OP2U(SLJIT_SUB | SLJIT_SET_Z, TMP1, 0, SLJIT_IMM, (sljit_sw)(*other_cases++ - charoffset)); OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_EQUAL); } while (*other_cases != NOTACHAR) { OP2U(SLJIT_SUB | SLJIT_SET_Z, TMP1, 0, SLJIT_IMM, (sljit_sw)(*other_cases++ - charoffset)); OP_FLAGS(SLJIT_OR | ((*other_cases == NOTACHAR) ? 
SLJIT_SET_Z : 0), TMP2, 0, SLJIT_EQUAL); } jump = JUMP(SLJIT_NOT_ZERO ^ invertcmp); break; case PT_UCNC: OP2U(SLJIT_SUB | SLJIT_SET_Z, TMP1, 0, SLJIT_IMM, (sljit_sw)(CHAR_DOLLAR_SIGN - charoffset)); OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_EQUAL); OP2U(SLJIT_SUB | SLJIT_SET_Z, TMP1, 0, SLJIT_IMM, (sljit_sw)(CHAR_COMMERCIAL_AT - charoffset)); OP_FLAGS(SLJIT_OR, TMP2, 0, SLJIT_EQUAL); OP2U(SLJIT_SUB | SLJIT_SET_Z, TMP1, 0, SLJIT_IMM, (sljit_sw)(CHAR_GRAVE_ACCENT - charoffset)); OP_FLAGS(SLJIT_OR, TMP2, 0, SLJIT_EQUAL); SET_CHAR_OFFSET(0xa0); OP2U(SLJIT_SUB | SLJIT_SET_LESS_EQUAL, TMP1, 0, SLJIT_IMM, (sljit_sw)(0xd7ff - charoffset)); OP_FLAGS(SLJIT_OR, TMP2, 0, SLJIT_LESS_EQUAL); SET_CHAR_OFFSET(0); OP2U(SLJIT_SUB | SLJIT_SET_GREATER_EQUAL, TMP1, 0, SLJIT_IMM, 0xe000 - 0); OP_FLAGS(SLJIT_OR | SLJIT_SET_Z, TMP2, 0, SLJIT_GREATER_EQUAL); jump = JUMP(SLJIT_NOT_ZERO ^ invertcmp); break; case PT_PXGRAPH: /* C and Z groups are the farthest two groups. */ SET_TYPE_OFFSET(ucp_Ll); OP2U(SLJIT_SUB | SLJIT_SET_GREATER, typereg, 0, SLJIT_IMM, ucp_So - ucp_Ll); OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_GREATER); jump = CMP(SLJIT_NOT_EQUAL, typereg, 0, SLJIT_IMM, ucp_Cf - ucp_Ll); /* In case of ucp_Cf, we overwrite the result. */ SET_CHAR_OFFSET(0x2066); OP2U(SLJIT_SUB | SLJIT_SET_LESS_EQUAL, TMP1, 0, SLJIT_IMM, 0x2069 - 0x2066); OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_LESS_EQUAL); OP2U(SLJIT_SUB | SLJIT_SET_Z, TMP1, 0, SLJIT_IMM, 0x061c - 0x2066); OP_FLAGS(SLJIT_OR, TMP2, 0, SLJIT_EQUAL); OP2U(SLJIT_SUB | SLJIT_SET_Z, TMP1, 0, SLJIT_IMM, 0x180e - 0x2066); OP_FLAGS(SLJIT_OR, TMP2, 0, SLJIT_EQUAL); JUMPHERE(jump); jump = CMP(SLJIT_ZERO ^ invertcmp, TMP2, 0, SLJIT_IMM, 0); break; case PT_PXPRINT: /* C and Z groups are the farthest two groups. */ SET_TYPE_OFFSET(ucp_Ll); OP2U(SLJIT_SUB | SLJIT_SET_GREATER, typereg, 0, SLJIT_IMM, ucp_So - ucp_Ll); OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_GREATER); OP2U(SLJIT_SUB | SLJIT_SET_Z, typereg, 0, SLJIT_IMM, ucp_Zs - ucp_Ll); OP_FLAGS(SLJIT_AND, TMP2, 0, SLJIT_NOT_EQUAL); jump = CMP(SLJIT_NOT_EQUAL, typereg, 0, SLJIT_IMM, ucp_Cf - ucp_Ll); /* In case of ucp_Cf, we overwrite the result. */ SET_CHAR_OFFSET(0x2066); OP2U(SLJIT_SUB | SLJIT_SET_LESS_EQUAL, TMP1, 0, SLJIT_IMM, 0x2069 - 0x2066); OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_LESS_EQUAL); OP2U(SLJIT_SUB | SLJIT_SET_Z, TMP1, 0, SLJIT_IMM, 0x061c - 0x2066); OP_FLAGS(SLJIT_OR, TMP2, 0, SLJIT_EQUAL); JUMPHERE(jump); jump = CMP(SLJIT_ZERO ^ invertcmp, TMP2, 0, SLJIT_IMM, 0); break; case PT_PXPUNCT: SET_TYPE_OFFSET(ucp_Sc); OP2U(SLJIT_SUB | SLJIT_SET_LESS_EQUAL, typereg, 0, SLJIT_IMM, ucp_So - ucp_Sc); OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_LESS_EQUAL); SET_CHAR_OFFSET(0); OP2U(SLJIT_SUB | SLJIT_SET_LESS_EQUAL, TMP1, 0, SLJIT_IMM, 0x7f); OP_FLAGS(SLJIT_AND, TMP2, 0, SLJIT_LESS_EQUAL); SET_TYPE_OFFSET(ucp_Pc); OP2U(SLJIT_SUB | SLJIT_SET_LESS_EQUAL, typereg, 0, SLJIT_IMM, ucp_Ps - ucp_Pc); OP_FLAGS(SLJIT_OR | SLJIT_SET_Z, TMP2, 0, SLJIT_LESS_EQUAL); jump = JUMP(SLJIT_NOT_ZERO ^ invertcmp); break; default: SLJIT_UNREACHABLE(); break; } cc += 2; } #endif /* SUPPORT_UNICODE */ if (jump != NULL) add_jump(compiler, compares > 0 ? list : backtracks, jump); } if (found != NULL) set_jumps(found, LABEL()); }
183419698766008283102134937176756315954
pcre2_jit_compile.c
284265016287060690142505784626516203619
CWE-125
CVE-2022-1586
An out-of-bounds read vulnerability was discovered in the PCRE2 library in the compile_xclass_matchingpath() function of the pcre2_jit_compile.c file. This involves a unicode property matching issue in JIT-compiled regular expressions. The issue occurs because the character was not fully read in case-less matching within JIT.
https://nvd.nist.gov/vuln/detail/CVE-2022-1586
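The defect in the vulnerable pcre2 record above is the condition *cc == PT_CLIST && *cc == XCL_PROP: after cc has been incremented, the property flag lives at cc[-1], so testing the same code unit against two different constants can never succeed, the branch that widens min and max for caseless sets is silently skipped, and read_char() later operates on too narrow a range. The fixed record that follows compares cc[-1] instead. The snippet below is a hypothetical illustration of that pointer-increment pitfall; the #define values are placeholders, not PCRE2's real constants.

#include <stdio.h>

/* Placeholder values -- PCRE2's real XCL_ and PT_ constants differ. */
#define XCL_PROP  3
#define PT_CLIST  5

/* After cc++ the property flag that preceded the opcode lives at cc[-1],
 * not at *cc. The vulnerable test "*cc == PT_CLIST && *cc == XCL_PROP"
 * compares one code unit with two different constants and is never true,
 * so the widening branch is silently skipped. */
static int is_caseless_clist(const unsigned char *cc)
{
    cc++;                                /* step over XCL_PROP / XCL_NOTPROP */
    return *cc == PT_CLIST && cc[-1] == XCL_PROP;   /* fixed comparison */
}

int main(void)
{
    unsigned char item[] = { XCL_PROP, PT_CLIST, 0 };
    printf("caseless clist recognized: %d\n", is_caseless_clist(item));
    return 0;
}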
223,368
pcre2
d4fa336fbcc388f89095b184ba6d99422cfc676c
https://github.com/PCRE2Project/pcre2
https://github.com/PCRE2Project/pcre2/commit/d4fa336fbcc388f89095b184ba6d99422cfc676c
Fix incorrect value reading in JIT.
0
static void compile_xclass_matchingpath(compiler_common *common, PCRE2_SPTR cc, jump_list **backtracks) { DEFINE_COMPILER; jump_list *found = NULL; jump_list **list = (cc[0] & XCL_NOT) == 0 ? &found : backtracks; sljit_uw c, charoffset, max = 256, min = READ_CHAR_MAX; struct sljit_jump *jump = NULL; PCRE2_SPTR ccbegin; int compares, invertcmp, numberofcmps; #if defined SUPPORT_UNICODE && (PCRE2_CODE_UNIT_WIDTH == 8 || PCRE2_CODE_UNIT_WIDTH == 16) BOOL utf = common->utf; #endif /* SUPPORT_UNICODE && PCRE2_CODE_UNIT_WIDTH == [8|16] */ #ifdef SUPPORT_UNICODE sljit_u32 unicode_status = 0; int typereg = TMP1; const sljit_u32 *other_cases; sljit_uw typeoffset; #endif /* SUPPORT_UNICODE */ /* Scanning the necessary info. */ cc++; ccbegin = cc; compares = 0; if (cc[-1] & XCL_MAP) { min = 0; cc += 32 / sizeof(PCRE2_UCHAR); } while (*cc != XCL_END) { compares++; if (*cc == XCL_SINGLE) { cc ++; GETCHARINCTEST(c, cc); if (c > max) max = c; if (c < min) min = c; #ifdef SUPPORT_UNICODE unicode_status |= XCLASS_SAVE_CHAR; #endif /* SUPPORT_UNICODE */ } else if (*cc == XCL_RANGE) { cc ++; GETCHARINCTEST(c, cc); if (c < min) min = c; GETCHARINCTEST(c, cc); if (c > max) max = c; #ifdef SUPPORT_UNICODE unicode_status |= XCLASS_SAVE_CHAR; #endif /* SUPPORT_UNICODE */ } #ifdef SUPPORT_UNICODE else { SLJIT_ASSERT(*cc == XCL_PROP || *cc == XCL_NOTPROP); cc++; if (*cc == PT_CLIST && cc[-1] == XCL_PROP) { other_cases = PRIV(ucd_caseless_sets) + cc[1]; while (*other_cases != NOTACHAR) { if (*other_cases > max) max = *other_cases; if (*other_cases < min) min = *other_cases; other_cases++; } } else { max = READ_CHAR_MAX; min = 0; } switch(*cc) { case PT_ANY: /* Any either accepts everything or ignored. */ if (cc[-1] == XCL_PROP) { compile_char1_matchingpath(common, OP_ALLANY, cc, backtracks, FALSE); if (list == backtracks) add_jump(compiler, backtracks, JUMP(SLJIT_JUMP)); return; } break; case PT_LAMP: case PT_GC: case PT_PC: case PT_ALNUM: unicode_status |= XCLASS_HAS_TYPE; break; case PT_SCX: unicode_status |= XCLASS_HAS_SCRIPT_EXTENSION; if (cc[-1] == XCL_NOTPROP) { unicode_status |= XCLASS_SCRIPT_EXTENSION_NOTPROP; break; } compares++; /* Fall through */ case PT_SC: unicode_status |= XCLASS_HAS_SCRIPT; break; case PT_SPACE: case PT_PXSPACE: case PT_WORD: case PT_PXGRAPH: case PT_PXPRINT: case PT_PXPUNCT: unicode_status |= XCLASS_SAVE_CHAR | XCLASS_HAS_TYPE; break; case PT_CLIST: case PT_UCNC: unicode_status |= XCLASS_SAVE_CHAR; break; case PT_BOOL: unicode_status |= XCLASS_HAS_BOOL; break; case PT_BIDICL: unicode_status |= XCLASS_HAS_BIDICL; break; default: SLJIT_UNREACHABLE(); break; } cc += 2; } #endif /* SUPPORT_UNICODE */ } SLJIT_ASSERT(compares > 0); /* We are not necessary in utf mode even in 8 bit mode. */ cc = ccbegin; if ((cc[-1] & XCL_NOT) != 0) read_char(common, min, max, backtracks, READ_CHAR_UPDATE_STR_PTR); else { #ifdef SUPPORT_UNICODE read_char(common, min, max, (unicode_status & XCLASS_NEEDS_UCD) ? 
backtracks : NULL, 0); #else /* !SUPPORT_UNICODE */ read_char(common, min, max, NULL, 0); #endif /* SUPPORT_UNICODE */ } if ((cc[-1] & XCL_HASPROP) == 0) { if ((cc[-1] & XCL_MAP) != 0) { jump = CMP(SLJIT_GREATER, TMP1, 0, SLJIT_IMM, 255); if (!optimize_class(common, (const sljit_u8 *)cc, (((const sljit_u8 *)cc)[31] & 0x80) != 0, TRUE, &found)) { OP2(SLJIT_AND, TMP2, 0, TMP1, 0, SLJIT_IMM, 0x7); OP2(SLJIT_LSHR, TMP1, 0, TMP1, 0, SLJIT_IMM, 3); OP1(SLJIT_MOV_U8, TMP1, 0, SLJIT_MEM1(TMP1), (sljit_sw)cc); OP2(SLJIT_SHL, TMP2, 0, SLJIT_IMM, 1, TMP2, 0); OP2U(SLJIT_AND | SLJIT_SET_Z, TMP1, 0, TMP2, 0); add_jump(compiler, &found, JUMP(SLJIT_NOT_ZERO)); } add_jump(compiler, backtracks, JUMP(SLJIT_JUMP)); JUMPHERE(jump); cc += 32 / sizeof(PCRE2_UCHAR); } else { OP2(SLJIT_SUB, TMP2, 0, TMP1, 0, SLJIT_IMM, min); add_jump(compiler, (cc[-1] & XCL_NOT) == 0 ? backtracks : &found, CMP(SLJIT_GREATER, TMP2, 0, SLJIT_IMM, max - min)); } } else if ((cc[-1] & XCL_MAP) != 0) { OP1(SLJIT_MOV, RETURN_ADDR, 0, TMP1, 0); #ifdef SUPPORT_UNICODE unicode_status |= XCLASS_CHAR_SAVED; #endif /* SUPPORT_UNICODE */ if (!optimize_class(common, (const sljit_u8 *)cc, FALSE, TRUE, list)) { #if PCRE2_CODE_UNIT_WIDTH == 8 jump = NULL; if (common->utf) #endif /* PCRE2_CODE_UNIT_WIDTH == 8 */ jump = CMP(SLJIT_GREATER, TMP1, 0, SLJIT_IMM, 255); OP2(SLJIT_AND, TMP2, 0, TMP1, 0, SLJIT_IMM, 0x7); OP2(SLJIT_LSHR, TMP1, 0, TMP1, 0, SLJIT_IMM, 3); OP1(SLJIT_MOV_U8, TMP1, 0, SLJIT_MEM1(TMP1), (sljit_sw)cc); OP2(SLJIT_SHL, TMP2, 0, SLJIT_IMM, 1, TMP2, 0); OP2U(SLJIT_AND | SLJIT_SET_Z, TMP1, 0, TMP2, 0); add_jump(compiler, list, JUMP(SLJIT_NOT_ZERO)); #if PCRE2_CODE_UNIT_WIDTH == 8 if (common->utf) #endif /* PCRE2_CODE_UNIT_WIDTH == 8 */ JUMPHERE(jump); } OP1(SLJIT_MOV, TMP1, 0, RETURN_ADDR, 0); cc += 32 / sizeof(PCRE2_UCHAR); } #ifdef SUPPORT_UNICODE if (unicode_status & XCLASS_NEEDS_UCD) { if ((unicode_status & (XCLASS_SAVE_CHAR | XCLASS_CHAR_SAVED)) == XCLASS_SAVE_CHAR) OP1(SLJIT_MOV, RETURN_ADDR, 0, TMP1, 0); #if PCRE2_CODE_UNIT_WIDTH == 32 if (!common->utf) { jump = CMP(SLJIT_LESS, TMP1, 0, SLJIT_IMM, MAX_UTF_CODE_POINT + 1); OP1(SLJIT_MOV, TMP1, 0, SLJIT_IMM, UNASSIGNED_UTF_CHAR); JUMPHERE(jump); } #endif /* PCRE2_CODE_UNIT_WIDTH == 32 */ OP2(SLJIT_LSHR, TMP2, 0, TMP1, 0, SLJIT_IMM, UCD_BLOCK_SHIFT); OP2(SLJIT_SHL, TMP2, 0, TMP2, 0, SLJIT_IMM, 1); OP1(SLJIT_MOV_U16, TMP2, 0, SLJIT_MEM1(TMP2), (sljit_sw)PRIV(ucd_stage1)); OP2(SLJIT_AND, TMP1, 0, TMP1, 0, SLJIT_IMM, UCD_BLOCK_MASK); OP2(SLJIT_SHL, TMP2, 0, TMP2, 0, SLJIT_IMM, UCD_BLOCK_SHIFT); OP2(SLJIT_ADD, TMP1, 0, TMP1, 0, TMP2, 0); OP1(SLJIT_MOV, TMP2, 0, SLJIT_IMM, (sljit_sw)PRIV(ucd_stage2)); OP1(SLJIT_MOV_U16, TMP2, 0, SLJIT_MEM2(TMP2, TMP1), 1); OP2(SLJIT_SHL, TMP1, 0, TMP2, 0, SLJIT_IMM, 3); OP2(SLJIT_SHL, TMP2, 0, TMP2, 0, SLJIT_IMM, 2); OP2(SLJIT_ADD, TMP2, 0, TMP2, 0, TMP1, 0); ccbegin = cc; if (unicode_status & XCLASS_HAS_BIDICL) { OP1(SLJIT_MOV_U16, TMP1, 0, SLJIT_MEM1(TMP2), (sljit_sw)PRIV(ucd_records) + SLJIT_OFFSETOF(ucd_record, scriptx_bidiclass)); OP2(SLJIT_LSHR, TMP1, 0, TMP1, 0, SLJIT_IMM, UCD_BIDICLASS_SHIFT); while (*cc != XCL_END) { if (*cc == XCL_SINGLE) { cc ++; GETCHARINCTEST(c, cc); } else if (*cc == XCL_RANGE) { cc ++; GETCHARINCTEST(c, cc); GETCHARINCTEST(c, cc); } else { SLJIT_ASSERT(*cc == XCL_PROP || *cc == XCL_NOTPROP); cc++; if (*cc == PT_BIDICL) { compares--; invertcmp = (compares == 0 && list != backtracks); if (cc[-1] == XCL_NOTPROP) invertcmp ^= 0x1; jump = CMP(SLJIT_EQUAL ^ invertcmp, TMP1, 0, SLJIT_IMM, (int)cc[1]); add_jump(compiler, compares 
> 0 ? list : backtracks, jump); } cc += 2; } } cc = ccbegin; } if (unicode_status & XCLASS_HAS_BOOL) { OP1(SLJIT_MOV_U16, TMP1, 0, SLJIT_MEM1(TMP2), (sljit_sw)PRIV(ucd_records) + SLJIT_OFFSETOF(ucd_record, bprops)); OP2(SLJIT_AND, TMP1, 0, TMP1, 0, SLJIT_IMM, UCD_BPROPS_MASK); OP2(SLJIT_SHL, TMP1, 0, TMP1, 0, SLJIT_IMM, 2); while (*cc != XCL_END) { if (*cc == XCL_SINGLE) { cc ++; GETCHARINCTEST(c, cc); } else if (*cc == XCL_RANGE) { cc ++; GETCHARINCTEST(c, cc); GETCHARINCTEST(c, cc); } else { SLJIT_ASSERT(*cc == XCL_PROP || *cc == XCL_NOTPROP); cc++; if (*cc == PT_BOOL) { compares--; invertcmp = (compares == 0 && list != backtracks); if (cc[-1] == XCL_NOTPROP) invertcmp ^= 0x1; OP2U(SLJIT_AND32 | SLJIT_SET_Z, SLJIT_MEM1(TMP1), (sljit_sw)(PRIV(ucd_boolprop_sets) + (cc[1] >> 5)), SLJIT_IMM, (sljit_sw)1 << (cc[1] & 0x1f)); add_jump(compiler, compares > 0 ? list : backtracks, JUMP(SLJIT_NOT_ZERO ^ invertcmp)); } cc += 2; } } cc = ccbegin; } if (unicode_status & XCLASS_HAS_SCRIPT) { OP1(SLJIT_MOV_U8, TMP1, 0, SLJIT_MEM1(TMP2), (sljit_sw)PRIV(ucd_records) + SLJIT_OFFSETOF(ucd_record, script)); while (*cc != XCL_END) { if (*cc == XCL_SINGLE) { cc ++; GETCHARINCTEST(c, cc); } else if (*cc == XCL_RANGE) { cc ++; GETCHARINCTEST(c, cc); GETCHARINCTEST(c, cc); } else { SLJIT_ASSERT(*cc == XCL_PROP || *cc == XCL_NOTPROP); cc++; switch (*cc) { case PT_SCX: if (cc[-1] == XCL_NOTPROP) break; /* Fall through */ case PT_SC: compares--; invertcmp = (compares == 0 && list != backtracks); if (cc[-1] == XCL_NOTPROP) invertcmp ^= 0x1; add_jump(compiler, compares > 0 ? list : backtracks, CMP(SLJIT_EQUAL ^ invertcmp, TMP1, 0, SLJIT_IMM, (int)cc[1])); } cc += 2; } } cc = ccbegin; } if (unicode_status & XCLASS_HAS_SCRIPT_EXTENSION) { OP1(SLJIT_MOV_U16, TMP1, 0, SLJIT_MEM1(TMP2), (sljit_sw)PRIV(ucd_records) + SLJIT_OFFSETOF(ucd_record, scriptx_bidiclass)); OP2(SLJIT_AND, TMP1, 0, TMP1, 0, SLJIT_IMM, UCD_SCRIPTX_MASK); OP2(SLJIT_SHL, TMP1, 0, TMP1, 0, SLJIT_IMM, 2); if (unicode_status & XCLASS_SCRIPT_EXTENSION_NOTPROP) { if (unicode_status & XCLASS_HAS_TYPE) { if (unicode_status & XCLASS_SAVE_CHAR) { OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), LOCALS0, TMP2, 0); unicode_status |= XCLASS_SCRIPT_EXTENSION_RESTORE_LOCALS0; } else { OP1(SLJIT_MOV, RETURN_ADDR, 0, TMP2, 0); unicode_status |= XCLASS_SCRIPT_EXTENSION_RESTORE_RETURN_ADDR; } } OP1(SLJIT_MOV_U8, TMP2, 0, SLJIT_MEM1(TMP2), (sljit_sw)PRIV(ucd_records) + SLJIT_OFFSETOF(ucd_record, script)); } while (*cc != XCL_END) { if (*cc == XCL_SINGLE) { cc ++; GETCHARINCTEST(c, cc); } else if (*cc == XCL_RANGE) { cc ++; GETCHARINCTEST(c, cc); GETCHARINCTEST(c, cc); } else { SLJIT_ASSERT(*cc == XCL_PROP || *cc == XCL_NOTPROP); cc++; if (*cc == PT_SCX) { compares--; invertcmp = (compares == 0 && list != backtracks); jump = NULL; if (cc[-1] == XCL_NOTPROP) { jump = CMP(SLJIT_EQUAL, TMP2, 0, SLJIT_IMM, (int)cc[1]); if (invertcmp) { add_jump(compiler, backtracks, jump); jump = NULL; } invertcmp ^= 0x1; } OP2U(SLJIT_AND32 | SLJIT_SET_Z, SLJIT_MEM1(TMP1), (sljit_sw)(PRIV(ucd_script_sets) + (cc[1] >> 5)), SLJIT_IMM, (sljit_sw)1 << (cc[1] & 0x1f)); add_jump(compiler, compares > 0 ? 
list : backtracks, JUMP(SLJIT_NOT_ZERO ^ invertcmp)); if (jump != NULL) JUMPHERE(jump); } cc += 2; } } if (unicode_status & XCLASS_SCRIPT_EXTENSION_RESTORE_LOCALS0) OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(SLJIT_SP), LOCALS0); else if (unicode_status & XCLASS_SCRIPT_EXTENSION_RESTORE_RETURN_ADDR) OP1(SLJIT_MOV, TMP2, 0, RETURN_ADDR, 0); cc = ccbegin; } if (unicode_status & XCLASS_SAVE_CHAR) OP1(SLJIT_MOV, TMP1, 0, RETURN_ADDR, 0); if (unicode_status & XCLASS_HAS_TYPE) { if (unicode_status & XCLASS_SAVE_CHAR) typereg = RETURN_ADDR; OP1(SLJIT_MOV_U8, typereg, 0, SLJIT_MEM1(TMP2), (sljit_sw)PRIV(ucd_records) + SLJIT_OFFSETOF(ucd_record, chartype)); } } #endif /* SUPPORT_UNICODE */ /* Generating code. */ charoffset = 0; numberofcmps = 0; #ifdef SUPPORT_UNICODE typeoffset = 0; #endif /* SUPPORT_UNICODE */ while (*cc != XCL_END) { compares--; invertcmp = (compares == 0 && list != backtracks); jump = NULL; if (*cc == XCL_SINGLE) { cc ++; GETCHARINCTEST(c, cc); if (numberofcmps < 3 && (*cc == XCL_SINGLE || *cc == XCL_RANGE)) { OP2U(SLJIT_SUB | SLJIT_SET_Z, TMP1, 0, SLJIT_IMM, (sljit_sw)(c - charoffset)); OP_FLAGS(numberofcmps == 0 ? SLJIT_MOV : SLJIT_OR, TMP2, 0, SLJIT_EQUAL); numberofcmps++; } else if (numberofcmps > 0) { OP2U(SLJIT_SUB | SLJIT_SET_Z, TMP1, 0, SLJIT_IMM, (sljit_sw)(c - charoffset)); OP_FLAGS(SLJIT_OR | SLJIT_SET_Z, TMP2, 0, SLJIT_EQUAL); jump = JUMP(SLJIT_NOT_ZERO ^ invertcmp); numberofcmps = 0; } else { jump = CMP(SLJIT_EQUAL ^ invertcmp, TMP1, 0, SLJIT_IMM, (sljit_sw)(c - charoffset)); numberofcmps = 0; } } else if (*cc == XCL_RANGE) { cc ++; GETCHARINCTEST(c, cc); SET_CHAR_OFFSET(c); GETCHARINCTEST(c, cc); if (numberofcmps < 3 && (*cc == XCL_SINGLE || *cc == XCL_RANGE)) { OP2U(SLJIT_SUB | SLJIT_SET_LESS_EQUAL, TMP1, 0, SLJIT_IMM, (sljit_sw)(c - charoffset)); OP_FLAGS(numberofcmps == 0 ? SLJIT_MOV : SLJIT_OR, TMP2, 0, SLJIT_LESS_EQUAL); numberofcmps++; } else if (numberofcmps > 0) { OP2U(SLJIT_SUB | SLJIT_SET_LESS_EQUAL, TMP1, 0, SLJIT_IMM, (sljit_sw)(c - charoffset)); OP_FLAGS(SLJIT_OR | SLJIT_SET_Z, TMP2, 0, SLJIT_LESS_EQUAL); jump = JUMP(SLJIT_NOT_ZERO ^ invertcmp); numberofcmps = 0; } else { jump = CMP(SLJIT_LESS_EQUAL ^ invertcmp, TMP1, 0, SLJIT_IMM, (sljit_sw)(c - charoffset)); numberofcmps = 0; } } #ifdef SUPPORT_UNICODE else { SLJIT_ASSERT(*cc == XCL_PROP || *cc == XCL_NOTPROP); if (*cc == XCL_NOTPROP) invertcmp ^= 0x1; cc++; switch(*cc) { case PT_ANY: if (!invertcmp) jump = JUMP(SLJIT_JUMP); break; case PT_LAMP: OP2U(SLJIT_SUB | SLJIT_SET_Z, typereg, 0, SLJIT_IMM, ucp_Lu - typeoffset); OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_EQUAL); OP2U(SLJIT_SUB | SLJIT_SET_Z, typereg, 0, SLJIT_IMM, ucp_Ll - typeoffset); OP_FLAGS(SLJIT_OR, TMP2, 0, SLJIT_EQUAL); OP2U(SLJIT_SUB | SLJIT_SET_Z, typereg, 0, SLJIT_IMM, ucp_Lt - typeoffset); OP_FLAGS(SLJIT_OR | SLJIT_SET_Z, TMP2, 0, SLJIT_EQUAL); jump = JUMP(SLJIT_NOT_ZERO ^ invertcmp); break; case PT_GC: c = PRIV(ucp_typerange)[(int)cc[1] * 2]; SET_TYPE_OFFSET(c); jump = CMP(SLJIT_LESS_EQUAL ^ invertcmp, typereg, 0, SLJIT_IMM, PRIV(ucp_typerange)[(int)cc[1] * 2 + 1] - c); break; case PT_PC: jump = CMP(SLJIT_EQUAL ^ invertcmp, typereg, 0, SLJIT_IMM, (int)cc[1] - typeoffset); break; case PT_SC: case PT_SCX: case PT_BOOL: case PT_BIDICL: compares++; /* Do nothing. 
*/ break; case PT_SPACE: case PT_PXSPACE: SET_CHAR_OFFSET(9); OP2U(SLJIT_SUB | SLJIT_SET_LESS_EQUAL, TMP1, 0, SLJIT_IMM, 0xd - 0x9); OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_LESS_EQUAL); OP2U(SLJIT_SUB | SLJIT_SET_Z, TMP1, 0, SLJIT_IMM, 0x85 - 0x9); OP_FLAGS(SLJIT_OR, TMP2, 0, SLJIT_EQUAL); OP2U(SLJIT_SUB | SLJIT_SET_Z, TMP1, 0, SLJIT_IMM, 0x180e - 0x9); OP_FLAGS(SLJIT_OR, TMP2, 0, SLJIT_EQUAL); SET_TYPE_OFFSET(ucp_Zl); OP2U(SLJIT_SUB | SLJIT_SET_LESS_EQUAL, typereg, 0, SLJIT_IMM, ucp_Zs - ucp_Zl); OP_FLAGS(SLJIT_OR | SLJIT_SET_Z, TMP2, 0, SLJIT_LESS_EQUAL); jump = JUMP(SLJIT_NOT_ZERO ^ invertcmp); break; case PT_WORD: OP2U(SLJIT_SUB | SLJIT_SET_Z, TMP1, 0, SLJIT_IMM, (sljit_sw)(CHAR_UNDERSCORE - charoffset)); OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_EQUAL); /* Fall through. */ case PT_ALNUM: SET_TYPE_OFFSET(ucp_Ll); OP2U(SLJIT_SUB | SLJIT_SET_LESS_EQUAL, typereg, 0, SLJIT_IMM, ucp_Lu - ucp_Ll); OP_FLAGS((*cc == PT_ALNUM) ? SLJIT_MOV : SLJIT_OR, TMP2, 0, SLJIT_LESS_EQUAL); SET_TYPE_OFFSET(ucp_Nd); OP2U(SLJIT_SUB | SLJIT_SET_LESS_EQUAL, typereg, 0, SLJIT_IMM, ucp_No - ucp_Nd); OP_FLAGS(SLJIT_OR | SLJIT_SET_Z, TMP2, 0, SLJIT_LESS_EQUAL); jump = JUMP(SLJIT_NOT_ZERO ^ invertcmp); break; case PT_CLIST: other_cases = PRIV(ucd_caseless_sets) + cc[1]; /* At least three characters are required. Otherwise this case would be handled by the normal code path. */ SLJIT_ASSERT(other_cases[0] != NOTACHAR && other_cases[1] != NOTACHAR && other_cases[2] != NOTACHAR); SLJIT_ASSERT(other_cases[0] < other_cases[1] && other_cases[1] < other_cases[2]); /* Optimizing character pairs, if their difference is power of 2. */ if (is_powerof2(other_cases[1] ^ other_cases[0])) { if (charoffset == 0) OP2(SLJIT_OR, TMP2, 0, TMP1, 0, SLJIT_IMM, other_cases[1] ^ other_cases[0]); else { OP2(SLJIT_ADD, TMP2, 0, TMP1, 0, SLJIT_IMM, (sljit_sw)charoffset); OP2(SLJIT_OR, TMP2, 0, TMP2, 0, SLJIT_IMM, other_cases[1] ^ other_cases[0]); } OP2U(SLJIT_SUB | SLJIT_SET_Z, TMP2, 0, SLJIT_IMM, other_cases[1]); OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_EQUAL); other_cases += 2; } else if (is_powerof2(other_cases[2] ^ other_cases[1])) { if (charoffset == 0) OP2(SLJIT_OR, TMP2, 0, TMP1, 0, SLJIT_IMM, other_cases[2] ^ other_cases[1]); else { OP2(SLJIT_ADD, TMP2, 0, TMP1, 0, SLJIT_IMM, (sljit_sw)charoffset); OP2(SLJIT_OR, TMP2, 0, TMP2, 0, SLJIT_IMM, other_cases[1] ^ other_cases[0]); } OP2U(SLJIT_SUB | SLJIT_SET_Z, TMP2, 0, SLJIT_IMM, other_cases[2]); OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_EQUAL); OP2U(SLJIT_SUB | SLJIT_SET_Z, TMP1, 0, SLJIT_IMM, (sljit_sw)(other_cases[0] - charoffset)); OP_FLAGS(SLJIT_OR | ((other_cases[3] == NOTACHAR) ? SLJIT_SET_Z : 0), TMP2, 0, SLJIT_EQUAL); other_cases += 3; } else { OP2U(SLJIT_SUB | SLJIT_SET_Z, TMP1, 0, SLJIT_IMM, (sljit_sw)(*other_cases++ - charoffset)); OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_EQUAL); } while (*other_cases != NOTACHAR) { OP2U(SLJIT_SUB | SLJIT_SET_Z, TMP1, 0, SLJIT_IMM, (sljit_sw)(*other_cases++ - charoffset)); OP_FLAGS(SLJIT_OR | ((*other_cases == NOTACHAR) ? 
SLJIT_SET_Z : 0), TMP2, 0, SLJIT_EQUAL); } jump = JUMP(SLJIT_NOT_ZERO ^ invertcmp); break; case PT_UCNC: OP2U(SLJIT_SUB | SLJIT_SET_Z, TMP1, 0, SLJIT_IMM, (sljit_sw)(CHAR_DOLLAR_SIGN - charoffset)); OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_EQUAL); OP2U(SLJIT_SUB | SLJIT_SET_Z, TMP1, 0, SLJIT_IMM, (sljit_sw)(CHAR_COMMERCIAL_AT - charoffset)); OP_FLAGS(SLJIT_OR, TMP2, 0, SLJIT_EQUAL); OP2U(SLJIT_SUB | SLJIT_SET_Z, TMP1, 0, SLJIT_IMM, (sljit_sw)(CHAR_GRAVE_ACCENT - charoffset)); OP_FLAGS(SLJIT_OR, TMP2, 0, SLJIT_EQUAL); SET_CHAR_OFFSET(0xa0); OP2U(SLJIT_SUB | SLJIT_SET_LESS_EQUAL, TMP1, 0, SLJIT_IMM, (sljit_sw)(0xd7ff - charoffset)); OP_FLAGS(SLJIT_OR, TMP2, 0, SLJIT_LESS_EQUAL); SET_CHAR_OFFSET(0); OP2U(SLJIT_SUB | SLJIT_SET_GREATER_EQUAL, TMP1, 0, SLJIT_IMM, 0xe000 - 0); OP_FLAGS(SLJIT_OR | SLJIT_SET_Z, TMP2, 0, SLJIT_GREATER_EQUAL); jump = JUMP(SLJIT_NOT_ZERO ^ invertcmp); break; case PT_PXGRAPH: /* C and Z groups are the farthest two groups. */ SET_TYPE_OFFSET(ucp_Ll); OP2U(SLJIT_SUB | SLJIT_SET_GREATER, typereg, 0, SLJIT_IMM, ucp_So - ucp_Ll); OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_GREATER); jump = CMP(SLJIT_NOT_EQUAL, typereg, 0, SLJIT_IMM, ucp_Cf - ucp_Ll); /* In case of ucp_Cf, we overwrite the result. */ SET_CHAR_OFFSET(0x2066); OP2U(SLJIT_SUB | SLJIT_SET_LESS_EQUAL, TMP1, 0, SLJIT_IMM, 0x2069 - 0x2066); OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_LESS_EQUAL); OP2U(SLJIT_SUB | SLJIT_SET_Z, TMP1, 0, SLJIT_IMM, 0x061c - 0x2066); OP_FLAGS(SLJIT_OR, TMP2, 0, SLJIT_EQUAL); OP2U(SLJIT_SUB | SLJIT_SET_Z, TMP1, 0, SLJIT_IMM, 0x180e - 0x2066); OP_FLAGS(SLJIT_OR, TMP2, 0, SLJIT_EQUAL); JUMPHERE(jump); jump = CMP(SLJIT_ZERO ^ invertcmp, TMP2, 0, SLJIT_IMM, 0); break; case PT_PXPRINT: /* C and Z groups are the farthest two groups. */ SET_TYPE_OFFSET(ucp_Ll); OP2U(SLJIT_SUB | SLJIT_SET_GREATER, typereg, 0, SLJIT_IMM, ucp_So - ucp_Ll); OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_GREATER); OP2U(SLJIT_SUB | SLJIT_SET_Z, typereg, 0, SLJIT_IMM, ucp_Zs - ucp_Ll); OP_FLAGS(SLJIT_AND, TMP2, 0, SLJIT_NOT_EQUAL); jump = CMP(SLJIT_NOT_EQUAL, typereg, 0, SLJIT_IMM, ucp_Cf - ucp_Ll); /* In case of ucp_Cf, we overwrite the result. */ SET_CHAR_OFFSET(0x2066); OP2U(SLJIT_SUB | SLJIT_SET_LESS_EQUAL, TMP1, 0, SLJIT_IMM, 0x2069 - 0x2066); OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_LESS_EQUAL); OP2U(SLJIT_SUB | SLJIT_SET_Z, TMP1, 0, SLJIT_IMM, 0x061c - 0x2066); OP_FLAGS(SLJIT_OR, TMP2, 0, SLJIT_EQUAL); JUMPHERE(jump); jump = CMP(SLJIT_ZERO ^ invertcmp, TMP2, 0, SLJIT_IMM, 0); break; case PT_PXPUNCT: SET_TYPE_OFFSET(ucp_Sc); OP2U(SLJIT_SUB | SLJIT_SET_LESS_EQUAL, typereg, 0, SLJIT_IMM, ucp_So - ucp_Sc); OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_LESS_EQUAL); SET_CHAR_OFFSET(0); OP2U(SLJIT_SUB | SLJIT_SET_LESS_EQUAL, TMP1, 0, SLJIT_IMM, 0x7f); OP_FLAGS(SLJIT_AND, TMP2, 0, SLJIT_LESS_EQUAL); SET_TYPE_OFFSET(ucp_Pc); OP2U(SLJIT_SUB | SLJIT_SET_LESS_EQUAL, typereg, 0, SLJIT_IMM, ucp_Ps - ucp_Pc); OP_FLAGS(SLJIT_OR | SLJIT_SET_Z, TMP2, 0, SLJIT_LESS_EQUAL); jump = JUMP(SLJIT_NOT_ZERO ^ invertcmp); break; default: SLJIT_UNREACHABLE(); break; } cc += 2; } #endif /* SUPPORT_UNICODE */ if (jump != NULL) add_jump(compiler, compares > 0 ? list : backtracks, jump); } if (found != NULL) set_jumps(found, LABEL()); }
144276531294134211562638848702422655084
pcre2_jit_compile.c
52374969195278947710795935639555031915
CWE-125
CVE-2022-1586
An out-of-bounds read vulnerability was discovered in the PCRE2 library, in the compile_xclass_matchingpath() function of pcre2_jit_compile.c. It stems from Unicode property matching in JIT-compiled regular expressions: during caseless matching, the JIT did not read the character fully.
https://nvd.nist.gov/vuln/detail/CVE-2022-1586
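For context on the affected code path, here is a minimal sketch of compiling and matching a pattern with PCRE2's JIT, which is the path the fix above corrects; the pattern and subject are arbitrary placeholders (not the CVE reproducer), error handling is reduced to the essentials, and the build line is an assumption about the local install.

// Minimal PCRE2 JIT usage sketch (placeholder pattern, not the CVE-2022-1586 trigger).
// Build (assumption): g++ demo.cpp $(pkg-config --cflags --libs libpcre2-8)
#define PCRE2_CODE_UNIT_WIDTH 8
#include <pcre2.h>
#include <cstdio>

int main() {
  PCRE2_SPTR pattern = (PCRE2_SPTR)"(?i)[a-z]+";   // placeholder caseless class
  PCRE2_SPTR subject = (PCRE2_SPTR)"Example";
  int errcode = 0;
  PCRE2_SIZE erroffset = 0;

  // Compile with UTF and Unicode property support, as property classes require.
  pcre2_code *re = pcre2_compile(pattern, PCRE2_ZERO_TERMINATED,
                                 PCRE2_UTF | PCRE2_UCP,
                                 &errcode, &erroffset, NULL);
  if (re == NULL) return 1;

  // Request JIT compilation; pcre2_match() then uses the JIT code when available.
  pcre2_jit_compile(re, PCRE2_JIT_COMPLETE);

  pcre2_match_data *md = pcre2_match_data_create_from_pattern(re, NULL);
  int rc = pcre2_match(re, subject, PCRE2_ZERO_TERMINATED, 0, 0, md, NULL);
  std::printf("match rc = %d\n", rc);

  pcre2_match_data_free(md);
  pcre2_code_free(re);
  return 0;
}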
195,291
tensorflow
ef1d027be116f25e25bb94a60da491c2cf55bd0b
https://github.com/tensorflow/tensorflow
https://github.com/tensorflow/tensorflow/commit/ef1d027be116f25e25bb94a60da491c2cf55bd0b
Prevent copying uninitialized data in `AssignOp`. This prevents harder to debug undefined behaviors that cannot be traced back to the original tensor after assignments occur earlier in the graph execution. Several of these undefined behaviors are just reference bindings to null pointers, which are caught when running under ubsan/asan. PiperOrigin-RevId: 408654780 Change-Id: Iad2ec40d43f5fd7ea016c20283356c12d5ddeab1
1
void Compute(OpKernelContext* context) override { const Tensor& rhs = context->input(1); // We always return the input ref. context->forward_ref_input_to_ref_output(0, 0); // We can't always know how this value will be used downstream, so make // conservative assumptions in specifying constraints on the memory // allocation attributes, unless the Grappler graph analysis determined that // it was safe not to. AllocatorAttributes attr; if (!relax_constraints_) { attr.set_gpu_compatible(true); attr.set_nic_compatible(true); } { mutex_lock l(*context->input_ref_mutex(0)); const Tensor& old_lhs = context->mutable_input(0, /* lock_held */ true); const bool same_shape = old_lhs.shape().IsSameSize(rhs.shape()); if (validate_shape_) { OP_REQUIRES(context, same_shape, errors::InvalidArgument( "Assign requires shapes of both tensors to match. " "lhs shape= ", old_lhs.shape().DebugString(), " rhs shape= ", rhs.shape().DebugString())); } // In the code below we try to minimize the amount of memory allocation // and copying by trying the following two shortcuts: // 1. If the lhs is initialized and has the same number of elements as // the rhs we can avoid a memory allocation. // 2. If we can reuse the rhs buffer we avoid both a memory allocation // and copying. // 1. Try to copy into an existing buffer. if (old_lhs.IsInitialized() && old_lhs.shape().num_elements() == rhs.shape().num_elements()) { // The existing lhs tensor has already been initialized and the right // hand side can fit in the underlying buffer. Tensor reshaped_old_lhs; if (same_shape) { reshaped_old_lhs = old_lhs; } else { CHECK(reshaped_old_lhs.CopyFrom(old_lhs, rhs.shape())); context->replace_ref_input(0, reshaped_old_lhs, /* lock_held */ true); } if (use_exclusive_lock_) { Copy(context, &reshaped_old_lhs, rhs); return; } } else { // 2. Try to reuse the rhs. std::unique_ptr<Tensor> input_alias = context->forward_input( 1, OpKernelContext::Params::kNoReservation /*output_index*/, rhs.dtype(), rhs.shape(), DEVICE_MEMORY, attr); if (input_alias != nullptr) { // Update the ref to point to the new buffer. context->replace_ref_input(0, *input_alias, /* lock_held */ true); return; } // Otherwise, create a new tensor whose shape matches the // right hand side, hand off to lhs and copy the rhs into it. Tensor copy_tensor; OP_REQUIRES_OK(context, context->allocate_temp(old_lhs.dtype(), rhs.shape(), &copy_tensor, attr)); // We track memory of variables in variable ops instead of in this // assign op. context->clear_recorded_memory(); context->replace_ref_input(0, copy_tensor, /* lock_held */ true); if (use_exclusive_lock_) { Copy(context, &copy_tensor, rhs); return; } } } // The tensor has already been initialized and the right hand side // matches the left hand side's shape. We have been told to do the // copy outside the lock. Tensor old_unlocked_lhs = context->mutable_input(0, /* lock_held */ false); Copy(context, &old_unlocked_lhs, rhs); }
110563830933859876998490806365273446744
assign_op.h
69919930869774703131816695670485389180
CWE-908
CVE-2022-23573
TensorFlow is an open-source machine learning framework. The implementation of `AssignOp` can result in copying uninitialized data to a new tensor, which later results in undefined behavior. The implementation checks that the left-hand side of the assignment is initialized (to minimize the number of allocations), but does not check that the right-hand side is also initialized. The fix will be included in TensorFlow 2.8.0. We will also cherrypick this commit on TensorFlow 2.7.1, TensorFlow 2.6.3, and TensorFlow 2.5.3, as these are also affected and still in the supported range.
https://nvd.nist.gov/vuln/detail/CVE-2022-23573
224,153
tensorflow
ef1d027be116f25e25bb94a60da491c2cf55bd0b
https://github.com/tensorflow/tensorflow
https://github.com/tensorflow/tensorflow/commit/ef1d027be116f25e25bb94a60da491c2cf55bd0b
Prevent copying uninitialized data in `AssignOp`. This prevents harder to debug undefined behaviors that cannot be traced back to the original tensor after assignments occur earlier in the graph execution. Several of these undefined behaviors are just reference bindings to null pointers, which are caught when running under ubsan/asan. PiperOrigin-RevId: 408654780 Change-Id: Iad2ec40d43f5fd7ea016c20283356c12d5ddeab1
0
void Compute(OpKernelContext* context) override { const Tensor& rhs = context->input(1); // We always return the input ref. context->forward_ref_input_to_ref_output(0, 0); // Prevent copying uninitialized data, to solve harder to debug undefined // behaviors that cannot be traced back to the original tensor. OP_REQUIRES( context, rhs.IsInitialized(), errors::Internal("Right hand side of AssignOp is not initialized")); // We can't always know how this value will be used downstream, so make // conservative assumptions in specifying constraints on the memory // allocation attributes, unless the Grappler graph analysis determined that // it was safe not to. AllocatorAttributes attr; if (!relax_constraints_) { attr.set_gpu_compatible(true); attr.set_nic_compatible(true); } { mutex_lock l(*context->input_ref_mutex(0)); const Tensor& old_lhs = context->mutable_input(0, /* lock_held */ true); const bool same_shape = old_lhs.shape().IsSameSize(rhs.shape()); if (validate_shape_) { OP_REQUIRES(context, same_shape, errors::InvalidArgument( "Assign requires shapes of both tensors to match. " "lhs shape= ", old_lhs.shape().DebugString(), " rhs shape= ", rhs.shape().DebugString())); } // In the code below we try to minimize the amount of memory allocation // and copying by trying the following two shortcuts: // 1. If the lhs is initialized and has the same number of elements as // the rhs we can avoid a memory allocation. // 2. If we can reuse the rhs buffer we avoid both a memory allocation // and copying. // 1. Try to copy into an existing buffer. if (old_lhs.IsInitialized() && old_lhs.shape().num_elements() == rhs.shape().num_elements()) { // The existing lhs tensor has already been initialized and the right // hand side can fit in the underlying buffer. Tensor reshaped_old_lhs; if (same_shape) { reshaped_old_lhs = old_lhs; } else { CHECK(reshaped_old_lhs.CopyFrom(old_lhs, rhs.shape())); context->replace_ref_input(0, reshaped_old_lhs, /* lock_held */ true); } if (use_exclusive_lock_) { Copy(context, &reshaped_old_lhs, rhs); return; } } else { // 2. Try to reuse the rhs. std::unique_ptr<Tensor> input_alias = context->forward_input( 1, OpKernelContext::Params::kNoReservation /*output_index*/, rhs.dtype(), rhs.shape(), DEVICE_MEMORY, attr); if (input_alias != nullptr) { // Update the ref to point to the new buffer. context->replace_ref_input(0, *input_alias, /* lock_held */ true); return; } // Otherwise, create a new tensor whose shape matches the // right hand side, hand off to lhs and copy the rhs into it. Tensor copy_tensor; OP_REQUIRES_OK(context, context->allocate_temp(old_lhs.dtype(), rhs.shape(), &copy_tensor, attr)); // We track memory of variables in variable ops instead of in this // assign op. context->clear_recorded_memory(); context->replace_ref_input(0, copy_tensor, /* lock_held */ true); if (use_exclusive_lock_) { Copy(context, &copy_tensor, rhs); return; } } } // The tensor has already been initialized and the right hand side // matches the left hand side's shape. We have been told to do the // copy outside the lock. Tensor old_unlocked_lhs = context->mutable_input(0, /* lock_held */ false); Copy(context, &old_unlocked_lhs, rhs); }
305037740106398797533289727050001288809
assign_op.h
227740622376075800348272805227748641889
CWE-908
CVE-2022-23573
TensorFlow is an open-source machine learning framework. The implementation of `AssignOp` can result in copying uninitialized data to a new tensor, which later results in undefined behavior. The implementation checks that the left-hand side of the assignment is initialized (to minimize the number of allocations), but does not check that the right-hand side is also initialized. The fix will be included in TensorFlow 2.8.0. We will also cherrypick this commit on TensorFlow 2.7.1, TensorFlow 2.6.3, and TensorFlow 2.5.3, as these are also affected and still in the supported range.
https://nvd.nist.gov/vuln/detail/CVE-2022-23573
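As a rough illustration of the check the patch introduces, the sketch below validates both operands before copying; MiniTensor and AssignInto are hypothetical simplifications for this example only, not TensorFlow types or APIs.

#include <cstdio>
#include <stdexcept>
#include <vector>

// Hypothetical, drastically simplified stand-in for a tensor.
struct MiniTensor {
  bool initialized = false;
  std::vector<float> data;
};

// Mirrors the essence of the patched kernel: validate *both* operands,
// not just the left-hand side, before any copy takes place.
void AssignInto(MiniTensor &lhs, const MiniTensor &rhs) {
  if (!rhs.initialized)
    throw std::runtime_error("right hand side of assign is not initialized");
  lhs.data = rhs.data;   // only well-defined bytes are copied
  lhs.initialized = true;
}

int main() {
  MiniTensor lhs, rhs;   // rhs deliberately left uninitialized
  try {
    AssignInto(lhs, rhs);
  } catch (const std::exception &e) {
    std::printf("rejected: %s\n", e.what());
  }
  return 0;
}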
195,293
mruby
ae3c99767a27f5c6c584162e2adc6a5d0eb2c54e
https://github.com/mruby/mruby
https://github.com/mruby/mruby/commit/ae3c99767a27f5c6c584162e2adc6a5d0eb2c54e
codegen.c: fixed a bug in hash code generation with `!val`.
1
gen_hash(codegen_scope *s, node *tree, int val, int limit) { int slimit = GEN_VAL_STACK_MAX; if (cursp() >= GEN_LIT_ARY_MAX) slimit = INT16_MAX; int len = 0; mrb_bool update = FALSE; while (tree) { if (nint(tree->car->car->car) == NODE_KW_REST_ARGS) { if (len > 0) { pop_n(len*2); if (!update) { genop_2(s, OP_HASH, cursp(), len); } else { pop(); genop_2(s, OP_HASHADD, cursp(), len); } push(); } codegen(s, tree->car->cdr, val); if (len > 0 || update) { pop(); pop(); genop_1(s, OP_HASHCAT, cursp()); push(); } update = TRUE; len = 0; } else { codegen(s, tree->car->car, val); codegen(s, tree->car->cdr, val); len++; } tree = tree->cdr; if (val && cursp() >= slimit) { pop_n(len*2); if (!update) { genop_2(s, OP_HASH, cursp(), len); } else { pop(); genop_2(s, OP_HASHADD, cursp(), len); } push(); update = TRUE; len = 0; } } if (update) { if (val && len > 0) { pop_n(len*2+1); genop_2(s, OP_HASHADD, cursp(), len); push(); } return -1; /* variable length */ } return len; }
193019522040384116683756187518117428466
codegen.c
187346573288549092337421927147361320618
CWE-476
CVE-2022-0481
NULL Pointer Dereference in Homebrew mruby prior to 3.2.
https://nvd.nist.gov/vuln/detail/CVE-2022-0481
224,154
mruby
ae3c99767a27f5c6c584162e2adc6a5d0eb2c54e
https://github.com/mruby/mruby
https://github.com/mruby/mruby/commit/ae3c99767a27f5c6c584162e2adc6a5d0eb2c54e
codegen.c: fixed a bug in hash code generation with `!val`.
0
gen_hash(codegen_scope *s, node *tree, int val, int limit) { int slimit = GEN_VAL_STACK_MAX; if (cursp() >= GEN_LIT_ARY_MAX) slimit = INT16_MAX; int len = 0; mrb_bool update = FALSE; while (tree) { if (nint(tree->car->car->car) == NODE_KW_REST_ARGS) { if (val && len > 0) { pop_n(len*2); if (!update) { genop_2(s, OP_HASH, cursp(), len); } else { pop(); genop_2(s, OP_HASHADD, cursp(), len); } push(); } codegen(s, tree->car->cdr, val); if (val && (len > 0 || update)) { pop(); pop(); genop_1(s, OP_HASHCAT, cursp()); push(); } update = TRUE; len = 0; } else { codegen(s, tree->car->car, val); codegen(s, tree->car->cdr, val); len++; } tree = tree->cdr; if (val && cursp() >= slimit) { pop_n(len*2); if (!update) { genop_2(s, OP_HASH, cursp(), len); } else { pop(); genop_2(s, OP_HASHADD, cursp(), len); } push(); update = TRUE; len = 0; } } if (update) { if (val && len > 0) { pop_n(len*2+1); genop_2(s, OP_HASHADD, cursp(), len); push(); } return -1; /* variable length */ } return len; }
86249756338106133378324988596644701448
None
CWE-476
CVE-2022-0481
NULL Pointer Dereference in Homebrew mruby prior to 3.2.
https://nvd.nist.gov/vuln/detail/CVE-2022-0481
195,294
tensorflow
f57315566d7094f322b784947093406c2aea0d7d
https://github.com/tensorflow/tensorflow
https://github.com/tensorflow/tensorflow/commit/f57315566d7094f322b784947093406c2aea0d7d
Add a check for the key being a scalar tensor for the MapStage and OrderedMapStage ops. According to documentation[1][2], the key must be an int64 value, but this wasn't enforced and the ops would fail with a check failure for a non-scalar key value. [1]https://www.tensorflow.org/api_docs/cc/class/tensorflow/ops/ordered-map-stage [2]https://www.tensorflow.org/api_docs/cc/class/tensorflow/ops/map-stage PiperOrigin-RevId: 413822112 Change-Id: I9d118faf990e6361900aa32272eff486ad9f0e2e
1
void Compute(OpKernelContext* ctx) override { StagingMap<Ordered>* map = nullptr; OP_REQUIRES_OK(ctx, GetStagingMap(ctx, def(), &map)); core::ScopedUnref scope(map); typename StagingMap<Ordered>::OptionalTuple tuple; const Tensor* key_tensor; const Tensor* indices_tensor; OpInputList values_tensor; OP_REQUIRES_OK(ctx, ctx->input("key", &key_tensor)); OP_REQUIRES_OK(ctx, ctx->input("indices", &indices_tensor)); OP_REQUIRES_OK(ctx, ctx->input_list("values", &values_tensor)); OP_REQUIRES(ctx, key_tensor->NumElements() > 0, errors::InvalidArgument("key must not be empty")); // Create copy for insertion into Staging Area Tensor key(*key_tensor); // Create the tuple to store for (std::size_t i = 0; i < values_tensor.size(); ++i) { tuple.push_back(values_tensor[i]); } // Store the tuple in the map OP_REQUIRES_OK(ctx, map->put(&key, indices_tensor, &tuple)); }
121343016950748954777477429164526353429
map_stage_op.cc
156634864064326951745718193254274952325
CWE-843
CVE-2022-21734
TensorFlow is an open-source machine learning framework. The implementation of `MapStage` is vulnerable to a `CHECK`-fail if the key tensor is not a scalar. The fix will be included in TensorFlow 2.8.0. We will also cherrypick this commit on TensorFlow 2.7.1, TensorFlow 2.6.3, and TensorFlow 2.5.3, as these are also affected and still in the supported range.
https://nvd.nist.gov/vuln/detail/CVE-2022-21734
224,181
tensorflow
f57315566d7094f322b784947093406c2aea0d7d
https://github.com/tensorflow/tensorflow
https://github.com/tensorflow/tensorflow/commit/f57315566d7094f322b784947093406c2aea0d7d
Add a check for the key being a scalar tensor for the MapStage and OrderedMapStage ops. According to documentation[1][2], the key must be an int64 value, but this wasn't enforced and the ops would fail with a check failure for a non-scalar key value. [1]https://www.tensorflow.org/api_docs/cc/class/tensorflow/ops/ordered-map-stage [2]https://www.tensorflow.org/api_docs/cc/class/tensorflow/ops/map-stage PiperOrigin-RevId: 413822112 Change-Id: I9d118faf990e6361900aa32272eff486ad9f0e2e
0
void Compute(OpKernelContext* ctx) override { StagingMap<Ordered>* map = nullptr; OP_REQUIRES_OK(ctx, GetStagingMap(ctx, def(), &map)); core::ScopedUnref scope(map); typename StagingMap<Ordered>::OptionalTuple tuple; const Tensor* key_tensor; const Tensor* indices_tensor; OpInputList values_tensor; OP_REQUIRES_OK(ctx, ctx->input("key", &key_tensor)); OP_REQUIRES_OK(ctx, ctx->input("indices", &indices_tensor)); OP_REQUIRES_OK(ctx, ctx->input_list("values", &values_tensor)); OP_REQUIRES(ctx, key_tensor->NumElements() > 0, errors::InvalidArgument("key must not be empty")); OP_REQUIRES(ctx, key_tensor->NumElements() == 1, errors::InvalidArgument( "key must be an int64 scalar, got tensor with shape: ", key_tensor->shape())); // Create copy for insertion into Staging Area Tensor key(*key_tensor); // Create the tuple to store for (std::size_t i = 0; i < values_tensor.size(); ++i) { tuple.push_back(values_tensor[i]); } // Store the tuple in the map OP_REQUIRES_OK(ctx, map->put(&key, indices_tensor, &tuple)); }
155761287933118551217083530225572870599
map_stage_op.cc
98786060535014659477894352363015597620
CWE-843
CVE-2022-21734
TensorFlow is an open-source machine learning framework. The implementation of `MapStage` is vulnerable to a `CHECK`-fail if the key tensor is not a scalar. The fix will be included in TensorFlow 2.8.0. We will also cherrypick this commit on TensorFlow 2.7.1, TensorFlow 2.6.3, and TensorFlow 2.5.3, as these are also affected and still in the supported range.
https://nvd.nist.gov/vuln/detail/CVE-2022-21734
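To make the difference between the two checks concrete, here is a small sketch contrasting the original "non-empty" test with the stricter "exactly one element" test added by the patch; KeyShape and ValidateScalarKey are hypothetical names used only for this illustration, not TensorFlow APIs.

#include <cstdio>
#include <vector>

// Hypothetical shape: the element count is the product of the dimensions.
struct KeyShape {
  std::vector<long long> dims;
  long long NumElements() const {
    long long n = 1;
    for (long long d : dims) n *= d;
    return n;
  }
};

// The old check only required a non-empty key (NumElements() > 0); the fix
// requires exactly one element, i.e. a scalar int64 key as documented.
bool ValidateScalarKey(const KeyShape &key) {
  return key.NumElements() == 1;
}

int main() {
  KeyShape scalar{{}};     // rank-0 tensor: one element
  KeyShape vector3{{3}};   // three elements: passes "non-empty", fails here
  std::printf("scalar ok: %d, vector ok: %d\n",
              (int)ValidateScalarKey(scalar), (int)ValidateScalarKey(vector3));
  return 0;
}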
195,308
flatpak
462fca2c666e0cd2b60d6d2593a7216a83047aaf
https://github.com/flatpak/flatpak
https://github.com/flatpak/flatpak/commit/462fca2c666e0cd2b60d6d2593a7216a83047aaf
run: Don't allow chroot() If we don't allow pivot_root() then there seems no reason why we should allow chroot(). Partially fixes GHSA-67h7-w3jq-vh4q. Signed-off-by: Simon McVittie <smcv@collabora.com>
1
setup_seccomp (FlatpakBwrap *bwrap, const char *arch, gulong allowed_personality, FlatpakRunFlags run_flags, GError **error) { gboolean multiarch = (run_flags & FLATPAK_RUN_FLAG_MULTIARCH) != 0; gboolean devel = (run_flags & FLATPAK_RUN_FLAG_DEVEL) != 0; __attribute__((cleanup (cleanup_seccomp))) scmp_filter_ctx seccomp = NULL; /**** BEGIN NOTE ON CODE SHARING * * There are today a number of different Linux container * implementations. That will likely continue for long into the * future. But we can still try to share code, and it's important * to do so because it affects what library and application writers * can do, and we should support code portability between different * container tools. * * This syscall blocklist is copied from linux-user-chroot, which was in turn * clearly influenced by the Sandstorm.io blocklist. * * If you make any changes here, I suggest sending the changes along * to other sandbox maintainers. Using the libseccomp list is also * an appropriate venue: * https://groups.google.com/forum/#!forum/libseccomp * * A non-exhaustive list of links to container tooling that might * want to share this blocklist: * * https://github.com/sandstorm-io/sandstorm * in src/sandstorm/supervisor.c++ * https://github.com/flatpak/flatpak.git * in common/flatpak-run.c * https://git.gnome.org/browse/linux-user-chroot * in src/setup-seccomp.c * * Other useful resources: * https://github.com/systemd/systemd/blob/HEAD/src/shared/seccomp-util.c * https://github.com/moby/moby/blob/HEAD/profiles/seccomp/default.json * **** END NOTE ON CODE SHARING */ struct { int scall; int errnum; struct scmp_arg_cmp *arg; } syscall_blocklist[] = { /* Block dmesg */ {SCMP_SYS (syslog), EPERM}, /* Useless old syscall */ {SCMP_SYS (uselib), EPERM}, /* Don't allow disabling accounting */ {SCMP_SYS (acct), EPERM}, /* 16-bit code is unnecessary in the sandbox, and modify_ldt is a historic source of interesting information leaks. */ {SCMP_SYS (modify_ldt), EPERM}, /* Don't allow reading current quota use */ {SCMP_SYS (quotactl), EPERM}, /* Don't allow access to the kernel keyring */ {SCMP_SYS (add_key), EPERM}, {SCMP_SYS (keyctl), EPERM}, {SCMP_SYS (request_key), EPERM}, /* Scary VM/NUMA ops */ {SCMP_SYS (move_pages), EPERM}, {SCMP_SYS (mbind), EPERM}, {SCMP_SYS (get_mempolicy), EPERM}, {SCMP_SYS (set_mempolicy), EPERM}, {SCMP_SYS (migrate_pages), EPERM}, /* Don't allow subnamespace setups: */ {SCMP_SYS (unshare), EPERM}, {SCMP_SYS (setns), EPERM}, {SCMP_SYS (mount), EPERM}, {SCMP_SYS (umount), EPERM}, {SCMP_SYS (umount2), EPERM}, {SCMP_SYS (pivot_root), EPERM}, #if defined(__s390__) || defined(__s390x__) || defined(__CRIS__) /* Architectures with CONFIG_CLONE_BACKWARDS2: the child stack * and flags arguments are reversed so the flags come second */ {SCMP_SYS (clone), EPERM, &SCMP_A1 (SCMP_CMP_MASKED_EQ, CLONE_NEWUSER, CLONE_NEWUSER)}, #else /* Normally the flags come first */ {SCMP_SYS (clone), EPERM, &SCMP_A0 (SCMP_CMP_MASKED_EQ, CLONE_NEWUSER, CLONE_NEWUSER)}, #endif /* Don't allow faking input to the controlling tty (CVE-2017-5226) */ {SCMP_SYS (ioctl), EPERM, &SCMP_A1 (SCMP_CMP_MASKED_EQ, 0xFFFFFFFFu, (int) TIOCSTI)}, /* seccomp can't look into clone3()'s struct clone_args to check whether * the flags are OK, so we have no choice but to block clone3(). * Return ENOSYS so user-space will fall back to clone(). * (GHSA-67h7-w3jq-vh4q; see also https://github.com/moby/moby/commit/9f6b562d) */ {SCMP_SYS (clone3), ENOSYS}, /* New mount manipulation APIs can also change our VFS. 
There's no * legitimate reason to do these in the sandbox, so block all of them * rather than thinking about which ones might be dangerous. * (GHSA-67h7-w3jq-vh4q) */ {SCMP_SYS (open_tree), ENOSYS}, {SCMP_SYS (move_mount), ENOSYS}, {SCMP_SYS (fsopen), ENOSYS}, {SCMP_SYS (fsconfig), ENOSYS}, {SCMP_SYS (fsmount), ENOSYS}, {SCMP_SYS (fspick), ENOSYS}, {SCMP_SYS (mount_setattr), ENOSYS}, }; struct { int scall; int errnum; struct scmp_arg_cmp *arg; } syscall_nondevel_blocklist[] = { /* Profiling operations; we expect these to be done by tools from outside * the sandbox. In particular perf has been the source of many CVEs. */ {SCMP_SYS (perf_event_open), EPERM}, /* Don't allow you to switch to bsd emulation or whatnot */ {SCMP_SYS (personality), EPERM, &SCMP_A0 (SCMP_CMP_NE, allowed_personality)}, {SCMP_SYS (ptrace), EPERM} }; /* Blocklist all but unix, inet, inet6 and netlink */ struct { int family; FlatpakRunFlags flags_mask; } socket_family_allowlist[] = { /* NOTE: Keep in numerical order */ { AF_UNSPEC, 0 }, { AF_LOCAL, 0 }, { AF_INET, 0 }, { AF_INET6, 0 }, { AF_NETLINK, 0 }, { AF_CAN, FLATPAK_RUN_FLAG_CANBUS }, { AF_BLUETOOTH, FLATPAK_RUN_FLAG_BLUETOOTH }, }; int last_allowed_family; int i, r; g_auto(GLnxTmpfile) seccomp_tmpf = { 0, }; seccomp = seccomp_init (SCMP_ACT_ALLOW); if (!seccomp) return flatpak_fail_error (error, FLATPAK_ERROR_SETUP_FAILED, _("Initialize seccomp failed")); if (arch != NULL) { uint32_t arch_id = 0; const uint32_t *extra_arches = NULL; if (strcmp (arch, "i386") == 0) { arch_id = SCMP_ARCH_X86; } else if (strcmp (arch, "x86_64") == 0) { arch_id = SCMP_ARCH_X86_64; extra_arches = seccomp_x86_64_extra_arches; } else if (strcmp (arch, "arm") == 0) { arch_id = SCMP_ARCH_ARM; } #ifdef SCMP_ARCH_AARCH64 else if (strcmp (arch, "aarch64") == 0) { arch_id = SCMP_ARCH_AARCH64; extra_arches = seccomp_aarch64_extra_arches; } #endif /* We only really need to handle arches on multiarch systems. * If only one arch is supported the default is fine */ if (arch_id != 0) { /* This *adds* the target arch, instead of replacing the native one. This is not ideal, because we'd like to only allow the target arch, but we can't really disallow the native arch at this point, because then bubblewrap couldn't continue running. */ r = seccomp_arch_add (seccomp, arch_id); if (r < 0 && r != -EEXIST) return flatpak_fail_error (error, FLATPAK_ERROR_SETUP_FAILED, _("Failed to add architecture to seccomp filter")); if (multiarch && extra_arches != NULL) { for (i = 0; extra_arches[i] != 0; i++) { r = seccomp_arch_add (seccomp, extra_arches[i]); if (r < 0 && r != -EEXIST) return flatpak_fail_error (error, FLATPAK_ERROR_SETUP_FAILED, _("Failed to add multiarch architecture to seccomp filter")); } } } } /* TODO: Should we filter the kernel keyring syscalls in some way? * We do want them to be used by desktop apps, but they could also perhaps * leak system stuff or secrets from other apps. 
*/ for (i = 0; i < G_N_ELEMENTS (syscall_blocklist); i++) { int scall = syscall_blocklist[i].scall; int errnum = syscall_blocklist[i].errnum; g_return_val_if_fail (errnum == EPERM || errnum == ENOSYS, FALSE); if (syscall_blocklist[i].arg) r = seccomp_rule_add (seccomp, SCMP_ACT_ERRNO (errnum), scall, 1, *syscall_blocklist[i].arg); else r = seccomp_rule_add (seccomp, SCMP_ACT_ERRNO (errnum), scall, 0); if (r < 0 && r == -EFAULT /* unknown syscall */) return flatpak_fail_error (error, FLATPAK_ERROR_SETUP_FAILED, _("Failed to block syscall %d"), scall); } if (!devel) { for (i = 0; i < G_N_ELEMENTS (syscall_nondevel_blocklist); i++) { int scall = syscall_nondevel_blocklist[i].scall; int errnum = syscall_nondevel_blocklist[i].errnum; g_return_val_if_fail (errnum == EPERM || errnum == ENOSYS, FALSE); if (syscall_nondevel_blocklist[i].arg) r = seccomp_rule_add (seccomp, SCMP_ACT_ERRNO (errnum), scall, 1, *syscall_nondevel_blocklist[i].arg); else r = seccomp_rule_add (seccomp, SCMP_ACT_ERRNO (errnum), scall, 0); if (r < 0 && r == -EFAULT /* unknown syscall */) return flatpak_fail_error (error, FLATPAK_ERROR_SETUP_FAILED, _("Failed to block syscall %d"), scall); } } /* Socket filtering doesn't work on e.g. i386, so ignore failures here * However, we need to user seccomp_rule_add_exact to avoid libseccomp doing * something else: https://github.com/seccomp/libseccomp/issues/8 */ last_allowed_family = -1; for (i = 0; i < G_N_ELEMENTS (socket_family_allowlist); i++) { int family = socket_family_allowlist[i].family; int disallowed; if (socket_family_allowlist[i].flags_mask != 0 && (socket_family_allowlist[i].flags_mask & run_flags) != socket_family_allowlist[i].flags_mask) continue; for (disallowed = last_allowed_family + 1; disallowed < family; disallowed++) { /* Blocklist the in-between valid families */ seccomp_rule_add_exact (seccomp, SCMP_ACT_ERRNO (EAFNOSUPPORT), SCMP_SYS (socket), 1, SCMP_A0 (SCMP_CMP_EQ, disallowed)); } last_allowed_family = family; } /* Blocklist the rest */ seccomp_rule_add_exact (seccomp, SCMP_ACT_ERRNO (EAFNOSUPPORT), SCMP_SYS (socket), 1, SCMP_A0 (SCMP_CMP_GE, last_allowed_family + 1)); if (!glnx_open_anonymous_tmpfile_full (O_RDWR | O_CLOEXEC, "/tmp", &seccomp_tmpf, error)) return FALSE; if (seccomp_export_bpf (seccomp, seccomp_tmpf.fd) != 0) return flatpak_fail_error (error, FLATPAK_ERROR_SETUP_FAILED, _("Failed to export bpf")); lseek (seccomp_tmpf.fd, 0, SEEK_SET); flatpak_bwrap_add_args_data_fd (bwrap, "--seccomp", glnx_steal_fd (&seccomp_tmpf.fd), NULL); return TRUE; }
116661486604620809625071911593237669795
flatpak-run.c
32398709380082441128978861691951488575
CWE-20
CVE-2021-41133
Flatpak is a system for building, distributing, and running sandboxed desktop applications on Linux. In versions prior to 1.10.4 and 1.12.0, Flatpak apps with direct access to AF_UNIX sockets such as those used by Wayland, Pipewire or pipewire-pulse can trick portals and other host-OS services into treating the Flatpak app as though it was an ordinary, non-sandboxed host-OS process. They can do this by manipulating the VFS using recent mount-related syscalls that are not blocked by Flatpak's denylist seccomp filter, in order to substitute a crafted `/.flatpak-info` or make that file disappear entirely. Flatpak apps that act as clients for AF_UNIX sockets such as those used by Wayland, Pipewire or pipewire-pulse can escalate the privileges that the corresponding services will believe the Flatpak app has. Note that protocols that operate entirely over the D-Bus session bus (user bus), system bus or accessibility bus are not affected by this. This is due to the use of a proxy process `xdg-dbus-proxy`, whose VFS cannot be manipulated by the Flatpak app, when interacting with these buses. Patches exist for versions 1.10.4 and 1.12.0, and as of time of publication, a patch for version 1.8.2 is being planned. There are no workarounds aside from upgrading to a patched version.
https://nvd.nist.gov/vuln/detail/CVE-2021-41133
224,277
flatpak
462fca2c666e0cd2b60d6d2593a7216a83047aaf
https://github.com/flatpak/flatpak
https://github.com/flatpak/flatpak/commit/462fca2c666e0cd2b60d6d2593a7216a83047aaf
run: Don't allow chroot() If we don't allow pivot_root() then there seems no reason why we should allow chroot(). Partially fixes GHSA-67h7-w3jq-vh4q. Signed-off-by: Simon McVittie <smcv@collabora.com>
0
setup_seccomp (FlatpakBwrap *bwrap, const char *arch, gulong allowed_personality, FlatpakRunFlags run_flags, GError **error) { gboolean multiarch = (run_flags & FLATPAK_RUN_FLAG_MULTIARCH) != 0; gboolean devel = (run_flags & FLATPAK_RUN_FLAG_DEVEL) != 0; __attribute__((cleanup (cleanup_seccomp))) scmp_filter_ctx seccomp = NULL; /**** BEGIN NOTE ON CODE SHARING * * There are today a number of different Linux container * implementations. That will likely continue for long into the * future. But we can still try to share code, and it's important * to do so because it affects what library and application writers * can do, and we should support code portability between different * container tools. * * This syscall blocklist is copied from linux-user-chroot, which was in turn * clearly influenced by the Sandstorm.io blocklist. * * If you make any changes here, I suggest sending the changes along * to other sandbox maintainers. Using the libseccomp list is also * an appropriate venue: * https://groups.google.com/forum/#!forum/libseccomp * * A non-exhaustive list of links to container tooling that might * want to share this blocklist: * * https://github.com/sandstorm-io/sandstorm * in src/sandstorm/supervisor.c++ * https://github.com/flatpak/flatpak.git * in common/flatpak-run.c * https://git.gnome.org/browse/linux-user-chroot * in src/setup-seccomp.c * * Other useful resources: * https://github.com/systemd/systemd/blob/HEAD/src/shared/seccomp-util.c * https://github.com/moby/moby/blob/HEAD/profiles/seccomp/default.json * **** END NOTE ON CODE SHARING */ struct { int scall; int errnum; struct scmp_arg_cmp *arg; } syscall_blocklist[] = { /* Block dmesg */ {SCMP_SYS (syslog), EPERM}, /* Useless old syscall */ {SCMP_SYS (uselib), EPERM}, /* Don't allow disabling accounting */ {SCMP_SYS (acct), EPERM}, /* 16-bit code is unnecessary in the sandbox, and modify_ldt is a historic source of interesting information leaks. */ {SCMP_SYS (modify_ldt), EPERM}, /* Don't allow reading current quota use */ {SCMP_SYS (quotactl), EPERM}, /* Don't allow access to the kernel keyring */ {SCMP_SYS (add_key), EPERM}, {SCMP_SYS (keyctl), EPERM}, {SCMP_SYS (request_key), EPERM}, /* Scary VM/NUMA ops */ {SCMP_SYS (move_pages), EPERM}, {SCMP_SYS (mbind), EPERM}, {SCMP_SYS (get_mempolicy), EPERM}, {SCMP_SYS (set_mempolicy), EPERM}, {SCMP_SYS (migrate_pages), EPERM}, /* Don't allow subnamespace setups: */ {SCMP_SYS (unshare), EPERM}, {SCMP_SYS (setns), EPERM}, {SCMP_SYS (mount), EPERM}, {SCMP_SYS (umount), EPERM}, {SCMP_SYS (umount2), EPERM}, {SCMP_SYS (pivot_root), EPERM}, {SCMP_SYS (chroot), EPERM}, #if defined(__s390__) || defined(__s390x__) || defined(__CRIS__) /* Architectures with CONFIG_CLONE_BACKWARDS2: the child stack * and flags arguments are reversed so the flags come second */ {SCMP_SYS (clone), EPERM, &SCMP_A1 (SCMP_CMP_MASKED_EQ, CLONE_NEWUSER, CLONE_NEWUSER)}, #else /* Normally the flags come first */ {SCMP_SYS (clone), EPERM, &SCMP_A0 (SCMP_CMP_MASKED_EQ, CLONE_NEWUSER, CLONE_NEWUSER)}, #endif /* Don't allow faking input to the controlling tty (CVE-2017-5226) */ {SCMP_SYS (ioctl), EPERM, &SCMP_A1 (SCMP_CMP_MASKED_EQ, 0xFFFFFFFFu, (int) TIOCSTI)}, /* seccomp can't look into clone3()'s struct clone_args to check whether * the flags are OK, so we have no choice but to block clone3(). * Return ENOSYS so user-space will fall back to clone(). * (GHSA-67h7-w3jq-vh4q; see also https://github.com/moby/moby/commit/9f6b562d) */ {SCMP_SYS (clone3), ENOSYS}, /* New mount manipulation APIs can also change our VFS. 
There's no * legitimate reason to do these in the sandbox, so block all of them * rather than thinking about which ones might be dangerous. * (GHSA-67h7-w3jq-vh4q) */ {SCMP_SYS (open_tree), ENOSYS}, {SCMP_SYS (move_mount), ENOSYS}, {SCMP_SYS (fsopen), ENOSYS}, {SCMP_SYS (fsconfig), ENOSYS}, {SCMP_SYS (fsmount), ENOSYS}, {SCMP_SYS (fspick), ENOSYS}, {SCMP_SYS (mount_setattr), ENOSYS}, }; struct { int scall; int errnum; struct scmp_arg_cmp *arg; } syscall_nondevel_blocklist[] = { /* Profiling operations; we expect these to be done by tools from outside * the sandbox. In particular perf has been the source of many CVEs. */ {SCMP_SYS (perf_event_open), EPERM}, /* Don't allow you to switch to bsd emulation or whatnot */ {SCMP_SYS (personality), EPERM, &SCMP_A0 (SCMP_CMP_NE, allowed_personality)}, {SCMP_SYS (ptrace), EPERM} }; /* Blocklist all but unix, inet, inet6 and netlink */ struct { int family; FlatpakRunFlags flags_mask; } socket_family_allowlist[] = { /* NOTE: Keep in numerical order */ { AF_UNSPEC, 0 }, { AF_LOCAL, 0 }, { AF_INET, 0 }, { AF_INET6, 0 }, { AF_NETLINK, 0 }, { AF_CAN, FLATPAK_RUN_FLAG_CANBUS }, { AF_BLUETOOTH, FLATPAK_RUN_FLAG_BLUETOOTH }, }; int last_allowed_family; int i, r; g_auto(GLnxTmpfile) seccomp_tmpf = { 0, }; seccomp = seccomp_init (SCMP_ACT_ALLOW); if (!seccomp) return flatpak_fail_error (error, FLATPAK_ERROR_SETUP_FAILED, _("Initialize seccomp failed")); if (arch != NULL) { uint32_t arch_id = 0; const uint32_t *extra_arches = NULL; if (strcmp (arch, "i386") == 0) { arch_id = SCMP_ARCH_X86; } else if (strcmp (arch, "x86_64") == 0) { arch_id = SCMP_ARCH_X86_64; extra_arches = seccomp_x86_64_extra_arches; } else if (strcmp (arch, "arm") == 0) { arch_id = SCMP_ARCH_ARM; } #ifdef SCMP_ARCH_AARCH64 else if (strcmp (arch, "aarch64") == 0) { arch_id = SCMP_ARCH_AARCH64; extra_arches = seccomp_aarch64_extra_arches; } #endif /* We only really need to handle arches on multiarch systems. * If only one arch is supported the default is fine */ if (arch_id != 0) { /* This *adds* the target arch, instead of replacing the native one. This is not ideal, because we'd like to only allow the target arch, but we can't really disallow the native arch at this point, because then bubblewrap couldn't continue running. */ r = seccomp_arch_add (seccomp, arch_id); if (r < 0 && r != -EEXIST) return flatpak_fail_error (error, FLATPAK_ERROR_SETUP_FAILED, _("Failed to add architecture to seccomp filter")); if (multiarch && extra_arches != NULL) { for (i = 0; extra_arches[i] != 0; i++) { r = seccomp_arch_add (seccomp, extra_arches[i]); if (r < 0 && r != -EEXIST) return flatpak_fail_error (error, FLATPAK_ERROR_SETUP_FAILED, _("Failed to add multiarch architecture to seccomp filter")); } } } } /* TODO: Should we filter the kernel keyring syscalls in some way? * We do want them to be used by desktop apps, but they could also perhaps * leak system stuff or secrets from other apps. 
*/ for (i = 0; i < G_N_ELEMENTS (syscall_blocklist); i++) { int scall = syscall_blocklist[i].scall; int errnum = syscall_blocklist[i].errnum; g_return_val_if_fail (errnum == EPERM || errnum == ENOSYS, FALSE); if (syscall_blocklist[i].arg) r = seccomp_rule_add (seccomp, SCMP_ACT_ERRNO (errnum), scall, 1, *syscall_blocklist[i].arg); else r = seccomp_rule_add (seccomp, SCMP_ACT_ERRNO (errnum), scall, 0); if (r < 0 && r == -EFAULT /* unknown syscall */) return flatpak_fail_error (error, FLATPAK_ERROR_SETUP_FAILED, _("Failed to block syscall %d"), scall); } if (!devel) { for (i = 0; i < G_N_ELEMENTS (syscall_nondevel_blocklist); i++) { int scall = syscall_nondevel_blocklist[i].scall; int errnum = syscall_nondevel_blocklist[i].errnum; g_return_val_if_fail (errnum == EPERM || errnum == ENOSYS, FALSE); if (syscall_nondevel_blocklist[i].arg) r = seccomp_rule_add (seccomp, SCMP_ACT_ERRNO (errnum), scall, 1, *syscall_nondevel_blocklist[i].arg); else r = seccomp_rule_add (seccomp, SCMP_ACT_ERRNO (errnum), scall, 0); if (r < 0 && r == -EFAULT /* unknown syscall */) return flatpak_fail_error (error, FLATPAK_ERROR_SETUP_FAILED, _("Failed to block syscall %d"), scall); } } /* Socket filtering doesn't work on e.g. i386, so ignore failures here * However, we need to user seccomp_rule_add_exact to avoid libseccomp doing * something else: https://github.com/seccomp/libseccomp/issues/8 */ last_allowed_family = -1; for (i = 0; i < G_N_ELEMENTS (socket_family_allowlist); i++) { int family = socket_family_allowlist[i].family; int disallowed; if (socket_family_allowlist[i].flags_mask != 0 && (socket_family_allowlist[i].flags_mask & run_flags) != socket_family_allowlist[i].flags_mask) continue; for (disallowed = last_allowed_family + 1; disallowed < family; disallowed++) { /* Blocklist the in-between valid families */ seccomp_rule_add_exact (seccomp, SCMP_ACT_ERRNO (EAFNOSUPPORT), SCMP_SYS (socket), 1, SCMP_A0 (SCMP_CMP_EQ, disallowed)); } last_allowed_family = family; } /* Blocklist the rest */ seccomp_rule_add_exact (seccomp, SCMP_ACT_ERRNO (EAFNOSUPPORT), SCMP_SYS (socket), 1, SCMP_A0 (SCMP_CMP_GE, last_allowed_family + 1)); if (!glnx_open_anonymous_tmpfile_full (O_RDWR | O_CLOEXEC, "/tmp", &seccomp_tmpf, error)) return FALSE; if (seccomp_export_bpf (seccomp, seccomp_tmpf.fd) != 0) return flatpak_fail_error (error, FLATPAK_ERROR_SETUP_FAILED, _("Failed to export bpf")); lseek (seccomp_tmpf.fd, 0, SEEK_SET); flatpak_bwrap_add_args_data_fd (bwrap, "--seccomp", glnx_steal_fd (&seccomp_tmpf.fd), NULL); return TRUE; }
98799963199923512278205367577377591800
flatpak-run.c
147844647821640300549119412024985340412
CWE-20
CVE-2021-41133
Flatpak is a system for building, distributing, and running sandboxed desktop applications on Linux. In versions prior to 1.10.4 and 1.12.0, Flatpak apps with direct access to AF_UNIX sockets such as those used by Wayland, Pipewire or pipewire-pulse can trick portals and other host-OS services into treating the Flatpak app as though it was an ordinary, non-sandboxed host-OS process. They can do this by manipulating the VFS using recent mount-related syscalls that are not blocked by Flatpak's denylist seccomp filter, in order to substitute a crafted `/.flatpak-info` or make that file disappear entirely. Flatpak apps that act as clients for AF_UNIX sockets such as those used by Wayland, Pipewire or pipewire-pulse can escalate the privileges that the corresponding services will believe the Flatpak app has. Note that protocols that operate entirely over the D-Bus session bus (user bus), system bus or accessibility bus are not affected by this. This is due to the use of a proxy process `xdg-dbus-proxy`, whose VFS cannot be manipulated by the Flatpak app, when interacting with these buses. Patches exist for versions 1.10.4 and 1.12.0, and as of time of publication, a patch for version 1.8.2 is being planned. There are no workarounds aside from upgrading to a patched version.
https://nvd.nist.gov/vuln/detail/CVE-2021-41133
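For readers unfamiliar with the mechanism, here is a minimal sketch of the libseccomp pattern the blocklist above is built on: start from an allow-all filter, then attach errno-returning rules per syscall (EPERM for chroot, as added in this commit; ENOSYS for the newer mount APIs). This assumes a libseccomp recent enough to know fsopen's syscall number; error handling and the full Flatpak table are omitted.

// Build (assumption): g++ demo.cpp -lseccomp
#include <seccomp.h>
#include <cerrno>
#include <cstdio>
#include <unistd.h>

int main() {
  // Allow everything by default, then deny specific syscalls.
  scmp_filter_ctx ctx = seccomp_init(SCMP_ACT_ALLOW);
  if (ctx == NULL) return 1;

  // chroot() fails with EPERM, matching the rule added by this commit.
  seccomp_rule_add(ctx, SCMP_ACT_ERRNO(EPERM), SCMP_SYS(chroot), 0);
  // Newer mount-manipulation syscalls report ENOSYS so callers fall back.
  seccomp_rule_add(ctx, SCMP_ACT_ERRNO(ENOSYS), SCMP_SYS(fsopen), 0);

  if (seccomp_load(ctx) != 0) { seccomp_release(ctx); return 1; }
  seccomp_release(ctx);

  // The filter now makes chroot() fail with EPERM regardless of privileges.
  if (chroot("/") != 0) std::perror("chroot");
  return 0;
}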
195,331
tensorflow
08d7b00c0a5a20926363849f611729f53f3ec022
https://github.com/tensorflow/tensorflow
https://github.com/tensorflow/tensorflow/commit/08d7b00c0a5a20926363849f611729f53f3ec022
Fix Segfault in Concat V2 shape function. PiperOrigin-RevId: 412120654 Change-Id: I3ff915faea694f9ad8b00024e9af2de9909011be
1
Status ConcatShapeHelper(InferenceContext* c, int start_value_index, int end_value_index, int dim_index) { ShapeHandle unused; TF_RETURN_IF_ERROR(c->WithRank(c->input(dim_index), 0, &unused)); const Tensor* concat_dim_t = c->input_tensor(dim_index); if (concat_dim_t == nullptr) { // Return an unknown shape with same rank as inputs, or an unknown rank // if no input's rank is known. // Find rank. int32_t rank = InferenceContext::kUnknownRank; for (int i = start_value_index; i < end_value_index; ++i) { if (rank == InferenceContext::kUnknownRank) rank = c->Rank(c->input(i)); if (rank != InferenceContext::kUnknownRank) { break; } } if (rank == InferenceContext::kUnknownRank) { c->set_output(0, c->UnknownShape()); return Status::OK(); } else if (rank == 0) { return errors::InvalidArgument( "Can't concatenate scalars (use tf.stack instead)"); } else { for (int i = start_value_index; i < end_value_index; ++i) { // Check that all the inputs are of the correct rank. TF_RETURN_IF_ERROR(c->WithRank(c->input(i), rank, &unused)); } } // Build result of <rank> different unknown dims. std::vector<DimensionHandle> dims; dims.reserve(rank); for (int i = 0; i < rank; ++i) dims.push_back(c->UnknownDim()); c->set_output(0, c->MakeShape(dims)); return Status::OK(); } // Merge all the non-concat dims, and sum the concat dim to make an output // shape. int64_t concat_dim; if (concat_dim_t->dtype() == DT_INT32) { concat_dim = static_cast<int64_t>(concat_dim_t->flat<int32>()(0)); } else { concat_dim = concat_dim_t->flat<int64_t>()(0); } // Minimum required number of dimensions. const int min_rank = concat_dim < 0 ? -concat_dim : concat_dim + 1; ShapeHandle output_before; ShapeHandle output_after; ShapeHandle input = c->input(end_value_index - 1); TF_RETURN_IF_ERROR(c->WithRankAtLeast(input, min_rank, &input)); TF_RETURN_IF_ERROR(c->Subshape(input, 0, concat_dim, &output_before)); DimensionHandle output_middle = c->Dim(input, concat_dim); if (concat_dim == -1) { output_after = c->Scalar(); // no dimensions. } else { TF_RETURN_IF_ERROR(c->Subshape(input, concat_dim + 1, &output_after)); } for (int i = end_value_index - 2; i >= start_value_index; --i) { ShapeHandle before; ShapeHandle after; input = c->input(i); TF_RETURN_IF_ERROR(c->WithRankAtLeast(input, min_rank, &input)); TF_RETURN_IF_ERROR(c->Subshape(input, 0, concat_dim, &before)); DimensionHandle middle = c->Dim(input, concat_dim); if (concat_dim == -1) { after = c->Scalar(); } else { TF_RETURN_IF_ERROR(c->Subshape(input, concat_dim + 1, &after)); } TF_RETURN_IF_ERROR(c->Merge(before, output_before, &output_before)); TF_RETURN_IF_ERROR(c->Add(output_middle, middle, &output_middle)); TF_RETURN_IF_ERROR(c->Merge(after, output_after, &output_after)); } ShapeHandle s; TF_RETURN_IF_ERROR( c->Concatenate(output_before, c->Vector(output_middle), &s)); TF_RETURN_IF_ERROR(c->Concatenate(s, output_after, &s)); c->set_output(0, s); return Status::OK(); }
115004012549325804010611397133680502113
common_shape_fns.cc
114394888048780454732842913577124501919
CWE-843
CVE-2022-21731
Tensorflow is an Open Source Machine Learning Framework. The implementation of shape inference for `ConcatV2` can be used to trigger a denial of service attack via a segfault caused by a type confusion. The `axis` argument is translated into `concat_dim` in the `ConcatShapeHelper` helper function. Then, a value for `min_rank` is computed based on `concat_dim`. This is then used to validate that the `values` tensor has at least the required rank. However, `WithRankAtLeast` receives the lower bound as a 64-bit value and then compares it against the maximum representable 32-bit integer value. Because `min_rank` is only a 32-bit value, a suitably chosen `axis` causes it to wrap to a negative value, so the error check is bypassed. The fix will be included in TensorFlow 2.8.0. We will also cherrypick this commit on TensorFlow 2.7.1, TensorFlow 2.6.3, and TensorFlow 2.5.3, as these are also affected and still in supported range.
https://nvd.nist.gov/vuln/detail/CVE-2022-21731
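The paired fixed record below changes `min_rank` from `int` to `int64`, which matches the narrowing problem described above. As a hedged, standalone illustration (not TensorFlow code; the values and the simplified comparison are made up), this sketch shows how truncating a 64-bit `concat_dim`-derived bound into a 32-bit `int` can turn the rank check into a no-op:

```c
/* Sketch of the narrowing problem described above; the variable names echo
 * the TensorFlow code but the check itself is simplified and hypothetical. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
    /* Attacker-controlled axis, read from a 64-bit tensor element. */
    int64_t concat_dim = INT64_MIN + 1;          /* a huge negative axis  */

    /* Vulnerable pattern: result stored in a 32-bit int (implementation-
     * defined truncation), as in `const int min_rank = ...`. */
    int     min_rank_narrow = concat_dim < 0 ? -concat_dim : concat_dim + 1;

    /* Fixed pattern: keep the full 64-bit width. */
    int64_t min_rank_wide   = concat_dim < 0 ? -concat_dim : concat_dim + 1;

    int32_t actual_rank = 1;                      /* rank of the input     */

    printf("narrow min_rank = %d  -> check %s\n", min_rank_narrow,
           actual_rank >= min_rank_narrow ? "passes (bug)" : "fails");
    printf("wide   min_rank = %lld -> check %s\n", (long long) min_rank_wide,
           (int64_t) actual_rank >= min_rank_wide ? "passes" : "fails (caught)");
    return 0;
}
```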
224,570
tensorflow
08d7b00c0a5a20926363849f611729f53f3ec022
https://github.com/tensorflow/tensorflow
https://github.com/tensorflow/tensorflow/commit/08d7b00c0a5a20926363849f611729f53f3ec022
Fix Segfault in Concat V2 shape function. PiperOrigin-RevId: 412120654 Change-Id: I3ff915faea694f9ad8b00024e9af2de9909011be
0
Status ConcatShapeHelper(InferenceContext* c, int start_value_index, int end_value_index, int dim_index) { ShapeHandle unused; TF_RETURN_IF_ERROR(c->WithRank(c->input(dim_index), 0, &unused)); const Tensor* concat_dim_t = c->input_tensor(dim_index); if (concat_dim_t == nullptr) { // Return an unknown shape with same rank as inputs, or an unknown rank // if no input's rank is known. // Find rank. int32_t rank = InferenceContext::kUnknownRank; for (int i = start_value_index; i < end_value_index; ++i) { if (rank == InferenceContext::kUnknownRank) rank = c->Rank(c->input(i)); if (rank != InferenceContext::kUnknownRank) { break; } } if (rank == InferenceContext::kUnknownRank) { c->set_output(0, c->UnknownShape()); return Status::OK(); } else if (rank == 0) { return errors::InvalidArgument( "Can't concatenate scalars (use tf.stack instead)"); } else { for (int i = start_value_index; i < end_value_index; ++i) { // Check that all the inputs are of the correct rank. TF_RETURN_IF_ERROR(c->WithRank(c->input(i), rank, &unused)); } } // Build result of <rank> different unknown dims. std::vector<DimensionHandle> dims; dims.reserve(rank); for (int i = 0; i < rank; ++i) dims.push_back(c->UnknownDim()); c->set_output(0, c->MakeShape(dims)); return Status::OK(); } // Merge all the non-concat dims, and sum the concat dim to make an output // shape. int64_t concat_dim; if (concat_dim_t->dtype() == DT_INT32) { concat_dim = static_cast<int64_t>(concat_dim_t->flat<int32>()(0)); } else { concat_dim = concat_dim_t->flat<int64_t>()(0); } // Minimum required number of dimensions. const int64 min_rank = concat_dim < 0 ? -concat_dim : concat_dim + 1; ShapeHandle output_before; ShapeHandle output_after; ShapeHandle input = c->input(end_value_index - 1); TF_RETURN_IF_ERROR(c->WithRankAtLeast(input, min_rank, &input)); TF_RETURN_IF_ERROR(c->Subshape(input, 0, concat_dim, &output_before)); DimensionHandle output_middle = c->Dim(input, concat_dim); if (concat_dim == -1) { output_after = c->Scalar(); // no dimensions. } else { TF_RETURN_IF_ERROR(c->Subshape(input, concat_dim + 1, &output_after)); } for (int i = end_value_index - 2; i >= start_value_index; --i) { ShapeHandle before; ShapeHandle after; input = c->input(i); TF_RETURN_IF_ERROR(c->WithRankAtLeast(input, min_rank, &input)); TF_RETURN_IF_ERROR(c->Subshape(input, 0, concat_dim, &before)); DimensionHandle middle = c->Dim(input, concat_dim); if (concat_dim == -1) { after = c->Scalar(); } else { TF_RETURN_IF_ERROR(c->Subshape(input, concat_dim + 1, &after)); } TF_RETURN_IF_ERROR(c->Merge(before, output_before, &output_before)); TF_RETURN_IF_ERROR(c->Add(output_middle, middle, &output_middle)); TF_RETURN_IF_ERROR(c->Merge(after, output_after, &output_after)); } ShapeHandle s; TF_RETURN_IF_ERROR( c->Concatenate(output_before, c->Vector(output_middle), &s)); TF_RETURN_IF_ERROR(c->Concatenate(s, output_after, &s)); c->set_output(0, s); return Status::OK(); }
224848617993634630925206364943386826300
common_shape_fns.cc
56524265073448960566855170405566896543
CWE-843
CVE-2022-21731
Tensorflow is an Open Source Machine Learning Framework. The implementation of shape inference for `ConcatV2` can be used to trigger a denial of service attack via a segfault caused by a type confusion. The `axis` argument is translated into `concat_dim` in the `ConcatShapeHelper` helper function. Then, a value for `min_rank` is computed based on `concat_dim`. This is then used to validate that the `values` tensor has at least the required rank. However, `WithRankAtLeast` receives the lower bound as a 64-bit value and then compares it against the maximum representable 32-bit integer value. Because `min_rank` is only a 32-bit value, a suitably chosen `axis` causes it to wrap to a negative value, so the error check is bypassed. The fix will be included in TensorFlow 2.8.0. We will also cherrypick this commit on TensorFlow 2.7.1, TensorFlow 2.6.3, and TensorFlow 2.5.3, as these are also affected and still in supported range.
https://nvd.nist.gov/vuln/detail/CVE-2022-21731
195,334
gpac
b03c9f252526bb42fbd1b87b9f5e339c3cf2390a
https://github.com/gpac/gpac
https://github.com/gpac/gpac/commit/b03c9f252526bb42fbd1b87b9f5e339c3cf2390a
fixed #1890
1
GF_Err iloc_box_read(GF_Box *s, GF_BitStream *bs) { u32 item_count, extent_count, i, j; GF_ItemLocationBox *ptr = (GF_ItemLocationBox *)s; ISOM_DECREASE_SIZE(ptr, 2) ptr->offset_size = gf_bs_read_int(bs, 4); ptr->length_size = gf_bs_read_int(bs, 4); ptr->base_offset_size = gf_bs_read_int(bs, 4); if (ptr->version == 1 || ptr->version == 2) { ptr->index_size = gf_bs_read_int(bs, 4); } else { gf_bs_read_int(bs, 4); } if (ptr->version < 2) { ISOM_DECREASE_SIZE(ptr, 2) item_count = gf_bs_read_u16(bs); } else { ISOM_DECREASE_SIZE(ptr, 4) item_count = gf_bs_read_u32(bs); } for (i = 0; i < item_count; i++) { GF_ItemLocationEntry *location_entry = (GF_ItemLocationEntry *)gf_malloc(sizeof(GF_ItemLocationEntry)); if (!location_entry) return GF_OUT_OF_MEM; gf_list_add(ptr->location_entries, location_entry); if (ptr->version < 2) { ISOM_DECREASE_SIZE(ptr, 2) location_entry->item_ID = gf_bs_read_u16(bs); } else { ISOM_DECREASE_SIZE(ptr, 4) location_entry->item_ID = gf_bs_read_u32(bs); } if (ptr->version == 1 || ptr->version == 2) { ISOM_DECREASE_SIZE(ptr, 2) location_entry->construction_method = gf_bs_read_u16(bs); } else { location_entry->construction_method = 0; } ISOM_DECREASE_SIZE(ptr, (2 + ptr->base_offset_size) ) location_entry->data_reference_index = gf_bs_read_u16(bs); location_entry->base_offset = gf_bs_read_int(bs, 8*ptr->base_offset_size); #ifndef GPAC_DISABLE_ISOM_WRITE location_entry->original_base_offset = location_entry->base_offset; #endif ISOM_DECREASE_SIZE(ptr, 2) extent_count = gf_bs_read_u16(bs); location_entry->extent_entries = gf_list_new(); for (j = 0; j < extent_count; j++) { GF_ItemExtentEntry *extent_entry = (GF_ItemExtentEntry *)gf_malloc(sizeof(GF_ItemExtentEntry)); if (!extent_entry) return GF_OUT_OF_MEM; gf_list_add(location_entry->extent_entries, extent_entry); if ((ptr->version == 1 || ptr->version == 2) && ptr->index_size > 0) { ISOM_DECREASE_SIZE(ptr, ptr->index_size) extent_entry->extent_index = gf_bs_read_int(bs, 8 * ptr->index_size); } else { extent_entry->extent_index = 0; } ISOM_DECREASE_SIZE(ptr, (ptr->offset_size+ptr->length_size) ) extent_entry->extent_offset = gf_bs_read_int(bs, 8*ptr->offset_size); extent_entry->extent_length = gf_bs_read_int(bs, 8*ptr->length_size); #ifndef GPAC_DISABLE_ISOM_WRITE extent_entry->original_extent_offset = extent_entry->extent_offset; #endif } } return GF_OK; }
85275035202223574859308673912965262169
box_code_meta.c
315220373545459860670428553876078791185
CWE-415
CVE-2021-40573
The binary MP4Box in Gpac 1.0.1 has a double-free vulnerability in the gf_list_del function in list.c, which allows attackers to cause a denial of service.
https://nvd.nist.gov/vuln/detail/CVE-2021-40573
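The companion fixed record below swaps the plain `gf_malloc` calls for `GF_SAFEALLOC`, which zero-initializes each new entry. A plausible reading, sketched here with invented names (this is not GPAC code), is that an early error return can leave pointer fields uninitialized, and generic cleanup then frees whatever garbage they contain, which is one way the reported double free in `gf_list_del` can surface:

```c
/* Sketch of why zero-initializing each new entry matters: an early error
 * return can hand a half-initialized entry to generic cleanup code, which
 * then frees whatever garbage the pointer field happens to contain. */
#include <stdlib.h>

struct entry {
    void *extents;   /* set later, after more parsing succeeds */
};

static struct entry *make_entry_unsafe(void) {
    return malloc(sizeof(struct entry));          /* extents is garbage  */
}

static struct entry *make_entry_safe(void) {
    return calloc(1, sizeof(struct entry));       /* extents is NULL     */
}

static void destroy_entry(struct entry *e) {
    if (!e) return;
    free(e->extents);    /* free(NULL) is a no-op; free(garbage) is not  */
    free(e);
}

int main(void) {
    /* Simulate the error path: the entry is created, parsing then fails
     * before e->extents is assigned, and cleanup runs. */
    destroy_entry(make_entry_safe());     /* well-defined                 */
    /* destroy_entry(make_entry_unsafe()); would free an indeterminate
     * pointer, which can manifest as the double free reported above.     */
    (void) make_entry_unsafe;             /* silence unused warning       */
    return 0;
}
```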
224,728
gpac
b03c9f252526bb42fbd1b87b9f5e339c3cf2390a
https://github.com/gpac/gpac
https://github.com/gpac/gpac/commit/b03c9f252526bb42fbd1b87b9f5e339c3cf2390a
fixed #1890
0
GF_Err iloc_box_read(GF_Box *s, GF_BitStream *bs) { u32 item_count, extent_count, i, j; GF_ItemLocationBox *ptr = (GF_ItemLocationBox *)s; ISOM_DECREASE_SIZE(ptr, 2) ptr->offset_size = gf_bs_read_int(bs, 4); ptr->length_size = gf_bs_read_int(bs, 4); ptr->base_offset_size = gf_bs_read_int(bs, 4); if (ptr->version == 1 || ptr->version == 2) { ptr->index_size = gf_bs_read_int(bs, 4); } else { gf_bs_read_int(bs, 4); } if (ptr->version < 2) { ISOM_DECREASE_SIZE(ptr, 2) item_count = gf_bs_read_u16(bs); } else { ISOM_DECREASE_SIZE(ptr, 4) item_count = gf_bs_read_u32(bs); } for (i = 0; i < item_count; i++) { GF_ItemLocationEntry *location_entry; GF_SAFEALLOC(location_entry, GF_ItemLocationEntry); if (!location_entry) return GF_OUT_OF_MEM; gf_list_add(ptr->location_entries, location_entry); if (ptr->version < 2) { ISOM_DECREASE_SIZE(ptr, 2) location_entry->item_ID = gf_bs_read_u16(bs); } else { ISOM_DECREASE_SIZE(ptr, 4) location_entry->item_ID = gf_bs_read_u32(bs); } if (ptr->version == 1 || ptr->version == 2) { ISOM_DECREASE_SIZE(ptr, 2) location_entry->construction_method = gf_bs_read_u16(bs); } else { location_entry->construction_method = 0; } ISOM_DECREASE_SIZE(ptr, (2 + ptr->base_offset_size) ) location_entry->data_reference_index = gf_bs_read_u16(bs); location_entry->base_offset = gf_bs_read_int(bs, 8*ptr->base_offset_size); #ifndef GPAC_DISABLE_ISOM_WRITE location_entry->original_base_offset = location_entry->base_offset; #endif ISOM_DECREASE_SIZE(ptr, 2) extent_count = gf_bs_read_u16(bs); location_entry->extent_entries = gf_list_new(); for (j = 0; j < extent_count; j++) { GF_ItemExtentEntry *extent_entry; GF_SAFEALLOC(extent_entry, GF_ItemExtentEntry); if (!extent_entry) return GF_OUT_OF_MEM; gf_list_add(location_entry->extent_entries, extent_entry); if ((ptr->version == 1 || ptr->version == 2) && ptr->index_size > 0) { ISOM_DECREASE_SIZE(ptr, ptr->index_size) extent_entry->extent_index = gf_bs_read_int(bs, 8 * ptr->index_size); } else { extent_entry->extent_index = 0; } ISOM_DECREASE_SIZE(ptr, (ptr->offset_size+ptr->length_size) ) extent_entry->extent_offset = gf_bs_read_int(bs, 8*ptr->offset_size); extent_entry->extent_length = gf_bs_read_int(bs, 8*ptr->length_size); #ifndef GPAC_DISABLE_ISOM_WRITE extent_entry->original_extent_offset = extent_entry->extent_offset; #endif } } return GF_OK; }
326603429121921341428150431158212875495
box_code_meta.c
99521214022509816524954281629288020612
CWE-415
CVE-2021-40573
The binary MP4Box in Gpac 1.0.1 has a double-free vulnerability in the gf_list_del function in list.c, which allows attackers to cause a denial of service.
https://nvd.nist.gov/vuln/detail/CVE-2021-40573
195,343
tensorflow
002408c3696b173863228223d535f9de72a101a9
https://github.com/tensorflow/tensorflow
https://github.com/tensorflow/tensorflow/commit/002408c3696b173863228223d535f9de72a101a9
Add negative bound check for row and column pooling_sequence in FractionalAvgPoolGrad op to avoid out of bound heap access PiperOrigin-RevId: 413837346 Change-Id: I2b86034101df31bee161abcb781755e236c7bccd
1
void Compute(OpKernelContext* context) override { // Here's the basic idea: // Batch and depth dimension are independent from row and col dimension. And // because FractionalAvgPool currently only support pooling along row and // col, we can basically think of this 4D tensor backpropagation as // operation of a series of 2D planes. // // For each element of a 'slice' (2D plane) of output_backprop, we need to // figure out its contributors when doing FractionalAvgPool operation. This // can be done based on row_pooling_sequence, col_pooling_seq and // overlapping. // Once we figure out the original contributors, we just need to evenly // divide the value of this element among these contributors. // // Internally, we divide the out_backprop tensor and store it in a temporary // tensor of double type. And cast it to the corresponding type. typedef Eigen::Map<const Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic>> ConstEigenMatrixMap; typedef Eigen::Map<Eigen::Matrix<double, Eigen::Dynamic, Eigen::Dynamic>> EigenDoubleMatrixMap; // Grab the inputs. const Tensor& orig_input_tensor_shape = context->input(0); OP_REQUIRES(context, orig_input_tensor_shape.dims() == 1 && orig_input_tensor_shape.NumElements() == 4, errors::InvalidArgument("original input tensor shape must be" "1-dimensional and 4 elements")); const Tensor& out_backprop = context->input(1); const Tensor& row_seq_tensor = context->input(2); const Tensor& col_seq_tensor = context->input(3); const int64_t out_batch = out_backprop.dim_size(0); const int64_t out_rows = out_backprop.dim_size(1); const int64_t out_cols = out_backprop.dim_size(2); const int64_t out_depth = out_backprop.dim_size(3); OP_REQUIRES(context, row_seq_tensor.NumElements() > out_rows, errors::InvalidArgument("Given out_backprop shape ", out_backprop.shape().DebugString(), ", row_seq_tensor must have at least ", out_rows + 1, " elements, but got ", row_seq_tensor.NumElements())); OP_REQUIRES(context, col_seq_tensor.NumElements() > out_cols, errors::InvalidArgument("Given out_backprop shape ", out_backprop.shape().DebugString(), ", col_seq_tensor must have at least ", out_cols + 1, " elements, but got ", col_seq_tensor.NumElements())); auto row_seq_tensor_flat = row_seq_tensor.flat<int64_t>(); auto col_seq_tensor_flat = col_seq_tensor.flat<int64_t>(); auto orig_input_tensor_shape_flat = orig_input_tensor_shape.flat<int64_t>(); const int64_t in_batch = orig_input_tensor_shape_flat(0); const int64_t in_rows = orig_input_tensor_shape_flat(1); const int64_t in_cols = orig_input_tensor_shape_flat(2); const int64_t in_depth = orig_input_tensor_shape_flat(3); OP_REQUIRES( context, in_batch != 0, errors::InvalidArgument("Batch dimension of input must not be 0")); OP_REQUIRES( context, in_rows != 0, errors::InvalidArgument("Rows dimension of input must not be 0")); OP_REQUIRES( context, in_cols != 0, errors::InvalidArgument("Columns dimension of input must not be 0")); OP_REQUIRES( context, in_depth != 0, errors::InvalidArgument("Depth dimension of input must not be 0")); constexpr int tensor_in_and_out_dims = 4; // Transform orig_input_tensor_shape into TensorShape TensorShape in_shape; for (auto i = 0; i < tensor_in_and_out_dims; ++i) { in_shape.AddDim(orig_input_tensor_shape_flat(i)); } // Create intermediate in_backprop. Tensor in_backprop_tensor_temp; OP_REQUIRES_OK(context, context->forward_input_or_allocate_temp( {0}, DataTypeToEnum<double>::v(), in_shape, &in_backprop_tensor_temp)); in_backprop_tensor_temp.flat<double>().setZero(); // Transform 4D tensor to 2D matrix. 
EigenDoubleMatrixMap in_backprop_tensor_temp_mat( in_backprop_tensor_temp.flat<double>().data(), in_depth, in_cols * in_rows * in_batch); ConstEigenMatrixMap out_backprop_mat(out_backprop.flat<T>().data(), out_depth, out_cols * out_rows * out_batch); // Loop through each element of out_backprop and evenly distribute the // element to the corresponding pooling cell. const int64_t in_max_row_index = in_rows - 1; const int64_t in_max_col_index = in_cols - 1; for (int64_t b = 0; b < out_batch; ++b) { for (int64_t r = 0; r < out_rows; ++r) { const int64_t in_row_start = row_seq_tensor_flat(r); int64_t in_row_end = overlapping_ ? row_seq_tensor_flat(r + 1) : row_seq_tensor_flat(r + 1) - 1; in_row_end = std::min(in_row_end, in_max_row_index); for (int64_t c = 0; c < out_cols; ++c) { const int64_t in_col_start = col_seq_tensor_flat(c); int64_t in_col_end = overlapping_ ? col_seq_tensor_flat(c + 1) : col_seq_tensor_flat(c + 1) - 1; in_col_end = std::min(in_col_end, in_max_col_index); const int64_t num_elements_in_pooling_cell = (in_row_end - in_row_start + 1) * (in_col_end - in_col_start + 1); const int64_t out_index = (b * out_rows + r) * out_cols + c; // Now we can evenly distribute out_backprop(b, h, w, *) to // in_backprop(b, hs:he, ws:we, *). for (int64_t in_r = in_row_start; in_r <= in_row_end; ++in_r) { for (int64_t in_c = in_col_start; in_c <= in_col_end; ++in_c) { const int64_t in_index = (b * in_rows + in_r) * in_cols + in_c; // Walk through each channel (depth). for (int64_t d = 0; d < out_depth; ++d) { const double out_backprop_element = static_cast<double>( out_backprop_mat.coeffRef(d, out_index)); double& in_backprop_ref = in_backprop_tensor_temp_mat.coeffRef(d, in_index); in_backprop_ref += out_backprop_element / num_elements_in_pooling_cell; } } } } } } // Depending on the type, cast double to type T. Tensor* in_backprop_tensor = nullptr; OP_REQUIRES_OK(context, context->forward_input_or_allocate_output( {0}, 0, in_shape, &in_backprop_tensor)); auto in_backprop_tensor_flat = in_backprop_tensor->flat<T>(); auto in_backprop_tensor_temp_flat = in_backprop_tensor_temp.flat<double>(); for (int64_t i = 0; i < in_backprop_tensor_flat.size(); ++i) { in_backprop_tensor_flat(i) = static_cast<T>(in_backprop_tensor_temp_flat(i)); } }
91555834572386312187860770421206034544
fractional_avg_pool_op.cc
221866619851129952189561551151828727755
CWE-125
CVE-2022-21730
Tensorflow is an Open Source Machine Learning Framework. The implementation of `FractionalAvgPoolGrad` does not consider cases where the input tensors are invalid, allowing an attacker to read outside the bounds of the heap. The fix will be included in TensorFlow 2.8.0. We will also cherrypick this commit on TensorFlow 2.7.1, TensorFlow 2.6.3, and TensorFlow 2.5.3, as these are also affected and still in supported range.
https://nvd.nist.gov/vuln/detail/CVE-2022-21730
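The fixed record below adds `OP_REQUIRES` checks that the row and column start/end indices are non-negative. The following standalone sketch (buffer size and values are made up) shows how a negative pooling-sequence entry becomes a negative flat index, i.e. a read before the start of the buffer:

```c
/* Sketch of the out-of-bounds read described above: a negative value in
 * the (attacker-supplied) pooling sequence turns into a negative element
 * index into a flat buffer.  The fixed kernel rejects such values. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
    double grid[4 * 4] = {0};         /* stand-in for one 2D plane         */
    const int64_t cols = 4;

    int64_t in_row_start = -3;        /* untrusted row_pooling_sequence[r] */
    int64_t in_col_start = 1;

    int64_t index = in_row_start * cols + in_col_start;   /* = -11         */

    if (in_row_start < 0 || in_col_start < 0) {
        /* Fixed behaviour: reject negative sequence values up front. */
        fprintf(stderr, "pooling sequence values must not be negative\n");
        return 1;
    }

    /* Vulnerable behaviour would read grid[index] with index < 0,
     * i.e. memory before the start of the buffer. */
    printf("%f\n", grid[index]);
    return 0;
}
```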
224,891
tensorflow
002408c3696b173863228223d535f9de72a101a9
https://github.com/tensorflow/tensorflow
https://github.com/tensorflow/tensorflow/commit/002408c3696b173863228223d535f9de72a101a9
Add negative bound check for row and column pooling_sequence in FractionalAvgPoolGrad op to avoid out of bound heap access PiperOrigin-RevId: 413837346 Change-Id: I2b86034101df31bee161abcb781755e236c7bccd
0
void Compute(OpKernelContext* context) override { // Here's the basic idea: // Batch and depth dimension are independent from row and col dimension. And // because FractionalAvgPool currently only support pooling along row and // col, we can basically think of this 4D tensor backpropagation as // operation of a series of 2D planes. // // For each element of a 'slice' (2D plane) of output_backprop, we need to // figure out its contributors when doing FractionalAvgPool operation. This // can be done based on row_pooling_sequence, col_pooling_seq and // overlapping. // Once we figure out the original contributors, we just need to evenly // divide the value of this element among these contributors. // // Internally, we divide the out_backprop tensor and store it in a temporary // tensor of double type. And cast it to the corresponding type. typedef Eigen::Map<const Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic>> ConstEigenMatrixMap; typedef Eigen::Map<Eigen::Matrix<double, Eigen::Dynamic, Eigen::Dynamic>> EigenDoubleMatrixMap; // Grab the inputs. const Tensor& orig_input_tensor_shape = context->input(0); OP_REQUIRES(context, orig_input_tensor_shape.dims() == 1 && orig_input_tensor_shape.NumElements() == 4, errors::InvalidArgument("original input tensor shape must be" "1-dimensional and 4 elements")); const Tensor& out_backprop = context->input(1); const Tensor& row_seq_tensor = context->input(2); const Tensor& col_seq_tensor = context->input(3); const int64_t out_batch = out_backprop.dim_size(0); const int64_t out_rows = out_backprop.dim_size(1); const int64_t out_cols = out_backprop.dim_size(2); const int64_t out_depth = out_backprop.dim_size(3); OP_REQUIRES(context, row_seq_tensor.NumElements() > out_rows, errors::InvalidArgument("Given out_backprop shape ", out_backprop.shape().DebugString(), ", row_seq_tensor must have at least ", out_rows + 1, " elements, but got ", row_seq_tensor.NumElements())); OP_REQUIRES(context, col_seq_tensor.NumElements() > out_cols, errors::InvalidArgument("Given out_backprop shape ", out_backprop.shape().DebugString(), ", col_seq_tensor must have at least ", out_cols + 1, " elements, but got ", col_seq_tensor.NumElements())); auto row_seq_tensor_flat = row_seq_tensor.flat<int64_t>(); auto col_seq_tensor_flat = col_seq_tensor.flat<int64_t>(); auto orig_input_tensor_shape_flat = orig_input_tensor_shape.flat<int64_t>(); const int64_t in_batch = orig_input_tensor_shape_flat(0); const int64_t in_rows = orig_input_tensor_shape_flat(1); const int64_t in_cols = orig_input_tensor_shape_flat(2); const int64_t in_depth = orig_input_tensor_shape_flat(3); OP_REQUIRES( context, in_batch != 0, errors::InvalidArgument("Batch dimension of input must not be 0")); OP_REQUIRES( context, in_rows != 0, errors::InvalidArgument("Rows dimension of input must not be 0")); OP_REQUIRES( context, in_cols != 0, errors::InvalidArgument("Columns dimension of input must not be 0")); OP_REQUIRES( context, in_depth != 0, errors::InvalidArgument("Depth dimension of input must not be 0")); constexpr int tensor_in_and_out_dims = 4; // Transform orig_input_tensor_shape into TensorShape TensorShape in_shape; for (auto i = 0; i < tensor_in_and_out_dims; ++i) { in_shape.AddDim(orig_input_tensor_shape_flat(i)); } // Create intermediate in_backprop. Tensor in_backprop_tensor_temp; OP_REQUIRES_OK(context, context->forward_input_or_allocate_temp( {0}, DataTypeToEnum<double>::v(), in_shape, &in_backprop_tensor_temp)); in_backprop_tensor_temp.flat<double>().setZero(); // Transform 4D tensor to 2D matrix. 
EigenDoubleMatrixMap in_backprop_tensor_temp_mat( in_backprop_tensor_temp.flat<double>().data(), in_depth, in_cols * in_rows * in_batch); ConstEigenMatrixMap out_backprop_mat(out_backprop.flat<T>().data(), out_depth, out_cols * out_rows * out_batch); // Loop through each element of out_backprop and evenly distribute the // element to the corresponding pooling cell. const int64_t in_max_row_index = in_rows - 1; const int64_t in_max_col_index = in_cols - 1; for (int64_t b = 0; b < out_batch; ++b) { for (int64_t r = 0; r < out_rows; ++r) { const int64_t in_row_start = row_seq_tensor_flat(r); int64_t in_row_end = overlapping_ ? row_seq_tensor_flat(r + 1) : row_seq_tensor_flat(r + 1) - 1; in_row_end = std::min(in_row_end, in_max_row_index); OP_REQUIRES(context, in_row_start >= 0 && in_row_end >= 0, errors::InvalidArgument( "Row sequence tensor values must not be negative, got ", row_seq_tensor_flat)); for (int64_t c = 0; c < out_cols; ++c) { const int64_t in_col_start = col_seq_tensor_flat(c); int64_t in_col_end = overlapping_ ? col_seq_tensor_flat(c + 1) : col_seq_tensor_flat(c + 1) - 1; in_col_end = std::min(in_col_end, in_max_col_index); OP_REQUIRES( context, in_col_start >= 0 && in_col_end >= 0, errors::InvalidArgument( "Column sequence tensor values must not be negative, got ", col_seq_tensor_flat)); const int64_t num_elements_in_pooling_cell = (in_row_end - in_row_start + 1) * (in_col_end - in_col_start + 1); const int64_t out_index = (b * out_rows + r) * out_cols + c; // Now we can evenly distribute out_backprop(b, h, w, *) to // in_backprop(b, hs:he, ws:we, *). for (int64_t in_r = in_row_start; in_r <= in_row_end; ++in_r) { for (int64_t in_c = in_col_start; in_c <= in_col_end; ++in_c) { const int64_t in_index = (b * in_rows + in_r) * in_cols + in_c; // Walk through each channel (depth). for (int64_t d = 0; d < out_depth; ++d) { const double out_backprop_element = static_cast<double>( out_backprop_mat.coeffRef(d, out_index)); double& in_backprop_ref = in_backprop_tensor_temp_mat.coeffRef(d, in_index); in_backprop_ref += out_backprop_element / num_elements_in_pooling_cell; } } } } } } // Depending on the type, cast double to type T. Tensor* in_backprop_tensor = nullptr; OP_REQUIRES_OK(context, context->forward_input_or_allocate_output( {0}, 0, in_shape, &in_backprop_tensor)); auto in_backprop_tensor_flat = in_backprop_tensor->flat<T>(); auto in_backprop_tensor_temp_flat = in_backprop_tensor_temp.flat<double>(); for (int64_t i = 0; i < in_backprop_tensor_flat.size(); ++i) { in_backprop_tensor_flat(i) = static_cast<T>(in_backprop_tensor_temp_flat(i)); } }
7916647560171328705762237734439424087
fractional_avg_pool_op.cc
273616659727040190050886150342189772450
CWE-125
CVE-2022-21730
Tensorflow is an Open Source Machine Learning Framework. The implementation of `FractionalAvgPoolGrad` does not consider cases where the input tensors are invalid, allowing an attacker to read outside the bounds of the heap. The fix will be included in TensorFlow 2.8.0. We will also cherrypick this commit on TensorFlow 2.7.1, TensorFlow 2.6.3, and TensorFlow 2.5.3, as these are also affected and still in supported range.
https://nvd.nist.gov/vuln/detail/CVE-2022-21730
195,385
flatpak
65cbfac982cb1c83993a9e19aa424daee8e9f042
https://github.com/flatpak/flatpak
https://github.com/flatpak/flatpak/commit/65cbfac982cb1c83993a9e19aa424daee8e9f042
Ensure that bundles have metadata on install If we have a bundle without metadata we wouldn't properly present the permissions in the transaction.
1
flatpak_dir_ensure_bundle_remote (FlatpakDir *self, GFile *file, GBytes *extra_gpg_data, FlatpakDecomposed **out_ref, char **out_checksum, char **out_metadata, gboolean *out_created_remote, GCancellable *cancellable, GError **error) { g_autoptr(FlatpakDecomposed) ref = NULL; gboolean created_remote = FALSE; g_autoptr(GBytes) deploy_data = NULL; g_autoptr(GVariant) metadata = NULL; g_autofree char *origin = NULL; g_autofree char *fp_metadata = NULL; g_autofree char *basename = NULL; g_autoptr(GBytes) included_gpg_data = NULL; GBytes *gpg_data = NULL; g_autofree char *to_checksum = NULL; g_autofree char *remote = NULL; g_autofree char *collection_id = NULL; if (!flatpak_dir_ensure_repo (self, cancellable, error)) return NULL; metadata = flatpak_bundle_load (file, &to_checksum, &ref, &origin, NULL, &fp_metadata, NULL, &included_gpg_data, &collection_id, error); if (metadata == NULL) return NULL; gpg_data = extra_gpg_data ? extra_gpg_data : included_gpg_data; deploy_data = flatpak_dir_get_deploy_data (self, ref, FLATPAK_DEPLOY_VERSION_ANY, cancellable, NULL); if (deploy_data != NULL) { remote = g_strdup (flatpak_deploy_data_get_origin (deploy_data)); /* We need to import any gpg keys because otherwise the pull will fail */ if (gpg_data != NULL) { g_autoptr(GKeyFile) new_config = NULL; new_config = ostree_repo_copy_config (flatpak_dir_get_repo (self)); if (!flatpak_dir_modify_remote (self, remote, new_config, gpg_data, cancellable, error)) return NULL; } } else { g_autofree char *id = flatpak_decomposed_dup_id (ref); /* Add a remote for later updates */ basename = g_file_get_basename (file); remote = flatpak_dir_create_origin_remote (self, origin, id, basename, flatpak_decomposed_get_ref (ref), gpg_data, collection_id, &created_remote, cancellable, error); if (remote == NULL) return NULL; } if (out_created_remote) *out_created_remote = created_remote; if (out_ref) *out_ref = g_steal_pointer (&ref); if (out_checksum) *out_checksum = g_steal_pointer (&to_checksum); if (out_metadata) *out_metadata = g_steal_pointer (&fp_metadata); return g_steal_pointer (&remote); }
117751554146896350574194025697057651898
flatpak-dir.c
41005800026546918810123079124181990480
CWE-276
CVE-2021-43860
Flatpak is a Linux application sandboxing and distribution framework. Prior to versions 1.12.3 and 1.10.6, Flatpak doesn't properly validate that the permissions displayed to the user for an app at install time match the actual permissions granted to the app at runtime, in the case that there's a null byte in the metadata file of an app. Therefore apps can grant themselves permissions without the consent of the user. Flatpak shows permissions to the user during install by reading them from the "xa.metadata" key in the commit metadata. This cannot contain a null terminator, because it is an untrusted GVariant. Flatpak compares these permissions to the *actual* metadata, from the "metadata" file to ensure it wasn't lied to. However, the actual metadata contents are loaded in several places where they are read as simple C-style strings. That means that, if the metadata file includes a null terminator, only the content of the file from *before* the terminator gets compared to xa.metadata. Thus, any permissions that appear in the metadata file after a null terminator are applied at runtime but not shown to the user. So maliciously crafted apps can give themselves hidden permissions. Users who have Flatpaks installed from untrusted sources are at risk in case the Flatpak has a maliciously crafted metadata file, either initially or in an update. This issue is patched in versions 1.12.3 and 1.10.6. As a workaround, users can manually check the permissions of installed apps by checking the metadata file or the xa.metadata key on the commit metadata.
https://nvd.nist.gov/vuln/detail/CVE-2021-43860
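Independent of the specific commit shown in these records, the description above boils down to comparing untrusted bytes as a NUL-terminated C string: anything after an embedded null byte is never compared against what the user was shown. A small hedged sketch (the metadata strings and the hidden "permission" text are invented) contrasts the truncating comparison with a length-aware one:

```c
/* Sketch of the truncation pitfall described above: comparing untrusted
 * metadata as a C string stops at the first NUL byte, so anything after
 * it is never checked against what the user was shown. */
#include <stdio.h>
#include <string.h>

int main(void)
{
    /* What the user was shown (from xa.metadata, cannot contain NUL). */
    const char shown[] = "[Context]\nsockets=wayland;";

    /* What the app's metadata file actually contains: the same prefix,
     * a NUL byte, then an extra permission hidden after it. */
    const char actual[] = "[Context]\nsockets=wayland;\0filesystems=host;";
    size_t actual_len = sizeof(actual) - 1;  /* full length incl. hidden part */

    /* C-string comparison: stops at the embedded NUL, so it "matches". */
    printf("strcmp says match: %s\n",
           strcmp(shown, actual) == 0 ? "yes (bug)" : "no");

    /* Length-aware comparison over the full buffer: mismatch detected. */
    int same = (actual_len == strlen(shown)) &&
               (memcmp(shown, actual, actual_len) == 0);
    printf("length-aware compare says match: %s\n", same ? "yes" : "no (caught)");
    return 0;
}
```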
224,964
flatpak
65cbfac982cb1c83993a9e19aa424daee8e9f042
https://github.com/flatpak/flatpak
https://github.com/flatpak/flatpak/commit/65cbfac982cb1c83993a9e19aa424daee8e9f042
Ensure that bundles have metadata on install If we have a bundle without metadata we wouldn't properly present the permissions in the transaction.
0
flatpak_dir_ensure_bundle_remote (FlatpakDir *self, GFile *file, GBytes *extra_gpg_data, FlatpakDecomposed **out_ref, char **out_checksum, char **out_metadata, gboolean *out_created_remote, GCancellable *cancellable, GError **error) { g_autoptr(FlatpakDecomposed) ref = NULL; gboolean created_remote = FALSE; g_autoptr(GBytes) deploy_data = NULL; g_autoptr(GVariant) metadata = NULL; g_autofree char *origin = NULL; g_autofree char *fp_metadata = NULL; g_autofree char *basename = NULL; g_autoptr(GBytes) included_gpg_data = NULL; GBytes *gpg_data = NULL; g_autofree char *to_checksum = NULL; g_autofree char *remote = NULL; g_autofree char *collection_id = NULL; if (!flatpak_dir_ensure_repo (self, cancellable, error)) return NULL; metadata = flatpak_bundle_load (file, &to_checksum, &ref, &origin, NULL, &fp_metadata, NULL, &included_gpg_data, &collection_id, error); if (metadata == NULL) return NULL; /* If we rely on metadata (to e.g. print permissions), check it exists before creating the remote */ if (out_metadata && fp_metadata == NULL) { flatpak_fail_error (error, FLATPAK_ERROR_INVALID_DATA, "No metadata in bundler header"); return NULL; } gpg_data = extra_gpg_data ? extra_gpg_data : included_gpg_data; deploy_data = flatpak_dir_get_deploy_data (self, ref, FLATPAK_DEPLOY_VERSION_ANY, cancellable, NULL); if (deploy_data != NULL) { remote = g_strdup (flatpak_deploy_data_get_origin (deploy_data)); /* We need to import any gpg keys because otherwise the pull will fail */ if (gpg_data != NULL) { g_autoptr(GKeyFile) new_config = NULL; new_config = ostree_repo_copy_config (flatpak_dir_get_repo (self)); if (!flatpak_dir_modify_remote (self, remote, new_config, gpg_data, cancellable, error)) return NULL; } } else { g_autofree char *id = flatpak_decomposed_dup_id (ref); /* Add a remote for later updates */ basename = g_file_get_basename (file); remote = flatpak_dir_create_origin_remote (self, origin, id, basename, flatpak_decomposed_get_ref (ref), gpg_data, collection_id, &created_remote, cancellable, error); if (remote == NULL) return NULL; } if (out_created_remote) *out_created_remote = created_remote; if (out_ref) *out_ref = g_steal_pointer (&ref); if (out_checksum) *out_checksum = g_steal_pointer (&to_checksum); if (out_metadata) *out_metadata = g_steal_pointer (&fp_metadata); return g_steal_pointer (&remote); }
57199236180630525002318569009544856929
flatpak-dir.c
126847826047276531038327785325265722415
CWE-276
CVE-2021-43860
Flatpak is a Linux application sandboxing and distribution framework. Prior to versions 1.12.3 and 1.10.6, Flatpak doesn't properly validate that the permissions displayed to the user for an app at install time match the actual permissions granted to the app at runtime, in the case that there's a null byte in the metadata file of an app. Therefore apps can grant themselves permissions without the consent of the user. Flatpak shows permissions to the user during install by reading them from the "xa.metadata" key in the commit metadata. This cannot contain a null terminator, because it is an untrusted GVariant. Flatpak compares these permissions to the *actual* metadata, from the "metadata" file to ensure it wasn't lied to. However, the actual metadata contents are loaded in several places where they are read as simple C-style strings. That means that, if the metadata file includes a null terminator, only the content of the file from *before* the terminator gets compared to xa.metadata. Thus, any permissions that appear in the metadata file after a null terminator are applied at runtime but not shown to the user. So maliciously crafted apps can give themselves hidden permissions. Users who have Flatpaks installed from untrusted sources are at risk in case the Flatpak has a maliciously crafted metadata file, either initially or in an update. This issue is patched in versions 1.12.3 and 1.10.6. As a workaround, users can manually check the permissions of installed apps by checking the metadata file or the xa.metadata key on the commit metadata.
https://nvd.nist.gov/vuln/detail/CVE-2021-43860
195,389
tensorflow
c2b31ff2d3151acb230edc3f5b1832d2c713a9e0
https://github.com/tensorflow/tensorflow
https://github.com/tensorflow/tensorflow/commit/c2b31ff2d3151acb230edc3f5b1832d2c713a9e0
Remove a `DCHECK`-fail, log an error instead. `DCHECK` in debug mode results in crashes. TensorFlow has had multiple vulnerabilities due to this. Outside of debug mode, `DCHECK` is a no-op. A better alternative is to report an error to the log buffer and continue. This should happen both in debug mode and in prod mode. PiperOrigin-RevId: 408375925 Change-Id: Id5b3e19c73f3fbe0cc4bba26ca44ff9607bb6356
1
bool RepeatedAttrDefEqual( const protobuf::RepeatedPtrField<OpDef::AttrDef>& a1, const protobuf::RepeatedPtrField<OpDef::AttrDef>& a2) { std::unordered_map<string, const OpDef::AttrDef*> a1_set; for (const OpDef::AttrDef& def : a1) { DCHECK(a1_set.find(def.name()) == a1_set.end()) << "AttrDef names must be unique, but '" << def.name() << "' appears more than once"; a1_set[def.name()] = &def; } for (const OpDef::AttrDef& def : a2) { auto iter = a1_set.find(def.name()); if (iter == a1_set.end()) return false; if (!AttrDefEqual(*iter->second, def)) return false; a1_set.erase(iter); } if (!a1_set.empty()) return false; return true; }
228350956694349821922378909162368693155
op_def_util.cc
43202597261631718571985626227626810269
CWE-617
CVE-2022-23565
Tensorflow is an Open Source Machine Learning Framework. An attacker can trigger denial of service via assertion failure by altering a `SavedModel` on disk such that `AttrDef`s of some operation are duplicated. The fix will be included in TensorFlow 2.8.0. We will also cherrypick this commit on TensorFlow 2.7.1, TensorFlow 2.6.3, and TensorFlow 2.5.3, as these are also affected and still in supported range.
https://nvd.nist.gov/vuln/detail/CVE-2022-23565
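The paired fixed record below replaces the `DCHECK` with an error log. For readers less familiar with the pattern, the standard-C sketch below (the names and the registration helper are made up) shows the analogous pitfall with `assert`, which also compiles to nothing when `NDEBUG` is defined, so a hostile input sails past it in release builds:

```c
/* Sketch of why a debug-only assertion is not a security check: with
 * NDEBUG defined (typical release builds) assert() expands to nothing,
 * much as TensorFlow's DCHECK is a no-op outside debug mode.  The fixed
 * record logs the duplicate name and keeps going instead. */
#include <assert.h>
#include <stdio.h>
#include <string.h>

static void register_attr(const char *names[], int *count, const char *name)
{
    for (int i = 0; i < *count; ++i) {
        if (strcmp(names[i], name) == 0) {
            /* Debug-only reaction: aborts a debug build, does nothing in
             * release, so a hostile SavedModel still reaches later code. */
            assert(!"AttrDef names must be unique");

            /* Release-safe reaction (what the fix does): report, continue. */
            fprintf(stderr, "error: AttrDef '%s' appears more than once\n", name);
            return;
        }
    }
    names[(*count)++] = name;
}

int main(void)
{
    const char *names[8];
    int count = 0;
    register_attr(names, &count, "T");
    register_attr(names, &count, "T");   /* duplicate from untrusted input */
    return 0;
}
```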
225,086
tensorflow
c2b31ff2d3151acb230edc3f5b1832d2c713a9e0
https://github.com/tensorflow/tensorflow
https://github.com/tensorflow/tensorflow/commit/c2b31ff2d3151acb230edc3f5b1832d2c713a9e0
Remove a `DCHECK`-fail, log an error instead. `DCHECK` in debug mode results in crashes. TensorFlow has had multiple vulnerabilities due to this. Outside of debug mode, `DCHECK` is a no-op. A better alternative is to report an error to the log buffer and continue. This should happen both in debug mode and in prod mode. PiperOrigin-RevId: 408375925 Change-Id: Id5b3e19c73f3fbe0cc4bba26ca44ff9607bb6356
0
bool RepeatedAttrDefEqual( const protobuf::RepeatedPtrField<OpDef::AttrDef>& a1, const protobuf::RepeatedPtrField<OpDef::AttrDef>& a2) { std::unordered_map<string, const OpDef::AttrDef*> a1_set; for (const OpDef::AttrDef& def : a1) { if (a1_set.find(def.name()) != a1_set.end()) { LOG(ERROR) << "AttrDef names must be unique, but '" << def.name() << "' appears more than once"; } a1_set[def.name()] = &def; } for (const OpDef::AttrDef& def : a2) { auto iter = a1_set.find(def.name()); if (iter == a1_set.end()) return false; if (!AttrDefEqual(*iter->second, def)) return false; a1_set.erase(iter); } if (!a1_set.empty()) return false; return true; }
7221108948147885063916901261116103162
op_def_util.cc
99670691263177784698689977477403265008
CWE-617
CVE-2022-23565
Tensorflow is an Open Source Machine Learning Framework. An attacker can trigger denial of service via assertion failure by altering a `SavedModel` on disk such that `AttrDef`s of some operation are duplicated. The fix will be included in TensorFlow 2.8.0. We will also cherrypick this commit on TensorFlow 2.7.1, TensorFlow 2.6.3, and TensorFlow 2.5.3, as these are also affected and still in supported range.
https://nvd.nist.gov/vuln/detail/CVE-2022-23565
195,398
v4l2loopback
e4cd225557486c420f6a34411f98c575effd43dd
https://github.com/umlaeute/v4l2loopback
https://github.com/umlaeute/v4l2loopback/commit/e4cd225557486c420f6a34411f98c575effd43dd
add explicit format specifier to printf() invocations CWE-134
1
static int vidioc_querycap(struct file *file, void *priv, struct v4l2_capability *cap) { struct v4l2_loopback_device *dev = v4l2loopback_getdevice(file); int labellen = (sizeof(cap->card) < sizeof(dev->card_label)) ? sizeof(cap->card) : sizeof(dev->card_label); int device_nr = ((struct v4l2loopback_private *)video_get_drvdata(dev->vdev)) ->device_nr; __u32 capabilities = V4L2_CAP_STREAMING | V4L2_CAP_READWRITE; strlcpy(cap->driver, "v4l2 loopback", sizeof(cap->driver)); snprintf(cap->card, labellen, dev->card_label); snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:v4l2loopback-%03d", device_nr); #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 1, 0) /* since 3.1.0, the v4l2-core system is supposed to set the version */ cap->version = V4L2LOOPBACK_VERSION_CODE; #endif #ifdef V4L2_CAP_VIDEO_M2M capabilities |= V4L2_CAP_VIDEO_M2M; #endif /* V4L2_CAP_VIDEO_M2M */ if (dev->announce_all_caps) { capabilities |= V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_OUTPUT; } else { if (dev->ready_for_capture) { capabilities |= V4L2_CAP_VIDEO_CAPTURE; } if (dev->ready_for_output) { capabilities |= V4L2_CAP_VIDEO_OUTPUT; } } #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 7, 0) dev->vdev->device_caps = #endif /* >=linux-4.7.0 */ cap->device_caps = cap->capabilities = capabilities; #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 3, 0) cap->capabilities |= V4L2_CAP_DEVICE_CAPS; #endif memset(cap->reserved, 0, sizeof(cap->reserved)); return 0; }
275249025528691740507199336736969659771
v4l2loopback.c
113113223463037707180278012059265756483
CWE-134
CVE-2022-2652
Depending on the way the format strings in the card label are crafted, it is possible to leak kernel stack memory. There is also the possibility of DoS due to the v4l2loopback kernel module crashing when providing the card label on request (reproduce e.g. with many %s modifiers in a row).
https://nvd.nist.gov/vuln/detail/CVE-2022-2652
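The diff between this record and the fixed one below is the classic CWE-134 repair: pass the untrusted label as data behind an explicit "%s" format rather than as the format string itself. A small user-space sketch (the label value is made up, and this obviously does not touch the kernel) illustrates why the distinction matters:

```c
/* Sketch of the format-string issue in the record above: passing an
 * untrusted label directly as the format argument lets "%p"/"%s" style
 * specifiers in the label pull values off the stack; the fix passes "%s"
 * as the format and the label as data. */
#include <stdio.h>

int main(void)
{
    char card[32];
    const char *card_label = "%p %p %p %p";    /* attacker-chosen label   */

    /* Vulnerable pattern (as in the unfixed record): label is the format.
     * This is undefined behaviour and typically prints stack contents.   */
    snprintf(card, sizeof card, card_label);
    printf("vulnerable: %s\n", card);

    /* Fixed pattern: explicit "%s" format specifier. */
    snprintf(card, sizeof card, "%s", card_label);
    printf("fixed:      %s\n", card);           /* prints the literal text */
    return 0;
}
```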
225,383
v4l2loopback
e4cd225557486c420f6a34411f98c575effd43dd
https://github.com/umlaeute/v4l2loopback
https://github.com/umlaeute/v4l2loopback/commit/e4cd225557486c420f6a34411f98c575effd43dd
add explicit format specifier to printf() invocations CWE-134
0
static int vidioc_querycap(struct file *file, void *priv, struct v4l2_capability *cap) { struct v4l2_loopback_device *dev = v4l2loopback_getdevice(file); int labellen = (sizeof(cap->card) < sizeof(dev->card_label)) ? sizeof(cap->card) : sizeof(dev->card_label); int device_nr = ((struct v4l2loopback_private *)video_get_drvdata(dev->vdev)) ->device_nr; __u32 capabilities = V4L2_CAP_STREAMING | V4L2_CAP_READWRITE; strlcpy(cap->driver, "v4l2 loopback", sizeof(cap->driver)); snprintf(cap->card, labellen, "%s", dev->card_label); snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:v4l2loopback-%03d", device_nr); #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 1, 0) /* since 3.1.0, the v4l2-core system is supposed to set the version */ cap->version = V4L2LOOPBACK_VERSION_CODE; #endif #ifdef V4L2_CAP_VIDEO_M2M capabilities |= V4L2_CAP_VIDEO_M2M; #endif /* V4L2_CAP_VIDEO_M2M */ if (dev->announce_all_caps) { capabilities |= V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_OUTPUT; } else { if (dev->ready_for_capture) { capabilities |= V4L2_CAP_VIDEO_CAPTURE; } if (dev->ready_for_output) { capabilities |= V4L2_CAP_VIDEO_OUTPUT; } } #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 7, 0) dev->vdev->device_caps = #endif /* >=linux-4.7.0 */ cap->device_caps = cap->capabilities = capabilities; #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 3, 0) cap->capabilities |= V4L2_CAP_DEVICE_CAPS; #endif memset(cap->reserved, 0, sizeof(cap->reserved)); return 0; }
178769376518603723181418371440757147493
v4l2loopback.c
138252299614112344688826607831650943411
CWE-134
CVE-2022-2652
Depending on the way the format strings in the card label are crafted, it is possible to leak kernel stack memory. There is also the possibility of DoS due to the v4l2loopback kernel module crashing when providing the card label on request (reproduce e.g. with many %s modifiers in a row).
https://nvd.nist.gov/vuln/detail/CVE-2022-2652
195,399
tensorflow
045deec1cbdebb27d817008ad5df94d96a08b1bf
https://github.com/tensorflow/tensorflow
https://github.com/tensorflow/tensorflow/commit/045deec1cbdebb27d817008ad5df94d96a08b1bf
Prevent null pointer dereference in `mutable_graph_view` PiperOrigin-RevId: 409684472 Change-Id: I577eb9d9ac470fcec0501423171e739a4ec0cb5c
1
bool IsIdentityConsumingSwitch(const MutableGraphView& graph, const NodeDef& node) { if ((IsIdentity(node) || IsIdentityNSingleInput(node)) && node.input_size() > 0) { TensorId tensor_id = ParseTensorName(node.input(0)); if (IsTensorIdControlling(tensor_id)) { return false; } NodeDef* input_node = graph.GetNode(tensor_id.node()); return IsSwitch(*input_node); } return false; }
313619660222966312087557415210995637728
mutable_graph_view.cc
11824580899895481141820753687530297202
CWE-476
CVE-2022-23589
Tensorflow is an Open Source Machine Learning Framework. Under certain scenarios, the Grappler component of TensorFlow can trigger a null pointer dereference. There are 2 places where this can occur, for the same malicious alteration of a `SavedModel` file (fixing the first one would trigger the same dereference in the second place). First, during constant folding, the `GraphDef` might not have the required nodes for the binary operation. If a node is missing, the corresponding `mul_*child` would be null, and the dereference in the subsequent line would be incorrect. We have a similar issue during `IsIdentityConsumingSwitch`. The fix will be included in TensorFlow 2.8.0. We will also cherrypick this commit on TensorFlow 2.7.1, TensorFlow 2.6.3, and TensorFlow 2.5.3, as these are also affected and still in supported range.
https://nvd.nist.gov/vuln/detail/CVE-2022-23589
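The fixed record below simply guards the result of `GetNode` before dereferencing it. As a minimal illustration with an invented graph type (not TensorFlow's `MutableGraphView`), the sketch below shows the same lookup-then-guard shape:

```c
/* Sketch of the null-dereference pattern from the record above: a lookup
 * by name can fail on a malformed graph, and the unchecked result is then
 * dereferenced.  The tiny graph type here is made up. */
#include <stdio.h>
#include <string.h>
#include <stddef.h>

struct node { const char *name; int is_switch; };

static const struct node *get_node(const struct node *nodes, size_t n,
                                   const char *name)
{
    for (size_t i = 0; i < n; ++i)
        if (strcmp(nodes[i].name, name) == 0)
            return &nodes[i];
    return NULL;   /* name not present in the (attacker-controlled) graph */
}

static int is_identity_consuming_switch(const struct node *nodes, size_t n,
                                        const char *input_name)
{
    const struct node *input = get_node(nodes, n, input_name);
    if (input == NULL)        /* the added guard from the fixed record */
        return 0;
    return input->is_switch;  /* without the guard, this line crashes  */
}

int main(void)
{
    struct node graph[] = { { "a", 0 }, { "b", 1 } };
    printf("%d\n", is_identity_consuming_switch(graph, 2, "missing"));
    return 0;
}
```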
225,473
tensorflow
045deec1cbdebb27d817008ad5df94d96a08b1bf
https://github.com/tensorflow/tensorflow
https://github.com/tensorflow/tensorflow/commit/045deec1cbdebb27d817008ad5df94d96a08b1bf
Prevent null pointer dereference in `mutable_graph_view` PiperOrigin-RevId: 409684472 Change-Id: I577eb9d9ac470fcec0501423171e739a4ec0cb5c
0
bool IsIdentityConsumingSwitch(const MutableGraphView& graph, const NodeDef& node) { if ((IsIdentity(node) || IsIdentityNSingleInput(node)) && node.input_size() > 0) { TensorId tensor_id = ParseTensorName(node.input(0)); if (IsTensorIdControlling(tensor_id)) { return false; } NodeDef* input_node = graph.GetNode(tensor_id.node()); if (input_node == nullptr) { return false; } return IsSwitch(*input_node); } return false; }
186036134407007049668950369044144546274
mutable_graph_view.cc
183902084895507336616240181892029435869
CWE-476
CVE-2022-23589
Tensorflow is an Open Source Machine Learning Framework. Under certain scenarios, the Grappler component of TensorFlow can trigger a null pointer dereference. There are 2 places where this can occur, for the same malicious alteration of a `SavedModel` file (fixing the first one would trigger the same dereference in the second place). First, during constant folding, the `GraphDef` might not have the required nodes for the binary operation. If a node is missing, the corresponding `mul_*child` would be null, and the dereference in the subsequent line would be incorrect. We have a similar issue during `IsIdentityConsumingSwitch`. The fix will be included in TensorFlow 2.8.0. We will also cherrypick this commit on TensorFlow 2.7.1, TensorFlow 2.6.3, and TensorFlow 2.5.3, as these are also affected and still in supported range.
https://nvd.nist.gov/vuln/detail/CVE-2022-23589
195,403
tensorflow
a1e1511dde36b3f8aa27a6ec630838e7ea40e091
https://github.com/tensorflow/tensorflow
https://github.com/tensorflow/tensorflow/commit/a1e1511dde36b3f8aa27a6ec630838e7ea40e091
[lite] Update TfLiteIntArrayCreate to return size_t PiperOrigin-RevId: 416439896 Change-Id: I847f69b68d1ddaff4b1e925a09b8b69c1756653b
1
TfLiteIntArray* TfLiteIntArrayCreate(int size) { int alloc_size = TfLiteIntArrayGetSizeInBytes(size); if (alloc_size <= 0) return NULL; TfLiteIntArray* ret = (TfLiteIntArray*)malloc(alloc_size); if (!ret) return ret; ret->size = size; return ret; }
64742066879088615123277599572040485093
common.c
227108095659128555473924245568634074234
CWE-190
CVE-2022-23558
Tensorflow is an Open Source Machine Learning Framework. An attacker can craft a TFLite model that would cause an integer overflow in `TfLiteIntArrayCreate`. The `TfLiteIntArrayGetSizeInBytes` function returns an `int` instead of a `size_t`. An attacker can control model inputs such that `computed_size` overflows the `int` datatype. The fix will be included in TensorFlow 2.8.0. We will also cherrypick this commit on TensorFlow 2.7.1, TensorFlow 2.6.3, and TensorFlow 2.5.3, as these are also affected and still in supported range.
https://nvd.nist.gov/vuln/detail/CVE-2022-23558
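The fixed record below widens `alloc_size` to `size_t`. The hedged sketch that follows (the struct layout is loosely modelled on `TfLiteIntArray`, the element count is made up, and the wrapped result is reproduced with unsigned arithmetic to keep the demo well-defined) shows why doing the byte-count arithmetic at 32-bit width can yield an absurdly small allocation size:

```c
/* Sketch of the allocation-size overflow described above: computing the
 * byte count at 32-bit width can wrap for large element counts, while a
 * size_t computation keeps the full value. */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct int_array { int size; int data[]; };   /* flexible array member */

int main(void)
{
    int n = 0x40000000;   /* attacker-influenced element count */

    /* The unfixed code performs this arithmetic in `int`, which overflows
     * (undefined behaviour).  The typical wrapped result is reproduced
     * here with well-defined unsigned arithmetic for the demo. */
    uint32_t wrapped = (uint32_t) sizeof(struct int_array) +
                       (uint32_t) n * (uint32_t) sizeof(int);
    int bad_size = (int) wrapped;             /* tiny "size in bytes"     */

    /* Safer pattern, as in the fixed record: compute in size_t throughout. */
    size_t good_size = sizeof(struct int_array) + (size_t) n * sizeof(int);

    printf("32-bit computation:  %d bytes\n", bad_size);
    printf("size_t computation:  %zu bytes\n", good_size);
    return 0;
}
```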
225,552
tensorflow
a1e1511dde36b3f8aa27a6ec630838e7ea40e091
https://github.com/tensorflow/tensorflow
https://github.com/tensorflow/tensorflow/commit/a1e1511dde36b3f8aa27a6ec630838e7ea40e091
[lite] Update TfLiteIntArrayCreate to return size_t PiperOrigin-RevId: 416439896 Change-Id: I847f69b68d1ddaff4b1e925a09b8b69c1756653b
0
TfLiteIntArray* TfLiteIntArrayCreate(int size) { size_t alloc_size = TfLiteIntArrayGetSizeInBytes(size); if (alloc_size <= 0) return NULL; TfLiteIntArray* ret = (TfLiteIntArray*)malloc(alloc_size); if (!ret) return ret; ret->size = size; return ret; }
125057143458607684207882855383643521977
common.c
141963346575108043523158725755967478163
CWE-190
CVE-2022-23558
Tensorflow is an Open Source Machine Learning Framework. An attacker can craft a TFLite model that would cause an integer overflow in `TfLiteIntArrayCreate`. The `TfLiteIntArrayGetSizeInBytes` function returns an `int` instead of a `size_t`. An attacker can control model inputs such that `computed_size` overflows the `int` datatype. The fix will be included in TensorFlow 2.8.0. We will also cherrypick this commit on TensorFlow 2.7.1, TensorFlow 2.6.3, and TensorFlow 2.5.3, as these are also affected and still in supported range.
https://nvd.nist.gov/vuln/detail/CVE-2022-23558
195,405
ImageMagick6
29c8abce0da56b536542f76a9ddfebdaab5b2943
https://github.com/ImageMagick/ImageMagick6
https://github.com/ImageMagick/ImageMagick6/commit/29c8abce0da56b536542f76a9ddfebdaab5b2943
https://github.com/ImageMagick/ImageMagick/pull/4986
1
static Image *ReadPCLImage(const ImageInfo *image_info,ExceptionInfo *exception) { #define CropBox "CropBox" #define DeviceCMYK "DeviceCMYK" #define MediaBox "MediaBox" #define RenderPCLText " Rendering PCL... " char command[MaxTextExtent], *density, filename[MaxTextExtent], geometry[MaxTextExtent], *options, input_filename[MaxTextExtent]; const DelegateInfo *delegate_info; Image *image, *next_image; ImageInfo *read_info; int c; MagickBooleanType cmyk, status; PointInfo delta; RectangleInfo bounding_box, page; char *p; SegmentInfo bounds; size_t height, width; ssize_t count; assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); /* Open image file. */ image=AcquireImage(image_info); status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) { image=DestroyImageList(image); return((Image *) NULL); } status=AcquireUniqueSymbolicLink(image_info->filename,input_filename); if (status == MagickFalse) { ThrowFileException(exception,FileOpenError,"UnableToCreateTemporaryFile", image_info->filename); image=DestroyImageList(image); return((Image *) NULL); } /* Set the page density. */ delta.x=DefaultResolution; delta.y=DefaultResolution; if ((image->x_resolution == 0.0) || (image->y_resolution == 0.0)) { GeometryInfo geometry_info; MagickStatusType flags; flags=ParseGeometry(PSDensityGeometry,&geometry_info); if ((flags & RhoValue) != 0) image->x_resolution=geometry_info.rho; image->y_resolution=image->x_resolution; if ((flags & SigmaValue) != 0) image->y_resolution=geometry_info.sigma; } /* Determine page geometry from the PCL media box. */ cmyk=image->colorspace == CMYKColorspace ? MagickTrue : MagickFalse; count=0; (void) memset(&bounding_box,0,sizeof(bounding_box)); (void) memset(&bounds,0,sizeof(bounds)); (void) memset(&page,0,sizeof(page)); (void) memset(command,0,sizeof(command)); p=command; for (c=ReadBlobByte(image); c != EOF; c=ReadBlobByte(image)) { if (image_info->page != (char *) NULL) continue; /* Note PCL elements. */ *p++=(char) c; if ((c != (int) '/') && (c != '\n') && ((size_t) (p-command) < (MaxTextExtent-1))) continue; *p='\0'; p=command; /* Is this a CMYK document? */ if (LocaleNCompare(DeviceCMYK,command,strlen(DeviceCMYK)) == 0) cmyk=MagickTrue; if (LocaleNCompare(CropBox,command,strlen(CropBox)) == 0) { /* Note region defined by crop box. */ count=(ssize_t) sscanf(command,"CropBox [%lf %lf %lf %lf", &bounds.x1,&bounds.y1,&bounds.x2,&bounds.y2); if (count != 4) count=(ssize_t) sscanf(command,"CropBox[%lf %lf %lf %lf", &bounds.x1,&bounds.y1,&bounds.x2,&bounds.y2); } if (LocaleNCompare(MediaBox,command,strlen(MediaBox)) == 0) { /* Note region defined by media box. */ count=(ssize_t) sscanf(command,"MediaBox [%lf %lf %lf %lf", &bounds.x1,&bounds.y1,&bounds.x2,&bounds.y2); if (count != 4) count=(ssize_t) sscanf(command,"MediaBox[%lf %lf %lf %lf", &bounds.x1,&bounds.y1,&bounds.x2,&bounds.y2); } if (count != 4) continue; /* Set PCL render geometry. */ width=(size_t) floor(bounds.x2-bounds.x1+0.5); height=(size_t) floor(bounds.y2-bounds.y1+0.5); if (width > page.width) page.width=width; if (height > page.height) page.height=height; } (void) CloseBlob(image); /* Render PCL with the GhostPCL delegate. 
*/ if ((page.width == 0) || (page.height == 0)) (void) ParseAbsoluteGeometry(PSPageGeometry,&page); if (image_info->page != (char *) NULL) (void) ParseAbsoluteGeometry(image_info->page,&page); (void) FormatLocaleString(geometry,MaxTextExtent,"%.20gx%.20g",(double) page.width,(double) page.height); if (image_info->monochrome != MagickFalse) delegate_info=GetDelegateInfo("pcl:mono",(char *) NULL,exception); else if (cmyk != MagickFalse) delegate_info=GetDelegateInfo("pcl:cmyk",(char *) NULL,exception); else delegate_info=GetDelegateInfo("pcl:color",(char *) NULL,exception); if (delegate_info == (const DelegateInfo *) NULL) { image=DestroyImage(image); return((Image *) NULL); } if ((page.width == 0) || (page.height == 0)) (void) ParseAbsoluteGeometry(PSPageGeometry,&page); if (image_info->page != (char *) NULL) (void) ParseAbsoluteGeometry(image_info->page,&page); density=AcquireString(""); options=AcquireString(""); (void) FormatLocaleString(density,MaxTextExtent,"%gx%g", image->x_resolution,image->y_resolution); if (image_info->ping != MagickFalse) (void) FormatLocaleString(density,MagickPathExtent,"2.0x2.0"); page.width=(size_t) floor((double) page.width*image->x_resolution/delta.x+ 0.5); page.height=(size_t) floor((double) page.height*image->y_resolution/delta.y+ 0.5); (void) FormatLocaleString(options,MaxTextExtent,"-g%.20gx%.20g ",(double) page.width,(double) page.height); image=DestroyImage(image); read_info=CloneImageInfo(image_info); *read_info->magick='\0'; if (read_info->number_scenes != 0) { if (read_info->number_scenes != 1) (void) FormatLocaleString(options,MaxTextExtent,"-dLastPage=%.20g", (double) (read_info->scene+read_info->number_scenes)); else (void) FormatLocaleString(options,MaxTextExtent, "-dFirstPage=%.20g -dLastPage=%.20g",(double) read_info->scene+1, (double) (read_info->scene+read_info->number_scenes)); read_info->number_scenes=0; if (read_info->scenes != (char *) NULL) *read_info->scenes='\0'; } (void) CopyMagickString(filename,read_info->filename,MaxTextExtent); (void) AcquireUniqueFilename(read_info->filename); (void) FormatLocaleString(command,MaxTextExtent, GetDelegateCommands(delegate_info), read_info->antialias != MagickFalse ? 4 : 1, read_info->antialias != MagickFalse ? 4 : 1,density,options, read_info->filename,input_filename); options=DestroyString(options); density=DestroyString(density); status=ExternalDelegateCommand(MagickFalse,read_info->verbose,command, (char *) NULL,exception) != 0 ? MagickTrue : MagickFalse; image=ReadImage(read_info,exception); (void) RelinquishUniqueFileResource(read_info->filename); (void) RelinquishUniqueFileResource(input_filename); read_info=DestroyImageInfo(read_info); if (image == (Image *) NULL) ThrowReaderException(DelegateError,"PCLDelegateFailed"); if (LocaleCompare(image->magick,"BMP") == 0) { Image *cmyk_image; cmyk_image=ConsolidateCMYKImages(image,&image->exception); if (cmyk_image != (Image *) NULL) { image=DestroyImageList(image); image=cmyk_image; } } do { (void) CopyMagickString(image->filename,filename,MaxTextExtent); image->page=page; if (image_info->ping != MagickFalse) { image->magick_columns*=image->x_resolution/2.0; image->magick_rows*=image->y_resolution/2.0; image->columns*=image->x_resolution/2.0; image->rows*=image->y_resolution/2.0; } next_image=SyncNextImageInList(image); if (next_image != (Image *) NULL) image=next_image; } while (next_image != (Image *) NULL); return(GetFirstImageInList(image)); }
285956846610376765198872183403915882061
pcl.c
338547174937023730341471137685130471169
CWE-190
CVE-2022-32546
A vulnerability was found in ImageMagick, causing a value outside the range of representable values of type 'unsigned long' at coders/pcl.c when crafted or untrusted input is processed. This leads to a negative impact on application availability or other problems related to undefined behavior.
https://nvd.nist.gov/vuln/detail/CVE-2022-32546
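The fixed record below routes the computed width and height through `CastDoubleToLong` before the `size_t` cast. The sketch below uses a hypothetical clamp helper (not ImageMagick's implementation) to show the safe shape of that conversion; converting an out-of-range double directly is undefined behavior:

```c
/* Sketch of the undefined conversion described above: casting a double
 * that is outside the representable range of the target integer type is
 * undefined behaviour, which is why the fixed record clamps the value
 * before converting.  The clamp helper is a hypothetical stand-in. */
#include <stdio.h>
#include <stddef.h>
#include <math.h>
#include <limits.h>

static long clamp_double_to_long(double value)
{
    if (isnan(value))
        return 0;
    if (value >= (double) LONG_MAX)
        return LONG_MAX;
    if (value <= (double) LONG_MIN)
        return LONG_MIN;
    return (long) value;   /* now guaranteed to be in range */
}

int main(void)
{
    /* Bounds parsed from an untrusted MediaBox entry (values made up). */
    double x1 = -1.0e30, x2 = 1.0e30;

    /* Vulnerable pattern: (size_t) floor(x2 - x1 + 0.5) converts a value
     * far outside the range of size_t -- undefined behaviour. */

    /* Clamped pattern: convert through a range-checked helper first. */
    size_t width = (size_t) clamp_double_to_long(floor(x2 - x1 + 0.5));
    printf("clamped width = %zu\n", width);
    return 0;
}
```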
225,566
ImageMagick6
29c8abce0da56b536542f76a9ddfebdaab5b2943
https://github.com/ImageMagick/ImageMagick6
https://github.com/ImageMagick/ImageMagick6/commit/29c8abce0da56b536542f76a9ddfebdaab5b2943
https://github.com/ImageMagick/ImageMagick/pull/4986
0
static Image *ReadPCLImage(const ImageInfo *image_info,ExceptionInfo *exception) { #define CropBox "CropBox" #define DeviceCMYK "DeviceCMYK" #define MediaBox "MediaBox" #define RenderPCLText " Rendering PCL... " char command[MaxTextExtent], *density, filename[MaxTextExtent], geometry[MaxTextExtent], *options, input_filename[MaxTextExtent]; const DelegateInfo *delegate_info; Image *image, *next_image; ImageInfo *read_info; int c; MagickBooleanType cmyk, status; PointInfo delta; RectangleInfo bounding_box, page; char *p; SegmentInfo bounds; size_t height, width; ssize_t count; assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); /* Open image file. */ image=AcquireImage(image_info); status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) { image=DestroyImageList(image); return((Image *) NULL); } status=AcquireUniqueSymbolicLink(image_info->filename,input_filename); if (status == MagickFalse) { ThrowFileException(exception,FileOpenError,"UnableToCreateTemporaryFile", image_info->filename); image=DestroyImageList(image); return((Image *) NULL); } /* Set the page density. */ delta.x=DefaultResolution; delta.y=DefaultResolution; if ((image->x_resolution == 0.0) || (image->y_resolution == 0.0)) { GeometryInfo geometry_info; MagickStatusType flags; flags=ParseGeometry(PSDensityGeometry,&geometry_info); if ((flags & RhoValue) != 0) image->x_resolution=geometry_info.rho; image->y_resolution=image->x_resolution; if ((flags & SigmaValue) != 0) image->y_resolution=geometry_info.sigma; } /* Determine page geometry from the PCL media box. */ cmyk=image->colorspace == CMYKColorspace ? MagickTrue : MagickFalse; count=0; (void) memset(&bounding_box,0,sizeof(bounding_box)); (void) memset(&bounds,0,sizeof(bounds)); (void) memset(&page,0,sizeof(page)); (void) memset(command,0,sizeof(command)); p=command; for (c=ReadBlobByte(image); c != EOF; c=ReadBlobByte(image)) { if (image_info->page != (char *) NULL) continue; /* Note PCL elements. */ *p++=(char) c; if ((c != (int) '/') && (c != '\n') && ((size_t) (p-command) < (MaxTextExtent-1))) continue; *p='\0'; p=command; /* Is this a CMYK document? */ if (LocaleNCompare(DeviceCMYK,command,strlen(DeviceCMYK)) == 0) cmyk=MagickTrue; if (LocaleNCompare(CropBox,command,strlen(CropBox)) == 0) { /* Note region defined by crop box. */ count=(ssize_t) sscanf(command,"CropBox [%lf %lf %lf %lf", &bounds.x1,&bounds.y1,&bounds.x2,&bounds.y2); if (count != 4) count=(ssize_t) sscanf(command,"CropBox[%lf %lf %lf %lf", &bounds.x1,&bounds.y1,&bounds.x2,&bounds.y2); } if (LocaleNCompare(MediaBox,command,strlen(MediaBox)) == 0) { /* Note region defined by media box. */ count=(ssize_t) sscanf(command,"MediaBox [%lf %lf %lf %lf", &bounds.x1,&bounds.y1,&bounds.x2,&bounds.y2); if (count != 4) count=(ssize_t) sscanf(command,"MediaBox[%lf %lf %lf %lf", &bounds.x1,&bounds.y1,&bounds.x2,&bounds.y2); } if (count != 4) continue; /* Set PCL render geometry. */ width=(size_t) CastDoubleToLong(floor(bounds.x2-bounds.x1+0.5)); height=(size_t) CastDoubleToLong(floor(bounds.y2-bounds.y1+0.5)); if (width > page.width) page.width=width; if (height > page.height) page.height=height; } (void) CloseBlob(image); /* Render PCL with the GhostPCL delegate. 
*/ if ((page.width == 0) || (page.height == 0)) (void) ParseAbsoluteGeometry(PSPageGeometry,&page); if (image_info->page != (char *) NULL) (void) ParseAbsoluteGeometry(image_info->page,&page); (void) FormatLocaleString(geometry,MaxTextExtent,"%.20gx%.20g",(double) page.width,(double) page.height); if (image_info->monochrome != MagickFalse) delegate_info=GetDelegateInfo("pcl:mono",(char *) NULL,exception); else if (cmyk != MagickFalse) delegate_info=GetDelegateInfo("pcl:cmyk",(char *) NULL,exception); else delegate_info=GetDelegateInfo("pcl:color",(char *) NULL,exception); if (delegate_info == (const DelegateInfo *) NULL) { image=DestroyImage(image); return((Image *) NULL); } if ((page.width == 0) || (page.height == 0)) (void) ParseAbsoluteGeometry(PSPageGeometry,&page); if (image_info->page != (char *) NULL) (void) ParseAbsoluteGeometry(image_info->page,&page); density=AcquireString(""); options=AcquireString(""); (void) FormatLocaleString(density,MaxTextExtent,"%gx%g", image->x_resolution,image->y_resolution); if (image_info->ping != MagickFalse) (void) FormatLocaleString(density,MagickPathExtent,"2.0x2.0"); page.width=(size_t) floor((double) page.width*image->x_resolution/delta.x+ 0.5); page.height=(size_t) floor((double) page.height*image->y_resolution/delta.y+ 0.5); (void) FormatLocaleString(options,MaxTextExtent,"-g%.20gx%.20g ",(double) page.width,(double) page.height); image=DestroyImage(image); read_info=CloneImageInfo(image_info); *read_info->magick='\0'; if (read_info->number_scenes != 0) { if (read_info->number_scenes != 1) (void) FormatLocaleString(options,MaxTextExtent,"-dLastPage=%.20g", (double) (read_info->scene+read_info->number_scenes)); else (void) FormatLocaleString(options,MaxTextExtent, "-dFirstPage=%.20g -dLastPage=%.20g",(double) read_info->scene+1, (double) (read_info->scene+read_info->number_scenes)); read_info->number_scenes=0; if (read_info->scenes != (char *) NULL) *read_info->scenes='\0'; } (void) CopyMagickString(filename,read_info->filename,MaxTextExtent); (void) AcquireUniqueFilename(read_info->filename); (void) FormatLocaleString(command,MaxTextExtent, GetDelegateCommands(delegate_info), read_info->antialias != MagickFalse ? 4 : 1, read_info->antialias != MagickFalse ? 4 : 1,density,options, read_info->filename,input_filename); options=DestroyString(options); density=DestroyString(density); status=ExternalDelegateCommand(MagickFalse,read_info->verbose,command, (char *) NULL,exception) != 0 ? MagickTrue : MagickFalse; image=ReadImage(read_info,exception); (void) RelinquishUniqueFileResource(read_info->filename); (void) RelinquishUniqueFileResource(input_filename); read_info=DestroyImageInfo(read_info); if (image == (Image *) NULL) ThrowReaderException(DelegateError,"PCLDelegateFailed"); if (LocaleCompare(image->magick,"BMP") == 0) { Image *cmyk_image; cmyk_image=ConsolidateCMYKImages(image,&image->exception); if (cmyk_image != (Image *) NULL) { image=DestroyImageList(image); image=cmyk_image; } } do { (void) CopyMagickString(image->filename,filename,MaxTextExtent); image->page=page; if (image_info->ping != MagickFalse) { image->magick_columns*=image->x_resolution/2.0; image->magick_rows*=image->y_resolution/2.0; image->columns*=image->x_resolution/2.0; image->rows*=image->y_resolution/2.0; } next_image=SyncNextImageInList(image); if (next_image != (Image *) NULL) image=next_image; } while (next_image != (Image *) NULL); return(GetFirstImageInList(image)); }
83929363874861063792960158581169163951
pcl.c
19401947442598835469700605981025281430
CWE-190
CVE-2022-32546
A vulnerability was found in ImageMagick in coders/pcl.c: when crafted or untrusted input is processed, a floating-point value outside the range representable by type 'unsigned long' is converted to that type. This leads to undefined behavior that can negatively impact application availability or cause other problems.
https://nvd.nist.gov/vuln/detail/CVE-2022-32546
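Note on the CWE-190 pattern in the patched reader above: the page-size arithmetic is routed through CastDoubleToLong before the result is stored in a size_t, because converting a floating-point value that lies outside the target integer type's range is undefined behavior in C. The following is a minimal, self-contained sketch of that clamping idea, not ImageMagick code; the helper name clamp_double_to_long is hypothetical.

#include <math.h>
#include <limits.h>
#include <stdio.h>

/* Hypothetical helper: clamp a double into the range of long before
   converting, so out-of-range input (e.g. from an untrusted bounding
   box) cannot trigger undefined behavior on the conversion. */
static long clamp_double_to_long(double value)
{
  if (isnan(value))
    return 0;
  if (value >= (double) LONG_MAX)
    return LONG_MAX;
  if (value <= (double) LONG_MIN)
    return LONG_MIN;
  return (long) value;
}

int main(void)
{
  double bounds_x1 = 0.0, bounds_x2 = 1e30;  /* crafted, absurdly large box */
  size_t width;

  /* Unsafe: (size_t) floor(1e30 + 0.5) is outside the range of any
     integer type here, which is undefined behavior (CWE-190 class). */
  /* width = (size_t) floor(bounds_x2 - bounds_x1 + 0.5); */

  /* Safer: clamp first, then convert. */
  width = (size_t) clamp_double_to_long(floor(bounds_x2 - bounds_x1 + 0.5));
  printf("width = %zu\n", width);
  return 0;
}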
195,409
gpac
64a2e1b799352ac7d7aad1989bc06e7b0f2b01db
https://github.com/gpac/gpac
https://github.com/gpac/gpac/commit/64a2e1b799352ac7d7aad1989bc06e7b0f2b01db
fixed #2092
1
void gitn_box_del(GF_Box *s) { u32 i; GroupIdToNameBox *ptr = (GroupIdToNameBox *)s; if (ptr == NULL) return; for (i=0; i<ptr->nb_entries; i++) { if (ptr->entries[i].name) gf_free(ptr->entries[i].name); } if (ptr->entries) gf_free(ptr->entries); gf_free(ptr);
37642310110270321687625000100653046485
box_code_base.c
212802147696207025803784466432150384318
CWE-476
CVE-2021-4043
NULL Pointer Dereference in GitHub repository gpac/gpac prior to 1.1.0.
https://nvd.nist.gov/vuln/detail/CVE-2021-4043
226,029
gpac
64a2e1b799352ac7d7aad1989bc06e7b0f2b01db
https://github.com/gpac/gpac
https://github.com/gpac/gpac/commit/64a2e1b799352ac7d7aad1989bc06e7b0f2b01db
fixed #2092
0
void gitn_box_del(GF_Box *s) { u32 i; GroupIdToNameBox *ptr = (GroupIdToNameBox *)s; if (ptr == NULL) return; if (ptr->entries) { for (i=0; i<ptr->nb_entries; i++) { if (ptr->entries[i].name) gf_free(ptr->entries[i].name); } gf_free(ptr->entries); } gf_free(ptr);
265612189453593240220286582640125308421
box_code_base.c
49851003819063672326837979869211393199
CWE-476
CVE-2021-4043
NULL Pointer Dereference in GitHub repository gpac/gpac prior to 1.1.0.
https://nvd.nist.gov/vuln/detail/CVE-2021-4043
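The only functional difference between the two gitn_box_del variants above is that the patched one walks and frees the entry list only when ptr->entries is non-NULL, so a crafted file that reports nb_entries > 0 without an allocated entry array no longer causes a NULL dereference. A minimal stand-alone C sketch of the same guard, using simplified toy types rather than the GPAC structures:

#include <stdio.h>
#include <stdlib.h>

/* Minimal stand-in for the box structure; field names mirror the ones
   above but the types are simplified for illustration only. */
typedef struct {
  unsigned nb_entries;
  struct { char *name; } *entries;
} toy_gitn_box;

static void toy_gitn_box_del(toy_gitn_box *ptr)
{
  unsigned i;
  if (ptr == NULL) return;
  /* The fix: only walk the entry list if it was actually allocated.
     A truncated or crafted file can leave nb_entries > 0 with
     entries == NULL. */
  if (ptr->entries) {
    for (i = 0; i < ptr->nb_entries; i++) {
      if (ptr->entries[i].name) free(ptr->entries[i].name);
    }
    free(ptr->entries);
  }
  free(ptr);
}

int main(void)
{
  toy_gitn_box *box = calloc(1, sizeof(*box));
  if (box == NULL) return 1;
  box->nb_entries = 4;     /* claims entries ... */
  box->entries = NULL;     /* ... but none were allocated */
  toy_gitn_box_del(box);   /* without the guard this dereferences NULL */
  puts("freed without crashing");
  return 0;
}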
195,410
tensorflow
965b97e4a9650495cda5a8c210ef6684b4b9eceb
https://github.com/tensorflow/tensorflow
https://github.com/tensorflow/tensorflow/commit/965b97e4a9650495cda5a8c210ef6684b4b9eceb
Properly validate sparse tensor in `SparseTensorSliceDataset` Existing validation was incomplete. PiperOrigin-RevId: 415375048 Change-Id: I14cd18f29ede73286f3ffac35171bd15828997e9
1
void MakeDataset(OpKernelContext* ctx, DatasetBase** output) override { // Create a new SparseTensorSliceDatasetOp::Dataset, insert it in // the step container, and return it as the output. const Tensor* indices; OP_REQUIRES_OK(ctx, ctx->input("indices", &indices)); const Tensor* values; OP_REQUIRES_OK(ctx, ctx->input("values", &values)); const Tensor* dense_shape; OP_REQUIRES_OK(ctx, ctx->input("dense_shape", &dense_shape)); OP_REQUIRES(ctx, TensorShapeUtils::IsMatrix(indices->shape()), errors::InvalidArgument( "Input indices should be a matrix but received shape ", indices->shape().DebugString())); const auto num_indices = indices->NumElements(); const auto num_values = values->NumElements(); if (num_indices == 0 || num_values == 0) { OP_REQUIRES(ctx, num_indices == num_values, errors::InvalidArgument( "If indices or values are empty, the other one must also " "be. Got indices of shape ", indices->shape().DebugString(), " and values of shape ", values->shape().DebugString())); } OP_REQUIRES(ctx, TensorShapeUtils::IsVector(values->shape()), errors::InvalidArgument( "Input values should be a vector but received shape ", indices->shape().DebugString())); OP_REQUIRES(ctx, TensorShapeUtils::IsVector(dense_shape->shape()), errors::InvalidArgument( "Input shape should be a vector but received shape ", dense_shape->shape().DebugString())); // We currently ensure that `sparse_tensor` is ordered in the // batch dimension. // TODO(mrry): Investigate ways to avoid this unconditional check // if we can be sure that the sparse tensor was produced in an // appropriate order (e.g. by `tf.parse_example()` or a Dataset // that batches elements into rows of a SparseTensor). int64_t previous_batch_index = -1; for (int64_t i = 0; i < indices->dim_size(0); ++i) { int64_t next_batch_index = indices->matrix<int64_t>()(i, 0); OP_REQUIRES( ctx, next_batch_index >= previous_batch_index, errors::Unimplemented("The SparseTensor must be ordered in the batch " "dimension; handling arbitrarily ordered input " "is not currently supported.")); previous_batch_index = next_batch_index; } gtl::InlinedVector<int64_t, 8> std_order(dense_shape->NumElements(), 0); sparse::SparseTensor tensor; OP_REQUIRES_OK( ctx, sparse::SparseTensor::Create( *indices, *values, TensorShape(dense_shape->vec<int64_t>()), std_order, &tensor)); *output = new Dataset<T>(ctx, std::move(tensor)); }
232798786480644222523895580158118045723
sparse_tensor_slice_dataset_op.cc
20179985196620256343354076777909821072
CWE-476
CVE-2022-21736
Tensorflow is an Open Source Machine Learning Framework. The implementation of `SparseTensorSliceDataset` has an undefined behavior: under certain condition it can be made to dereference a `nullptr` value. The 3 input arguments to `SparseTensorSliceDataset` represent a sparse tensor. However, there are some preconditions that these arguments must satisfy but these are not validated in the implementation. The fix will be included in TensorFlow 2.8.0. We will also cherrypick this commit on TensorFlow 2.7.1, TensorFlow 2.6.3, and TensorFlow 2.5.3, as these are also affected and still in supported range.
https://nvd.nist.gov/vuln/detail/CVE-2022-21736
226,433
tensorflow
965b97e4a9650495cda5a8c210ef6684b4b9eceb
https://github.com/tensorflow/tensorflow
https://github.com/tensorflow/tensorflow/commit/965b97e4a9650495cda5a8c210ef6684b4b9eceb
Properly validate sparse tensor in `SparseTensorSliceDataset` Existing validation was incomplete. PiperOrigin-RevId: 415375048 Change-Id: I14cd18f29ede73286f3ffac35171bd15828997e9
0
void MakeDataset(OpKernelContext* ctx, DatasetBase** output) override { // Create a new SparseTensorSliceDatasetOp::Dataset, insert it in // the step container, and return it as the output. const Tensor* indices; OP_REQUIRES_OK(ctx, ctx->input("indices", &indices)); const Tensor* values; OP_REQUIRES_OK(ctx, ctx->input("values", &values)); const Tensor* dense_shape; OP_REQUIRES_OK(ctx, ctx->input("dense_shape", &dense_shape)); OP_REQUIRES(ctx, TensorShapeUtils::IsMatrix(indices->shape()), errors::InvalidArgument("Input indices must be a matrix. Got: ", indices->shape().DebugString())); OP_REQUIRES(ctx, TensorShapeUtils::IsVector(values->shape()), errors::InvalidArgument("Input values must be a vector. Got: ", values->shape().DebugString())); OP_REQUIRES(ctx, TensorShapeUtils::IsVector(dense_shape->shape()), errors::InvalidArgument("Input shape must be a vector. Got: ", dense_shape->shape().DebugString())); OP_REQUIRES( ctx, values->shape().dim_size(0) == indices->shape().dim_size(0), errors::InvalidArgument( "Number of values must match first dimension of indices. ", "Got ", values->shape().dim_size(0), " values, indices shape: ", indices->shape().DebugString())); OP_REQUIRES( ctx, dense_shape->shape().dim_size(0) == indices->shape().dim_size(1), errors::InvalidArgument( "Number of dimensions must match second dimension of indices. ", "Got ", dense_shape->shape().dim_size(0), " dimensions, indices shape: ", indices->shape().DebugString())); OP_REQUIRES(ctx, dense_shape->NumElements() > 0, errors::InvalidArgument( "The shape argument requires at least one element.")); // We currently ensure that `sparse_tensor` is ordered in the // batch dimension. // TODO(mrry): Investigate ways to avoid this unconditional check // if we can be sure that the sparse tensor was produced in an // appropriate order (e.g. by `tf.parse_example()` or a Dataset // that batches elements into rows of a SparseTensor). int64_t previous_batch_index = -1; for (int64_t i = 0; i < indices->dim_size(0); ++i) { int64_t next_batch_index = indices->matrix<int64_t>()(i, 0); OP_REQUIRES( ctx, next_batch_index >= previous_batch_index, errors::Unimplemented("The SparseTensor must be ordered in the batch " "dimension; handling arbitrarily ordered input " "is not currently supported.")); previous_batch_index = next_batch_index; } gtl::InlinedVector<int64_t, 8> std_order(dense_shape->NumElements(), 0); sparse::SparseTensor tensor; OP_REQUIRES_OK( ctx, sparse::SparseTensor::Create( *indices, *values, TensorShape(dense_shape->vec<int64_t>()), std_order, &tensor)); *output = new Dataset<T>(ctx, std::move(tensor)); }
227797014623034241967270311347340556143
sparse_tensor_slice_dataset_op.cc
223168035118292217298610217555690212472
CWE-476
CVE-2022-21736
Tensorflow is an Open Source Machine Learning Framework. The implementation of `SparseTensorSliceDataset` has an undefined behavior: under certain condition it can be made to dereference a `nullptr` value. The 3 input arguments to `SparseTensorSliceDataset` represent a sparse tensor. However, there are some preconditions that these arguments must satisfy but these are not validated in the implementation. The fix will be included in TensorFlow 2.8.0. We will also cherrypick this commit on TensorFlow 2.7.1, TensorFlow 2.6.3, and TensorFlow 2.5.3, as these are also affected and still in supported range.
https://nvd.nist.gov/vuln/detail/CVE-2022-21736
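The patched kernel above ties the three inputs together before building the SparseTensor: the number of values must equal the number of index rows, the dense shape length must equal the number of index columns, and the shape must be non-empty. The sketch below restates those precondition checks in framework-free C with plain scalars; it is illustrative only and uses none of the TensorFlow APIs.

#include <stdio.h>

/* Illustrative-only validation of a COO-style sparse tensor:
   indices is an n_indices x rank matrix, values has n_values entries,
   dense_shape has shape_len entries. The checks mirror the ones added
   in the patched kernel above. Returns 1 if consistent, 0 otherwise. */
static int validate_sparse(long n_indices, long rank,
                           long n_values, long shape_len)
{
  if (n_values != n_indices) {
    fprintf(stderr, "number of values must match first dimension of indices\n");
    return 0;
  }
  if (shape_len != rank) {
    fprintf(stderr, "number of dimensions must match second dimension of indices\n");
    return 0;
  }
  if (shape_len <= 0) {
    fprintf(stderr, "the shape argument requires at least one element\n");
    return 0;
  }
  return 1;
}

int main(void)
{
  /* Inconsistent inputs of the kind the CVE describes: 3 index rows but
     only 2 values, and an empty shape. Without such checks, downstream
     code that assumes consistency can end up dereferencing nothing. */
  if (!validate_sparse(/*n_indices=*/3, /*rank=*/2, /*n_values=*/2, /*shape_len=*/0))
    puts("rejected malformed sparse tensor before use");
  return 0;
}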
195,665
njs
2e00e95473861846aa8538be87db07699d9f676d
https://github.com/nginx/njs
https://github.com/nginx/njs/commit/2e00e95473861846aa8538be87db07699d9f676d
Fixed Array.prototype.slice() with slow "this" argument. Previously, when "this" argument was not a fast array, but the "deleted" array was a fast array, the "deleted" array may be left in uninitialized state if "this" argument had gaps. This fix is to ensure that "deleted" is properly initialized. This fixes #485 issue on Github.
1
njs_array_prototype_splice(njs_vm_t *vm, njs_value_t *args, njs_uint_t nargs, njs_index_t unused) { int64_t i, n, start, length, items, delta, delete; njs_int_t ret; njs_value_t *this, value, del_object; njs_array_t *array, *deleted; this = njs_argument(args, 0); ret = njs_value_to_object(vm, this); if (njs_slow_path(ret != NJS_OK)) { return ret; } ret = njs_object_length(vm, this, &length); if (njs_slow_path(ret == NJS_ERROR)) { return ret; } ret = njs_value_to_integer(vm, njs_arg(args, nargs, 1), &start); if (njs_slow_path(ret != NJS_OK)) { return ret; } start = (start < 0) ? njs_max(length + start, 0) : njs_min(start, length); items = 0; delete = 0; if (nargs == 2) { delete = length - start; } else if (nargs > 2) { items = nargs - 3; ret = njs_value_to_integer(vm, njs_arg(args, nargs, 2), &delete); if (njs_slow_path(ret != NJS_OK)) { return ret; } delete = njs_min(njs_max(delete, 0), length - start); } delta = items - delete; if (njs_slow_path((length + delta) > NJS_MAX_LENGTH)) { njs_type_error(vm, "Invalid length"); return NJS_ERROR; } /* TODO: ArraySpeciesCreate(). */ deleted = njs_array_alloc(vm, 0, delete, 0); if (njs_slow_path(deleted == NULL)) { return NJS_ERROR; } if (njs_fast_path(njs_is_fast_array(this) && deleted->object.fast_array)) { array = njs_array(this); for (i = 0, n = start; i < delete; i++, n++) { deleted->start[i] = array->start[n]; } } else { njs_set_array(&del_object, deleted); for (i = 0, n = start; i < delete; i++, n++) { ret = njs_value_property_i64(vm, this, n, &value); if (njs_slow_path(ret == NJS_ERROR)) { return NJS_ERROR; } if (ret == NJS_OK) { /* TODO: CreateDataPropertyOrThrow(). */ ret = njs_value_property_i64_set(vm, &del_object, i, &value); if (njs_slow_path(ret == NJS_ERROR)) { return ret; } } } ret = njs_object_length_set(vm, &del_object, delete); if (njs_slow_path(ret != NJS_OK)) { return NJS_ERROR; } } if (njs_fast_path(njs_is_fast_array(this))) { array = njs_array(this); if (delta != 0) { /* * Relocate the rest of items. * Index of the first item is in "n". */ if (delta > 0) { ret = njs_array_expand(vm, array, 0, delta); if (njs_slow_path(ret != NJS_OK)) { return ret; } } ret = njs_array_copy_within(vm, this, start + items, start + delete, array->length - (start + delete), 0); if (njs_slow_path(ret != NJS_OK)) { return ret; } array->length += delta; } /* Copy new items. */ if (items > 0) { memcpy(&array->start[start], &args[3], items * sizeof(njs_value_t)); } } else { if (delta != 0) { ret = njs_array_copy_within(vm, this, start + items, start + delete, length - (start + delete), delta < 0); if (njs_slow_path(ret != NJS_OK)) { return ret; } for (i = length - 1; i >= length + delta; i--) { ret = njs_value_property_i64_delete(vm, this, i, NULL); if (njs_slow_path(ret == NJS_ERROR)) { return NJS_ERROR; } } } /* Copy new items. */ for (i = 3, n = start; items-- > 0; i++, n++) { ret = njs_value_property_i64_set(vm, this, n, &args[i]); if (njs_slow_path(ret == NJS_ERROR)) { return NJS_ERROR; } } ret = njs_object_length_set(vm, this, length + delta); if (njs_slow_path(ret != NJS_OK)) { return NJS_ERROR; } } njs_set_array(&vm->retval, deleted); return NJS_OK; }
41889957200154277256182614621042854713
njs_array.c
27861953644579332654826088207600556930
CWE-703
CVE-2022-29779
Nginx NJS v0.7.2 was discovered to contain a segmentation violation in the function njs_value_own_enumerate at src/njs_value.c.
https://nvd.nist.gov/vuln/detail/CVE-2022-29779
230,302
njs
2e00e95473861846aa8538be87db07699d9f676d
https://github.com/nginx/njs
https://github.com/nginx/njs/commit/2e00e95473861846aa8538be87db07699d9f676d
Fixed Array.prototype.slice() with slow "this" argument. Previously, when "this" argument was not a fast array, but the "deleted" array was a fast array, the "deleted" array may be left in uninitialized state if "this" argument had gaps. This fix is to ensure that "deleted" is properly initialized. This fixes #485 issue on Github.
0
njs_array_prototype_splice(njs_vm_t *vm, njs_value_t *args, njs_uint_t nargs, njs_index_t unused) { int64_t i, n, start, length, items, delta, delete; njs_int_t ret; njs_value_t *this, value, del_object; njs_array_t *array, *deleted; this = njs_argument(args, 0); ret = njs_value_to_object(vm, this); if (njs_slow_path(ret != NJS_OK)) { return ret; } ret = njs_object_length(vm, this, &length); if (njs_slow_path(ret == NJS_ERROR)) { return ret; } ret = njs_value_to_integer(vm, njs_arg(args, nargs, 1), &start); if (njs_slow_path(ret != NJS_OK)) { return ret; } start = (start < 0) ? njs_max(length + start, 0) : njs_min(start, length); items = 0; delete = 0; if (nargs == 2) { delete = length - start; } else if (nargs > 2) { items = nargs - 3; ret = njs_value_to_integer(vm, njs_arg(args, nargs, 2), &delete); if (njs_slow_path(ret != NJS_OK)) { return ret; } delete = njs_min(njs_max(delete, 0), length - start); } delta = items - delete; if (njs_slow_path((length + delta) > NJS_MAX_LENGTH)) { njs_type_error(vm, "Invalid length"); return NJS_ERROR; } /* TODO: ArraySpeciesCreate(). */ deleted = njs_array_alloc(vm, 0, delete, 0); if (njs_slow_path(deleted == NULL)) { return NJS_ERROR; } if (njs_fast_path(njs_is_fast_array(this) && deleted->object.fast_array)) { array = njs_array(this); for (i = 0, n = start; i < delete; i++, n++) { deleted->start[i] = array->start[n]; } } else { njs_set_array(&del_object, deleted); for (i = 0, n = start; i < delete; i++, n++) { ret = njs_value_property_i64(vm, this, n, &value); if (njs_slow_path(ret == NJS_ERROR)) { return NJS_ERROR; } if (ret == NJS_OK) { /* TODO: CreateDataPropertyOrThrow(). */ ret = njs_value_property_i64_set(vm, &del_object, i, &value); if (njs_slow_path(ret == NJS_ERROR)) { return ret; } } else { if (deleted->object.fast_array) { njs_set_invalid(&deleted->start[i]); } } } ret = njs_object_length_set(vm, &del_object, delete); if (njs_slow_path(ret != NJS_OK)) { return NJS_ERROR; } } if (njs_fast_path(njs_is_fast_array(this))) { array = njs_array(this); if (delta != 0) { /* * Relocate the rest of items. * Index of the first item is in "n". */ if (delta > 0) { ret = njs_array_expand(vm, array, 0, delta); if (njs_slow_path(ret != NJS_OK)) { return ret; } } ret = njs_array_copy_within(vm, this, start + items, start + delete, array->length - (start + delete), 0); if (njs_slow_path(ret != NJS_OK)) { return ret; } array->length += delta; } /* Copy new items. */ if (items > 0) { memcpy(&array->start[start], &args[3], items * sizeof(njs_value_t)); } } else { if (delta != 0) { ret = njs_array_copy_within(vm, this, start + items, start + delete, length - (start + delete), delta < 0); if (njs_slow_path(ret != NJS_OK)) { return ret; } for (i = length - 1; i >= length + delta; i--) { ret = njs_value_property_i64_delete(vm, this, i, NULL); if (njs_slow_path(ret == NJS_ERROR)) { return NJS_ERROR; } } } /* Copy new items. */ for (i = 3, n = start; items-- > 0; i++, n++) { ret = njs_value_property_i64_set(vm, this, n, &args[i]); if (njs_slow_path(ret == NJS_ERROR)) { return NJS_ERROR; } } ret = njs_object_length_set(vm, this, length + delta); if (njs_slow_path(ret != NJS_OK)) { return NJS_ERROR; } } njs_set_array(&vm->retval, deleted); return NJS_OK; }
91452857281463208584852927019432341939
njs_array.c
127478727069540736637535553874506726763
CWE-703
CVE-2022-29779
Nginx NJS v0.7.2 was discovered to contain a segmentation violation in the function njs_value_own_enumerate at src/njs_value.c.
https://nvd.nist.gov/vuln/detail/CVE-2022-29779
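In the patched splice above, when a property read on a slow "this" declines (a gap), the corresponding slot of the fast "deleted" array is explicitly set to an invalid value instead of being left uninitialized. The same discipline in a generic, self-contained C sketch (toy names, not njs code): every slot of a freshly allocated destination is written exactly once, either with a copied value or with an explicit hole marker.

#include <stdio.h>
#include <stdlib.h>

#define SLOT_INVALID (-1)

/* Copy the "present" source slots into a new array; slots that are
   missing in the source (holes) are explicitly marked instead of being
   left with whatever the allocator returned. This mirrors the njs fix
   above, where skipped elements of the fast "deleted" array are set to
   an invalid value. */
static int *copy_with_holes(const int *src, const char *present, size_t n)
{
  int *dst = malloc(n * sizeof(*dst));
  if (dst == NULL) return NULL;
  for (size_t i = 0; i < n; i++)
    dst[i] = present[i] ? src[i] : SLOT_INVALID;  /* never uninitialized */
  return dst;
}

int main(void)
{
  int src[5] = { 10, 0, 30, 0, 50 };
  char present[5] = { 1, 0, 1, 0, 1 };            /* indices 1 and 3 are holes */
  int *deleted = copy_with_holes(src, present, 5);
  if (deleted == NULL) return 1;
  for (size_t i = 0; i < 5; i++)
    printf("slot %zu: %d\n", i, deleted[i]);
  free(deleted);
  return 0;
}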
195,670
pjproject
856f87c2e97a27b256482dbe0d748b1194355a21
https://github.com/pjsip/pjproject
https://github.com/pjsip/pjproject/commit/856f87c2e97a27b256482dbe0d748b1194355a21
Merge pull request from GHSA-5x45-qp78-g4p4 * Prevent infinite loop in scanning xml content * Simplify scanning method * Optimization
1
static pj_xml_node *xml_parse_node( pj_pool_t *pool, pj_scanner *scanner) { pj_xml_node *node; pj_str_t end_name; PJ_CHECK_STACK(); if (*scanner->curptr != '<') on_syntax_error(scanner); /* Handle Processing Instructino (PI) construct (i.e. "<?") */ if (*scanner->curptr == '<' && *(scanner->curptr+1) == '?') { pj_scan_advance_n(scanner, 2, PJ_FALSE); for (;;) { pj_str_t dummy; pj_scan_get_until_ch(scanner, '?', &dummy); if (*scanner->curptr=='?' && *(scanner->curptr+1)=='>') { pj_scan_advance_n(scanner, 2, PJ_TRUE); break; } else { pj_scan_advance_n(scanner, 1, PJ_FALSE); } } return xml_parse_node(pool, scanner); } /* Handle comments construct (i.e. "<!") */ if (pj_scan_strcmp(scanner, "<!", 2) == 0) { pj_scan_advance_n(scanner, 2, PJ_FALSE); for (;;) { pj_str_t dummy; pj_scan_get_until_ch(scanner, '>', &dummy); if (pj_scan_strcmp(scanner, ">", 1) == 0) { pj_scan_advance_n(scanner, 1, PJ_TRUE); break; } else { pj_scan_advance_n(scanner, 1, PJ_FALSE); } } return xml_parse_node(pool, scanner); } /* Alloc node. */ node = alloc_node(pool); /* Get '<' */ pj_scan_get_char(scanner); /* Get node name. */ pj_scan_get_until_chr( scanner, " />\t\r\n", &node->name); /* Get attributes. */ while (*scanner->curptr != '>' && *scanner->curptr != '/') { pj_xml_attr *attr = alloc_attr(pool); pj_scan_get_until_chr( scanner, "=> \t\r\n", &attr->name); if (*scanner->curptr == '=') { pj_scan_get_char( scanner ); pj_scan_get_quotes(scanner, "\"'", "\"'", 2, &attr->value); /* remove quote characters */ ++attr->value.ptr; attr->value.slen -= 2; } pj_list_push_back( &node->attr_head, attr ); } if (*scanner->curptr == '/') { pj_scan_get_char(scanner); if (pj_scan_get_char(scanner) != '>') on_syntax_error(scanner); return node; } /* Enclosing bracket. */ if (pj_scan_get_char(scanner) != '>') on_syntax_error(scanner); /* Sub nodes. */ while (*scanner->curptr == '<' && *(scanner->curptr+1) != '/' && *(scanner->curptr+1) != '!') { pj_xml_node *sub_node = xml_parse_node(pool, scanner); pj_list_push_back( &node->node_head, sub_node ); } /* Content. */ if (!pj_scan_is_eof(scanner) && *scanner->curptr != '<') { pj_scan_get_until_ch(scanner, '<', &node->content); } /* CDATA content. */ if (*scanner->curptr == '<' && *(scanner->curptr+1) == '!' && pj_scan_strcmp(scanner, "<![CDATA[", 9) == 0) { pj_scan_advance_n(scanner, 9, PJ_FALSE); pj_scan_get_until_ch(scanner, ']', &node->content); while (pj_scan_strcmp(scanner, "]]>", 3)) { pj_str_t dummy; pj_scan_get_until_ch(scanner, ']', &dummy); } node->content.slen = scanner->curptr - node->content.ptr; pj_scan_advance_n(scanner, 3, PJ_TRUE); } /* Enclosing node. */ if (pj_scan_get_char(scanner) != '<' || pj_scan_get_char(scanner) != '/') on_syntax_error(scanner); pj_scan_get_until_chr(scanner, " \t>", &end_name); /* Compare name. */ if (pj_stricmp(&node->name, &end_name) != 0) on_syntax_error(scanner); /* Enclosing '>' */ if (pj_scan_get_char(scanner) != '>') on_syntax_error(scanner); return node; }
277240174945375908655928995975016589842
xml.c
319030018310003515806173873266013713455
CWE-703
CVE-2022-24763
PJSIP is a free and open source multimedia communication library written in the C language. Versions 2.12 and prior contain a denial-of-service vulnerability that affects PJSIP users that consume PJSIP's XML parsing in their apps. Users are advised to update. There are no known workarounds.
https://nvd.nist.gov/vuln/detail/CVE-2022-24763
230,394
pjproject
856f87c2e97a27b256482dbe0d748b1194355a21
https://github.com/pjsip/pjproject
https://github.com/pjsip/pjproject/commit/856f87c2e97a27b256482dbe0d748b1194355a21
Merge pull request from GHSA-5x45-qp78-g4p4 * Prevent infinite loop in scanning xml content * Simplify scanning method * Optimization
0
static pj_xml_node *xml_parse_node( pj_pool_t *pool, pj_scanner *scanner) { pj_xml_node *node; pj_str_t end_name; PJ_CHECK_STACK(); if (*scanner->curptr != '<') on_syntax_error(scanner); /* Handle Processing Instructino (PI) construct (i.e. "<?") */ if (*scanner->curptr == '<' && *(scanner->curptr+1) == '?') { pj_scan_advance_n(scanner, 2, PJ_FALSE); for (;;) { pj_str_t dummy; pj_scan_get_until_ch(scanner, '?', &dummy); if (*scanner->curptr=='?' && *(scanner->curptr+1)=='>') { pj_scan_advance_n(scanner, 2, PJ_TRUE); break; } else { pj_scan_advance_n(scanner, 1, PJ_FALSE); } } return xml_parse_node(pool, scanner); } /* Handle comments construct (i.e. "<!") */ if (pj_scan_strcmp(scanner, "<!", 2) == 0) { pj_scan_advance_n(scanner, 2, PJ_FALSE); for (;;) { pj_str_t dummy; pj_scan_get_until_ch(scanner, '>', &dummy); if (pj_scan_strcmp(scanner, ">", 1) == 0) { pj_scan_advance_n(scanner, 1, PJ_TRUE); break; } else { pj_scan_advance_n(scanner, 1, PJ_FALSE); } } return xml_parse_node(pool, scanner); } /* Alloc node. */ node = alloc_node(pool); /* Get '<' */ pj_scan_get_char(scanner); /* Get node name. */ pj_scan_get_until_chr( scanner, " />\t\r\n", &node->name); /* Get attributes. */ while (*scanner->curptr != '>' && *scanner->curptr != '/') { pj_xml_attr *attr = alloc_attr(pool); pj_scan_get_until_chr( scanner, "=> \t\r\n", &attr->name); if (*scanner->curptr == '=') { pj_scan_get_char( scanner ); pj_scan_get_quotes(scanner, "\"'", "\"'", 2, &attr->value); /* remove quote characters */ ++attr->value.ptr; attr->value.slen -= 2; } pj_list_push_back( &node->attr_head, attr ); } if (*scanner->curptr == '/') { pj_scan_get_char(scanner); if (pj_scan_get_char(scanner) != '>') on_syntax_error(scanner); return node; } /* Enclosing bracket. */ if (pj_scan_get_char(scanner) != '>') on_syntax_error(scanner); /* Sub nodes. */ while (*scanner->curptr == '<' && *(scanner->curptr+1) != '/' && *(scanner->curptr+1) != '!') { pj_xml_node *sub_node = xml_parse_node(pool, scanner); pj_list_push_back( &node->node_head, sub_node ); } /* Content. */ if (!pj_scan_is_eof(scanner) && *scanner->curptr != '<') { pj_scan_get_until_ch(scanner, '<', &node->content); } /* CDATA content. */ if (*scanner->curptr == '<' && *(scanner->curptr+1) == '!' && pj_scan_strcmp(scanner, "<![CDATA[", 9) == 0) { pj_scan_advance_n(scanner, 9, PJ_FALSE); pj_scan_get_until_ch(scanner, ']', &node->content); while (pj_scan_strcmp(scanner, "]]>", 3)) { pj_str_t dummy; pj_scan_advance_n(scanner, 1, PJ_FALSE); pj_scan_get_until_ch(scanner, ']', &dummy); } node->content.slen = scanner->curptr - node->content.ptr; pj_scan_advance_n(scanner, 3, PJ_TRUE); } /* Enclosing node. */ if (pj_scan_get_char(scanner) != '<' || pj_scan_get_char(scanner) != '/') on_syntax_error(scanner); pj_scan_get_until_chr(scanner, " \t>", &end_name); /* Compare name. */ if (pj_stricmp(&node->name, &end_name) != 0) on_syntax_error(scanner); /* Enclosing '>' */ if (pj_scan_get_char(scanner) != '>') on_syntax_error(scanner); return node; }
333353357538572797993178277028111202319
xml.c
137943726532385231053862962299057228575
CWE-703
CVE-2022-24763
PJSIP is a free and open source multimedia communication library written in the C language. Versions 2.12 and prior contain a denial-of-service vulnerability that affects PJSIP users that consume PJSIP's XML parsing in their apps. Users are advised to update. There are no known workarounds.
https://nvd.nist.gov/vuln/detail/CVE-2022-24763
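The patched xml_parse_node differs from the vulnerable one only inside the CDATA loop: the scanner is advanced by one character before searching for the next ']', so each iteration consumes input and content that contains ']' but never "]]>" cannot loop forever. A stand-alone C sketch of a scan loop with the same always-advance guarantee (illustrative only, not PJSIP code):

#include <stdio.h>
#include <string.h>

/* Scan for the terminator "]]>" in a buffer. The key point, mirroring
   the patch above, is that each iteration consumes at least one
   character, so crafted input that contains ']' but never "]]>" cannot
   spin forever. Returns a pointer to the terminator or NULL. */
static const char *find_cdata_end(const char *p, const char *end)
{
  while (p < end) {
    const char *close = memchr(p, ']', (size_t)(end - p));
    if (close == NULL || end - close < 3)
      return NULL;                 /* ran out of input: malformed CDATA */
    if (memcmp(close, "]]>", 3) == 0)
      return close;                /* found the real terminator */
    p = close + 1;                 /* always advance past this ']' */
  }
  return NULL;
}

int main(void)
{
  const char *doc = "<![CDATA[ ]] ]] no terminator here";
  const char *hit = find_cdata_end(doc, doc + strlen(doc));
  if (hit)
    printf("terminator at offset %ld\n", (long)(hit - doc));
  else
    puts("malformed CDATA rejected instead of looping forever");
  return 0;
}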
195,691
mruby
a4d97934d51cb88954cc49161dc1d151f64afb6b
https://github.com/mruby/mruby
https://github.com/mruby/mruby/commit/a4d97934d51cb88954cc49161dc1d151f64afb6b
vm.c: check if target_class is NULL (when prepended).
1
mrb_vm_exec(mrb_state *mrb, const struct RProc *proc, const mrb_code *pc) { /* mrb_assert(MRB_PROC_CFUNC_P(proc)) */ const mrb_irep *irep = proc->body.irep; const mrb_pool_value *pool = irep->pool; const mrb_sym *syms = irep->syms; mrb_code insn; int ai = mrb_gc_arena_save(mrb); struct mrb_jmpbuf *prev_jmp = mrb->jmp; struct mrb_jmpbuf c_jmp; uint32_t a; uint16_t b; uint16_t c; mrb_sym mid; const struct mrb_irep_catch_handler *ch; #ifdef DIRECT_THREADED static const void * const optable[] = { #define OPCODE(x,_) &&L_OP_ ## x, #include "mruby/ops.h" #undef OPCODE }; #endif mrb_bool exc_catched = FALSE; RETRY_TRY_BLOCK: MRB_TRY(&c_jmp) { if (exc_catched) { exc_catched = FALSE; mrb_gc_arena_restore(mrb, ai); if (mrb->exc && mrb->exc->tt == MRB_TT_BREAK) goto L_BREAK; goto L_RAISE; } mrb->jmp = &c_jmp; mrb_vm_ci_proc_set(mrb->c->ci, proc); #define regs (mrb->c->ci->stack) INIT_DISPATCH { CASE(OP_NOP, Z) { /* do nothing */ NEXT; } CASE(OP_MOVE, BB) { regs[a] = regs[b]; NEXT; } CASE(OP_LOADL, BB) { switch (pool[b].tt) { /* number */ case IREP_TT_INT32: regs[a] = mrb_int_value(mrb, (mrb_int)pool[b].u.i32); break; case IREP_TT_INT64: #if defined(MRB_INT64) regs[a] = mrb_int_value(mrb, (mrb_int)pool[b].u.i64); break; #else #if defined(MRB_64BIT) if (INT32_MIN <= pool[b].u.i64 && pool[b].u.i64 <= INT32_MAX) { regs[a] = mrb_int_value(mrb, (mrb_int)pool[b].u.i64); break; } #endif goto L_INT_OVERFLOW; #endif case IREP_TT_BIGINT: #ifdef MRB_USE_BIGINT { const char *s = pool[b].u.str; regs[a] = mrb_bint_new_str(mrb, s+2, (mrb_int)s[0], (mrb_int)s[1]); } break; #else goto L_INT_OVERFLOW; #endif #ifndef MRB_NO_FLOAT case IREP_TT_FLOAT: regs[a] = mrb_float_value(mrb, pool[b].u.f); break; #endif default: /* should not happen (tt:string) */ regs[a] = mrb_nil_value(); break; } NEXT; } CASE(OP_LOADI, BB) { SET_FIXNUM_VALUE(regs[a], b); NEXT; } CASE(OP_LOADINEG, BB) { SET_FIXNUM_VALUE(regs[a], -b); NEXT; } CASE(OP_LOADI__1,B) goto L_LOADI; CASE(OP_LOADI_0,B) goto L_LOADI; CASE(OP_LOADI_1,B) goto L_LOADI; CASE(OP_LOADI_2,B) goto L_LOADI; CASE(OP_LOADI_3,B) goto L_LOADI; CASE(OP_LOADI_4,B) goto L_LOADI; CASE(OP_LOADI_5,B) goto L_LOADI; CASE(OP_LOADI_6,B) goto L_LOADI; CASE(OP_LOADI_7, B) { L_LOADI: SET_FIXNUM_VALUE(regs[a], (mrb_int)insn - (mrb_int)OP_LOADI_0); NEXT; } CASE(OP_LOADI16, BS) { SET_FIXNUM_VALUE(regs[a], (mrb_int)(int16_t)b); NEXT; } CASE(OP_LOADI32, BSS) { SET_INT_VALUE(mrb, regs[a], (int32_t)(((uint32_t)b<<16)+c)); NEXT; } CASE(OP_LOADSYM, BB) { SET_SYM_VALUE(regs[a], syms[b]); NEXT; } CASE(OP_LOADNIL, B) { SET_NIL_VALUE(regs[a]); NEXT; } CASE(OP_LOADSELF, B) { regs[a] = regs[0]; NEXT; } CASE(OP_LOADT, B) { SET_TRUE_VALUE(regs[a]); NEXT; } CASE(OP_LOADF, B) { SET_FALSE_VALUE(regs[a]); NEXT; } CASE(OP_GETGV, BB) { mrb_value val = mrb_gv_get(mrb, syms[b]); regs[a] = val; NEXT; } CASE(OP_SETGV, BB) { mrb_gv_set(mrb, syms[b], regs[a]); NEXT; } CASE(OP_GETSV, BB) { mrb_value val = mrb_vm_special_get(mrb, syms[b]); regs[a] = val; NEXT; } CASE(OP_SETSV, BB) { mrb_vm_special_set(mrb, syms[b], regs[a]); NEXT; } CASE(OP_GETIV, BB) { regs[a] = mrb_iv_get(mrb, regs[0], syms[b]); NEXT; } CASE(OP_SETIV, BB) { mrb_iv_set(mrb, regs[0], syms[b], regs[a]); NEXT; } CASE(OP_GETCV, BB) { mrb_value val; val = mrb_vm_cv_get(mrb, syms[b]); regs[a] = val; NEXT; } CASE(OP_SETCV, BB) { mrb_vm_cv_set(mrb, syms[b], regs[a]); NEXT; } CASE(OP_GETIDX, B) { mrb_value va = regs[a], vb = regs[a+1]; switch (mrb_type(va)) { case MRB_TT_ARRAY: if (!mrb_integer_p(vb)) goto getidx_fallback; regs[a] = mrb_ary_entry(va, mrb_integer(vb)); 
break; case MRB_TT_HASH: va = mrb_hash_get(mrb, va, vb); regs[a] = va; break; case MRB_TT_STRING: switch (mrb_type(vb)) { case MRB_TT_INTEGER: case MRB_TT_STRING: case MRB_TT_RANGE: va = mrb_str_aref(mrb, va, vb, mrb_undef_value()); regs[a] = va; break; default: goto getidx_fallback; } break; default: getidx_fallback: mid = MRB_OPSYM(aref); goto L_SEND_SYM; } NEXT; } CASE(OP_SETIDX, B) { c = 2; mid = MRB_OPSYM(aset); SET_NIL_VALUE(regs[a+3]); goto L_SENDB_SYM; } CASE(OP_GETCONST, BB) { mrb_value v = mrb_vm_const_get(mrb, syms[b]); regs[a] = v; NEXT; } CASE(OP_SETCONST, BB) { mrb_vm_const_set(mrb, syms[b], regs[a]); NEXT; } CASE(OP_GETMCNST, BB) { mrb_value v = mrb_const_get(mrb, regs[a], syms[b]); regs[a] = v; NEXT; } CASE(OP_SETMCNST, BB) { mrb_const_set(mrb, regs[a+1], syms[b], regs[a]); NEXT; } CASE(OP_GETUPVAR, BBB) { mrb_value *regs_a = regs + a; struct REnv *e = uvenv(mrb, c); if (e && b < MRB_ENV_LEN(e)) { *regs_a = e->stack[b]; } else { *regs_a = mrb_nil_value(); } NEXT; } CASE(OP_SETUPVAR, BBB) { struct REnv *e = uvenv(mrb, c); if (e) { mrb_value *regs_a = regs + a; if (b < MRB_ENV_LEN(e)) { e->stack[b] = *regs_a; mrb_write_barrier(mrb, (struct RBasic*)e); } } NEXT; } CASE(OP_JMP, S) { pc += (int16_t)a; JUMP; } CASE(OP_JMPIF, BS) { if (mrb_test(regs[a])) { pc += (int16_t)b; JUMP; } NEXT; } CASE(OP_JMPNOT, BS) { if (!mrb_test(regs[a])) { pc += (int16_t)b; JUMP; } NEXT; } CASE(OP_JMPNIL, BS) { if (mrb_nil_p(regs[a])) { pc += (int16_t)b; JUMP; } NEXT; } CASE(OP_JMPUW, S) { a = (uint32_t)((pc - irep->iseq) + (int16_t)a); CHECKPOINT_RESTORE(RBREAK_TAG_JUMP) { struct RBreak *brk = (struct RBreak*)mrb->exc; mrb_value target = mrb_break_value_get(brk); mrb_assert(mrb_integer_p(target)); a = (uint32_t)mrb_integer(target); mrb_assert(a >= 0 && a < irep->ilen); } CHECKPOINT_MAIN(RBREAK_TAG_JUMP) { ch = catch_handler_find(mrb, mrb->c->ci, pc, MRB_CATCH_FILTER_ENSURE); if (ch) { /* avoiding a jump from a catch handler into the same handler */ if (a < mrb_irep_catch_handler_unpack(ch->begin) || a >= mrb_irep_catch_handler_unpack(ch->end)) { THROW_TAGGED_BREAK(mrb, RBREAK_TAG_JUMP, proc, mrb_fixnum_value(a)); } } } CHECKPOINT_END(RBREAK_TAG_JUMP); mrb->exc = NULL; /* clear break object */ pc = irep->iseq + a; JUMP; } CASE(OP_EXCEPT, B) { mrb_value exc; if (mrb->exc == NULL) { exc = mrb_nil_value(); } else { switch (mrb->exc->tt) { case MRB_TT_BREAK: case MRB_TT_EXCEPTION: exc = mrb_obj_value(mrb->exc); break; default: mrb_assert(!"bad mrb_type"); exc = mrb_nil_value(); break; } mrb->exc = NULL; } regs[a] = exc; NEXT; } CASE(OP_RESCUE, BB) { mrb_value exc = regs[a]; /* exc on stack */ mrb_value e = regs[b]; struct RClass *ec; switch (mrb_type(e)) { case MRB_TT_CLASS: case MRB_TT_MODULE: break; default: { mrb_value exc; exc = mrb_exc_new_lit(mrb, E_TYPE_ERROR, "class or module required for rescue clause"); mrb_exc_set(mrb, exc); goto L_RAISE; } } ec = mrb_class_ptr(e); regs[b] = mrb_bool_value(mrb_obj_is_kind_of(mrb, exc, ec)); NEXT; } CASE(OP_RAISEIF, B) { mrb_value exc = regs[a]; if (mrb_break_p(exc)) { mrb->exc = mrb_obj_ptr(exc); goto L_BREAK; } mrb_exc_set(mrb, exc); if (mrb->exc) { goto L_RAISE; } NEXT; } CASE(OP_SSEND, BBB) { regs[a] = regs[0]; insn = OP_SEND; } goto L_SENDB; CASE(OP_SSENDB, BBB) { regs[a] = regs[0]; } goto L_SENDB; CASE(OP_SEND, BBB) goto L_SENDB; L_SEND_SYM: c = 1; /* push nil after arguments */ SET_NIL_VALUE(regs[a+2]); goto L_SENDB_SYM; CASE(OP_SENDB, BBB) L_SENDB: mid = syms[b]; L_SENDB_SYM: { mrb_callinfo *ci = mrb->c->ci; mrb_method_t m; struct RClass *cls; mrb_value 
recv, blk; ARGUMENT_NORMALIZE(a, &c, insn); recv = regs[a]; cls = mrb_class(mrb, recv); m = mrb_method_search_vm(mrb, &cls, mid); if (MRB_METHOD_UNDEF_P(m)) { m = prepare_missing(mrb, recv, mid, &cls, a, &c, blk, 0); mid = MRB_SYM(method_missing); } /* push callinfo */ ci = cipush(mrb, a, 0, cls, NULL, mid, c); if (MRB_METHOD_CFUNC_P(m)) { if (MRB_METHOD_PROC_P(m)) { struct RProc *p = MRB_METHOD_PROC(m); mrb_vm_ci_proc_set(ci, p); recv = p->body.func(mrb, recv); } else { if (MRB_METHOD_NOARG_P(m)) { check_method_noarg(mrb, ci); } recv = MRB_METHOD_FUNC(m)(mrb, recv); } mrb_gc_arena_shrink(mrb, ai); if (mrb->exc) goto L_RAISE; ci = mrb->c->ci; if (mrb_proc_p(blk)) { struct RProc *p = mrb_proc_ptr(blk); if (p && !MRB_PROC_STRICT_P(p) && MRB_PROC_ENV(p) == mrb_vm_ci_env(&ci[-1])) { p->flags |= MRB_PROC_ORPHAN; } } if (!ci->u.target_class) { /* return from context modifying method (resume/yield) */ if (ci->cci == CINFO_RESUMED) { mrb->jmp = prev_jmp; return recv; } else { mrb_assert(!MRB_PROC_CFUNC_P(ci[-1].proc)); proc = ci[-1].proc; irep = proc->body.irep; pool = irep->pool; syms = irep->syms; } } ci->stack[0] = recv; /* pop stackpos */ ci = cipop(mrb); pc = ci->pc; } else { /* setup environment for calling method */ mrb_vm_ci_proc_set(ci, (proc = MRB_METHOD_PROC(m))); irep = proc->body.irep; pool = irep->pool; syms = irep->syms; mrb_stack_extend(mrb, (irep->nregs < 4) ? 4 : irep->nregs); pc = irep->iseq; } } JUMP; CASE(OP_CALL, Z) { mrb_callinfo *ci = mrb->c->ci; mrb_value recv = ci->stack[0]; struct RProc *m = mrb_proc_ptr(recv); /* replace callinfo */ ci->u.target_class = MRB_PROC_TARGET_CLASS(m); mrb_vm_ci_proc_set(ci, m); if (MRB_PROC_ENV_P(m)) { ci->mid = MRB_PROC_ENV(m)->mid; } /* prepare stack */ if (MRB_PROC_CFUNC_P(m)) { recv = MRB_PROC_CFUNC(m)(mrb, recv); mrb_gc_arena_shrink(mrb, ai); if (mrb->exc) goto L_RAISE; /* pop stackpos */ ci = cipop(mrb); pc = ci->pc; ci[1].stack[0] = recv; irep = mrb->c->ci->proc->body.irep; } else { /* setup environment for calling method */ proc = m; irep = m->body.irep; if (!irep) { mrb->c->ci->stack[0] = mrb_nil_value(); a = 0; c = OP_R_NORMAL; goto L_OP_RETURN_BODY; } mrb_int nargs = mrb_ci_bidx(ci)+1; if (nargs < irep->nregs) { mrb_stack_extend(mrb, irep->nregs); stack_clear(regs+nargs, irep->nregs-nargs); } if (MRB_PROC_ENV_P(m)) { regs[0] = MRB_PROC_ENV(m)->stack[0]; } pc = irep->iseq; } pool = irep->pool; syms = irep->syms; JUMP; } CASE(OP_SUPER, BB) { mrb_method_t m; struct RClass *cls; mrb_callinfo *ci = mrb->c->ci; mrb_value recv, blk; const struct RProc *p = ci->proc; mrb_sym mid = ci->mid; struct RClass* target_class = MRB_PROC_TARGET_CLASS(p); if (MRB_PROC_ENV_P(p) && p->e.env->mid && p->e.env->mid != mid) { /* alias support */ mid = p->e.env->mid; /* restore old mid */ } if (mid == 0 || !target_class) { mrb_value exc = mrb_exc_new_lit(mrb, E_NOMETHOD_ERROR, "super called outside of method"); mrb_exc_set(mrb, exc); goto L_RAISE; } if (target_class->flags & MRB_FL_CLASS_IS_PREPENDED) { target_class = mrb_vm_ci_target_class(ci); } else if (target_class->tt == MRB_TT_MODULE) { target_class = mrb_vm_ci_target_class(ci); if (!target_class || target_class->tt != MRB_TT_ICLASS) { goto super_typeerror; } } recv = regs[0]; if (!mrb_obj_is_kind_of(mrb, recv, target_class)) { super_typeerror: ; mrb_value exc = mrb_exc_new_lit(mrb, E_TYPE_ERROR, "self has wrong type to call super in this context"); mrb_exc_set(mrb, exc); goto L_RAISE; } ARGUMENT_NORMALIZE(a, &b, OP_SUPER); cls = target_class->super; m = mrb_method_search_vm(mrb, &cls, mid); if 
(MRB_METHOD_UNDEF_P(m)) { m = prepare_missing(mrb, recv, mid, &cls, a, &b, blk, 1); mid = MRB_SYM(method_missing); } /* push callinfo */ ci = cipush(mrb, a, 0, cls, NULL, mid, b); /* prepare stack */ ci->stack[0] = recv; if (MRB_METHOD_CFUNC_P(m)) { mrb_value v; if (MRB_METHOD_PROC_P(m)) { mrb_vm_ci_proc_set(ci, MRB_METHOD_PROC(m)); } v = MRB_METHOD_CFUNC(m)(mrb, recv); mrb_gc_arena_restore(mrb, ai); if (mrb->exc) goto L_RAISE; ci = mrb->c->ci; mrb_assert(!mrb_break_p(v)); if (!mrb_vm_ci_target_class(ci)) { /* return from context modifying method (resume/yield) */ if (ci->cci == CINFO_RESUMED) { mrb->jmp = prev_jmp; return v; } else { mrb_assert(!MRB_PROC_CFUNC_P(ci[-1].proc)); proc = ci[-1].proc; irep = proc->body.irep; pool = irep->pool; syms = irep->syms; } } mrb->c->ci->stack[0] = v; ci = cipop(mrb); pc = ci->pc; } else { /* setup environment for calling method */ mrb_vm_ci_proc_set(ci, (proc = MRB_METHOD_PROC(m))); irep = proc->body.irep; pool = irep->pool; syms = irep->syms; mrb_stack_extend(mrb, (irep->nregs < 4) ? 4 : irep->nregs); pc = irep->iseq; } JUMP; } CASE(OP_ARGARY, BS) { mrb_int m1 = (b>>11)&0x3f; mrb_int r = (b>>10)&0x1; mrb_int m2 = (b>>5)&0x1f; mrb_int kd = (b>>4)&0x1; mrb_int lv = (b>>0)&0xf; mrb_value *stack; if (mrb->c->ci->mid == 0 || mrb_vm_ci_target_class(mrb->c->ci) == NULL) { mrb_value exc; L_NOSUPER: exc = mrb_exc_new_lit(mrb, E_NOMETHOD_ERROR, "super called outside of method"); mrb_exc_set(mrb, exc); goto L_RAISE; } if (lv == 0) stack = regs + 1; else { struct REnv *e = uvenv(mrb, lv-1); if (!e) goto L_NOSUPER; if (MRB_ENV_LEN(e) <= m1+r+m2+1) goto L_NOSUPER; stack = e->stack + 1; } if (r == 0) { regs[a] = mrb_ary_new_from_values(mrb, m1+m2, stack); } else { mrb_value *pp = NULL; struct RArray *rest; mrb_int len = 0; if (mrb_array_p(stack[m1])) { struct RArray *ary = mrb_ary_ptr(stack[m1]); pp = ARY_PTR(ary); len = ARY_LEN(ary); } regs[a] = mrb_ary_new_capa(mrb, m1+len+m2); rest = mrb_ary_ptr(regs[a]); if (m1 > 0) { stack_copy(ARY_PTR(rest), stack, m1); } if (len > 0) { stack_copy(ARY_PTR(rest)+m1, pp, len); } if (m2 > 0) { stack_copy(ARY_PTR(rest)+m1+len, stack+m1+1, m2); } ARY_SET_LEN(rest, m1+len+m2); } if (kd) { regs[a+1] = stack[m1+r+m2]; regs[a+2] = stack[m1+r+m2+1]; } else { regs[a+1] = stack[m1+r+m2]; } mrb_gc_arena_restore(mrb, ai); NEXT; } CASE(OP_ENTER, W) { mrb_int m1 = MRB_ASPEC_REQ(a); mrb_int o = MRB_ASPEC_OPT(a); mrb_int r = MRB_ASPEC_REST(a); mrb_int m2 = MRB_ASPEC_POST(a); mrb_int kd = (MRB_ASPEC_KEY(a) > 0 || MRB_ASPEC_KDICT(a))? 
1 : 0; /* unused int b = MRB_ASPEC_BLOCK(a); */ mrb_int const len = m1 + o + r + m2; mrb_callinfo *ci = mrb->c->ci; mrb_int argc = ci->n; mrb_value *argv = regs+1; mrb_value * const argv0 = argv; mrb_int const kw_pos = len + kd; /* where kwhash should be */ mrb_int const blk_pos = kw_pos + 1; /* where block should be */ mrb_value blk = regs[mrb_ci_bidx(ci)]; mrb_value kdict = mrb_nil_value(); /* keyword arguments */ if (ci->nk > 0) { mrb_int kidx = mrb_ci_kidx(ci); kdict = regs[kidx]; if (!mrb_hash_p(kdict) || mrb_hash_size(mrb, kdict) == 0) { kdict = mrb_nil_value(); ci->nk = 0; } } if (!kd && !mrb_nil_p(kdict)) { if (argc < 14) { ci->n++; argc++; /* include kdict in normal arguments */ } else if (argc == 14) { /* pack arguments and kdict */ regs[1] = mrb_ary_new_from_values(mrb, argc+1, &regs[1]); argc = ci->n = 15; } else {/* argc == 15 */ /* push kdict to packed arguments */ mrb_ary_push(mrb, regs[1], regs[2]); } ci->nk = 0; } if (kd && MRB_ASPEC_KEY(a) > 0 && mrb_hash_p(kdict)) { kdict = mrb_hash_dup(mrb, kdict); } /* arguments is passed with Array */ if (argc == 15) { struct RArray *ary = mrb_ary_ptr(regs[1]); argv = ARY_PTR(ary); argc = (int)ARY_LEN(ary); mrb_gc_protect(mrb, regs[1]); } /* strict argument check */ if (ci->proc && MRB_PROC_STRICT_P(ci->proc)) { if (argc < m1 + m2 || (r == 0 && argc > len)) { argnum_error(mrb, m1+m2); goto L_RAISE; } } /* extract first argument array to arguments */ else if (len > 1 && argc == 1 && mrb_array_p(argv[0])) { mrb_gc_protect(mrb, argv[0]); argc = (int)RARRAY_LEN(argv[0]); argv = RARRAY_PTR(argv[0]); } /* rest arguments */ mrb_value rest = mrb_nil_value(); if (argc < len) { mrb_int mlen = m2; if (argc < m1+m2) { mlen = m1 < argc ? argc - m1 : 0; } /* copy mandatory and optional arguments */ if (argv0 != argv && argv) { value_move(&regs[1], argv, argc-mlen); /* m1 + o */ } if (argc < m1) { stack_clear(&regs[argc+1], m1-argc); } /* copy post mandatory arguments */ if (mlen) { value_move(&regs[len-m2+1], &argv[argc-mlen], mlen); } if (mlen < m2) { stack_clear(&regs[len-m2+mlen+1], m2-mlen); } /* initialize rest arguments with empty Array */ if (r) { rest = mrb_ary_new_capa(mrb, 0); regs[m1+o+1] = rest; } /* skip initializer of passed arguments */ if (o > 0 && argc > m1+m2) pc += (argc - m1 - m2)*3; } else { mrb_int rnum = 0; if (argv0 != argv) { value_move(&regs[1], argv, m1+o); } if (r) { rnum = argc-m1-o-m2; rest = mrb_ary_new_from_values(mrb, rnum, argv+m1+o); regs[m1+o+1] = rest; } if (m2 > 0 && argc-m2 > m1) { value_move(&regs[m1+o+r+1], &argv[m1+o+rnum], m2); } pc += o*3; } /* need to be update blk first to protect blk from GC */ regs[blk_pos] = blk; /* move block */ if (kd) { if (mrb_nil_p(kdict)) kdict = mrb_hash_new_capa(mrb, 0); regs[kw_pos] = kdict; /* set kwhash */ } /* format arguments for generated code */ mrb->c->ci->n = (uint8_t)len; /* clear local (but non-argument) variables */ if (irep->nlocals-blk_pos-1 > 0) { stack_clear(&regs[blk_pos+1], irep->nlocals-blk_pos-1); } JUMP; } CASE(OP_KARG, BB) { mrb_value k = mrb_symbol_value(syms[b]); mrb_int kidx = mrb_ci_kidx(mrb->c->ci); mrb_value kdict, v; if (kidx < 0 || !mrb_hash_p(kdict=regs[kidx]) || !mrb_hash_key_p(mrb, kdict, k)) { mrb_value str = mrb_format(mrb, "missing keyword: %v", k); mrb_exc_set(mrb, mrb_exc_new_str(mrb, E_ARGUMENT_ERROR, str)); goto L_RAISE; } v = mrb_hash_get(mrb, kdict, k); regs[a] = v; mrb_hash_delete_key(mrb, kdict, k); NEXT; } CASE(OP_KEY_P, BB) { mrb_value k = mrb_symbol_value(syms[b]); mrb_int kidx = mrb_ci_kidx(mrb->c->ci); mrb_value kdict; mrb_bool 
key_p = FALSE; if (kidx >= 0 && mrb_hash_p(kdict=regs[kidx])) { key_p = mrb_hash_key_p(mrb, kdict, k); } regs[a] = mrb_bool_value(key_p); NEXT; } CASE(OP_KEYEND, Z) { mrb_int kidx = mrb_ci_kidx(mrb->c->ci); mrb_value kdict; if (kidx >= 0 && mrb_hash_p(kdict=regs[kidx]) && !mrb_hash_empty_p(mrb, kdict)) { mrb_value keys = mrb_hash_keys(mrb, kdict); mrb_value key1 = RARRAY_PTR(keys)[0]; mrb_value str = mrb_format(mrb, "unknown keyword: %v", key1); mrb_exc_set(mrb, mrb_exc_new_str(mrb, E_ARGUMENT_ERROR, str)); goto L_RAISE; } NEXT; } CASE(OP_BREAK, B) { c = OP_R_BREAK; goto L_RETURN; } CASE(OP_RETURN_BLK, B) { c = OP_R_RETURN; goto L_RETURN; } CASE(OP_RETURN, B) c = OP_R_NORMAL; L_RETURN: { mrb_callinfo *ci; ci = mrb->c->ci; if (ci->mid) { mrb_value blk = regs[mrb_ci_bidx(ci)]; if (mrb_proc_p(blk)) { struct RProc *p = mrb_proc_ptr(blk); if (!MRB_PROC_STRICT_P(p) && ci > mrb->c->cibase && MRB_PROC_ENV(p) == mrb_vm_ci_env(&ci[-1])) { p->flags |= MRB_PROC_ORPHAN; } } } if (mrb->exc) { L_RAISE: ci = mrb->c->ci; if (ci == mrb->c->cibase) { ch = catch_handler_find(mrb, ci, pc, MRB_CATCH_FILTER_ALL); if (ch == NULL) goto L_FTOP; goto L_CATCH; } while ((ch = catch_handler_find(mrb, ci, pc, MRB_CATCH_FILTER_ALL)) == NULL) { ci = cipop(mrb); if (ci[1].cci == CINFO_SKIP && prev_jmp) { mrb->jmp = prev_jmp; MRB_THROW(prev_jmp); } pc = ci[0].pc; if (ci == mrb->c->cibase) { ch = catch_handler_find(mrb, ci, pc, MRB_CATCH_FILTER_ALL); if (ch == NULL) { L_FTOP: /* fiber top */ if (mrb->c == mrb->root_c) { mrb->c->ci->stack = mrb->c->stbase; goto L_STOP; } else { struct mrb_context *c = mrb->c; c->status = MRB_FIBER_TERMINATED; mrb->c = c->prev; c->prev = NULL; goto L_RAISE; } } break; } } L_CATCH: if (ch == NULL) goto L_STOP; if (FALSE) { L_CATCH_TAGGED_BREAK: /* from THROW_TAGGED_BREAK() or UNWIND_ENSURE() */ ci = mrb->c->ci; } proc = ci->proc; irep = proc->body.irep; pool = irep->pool; syms = irep->syms; mrb_stack_extend(mrb, irep->nregs); pc = irep->iseq + mrb_irep_catch_handler_unpack(ch->target); } else { mrb_int acc; mrb_value v; ci = mrb->c->ci; v = regs[a]; mrb_gc_protect(mrb, v); switch (c) { case OP_R_RETURN: /* Fall through to OP_R_NORMAL otherwise */ if (ci->cci == CINFO_NONE && MRB_PROC_ENV_P(proc) && !MRB_PROC_STRICT_P(proc)) { const struct RProc *dst; mrb_callinfo *cibase; cibase = mrb->c->cibase; dst = top_proc(mrb, proc); if (MRB_PROC_ENV_P(dst)) { struct REnv *e = MRB_PROC_ENV(dst); if (!MRB_ENV_ONSTACK_P(e) || (e->cxt && e->cxt != mrb->c)) { localjump_error(mrb, LOCALJUMP_ERROR_RETURN); goto L_RAISE; } } /* check jump destination */ while (cibase <= ci && ci->proc != dst) { if (ci->cci > CINFO_NONE) { /* jump cross C boundary */ localjump_error(mrb, LOCALJUMP_ERROR_RETURN); goto L_RAISE; } ci--; } if (ci <= cibase) { /* no jump destination */ localjump_error(mrb, LOCALJUMP_ERROR_RETURN); goto L_RAISE; } ci = mrb->c->ci; while (cibase <= ci && ci->proc != dst) { CHECKPOINT_RESTORE(RBREAK_TAG_RETURN_BLOCK) { cibase = mrb->c->cibase; dst = top_proc(mrb, proc); } CHECKPOINT_MAIN(RBREAK_TAG_RETURN_BLOCK) { UNWIND_ENSURE(mrb, ci, pc, RBREAK_TAG_RETURN_BLOCK, proc, v); } CHECKPOINT_END(RBREAK_TAG_RETURN_BLOCK); ci = cipop(mrb); pc = ci->pc; } proc = ci->proc; mrb->exc = NULL; /* clear break object */ break; } /* fallthrough */ case OP_R_NORMAL: NORMAL_RETURN: if (ci == mrb->c->cibase) { struct mrb_context *c; c = mrb->c; if (!c->prev) { /* toplevel return */ regs[irep->nlocals] = v; goto CHECKPOINT_LABEL_MAKE(RBREAK_TAG_STOP); } if (!c->vmexec && c->prev->ci == c->prev->cibase) { mrb_value exc = 
mrb_exc_new_lit(mrb, E_FIBER_ERROR, "double resume"); mrb_exc_set(mrb, exc); goto L_RAISE; } CHECKPOINT_RESTORE(RBREAK_TAG_RETURN_TOPLEVEL) { c = mrb->c; } CHECKPOINT_MAIN(RBREAK_TAG_RETURN_TOPLEVEL) { UNWIND_ENSURE(mrb, ci, pc, RBREAK_TAG_RETURN_TOPLEVEL, proc, v); } CHECKPOINT_END(RBREAK_TAG_RETURN_TOPLEVEL); /* automatic yield at the end */ c->status = MRB_FIBER_TERMINATED; mrb->c = c->prev; mrb->c->status = MRB_FIBER_RUNNING; c->prev = NULL; if (c->vmexec) { mrb_gc_arena_restore(mrb, ai); c->vmexec = FALSE; mrb->jmp = prev_jmp; return v; } ci = mrb->c->ci; } CHECKPOINT_RESTORE(RBREAK_TAG_RETURN) { /* do nothing */ } CHECKPOINT_MAIN(RBREAK_TAG_RETURN) { UNWIND_ENSURE(mrb, ci, pc, RBREAK_TAG_RETURN, proc, v); } CHECKPOINT_END(RBREAK_TAG_RETURN); mrb->exc = NULL; /* clear break object */ break; case OP_R_BREAK: if (MRB_PROC_STRICT_P(proc)) goto NORMAL_RETURN; if (MRB_PROC_ORPHAN_P(proc)) { mrb_value exc; L_BREAK_ERROR: exc = mrb_exc_new_lit(mrb, E_LOCALJUMP_ERROR, "break from proc-closure"); mrb_exc_set(mrb, exc); goto L_RAISE; } if (!MRB_PROC_ENV_P(proc) || !MRB_ENV_ONSTACK_P(MRB_PROC_ENV(proc))) { goto L_BREAK_ERROR; } else { struct REnv *e = MRB_PROC_ENV(proc); if (e->cxt != mrb->c) { goto L_BREAK_ERROR; } } CHECKPOINT_RESTORE(RBREAK_TAG_BREAK) { /* do nothing */ } CHECKPOINT_MAIN(RBREAK_TAG_BREAK) { UNWIND_ENSURE(mrb, ci, pc, RBREAK_TAG_BREAK, proc, v); } CHECKPOINT_END(RBREAK_TAG_BREAK); /* break from fiber block */ if (ci == mrb->c->cibase && ci->pc) { struct mrb_context *c = mrb->c; mrb->c = c->prev; c->prev = NULL; ci = mrb->c->ci; } if (ci->cci > CINFO_NONE) { ci = cipop(mrb); mrb->exc = (struct RObject*)break_new(mrb, RBREAK_TAG_BREAK, proc, v); mrb_gc_arena_restore(mrb, ai); mrb->c->vmexec = FALSE; mrb->jmp = prev_jmp; MRB_THROW(prev_jmp); } if (FALSE) { struct RBreak *brk; L_BREAK: brk = (struct RBreak*)mrb->exc; proc = mrb_break_proc_get(brk); v = mrb_break_value_get(brk); ci = mrb->c->ci; switch (mrb_break_tag_get(brk)) { #define DISPATCH_CHECKPOINTS(n, i) case n: goto CHECKPOINT_LABEL_MAKE(n); RBREAK_TAG_FOREACH(DISPATCH_CHECKPOINTS) #undef DISPATCH_CHECKPOINTS default: mrb_assert(!"wrong break tag"); } } while (mrb->c->cibase < ci && ci[-1].proc != proc->upper) { if (ci[-1].cci == CINFO_SKIP) { goto L_BREAK_ERROR; } CHECKPOINT_RESTORE(RBREAK_TAG_BREAK_UPPER) { /* do nothing */ } CHECKPOINT_MAIN(RBREAK_TAG_BREAK_UPPER) { UNWIND_ENSURE(mrb, ci, pc, RBREAK_TAG_BREAK_UPPER, proc, v); } CHECKPOINT_END(RBREAK_TAG_BREAK_UPPER); ci = cipop(mrb); pc = ci->pc; } CHECKPOINT_RESTORE(RBREAK_TAG_BREAK_INTARGET) { /* do nothing */ } CHECKPOINT_MAIN(RBREAK_TAG_BREAK_INTARGET) { UNWIND_ENSURE(mrb, ci, pc, RBREAK_TAG_BREAK_INTARGET, proc, v); } CHECKPOINT_END(RBREAK_TAG_BREAK_INTARGET); if (ci == mrb->c->cibase) { goto L_BREAK_ERROR; } mrb->exc = NULL; /* clear break object */ break; default: /* cannot happen */ break; } mrb_assert(ci == mrb->c->ci); mrb_assert(mrb->exc == NULL); if (mrb->c->vmexec && !mrb_vm_ci_target_class(ci)) { mrb_gc_arena_restore(mrb, ai); mrb->c->vmexec = FALSE; mrb->jmp = prev_jmp; return v; } acc = ci->cci; ci = cipop(mrb); if (acc == CINFO_SKIP || acc == CINFO_DIRECT) { mrb_gc_arena_restore(mrb, ai); mrb->jmp = prev_jmp; return v; } pc = ci->pc; DEBUG(fprintf(stderr, "from :%s\n", mrb_sym_name(mrb, ci->mid))); proc = ci->proc; irep = proc->body.irep; pool = irep->pool; syms = irep->syms; ci[1].stack[0] = v; mrb_gc_arena_restore(mrb, ai); } JUMP; } CASE(OP_BLKPUSH, BS) { int m1 = (b>>11)&0x3f; int r = (b>>10)&0x1; int m2 = (b>>5)&0x1f; int kd = (b>>4)&0x1; int lv = 
(b>>0)&0xf; mrb_value *stack; if (lv == 0) stack = regs + 1; else { struct REnv *e = uvenv(mrb, lv-1); if (!e || (!MRB_ENV_ONSTACK_P(e) && e->mid == 0) || MRB_ENV_LEN(e) <= m1+r+m2+1) { localjump_error(mrb, LOCALJUMP_ERROR_YIELD); goto L_RAISE; } stack = e->stack + 1; } if (mrb_nil_p(stack[m1+r+m2+kd])) { localjump_error(mrb, LOCALJUMP_ERROR_YIELD); goto L_RAISE; } regs[a] = stack[m1+r+m2+kd]; NEXT; } #if !defined(MRB_USE_BIGINT) || defined(MRB_INT32) L_INT_OVERFLOW: { mrb_value exc = mrb_exc_new_lit(mrb, E_RANGE_ERROR, "integer overflow"); mrb_exc_set(mrb, exc); } goto L_RAISE; #endif #define TYPES2(a,b) ((((uint16_t)(a))<<8)|(((uint16_t)(b))&0xff)) #define OP_MATH(op_name) \ /* need to check if op is overridden */ \ switch (TYPES2(mrb_type(regs[a]),mrb_type(regs[a+1]))) { \ OP_MATH_CASE_INTEGER(op_name); \ OP_MATH_CASE_FLOAT(op_name, integer, float); \ OP_MATH_CASE_FLOAT(op_name, float, integer); \ OP_MATH_CASE_FLOAT(op_name, float, float); \ OP_MATH_CASE_STRING_##op_name(); \ default: \ mid = MRB_OPSYM(op_name); \ goto L_SEND_SYM; \ } \ NEXT; #define OP_MATH_CASE_INTEGER(op_name) \ case TYPES2(MRB_TT_INTEGER, MRB_TT_INTEGER): \ { \ mrb_int x = mrb_integer(regs[a]), y = mrb_integer(regs[a+1]), z; \ if (mrb_int_##op_name##_overflow(x, y, &z)) { \ OP_MATH_OVERFLOW_INT(op_name,x,y); \ } \ else \ SET_INT_VALUE(mrb,regs[a], z); \ } \ break #ifdef MRB_NO_FLOAT #define OP_MATH_CASE_FLOAT(op_name, t1, t2) (void)0 #else #define OP_MATH_CASE_FLOAT(op_name, t1, t2) \ case TYPES2(OP_MATH_TT_##t1, OP_MATH_TT_##t2): \ { \ mrb_float z = mrb_##t1(regs[a]) OP_MATH_OP_##op_name mrb_##t2(regs[a+1]); \ SET_FLOAT_VALUE(mrb, regs[a], z); \ } \ break #endif #ifdef MRB_USE_BIGINT #define OP_MATH_OVERFLOW_INT(op,x,y) regs[a] = mrb_bint_##op##_ii(mrb,x,y) #else #define OP_MATH_OVERFLOW_INT(op,x,y) goto L_INT_OVERFLOW #endif #define OP_MATH_CASE_STRING_add() \ case TYPES2(MRB_TT_STRING, MRB_TT_STRING): \ regs[a] = mrb_str_plus(mrb, regs[a], regs[a+1]); \ mrb_gc_arena_restore(mrb, ai); \ break #define OP_MATH_CASE_STRING_sub() (void)0 #define OP_MATH_CASE_STRING_mul() (void)0 #define OP_MATH_OP_add + #define OP_MATH_OP_sub - #define OP_MATH_OP_mul * #define OP_MATH_TT_integer MRB_TT_INTEGER #define OP_MATH_TT_float MRB_TT_FLOAT CASE(OP_ADD, B) { OP_MATH(add); } CASE(OP_SUB, B) { OP_MATH(sub); } CASE(OP_MUL, B) { OP_MATH(mul); } CASE(OP_DIV, B) { #ifndef MRB_NO_FLOAT mrb_float x, y, f; #endif /* need to check if op is overridden */ switch (TYPES2(mrb_type(regs[a]),mrb_type(regs[a+1]))) { case TYPES2(MRB_TT_INTEGER,MRB_TT_INTEGER): { mrb_int x = mrb_integer(regs[a]); mrb_int y = mrb_integer(regs[a+1]); mrb_int div = mrb_div_int(mrb, x, y); SET_INT_VALUE(mrb, regs[a], div); } NEXT; #ifndef MRB_NO_FLOAT case TYPES2(MRB_TT_INTEGER,MRB_TT_FLOAT): x = (mrb_float)mrb_integer(regs[a]); y = mrb_float(regs[a+1]); break; case TYPES2(MRB_TT_FLOAT,MRB_TT_INTEGER): x = mrb_float(regs[a]); y = (mrb_float)mrb_integer(regs[a+1]); break; case TYPES2(MRB_TT_FLOAT,MRB_TT_FLOAT): x = mrb_float(regs[a]); y = mrb_float(regs[a+1]); break; #endif default: mid = MRB_OPSYM(div); goto L_SEND_SYM; } #ifndef MRB_NO_FLOAT f = mrb_div_float(x, y); SET_FLOAT_VALUE(mrb, regs[a], f); #endif NEXT; } #define OP_MATHI(op_name) \ /* need to check if op is overridden */ \ switch (mrb_type(regs[a])) { \ OP_MATHI_CASE_INTEGER(op_name); \ OP_MATHI_CASE_FLOAT(op_name); \ default: \ SET_INT_VALUE(mrb,regs[a+1], b); \ mid = MRB_OPSYM(op_name); \ goto L_SEND_SYM; \ } \ NEXT; #define OP_MATHI_CASE_INTEGER(op_name) \ case MRB_TT_INTEGER: \ { \ mrb_int x = 
mrb_integer(regs[a]), y = (mrb_int)b, z; \ if (mrb_int_##op_name##_overflow(x, y, &z)) { \ OP_MATH_OVERFLOW_INT(op_name,x,y); \ } \ else \ SET_INT_VALUE(mrb,regs[a], z); \ } \ break #ifdef MRB_NO_FLOAT #define OP_MATHI_CASE_FLOAT(op_name) (void)0 #else #define OP_MATHI_CASE_FLOAT(op_name) \ case MRB_TT_FLOAT: \ { \ mrb_float z = mrb_float(regs[a]) OP_MATH_OP_##op_name b; \ SET_FLOAT_VALUE(mrb, regs[a], z); \ } \ break #endif CASE(OP_ADDI, BB) { OP_MATHI(add); } CASE(OP_SUBI, BB) { OP_MATHI(sub); } #define OP_CMP_BODY(op,v1,v2) (v1(regs[a]) op v2(regs[a+1])) #ifdef MRB_NO_FLOAT #define OP_CMP(op,sym) do {\ int result;\ /* need to check if - is overridden */\ switch (TYPES2(mrb_type(regs[a]),mrb_type(regs[a+1]))) {\ case TYPES2(MRB_TT_INTEGER,MRB_TT_INTEGER):\ result = OP_CMP_BODY(op,mrb_fixnum,mrb_fixnum);\ break;\ default:\ mid = MRB_OPSYM(sym);\ goto L_SEND_SYM;\ }\ if (result) {\ SET_TRUE_VALUE(regs[a]);\ }\ else {\ SET_FALSE_VALUE(regs[a]);\ }\ } while(0) #else #define OP_CMP(op, sym) do {\ int result;\ /* need to check if - is overridden */\ switch (TYPES2(mrb_type(regs[a]),mrb_type(regs[a+1]))) {\ case TYPES2(MRB_TT_INTEGER,MRB_TT_INTEGER):\ result = OP_CMP_BODY(op,mrb_fixnum,mrb_fixnum);\ break;\ case TYPES2(MRB_TT_INTEGER,MRB_TT_FLOAT):\ result = OP_CMP_BODY(op,mrb_fixnum,mrb_float);\ break;\ case TYPES2(MRB_TT_FLOAT,MRB_TT_INTEGER):\ result = OP_CMP_BODY(op,mrb_float,mrb_fixnum);\ break;\ case TYPES2(MRB_TT_FLOAT,MRB_TT_FLOAT):\ result = OP_CMP_BODY(op,mrb_float,mrb_float);\ break;\ default:\ mid = MRB_OPSYM(sym);\ goto L_SEND_SYM;\ }\ if (result) {\ SET_TRUE_VALUE(regs[a]);\ }\ else {\ SET_FALSE_VALUE(regs[a]);\ }\ } while(0) #endif CASE(OP_EQ, B) { if (mrb_obj_eq(mrb, regs[a], regs[a+1])) { SET_TRUE_VALUE(regs[a]); } else { OP_CMP(==,eq); } NEXT; } CASE(OP_LT, B) { OP_CMP(<,lt); NEXT; } CASE(OP_LE, B) { OP_CMP(<=,le); NEXT; } CASE(OP_GT, B) { OP_CMP(>,gt); NEXT; } CASE(OP_GE, B) { OP_CMP(>=,ge); NEXT; } CASE(OP_ARRAY, BB) { regs[a] = mrb_ary_new_from_values(mrb, b, &regs[a]); mrb_gc_arena_restore(mrb, ai); NEXT; } CASE(OP_ARRAY2, BBB) { regs[a] = mrb_ary_new_from_values(mrb, c, &regs[b]); mrb_gc_arena_restore(mrb, ai); NEXT; } CASE(OP_ARYCAT, B) { mrb_value splat = mrb_ary_splat(mrb, regs[a+1]); if (mrb_nil_p(regs[a])) { regs[a] = splat; } else { mrb_assert(mrb_array_p(regs[a])); mrb_ary_concat(mrb, regs[a], splat); } mrb_gc_arena_restore(mrb, ai); NEXT; } CASE(OP_ARYPUSH, BB) { mrb_assert(mrb_array_p(regs[a])); for (mrb_int i=0; i<b; i++) { mrb_ary_push(mrb, regs[a], regs[a+i+1]); } NEXT; } CASE(OP_ARYDUP, B) { mrb_value ary = regs[a]; if (mrb_array_p(ary)) { ary = mrb_ary_new_from_values(mrb, RARRAY_LEN(ary), RARRAY_PTR(ary)); } else { ary = mrb_ary_new_from_values(mrb, 1, &ary); } regs[a] = ary; NEXT; } CASE(OP_AREF, BBB) { mrb_value v = regs[b]; if (!mrb_array_p(v)) { if (c == 0) { regs[a] = v; } else { SET_NIL_VALUE(regs[a]); } } else { v = mrb_ary_ref(mrb, v, c); regs[a] = v; } NEXT; } CASE(OP_ASET, BBB) { mrb_assert(mrb_array_p(regs[a])); mrb_ary_set(mrb, regs[b], c, regs[a]); NEXT; } CASE(OP_APOST, BBB) { mrb_value v = regs[a]; int pre = b; int post = c; struct RArray *ary; int len, idx; if (!mrb_array_p(v)) { v = mrb_ary_new_from_values(mrb, 1, &regs[a]); } ary = mrb_ary_ptr(v); len = (int)ARY_LEN(ary); if (len > pre + post) { v = mrb_ary_new_from_values(mrb, len - pre - post, ARY_PTR(ary)+pre); regs[a++] = v; while (post--) { regs[a++] = ARY_PTR(ary)[len-post-1]; } } else { v = mrb_ary_new_capa(mrb, 0); regs[a++] = v; for (idx=0; idx+pre<len; idx++) { regs[a+idx] = 
ARY_PTR(ary)[pre+idx]; } while (idx < post) { SET_NIL_VALUE(regs[a+idx]); idx++; } } mrb_gc_arena_restore(mrb, ai); NEXT; } CASE(OP_INTERN, B) { mrb_assert(mrb_string_p(regs[a])); mrb_sym sym = mrb_intern_str(mrb, regs[a]); regs[a] = mrb_symbol_value(sym); NEXT; } CASE(OP_SYMBOL, BB) { size_t len; mrb_sym sym; mrb_assert((pool[b].tt&IREP_TT_NFLAG)==0); len = pool[b].tt >> 2; if (pool[b].tt & IREP_TT_SFLAG) { sym = mrb_intern_static(mrb, pool[b].u.str, len); } else { sym = mrb_intern(mrb, pool[b].u.str, len); } regs[a] = mrb_symbol_value(sym); NEXT; } CASE(OP_STRING, BB) { mrb_int len; mrb_assert((pool[b].tt&IREP_TT_NFLAG)==0); len = pool[b].tt >> 2; if (pool[b].tt & IREP_TT_SFLAG) { regs[a] = mrb_str_new_static(mrb, pool[b].u.str, len); } else { regs[a] = mrb_str_new(mrb, pool[b].u.str, len); } mrb_gc_arena_restore(mrb, ai); NEXT; } CASE(OP_STRCAT, B) { mrb_assert(mrb_string_p(regs[a])); mrb_str_concat(mrb, regs[a], regs[a+1]); NEXT; } CASE(OP_HASH, BB) { mrb_value hash = mrb_hash_new_capa(mrb, b); int i; int lim = a+b*2; for (i=a; i<lim; i+=2) { mrb_hash_set(mrb, hash, regs[i], regs[i+1]); } regs[a] = hash; mrb_gc_arena_restore(mrb, ai); NEXT; } CASE(OP_HASHADD, BB) { mrb_value hash; int i; int lim = a+b*2+1; hash = regs[a]; mrb_ensure_hash_type(mrb, hash); for (i=a+1; i<lim; i+=2) { mrb_hash_set(mrb, hash, regs[i], regs[i+1]); } mrb_gc_arena_restore(mrb, ai); NEXT; } CASE(OP_HASHCAT, B) { mrb_value hash = regs[a]; mrb_assert(mrb_hash_p(hash)); mrb_hash_merge(mrb, hash, regs[a+1]); mrb_gc_arena_restore(mrb, ai); NEXT; } CASE(OP_LAMBDA, BB) c = OP_L_LAMBDA; L_MAKE_LAMBDA: { struct RProc *p; const mrb_irep *nirep = irep->reps[b]; if (c & OP_L_CAPTURE) { p = mrb_closure_new(mrb, nirep); } else { p = mrb_proc_new(mrb, nirep); p->flags |= MRB_PROC_SCOPE; } if (c & OP_L_STRICT) p->flags |= MRB_PROC_STRICT; regs[a] = mrb_obj_value(p); mrb_gc_arena_restore(mrb, ai); NEXT; } CASE(OP_BLOCK, BB) { c = OP_L_BLOCK; goto L_MAKE_LAMBDA; } CASE(OP_METHOD, BB) { c = OP_L_METHOD; goto L_MAKE_LAMBDA; } CASE(OP_RANGE_INC, B) { mrb_value v = mrb_range_new(mrb, regs[a], regs[a+1], FALSE); regs[a] = v; mrb_gc_arena_restore(mrb, ai); NEXT; } CASE(OP_RANGE_EXC, B) { mrb_value v = mrb_range_new(mrb, regs[a], regs[a+1], TRUE); regs[a] = v; mrb_gc_arena_restore(mrb, ai); NEXT; } CASE(OP_OCLASS, B) { regs[a] = mrb_obj_value(mrb->object_class); NEXT; } CASE(OP_CLASS, BB) { struct RClass *c = 0, *baseclass; mrb_value base, super; mrb_sym id = syms[b]; base = regs[a]; super = regs[a+1]; if (mrb_nil_p(base)) { baseclass = MRB_PROC_TARGET_CLASS(mrb->c->ci->proc); if (!baseclass) baseclass = mrb->object_class; base = mrb_obj_value(baseclass); } c = mrb_vm_define_class(mrb, base, super, id); regs[a] = mrb_obj_value(c); mrb_gc_arena_restore(mrb, ai); NEXT; } CASE(OP_MODULE, BB) { struct RClass *cls = 0, *baseclass; mrb_value base; mrb_sym id = syms[b]; base = regs[a]; if (mrb_nil_p(base)) { baseclass = MRB_PROC_TARGET_CLASS(mrb->c->ci->proc); if (!baseclass) baseclass = mrb->object_class; base = mrb_obj_value(baseclass); } cls = mrb_vm_define_module(mrb, base, id); regs[a] = mrb_obj_value(cls); mrb_gc_arena_restore(mrb, ai); NEXT; } CASE(OP_EXEC, BB) { mrb_value recv = regs[a]; struct RProc *p; const mrb_irep *nirep = irep->reps[b]; /* prepare closure */ p = mrb_proc_new(mrb, nirep); p->c = NULL; mrb_field_write_barrier(mrb, (struct RBasic*)p, (struct RBasic*)proc); MRB_PROC_SET_TARGET_CLASS(p, mrb_class_ptr(recv)); p->flags |= MRB_PROC_SCOPE; /* prepare call stack */ cipush(mrb, a, 0, mrb_class_ptr(recv), p, 0, 0); irep = 
p->body.irep; pool = irep->pool; syms = irep->syms; mrb_stack_extend(mrb, irep->nregs); stack_clear(regs+1, irep->nregs-1); pc = irep->iseq; JUMP; } CASE(OP_DEF, BB) { struct RClass *target = mrb_class_ptr(regs[a]); struct RProc *p = mrb_proc_ptr(regs[a+1]); mrb_method_t m; mrb_sym mid = syms[b]; MRB_METHOD_FROM_PROC(m, p); mrb_define_method_raw(mrb, target, mid, m); mrb_method_added(mrb, target, mid); mrb_gc_arena_restore(mrb, ai); regs[a] = mrb_symbol_value(mid); NEXT; } CASE(OP_SCLASS, B) { regs[a] = mrb_singleton_class(mrb, regs[a]); mrb_gc_arena_restore(mrb, ai); NEXT; } CASE(OP_TCLASS, B) { struct RClass *target = check_target_class(mrb); if (!target) goto L_RAISE; regs[a] = mrb_obj_value(target); NEXT; } CASE(OP_ALIAS, BB) { struct RClass *target = check_target_class(mrb); if (!target) goto L_RAISE; mrb_alias_method(mrb, target, syms[a], syms[b]); mrb_method_added(mrb, target, syms[a]); NEXT; } CASE(OP_UNDEF, B) { struct RClass *target = check_target_class(mrb); if (!target) goto L_RAISE; mrb_undef_method_id(mrb, target, syms[a]); NEXT; } CASE(OP_DEBUG, Z) { FETCH_BBB(); #ifdef MRB_USE_DEBUG_HOOK mrb->debug_op_hook(mrb, irep, pc, regs); #else #ifndef MRB_NO_STDIO printf("OP_DEBUG %d %d %d\n", a, b, c); #else abort(); #endif #endif NEXT; } CASE(OP_ERR, B) { size_t len = pool[a].tt >> 2; mrb_value exc; mrb_assert((pool[a].tt&IREP_TT_NFLAG)==0); exc = mrb_exc_new(mrb, E_LOCALJUMP_ERROR, pool[a].u.str, len); mrb_exc_set(mrb, exc); goto L_RAISE; } CASE(OP_EXT1, Z) { insn = READ_B(); switch (insn) { #define OPCODE(insn,ops) case OP_ ## insn: FETCH_ ## ops ## _1(); mrb->c->ci->pc = pc; goto L_OP_ ## insn ## _BODY; #include "mruby/ops.h" #undef OPCODE } pc--; NEXT; } CASE(OP_EXT2, Z) { insn = READ_B(); switch (insn) { #define OPCODE(insn,ops) case OP_ ## insn: FETCH_ ## ops ## _2(); mrb->c->ci->pc = pc; goto L_OP_ ## insn ## _BODY; #include "mruby/ops.h" #undef OPCODE } pc--; NEXT; } CASE(OP_EXT3, Z) { uint8_t insn = READ_B(); switch (insn) { #define OPCODE(insn,ops) case OP_ ## insn: FETCH_ ## ops ## _3(); mrb->c->ci->pc = pc; goto L_OP_ ## insn ## _BODY; #include "mruby/ops.h" #undef OPCODE } pc--; NEXT; } CASE(OP_STOP, Z) { /* stop VM */ CHECKPOINT_RESTORE(RBREAK_TAG_STOP) { /* do nothing */ } CHECKPOINT_MAIN(RBREAK_TAG_STOP) { UNWIND_ENSURE(mrb, mrb->c->ci, pc, RBREAK_TAG_STOP, proc, mrb_nil_value()); } CHECKPOINT_END(RBREAK_TAG_STOP); L_STOP: mrb->jmp = prev_jmp; if (mrb->exc) { mrb_assert(mrb->exc->tt == MRB_TT_EXCEPTION); return mrb_obj_value(mrb->exc); } return regs[irep->nlocals]; } } END_DISPATCH; #undef regs } MRB_CATCH(&c_jmp) { mrb_callinfo *ci = mrb->c->ci; while (ci > mrb->c->cibase && ci->cci == CINFO_DIRECT) { ci = cipop(mrb); } exc_catched = TRUE; pc = ci->pc; goto RETRY_TRY_BLOCK; } MRB_END_EXC(&c_jmp); }
44658455618798852770197569753613577766
vm.c
164058079299968555060167307813659313262
CWE-703
CVE-2022-1427
Out-of-bounds Read in mrb_obj_is_kind_of in the GitHub repository mruby/mruby prior to 3.2. # Impact: Possible arbitrary code execution if exploited.
https://nvd.nist.gov/vuln/detail/CVE-2022-1427
231,012
mruby
a4d97934d51cb88954cc49161dc1d151f64afb6b
https://github.com/mruby/mruby
https://github.com/mruby/mruby/commit/a4d97934d51cb88954cc49161dc1d151f64afb6b
vm.c: check if target_class is NULL (when prepended).
0
mrb_vm_exec(mrb_state *mrb, const struct RProc *proc, const mrb_code *pc) { /* mrb_assert(MRB_PROC_CFUNC_P(proc)) */ const mrb_irep *irep = proc->body.irep; const mrb_pool_value *pool = irep->pool; const mrb_sym *syms = irep->syms; mrb_code insn; int ai = mrb_gc_arena_save(mrb); struct mrb_jmpbuf *prev_jmp = mrb->jmp; struct mrb_jmpbuf c_jmp; uint32_t a; uint16_t b; uint16_t c; mrb_sym mid; const struct mrb_irep_catch_handler *ch; #ifdef DIRECT_THREADED static const void * const optable[] = { #define OPCODE(x,_) &&L_OP_ ## x, #include "mruby/ops.h" #undef OPCODE }; #endif mrb_bool exc_catched = FALSE; RETRY_TRY_BLOCK: MRB_TRY(&c_jmp) { if (exc_catched) { exc_catched = FALSE; mrb_gc_arena_restore(mrb, ai); if (mrb->exc && mrb->exc->tt == MRB_TT_BREAK) goto L_BREAK; goto L_RAISE; } mrb->jmp = &c_jmp; mrb_vm_ci_proc_set(mrb->c->ci, proc); #define regs (mrb->c->ci->stack) INIT_DISPATCH { CASE(OP_NOP, Z) { /* do nothing */ NEXT; } CASE(OP_MOVE, BB) { regs[a] = regs[b]; NEXT; } CASE(OP_LOADL, BB) { switch (pool[b].tt) { /* number */ case IREP_TT_INT32: regs[a] = mrb_int_value(mrb, (mrb_int)pool[b].u.i32); break; case IREP_TT_INT64: #if defined(MRB_INT64) regs[a] = mrb_int_value(mrb, (mrb_int)pool[b].u.i64); break; #else #if defined(MRB_64BIT) if (INT32_MIN <= pool[b].u.i64 && pool[b].u.i64 <= INT32_MAX) { regs[a] = mrb_int_value(mrb, (mrb_int)pool[b].u.i64); break; } #endif goto L_INT_OVERFLOW; #endif case IREP_TT_BIGINT: #ifdef MRB_USE_BIGINT { const char *s = pool[b].u.str; regs[a] = mrb_bint_new_str(mrb, s+2, (mrb_int)s[0], (mrb_int)s[1]); } break; #else goto L_INT_OVERFLOW; #endif #ifndef MRB_NO_FLOAT case IREP_TT_FLOAT: regs[a] = mrb_float_value(mrb, pool[b].u.f); break; #endif default: /* should not happen (tt:string) */ regs[a] = mrb_nil_value(); break; } NEXT; } CASE(OP_LOADI, BB) { SET_FIXNUM_VALUE(regs[a], b); NEXT; } CASE(OP_LOADINEG, BB) { SET_FIXNUM_VALUE(regs[a], -b); NEXT; } CASE(OP_LOADI__1,B) goto L_LOADI; CASE(OP_LOADI_0,B) goto L_LOADI; CASE(OP_LOADI_1,B) goto L_LOADI; CASE(OP_LOADI_2,B) goto L_LOADI; CASE(OP_LOADI_3,B) goto L_LOADI; CASE(OP_LOADI_4,B) goto L_LOADI; CASE(OP_LOADI_5,B) goto L_LOADI; CASE(OP_LOADI_6,B) goto L_LOADI; CASE(OP_LOADI_7, B) { L_LOADI: SET_FIXNUM_VALUE(regs[a], (mrb_int)insn - (mrb_int)OP_LOADI_0); NEXT; } CASE(OP_LOADI16, BS) { SET_FIXNUM_VALUE(regs[a], (mrb_int)(int16_t)b); NEXT; } CASE(OP_LOADI32, BSS) { SET_INT_VALUE(mrb, regs[a], (int32_t)(((uint32_t)b<<16)+c)); NEXT; } CASE(OP_LOADSYM, BB) { SET_SYM_VALUE(regs[a], syms[b]); NEXT; } CASE(OP_LOADNIL, B) { SET_NIL_VALUE(regs[a]); NEXT; } CASE(OP_LOADSELF, B) { regs[a] = regs[0]; NEXT; } CASE(OP_LOADT, B) { SET_TRUE_VALUE(regs[a]); NEXT; } CASE(OP_LOADF, B) { SET_FALSE_VALUE(regs[a]); NEXT; } CASE(OP_GETGV, BB) { mrb_value val = mrb_gv_get(mrb, syms[b]); regs[a] = val; NEXT; } CASE(OP_SETGV, BB) { mrb_gv_set(mrb, syms[b], regs[a]); NEXT; } CASE(OP_GETSV, BB) { mrb_value val = mrb_vm_special_get(mrb, syms[b]); regs[a] = val; NEXT; } CASE(OP_SETSV, BB) { mrb_vm_special_set(mrb, syms[b], regs[a]); NEXT; } CASE(OP_GETIV, BB) { regs[a] = mrb_iv_get(mrb, regs[0], syms[b]); NEXT; } CASE(OP_SETIV, BB) { mrb_iv_set(mrb, regs[0], syms[b], regs[a]); NEXT; } CASE(OP_GETCV, BB) { mrb_value val; val = mrb_vm_cv_get(mrb, syms[b]); regs[a] = val; NEXT; } CASE(OP_SETCV, BB) { mrb_vm_cv_set(mrb, syms[b], regs[a]); NEXT; } CASE(OP_GETIDX, B) { mrb_value va = regs[a], vb = regs[a+1]; switch (mrb_type(va)) { case MRB_TT_ARRAY: if (!mrb_integer_p(vb)) goto getidx_fallback; regs[a] = mrb_ary_entry(va, mrb_integer(vb)); 
break; case MRB_TT_HASH: va = mrb_hash_get(mrb, va, vb); regs[a] = va; break; case MRB_TT_STRING: switch (mrb_type(vb)) { case MRB_TT_INTEGER: case MRB_TT_STRING: case MRB_TT_RANGE: va = mrb_str_aref(mrb, va, vb, mrb_undef_value()); regs[a] = va; break; default: goto getidx_fallback; } break; default: getidx_fallback: mid = MRB_OPSYM(aref); goto L_SEND_SYM; } NEXT; } CASE(OP_SETIDX, B) { c = 2; mid = MRB_OPSYM(aset); SET_NIL_VALUE(regs[a+3]); goto L_SENDB_SYM; } CASE(OP_GETCONST, BB) { mrb_value v = mrb_vm_const_get(mrb, syms[b]); regs[a] = v; NEXT; } CASE(OP_SETCONST, BB) { mrb_vm_const_set(mrb, syms[b], regs[a]); NEXT; } CASE(OP_GETMCNST, BB) { mrb_value v = mrb_const_get(mrb, regs[a], syms[b]); regs[a] = v; NEXT; } CASE(OP_SETMCNST, BB) { mrb_const_set(mrb, regs[a+1], syms[b], regs[a]); NEXT; } CASE(OP_GETUPVAR, BBB) { mrb_value *regs_a = regs + a; struct REnv *e = uvenv(mrb, c); if (e && b < MRB_ENV_LEN(e)) { *regs_a = e->stack[b]; } else { *regs_a = mrb_nil_value(); } NEXT; } CASE(OP_SETUPVAR, BBB) { struct REnv *e = uvenv(mrb, c); if (e) { mrb_value *regs_a = regs + a; if (b < MRB_ENV_LEN(e)) { e->stack[b] = *regs_a; mrb_write_barrier(mrb, (struct RBasic*)e); } } NEXT; } CASE(OP_JMP, S) { pc += (int16_t)a; JUMP; } CASE(OP_JMPIF, BS) { if (mrb_test(regs[a])) { pc += (int16_t)b; JUMP; } NEXT; } CASE(OP_JMPNOT, BS) { if (!mrb_test(regs[a])) { pc += (int16_t)b; JUMP; } NEXT; } CASE(OP_JMPNIL, BS) { if (mrb_nil_p(regs[a])) { pc += (int16_t)b; JUMP; } NEXT; } CASE(OP_JMPUW, S) { a = (uint32_t)((pc - irep->iseq) + (int16_t)a); CHECKPOINT_RESTORE(RBREAK_TAG_JUMP) { struct RBreak *brk = (struct RBreak*)mrb->exc; mrb_value target = mrb_break_value_get(brk); mrb_assert(mrb_integer_p(target)); a = (uint32_t)mrb_integer(target); mrb_assert(a >= 0 && a < irep->ilen); } CHECKPOINT_MAIN(RBREAK_TAG_JUMP) { ch = catch_handler_find(mrb, mrb->c->ci, pc, MRB_CATCH_FILTER_ENSURE); if (ch) { /* avoiding a jump from a catch handler into the same handler */ if (a < mrb_irep_catch_handler_unpack(ch->begin) || a >= mrb_irep_catch_handler_unpack(ch->end)) { THROW_TAGGED_BREAK(mrb, RBREAK_TAG_JUMP, proc, mrb_fixnum_value(a)); } } } CHECKPOINT_END(RBREAK_TAG_JUMP); mrb->exc = NULL; /* clear break object */ pc = irep->iseq + a; JUMP; } CASE(OP_EXCEPT, B) { mrb_value exc; if (mrb->exc == NULL) { exc = mrb_nil_value(); } else { switch (mrb->exc->tt) { case MRB_TT_BREAK: case MRB_TT_EXCEPTION: exc = mrb_obj_value(mrb->exc); break; default: mrb_assert(!"bad mrb_type"); exc = mrb_nil_value(); break; } mrb->exc = NULL; } regs[a] = exc; NEXT; } CASE(OP_RESCUE, BB) { mrb_value exc = regs[a]; /* exc on stack */ mrb_value e = regs[b]; struct RClass *ec; switch (mrb_type(e)) { case MRB_TT_CLASS: case MRB_TT_MODULE: break; default: { mrb_value exc; exc = mrb_exc_new_lit(mrb, E_TYPE_ERROR, "class or module required for rescue clause"); mrb_exc_set(mrb, exc); goto L_RAISE; } } ec = mrb_class_ptr(e); regs[b] = mrb_bool_value(mrb_obj_is_kind_of(mrb, exc, ec)); NEXT; } CASE(OP_RAISEIF, B) { mrb_value exc = regs[a]; if (mrb_break_p(exc)) { mrb->exc = mrb_obj_ptr(exc); goto L_BREAK; } mrb_exc_set(mrb, exc); if (mrb->exc) { goto L_RAISE; } NEXT; } CASE(OP_SSEND, BBB) { regs[a] = regs[0]; insn = OP_SEND; } goto L_SENDB; CASE(OP_SSENDB, BBB) { regs[a] = regs[0]; } goto L_SENDB; CASE(OP_SEND, BBB) goto L_SENDB; L_SEND_SYM: c = 1; /* push nil after arguments */ SET_NIL_VALUE(regs[a+2]); goto L_SENDB_SYM; CASE(OP_SENDB, BBB) L_SENDB: mid = syms[b]; L_SENDB_SYM: { mrb_callinfo *ci = mrb->c->ci; mrb_method_t m; struct RClass *cls; mrb_value 
recv, blk; ARGUMENT_NORMALIZE(a, &c, insn); recv = regs[a]; cls = mrb_class(mrb, recv); m = mrb_method_search_vm(mrb, &cls, mid); if (MRB_METHOD_UNDEF_P(m)) { m = prepare_missing(mrb, recv, mid, &cls, a, &c, blk, 0); mid = MRB_SYM(method_missing); } /* push callinfo */ ci = cipush(mrb, a, 0, cls, NULL, mid, c); if (MRB_METHOD_CFUNC_P(m)) { if (MRB_METHOD_PROC_P(m)) { struct RProc *p = MRB_METHOD_PROC(m); mrb_vm_ci_proc_set(ci, p); recv = p->body.func(mrb, recv); } else { if (MRB_METHOD_NOARG_P(m)) { check_method_noarg(mrb, ci); } recv = MRB_METHOD_FUNC(m)(mrb, recv); } mrb_gc_arena_shrink(mrb, ai); if (mrb->exc) goto L_RAISE; ci = mrb->c->ci; if (mrb_proc_p(blk)) { struct RProc *p = mrb_proc_ptr(blk); if (p && !MRB_PROC_STRICT_P(p) && MRB_PROC_ENV(p) == mrb_vm_ci_env(&ci[-1])) { p->flags |= MRB_PROC_ORPHAN; } } if (!ci->u.target_class) { /* return from context modifying method (resume/yield) */ if (ci->cci == CINFO_RESUMED) { mrb->jmp = prev_jmp; return recv; } else { mrb_assert(!MRB_PROC_CFUNC_P(ci[-1].proc)); proc = ci[-1].proc; irep = proc->body.irep; pool = irep->pool; syms = irep->syms; } } ci->stack[0] = recv; /* pop stackpos */ ci = cipop(mrb); pc = ci->pc; } else { /* setup environment for calling method */ mrb_vm_ci_proc_set(ci, (proc = MRB_METHOD_PROC(m))); irep = proc->body.irep; pool = irep->pool; syms = irep->syms; mrb_stack_extend(mrb, (irep->nregs < 4) ? 4 : irep->nregs); pc = irep->iseq; } } JUMP; CASE(OP_CALL, Z) { mrb_callinfo *ci = mrb->c->ci; mrb_value recv = ci->stack[0]; struct RProc *m = mrb_proc_ptr(recv); /* replace callinfo */ ci->u.target_class = MRB_PROC_TARGET_CLASS(m); mrb_vm_ci_proc_set(ci, m); if (MRB_PROC_ENV_P(m)) { ci->mid = MRB_PROC_ENV(m)->mid; } /* prepare stack */ if (MRB_PROC_CFUNC_P(m)) { recv = MRB_PROC_CFUNC(m)(mrb, recv); mrb_gc_arena_shrink(mrb, ai); if (mrb->exc) goto L_RAISE; /* pop stackpos */ ci = cipop(mrb); pc = ci->pc; ci[1].stack[0] = recv; irep = mrb->c->ci->proc->body.irep; } else { /* setup environment for calling method */ proc = m; irep = m->body.irep; if (!irep) { mrb->c->ci->stack[0] = mrb_nil_value(); a = 0; c = OP_R_NORMAL; goto L_OP_RETURN_BODY; } mrb_int nargs = mrb_ci_bidx(ci)+1; if (nargs < irep->nregs) { mrb_stack_extend(mrb, irep->nregs); stack_clear(regs+nargs, irep->nregs-nargs); } if (MRB_PROC_ENV_P(m)) { regs[0] = MRB_PROC_ENV(m)->stack[0]; } pc = irep->iseq; } pool = irep->pool; syms = irep->syms; JUMP; } CASE(OP_SUPER, BB) { mrb_method_t m; struct RClass *cls; mrb_callinfo *ci = mrb->c->ci; mrb_value recv, blk; const struct RProc *p = ci->proc; mrb_sym mid = ci->mid; struct RClass* target_class = MRB_PROC_TARGET_CLASS(p); if (MRB_PROC_ENV_P(p) && p->e.env->mid && p->e.env->mid != mid) { /* alias support */ mid = p->e.env->mid; /* restore old mid */ } if (mid == 0 || !target_class) { mrb_value exc = mrb_exc_new_lit(mrb, E_NOMETHOD_ERROR, "super called outside of method"); mrb_exc_set(mrb, exc); goto L_RAISE; } if ((target_class->flags & MRB_FL_CLASS_IS_PREPENDED) || target_class->tt == MRB_TT_MODULE) { target_class = mrb_vm_ci_target_class(ci); if (!target_class || target_class->tt != MRB_TT_ICLASS) { goto super_typeerror; } } recv = regs[0]; if (!mrb_obj_is_kind_of(mrb, recv, target_class)) { super_typeerror: ; mrb_value exc = mrb_exc_new_lit(mrb, E_TYPE_ERROR, "self has wrong type to call super in this context"); mrb_exc_set(mrb, exc); goto L_RAISE; } ARGUMENT_NORMALIZE(a, &b, OP_SUPER); cls = target_class->super; m = mrb_method_search_vm(mrb, &cls, mid); if (MRB_METHOD_UNDEF_P(m)) { m = prepare_missing(mrb, recv, 
mid, &cls, a, &b, blk, 1); mid = MRB_SYM(method_missing); } /* push callinfo */ ci = cipush(mrb, a, 0, cls, NULL, mid, b); /* prepare stack */ ci->stack[0] = recv; if (MRB_METHOD_CFUNC_P(m)) { mrb_value v; if (MRB_METHOD_PROC_P(m)) { mrb_vm_ci_proc_set(ci, MRB_METHOD_PROC(m)); } v = MRB_METHOD_CFUNC(m)(mrb, recv); mrb_gc_arena_restore(mrb, ai); if (mrb->exc) goto L_RAISE; ci = mrb->c->ci; mrb_assert(!mrb_break_p(v)); if (!mrb_vm_ci_target_class(ci)) { /* return from context modifying method (resume/yield) */ if (ci->cci == CINFO_RESUMED) { mrb->jmp = prev_jmp; return v; } else { mrb_assert(!MRB_PROC_CFUNC_P(ci[-1].proc)); proc = ci[-1].proc; irep = proc->body.irep; pool = irep->pool; syms = irep->syms; } } mrb->c->ci->stack[0] = v; ci = cipop(mrb); pc = ci->pc; } else { /* setup environment for calling method */ mrb_vm_ci_proc_set(ci, (proc = MRB_METHOD_PROC(m))); irep = proc->body.irep; pool = irep->pool; syms = irep->syms; mrb_stack_extend(mrb, (irep->nregs < 4) ? 4 : irep->nregs); pc = irep->iseq; } JUMP; } CASE(OP_ARGARY, BS) { mrb_int m1 = (b>>11)&0x3f; mrb_int r = (b>>10)&0x1; mrb_int m2 = (b>>5)&0x1f; mrb_int kd = (b>>4)&0x1; mrb_int lv = (b>>0)&0xf; mrb_value *stack; if (mrb->c->ci->mid == 0 || mrb_vm_ci_target_class(mrb->c->ci) == NULL) { mrb_value exc; L_NOSUPER: exc = mrb_exc_new_lit(mrb, E_NOMETHOD_ERROR, "super called outside of method"); mrb_exc_set(mrb, exc); goto L_RAISE; } if (lv == 0) stack = regs + 1; else { struct REnv *e = uvenv(mrb, lv-1); if (!e) goto L_NOSUPER; if (MRB_ENV_LEN(e) <= m1+r+m2+1) goto L_NOSUPER; stack = e->stack + 1; } if (r == 0) { regs[a] = mrb_ary_new_from_values(mrb, m1+m2, stack); } else { mrb_value *pp = NULL; struct RArray *rest; mrb_int len = 0; if (mrb_array_p(stack[m1])) { struct RArray *ary = mrb_ary_ptr(stack[m1]); pp = ARY_PTR(ary); len = ARY_LEN(ary); } regs[a] = mrb_ary_new_capa(mrb, m1+len+m2); rest = mrb_ary_ptr(regs[a]); if (m1 > 0) { stack_copy(ARY_PTR(rest), stack, m1); } if (len > 0) { stack_copy(ARY_PTR(rest)+m1, pp, len); } if (m2 > 0) { stack_copy(ARY_PTR(rest)+m1+len, stack+m1+1, m2); } ARY_SET_LEN(rest, m1+len+m2); } if (kd) { regs[a+1] = stack[m1+r+m2]; regs[a+2] = stack[m1+r+m2+1]; } else { regs[a+1] = stack[m1+r+m2]; } mrb_gc_arena_restore(mrb, ai); NEXT; } CASE(OP_ENTER, W) { mrb_int m1 = MRB_ASPEC_REQ(a); mrb_int o = MRB_ASPEC_OPT(a); mrb_int r = MRB_ASPEC_REST(a); mrb_int m2 = MRB_ASPEC_POST(a); mrb_int kd = (MRB_ASPEC_KEY(a) > 0 || MRB_ASPEC_KDICT(a))? 
1 : 0; /* unused int b = MRB_ASPEC_BLOCK(a); */ mrb_int const len = m1 + o + r + m2; mrb_callinfo *ci = mrb->c->ci; mrb_int argc = ci->n; mrb_value *argv = regs+1; mrb_value * const argv0 = argv; mrb_int const kw_pos = len + kd; /* where kwhash should be */ mrb_int const blk_pos = kw_pos + 1; /* where block should be */ mrb_value blk = regs[mrb_ci_bidx(ci)]; mrb_value kdict = mrb_nil_value(); /* keyword arguments */ if (ci->nk > 0) { mrb_int kidx = mrb_ci_kidx(ci); kdict = regs[kidx]; if (!mrb_hash_p(kdict) || mrb_hash_size(mrb, kdict) == 0) { kdict = mrb_nil_value(); ci->nk = 0; } } if (!kd && !mrb_nil_p(kdict)) { if (argc < 14) { ci->n++; argc++; /* include kdict in normal arguments */ } else if (argc == 14) { /* pack arguments and kdict */ regs[1] = mrb_ary_new_from_values(mrb, argc+1, &regs[1]); argc = ci->n = 15; } else {/* argc == 15 */ /* push kdict to packed arguments */ mrb_ary_push(mrb, regs[1], regs[2]); } ci->nk = 0; } if (kd && MRB_ASPEC_KEY(a) > 0 && mrb_hash_p(kdict)) { kdict = mrb_hash_dup(mrb, kdict); } /* arguments is passed with Array */ if (argc == 15) { struct RArray *ary = mrb_ary_ptr(regs[1]); argv = ARY_PTR(ary); argc = (int)ARY_LEN(ary); mrb_gc_protect(mrb, regs[1]); } /* strict argument check */ if (ci->proc && MRB_PROC_STRICT_P(ci->proc)) { if (argc < m1 + m2 || (r == 0 && argc > len)) { argnum_error(mrb, m1+m2); goto L_RAISE; } } /* extract first argument array to arguments */ else if (len > 1 && argc == 1 && mrb_array_p(argv[0])) { mrb_gc_protect(mrb, argv[0]); argc = (int)RARRAY_LEN(argv[0]); argv = RARRAY_PTR(argv[0]); } /* rest arguments */ mrb_value rest = mrb_nil_value(); if (argc < len) { mrb_int mlen = m2; if (argc < m1+m2) { mlen = m1 < argc ? argc - m1 : 0; } /* copy mandatory and optional arguments */ if (argv0 != argv && argv) { value_move(&regs[1], argv, argc-mlen); /* m1 + o */ } if (argc < m1) { stack_clear(&regs[argc+1], m1-argc); } /* copy post mandatory arguments */ if (mlen) { value_move(&regs[len-m2+1], &argv[argc-mlen], mlen); } if (mlen < m2) { stack_clear(&regs[len-m2+mlen+1], m2-mlen); } /* initialize rest arguments with empty Array */ if (r) { rest = mrb_ary_new_capa(mrb, 0); regs[m1+o+1] = rest; } /* skip initializer of passed arguments */ if (o > 0 && argc > m1+m2) pc += (argc - m1 - m2)*3; } else { mrb_int rnum = 0; if (argv0 != argv) { value_move(&regs[1], argv, m1+o); } if (r) { rnum = argc-m1-o-m2; rest = mrb_ary_new_from_values(mrb, rnum, argv+m1+o); regs[m1+o+1] = rest; } if (m2 > 0 && argc-m2 > m1) { value_move(&regs[m1+o+r+1], &argv[m1+o+rnum], m2); } pc += o*3; } /* need to be update blk first to protect blk from GC */ regs[blk_pos] = blk; /* move block */ if (kd) { if (mrb_nil_p(kdict)) kdict = mrb_hash_new_capa(mrb, 0); regs[kw_pos] = kdict; /* set kwhash */ } /* format arguments for generated code */ mrb->c->ci->n = (uint8_t)len; /* clear local (but non-argument) variables */ if (irep->nlocals-blk_pos-1 > 0) { stack_clear(&regs[blk_pos+1], irep->nlocals-blk_pos-1); } JUMP; } CASE(OP_KARG, BB) { mrb_value k = mrb_symbol_value(syms[b]); mrb_int kidx = mrb_ci_kidx(mrb->c->ci); mrb_value kdict, v; if (kidx < 0 || !mrb_hash_p(kdict=regs[kidx]) || !mrb_hash_key_p(mrb, kdict, k)) { mrb_value str = mrb_format(mrb, "missing keyword: %v", k); mrb_exc_set(mrb, mrb_exc_new_str(mrb, E_ARGUMENT_ERROR, str)); goto L_RAISE; } v = mrb_hash_get(mrb, kdict, k); regs[a] = v; mrb_hash_delete_key(mrb, kdict, k); NEXT; } CASE(OP_KEY_P, BB) { mrb_value k = mrb_symbol_value(syms[b]); mrb_int kidx = mrb_ci_kidx(mrb->c->ci); mrb_value kdict; mrb_bool 
key_p = FALSE; if (kidx >= 0 && mrb_hash_p(kdict=regs[kidx])) { key_p = mrb_hash_key_p(mrb, kdict, k); } regs[a] = mrb_bool_value(key_p); NEXT; } CASE(OP_KEYEND, Z) { mrb_int kidx = mrb_ci_kidx(mrb->c->ci); mrb_value kdict; if (kidx >= 0 && mrb_hash_p(kdict=regs[kidx]) && !mrb_hash_empty_p(mrb, kdict)) { mrb_value keys = mrb_hash_keys(mrb, kdict); mrb_value key1 = RARRAY_PTR(keys)[0]; mrb_value str = mrb_format(mrb, "unknown keyword: %v", key1); mrb_exc_set(mrb, mrb_exc_new_str(mrb, E_ARGUMENT_ERROR, str)); goto L_RAISE; } NEXT; } CASE(OP_BREAK, B) { c = OP_R_BREAK; goto L_RETURN; } CASE(OP_RETURN_BLK, B) { c = OP_R_RETURN; goto L_RETURN; } CASE(OP_RETURN, B) c = OP_R_NORMAL; L_RETURN: { mrb_callinfo *ci; ci = mrb->c->ci; if (ci->mid) { mrb_value blk = regs[mrb_ci_bidx(ci)]; if (mrb_proc_p(blk)) { struct RProc *p = mrb_proc_ptr(blk); if (!MRB_PROC_STRICT_P(p) && ci > mrb->c->cibase && MRB_PROC_ENV(p) == mrb_vm_ci_env(&ci[-1])) { p->flags |= MRB_PROC_ORPHAN; } } } if (mrb->exc) { L_RAISE: ci = mrb->c->ci; if (ci == mrb->c->cibase) { ch = catch_handler_find(mrb, ci, pc, MRB_CATCH_FILTER_ALL); if (ch == NULL) goto L_FTOP; goto L_CATCH; } while ((ch = catch_handler_find(mrb, ci, pc, MRB_CATCH_FILTER_ALL)) == NULL) { ci = cipop(mrb); if (ci[1].cci == CINFO_SKIP && prev_jmp) { mrb->jmp = prev_jmp; MRB_THROW(prev_jmp); } pc = ci[0].pc; if (ci == mrb->c->cibase) { ch = catch_handler_find(mrb, ci, pc, MRB_CATCH_FILTER_ALL); if (ch == NULL) { L_FTOP: /* fiber top */ if (mrb->c == mrb->root_c) { mrb->c->ci->stack = mrb->c->stbase; goto L_STOP; } else { struct mrb_context *c = mrb->c; c->status = MRB_FIBER_TERMINATED; mrb->c = c->prev; c->prev = NULL; goto L_RAISE; } } break; } } L_CATCH: if (ch == NULL) goto L_STOP; if (FALSE) { L_CATCH_TAGGED_BREAK: /* from THROW_TAGGED_BREAK() or UNWIND_ENSURE() */ ci = mrb->c->ci; } proc = ci->proc; irep = proc->body.irep; pool = irep->pool; syms = irep->syms; mrb_stack_extend(mrb, irep->nregs); pc = irep->iseq + mrb_irep_catch_handler_unpack(ch->target); } else { mrb_int acc; mrb_value v; ci = mrb->c->ci; v = regs[a]; mrb_gc_protect(mrb, v); switch (c) { case OP_R_RETURN: /* Fall through to OP_R_NORMAL otherwise */ if (ci->cci == CINFO_NONE && MRB_PROC_ENV_P(proc) && !MRB_PROC_STRICT_P(proc)) { const struct RProc *dst; mrb_callinfo *cibase; cibase = mrb->c->cibase; dst = top_proc(mrb, proc); if (MRB_PROC_ENV_P(dst)) { struct REnv *e = MRB_PROC_ENV(dst); if (!MRB_ENV_ONSTACK_P(e) || (e->cxt && e->cxt != mrb->c)) { localjump_error(mrb, LOCALJUMP_ERROR_RETURN); goto L_RAISE; } } /* check jump destination */ while (cibase <= ci && ci->proc != dst) { if (ci->cci > CINFO_NONE) { /* jump cross C boundary */ localjump_error(mrb, LOCALJUMP_ERROR_RETURN); goto L_RAISE; } ci--; } if (ci <= cibase) { /* no jump destination */ localjump_error(mrb, LOCALJUMP_ERROR_RETURN); goto L_RAISE; } ci = mrb->c->ci; while (cibase <= ci && ci->proc != dst) { CHECKPOINT_RESTORE(RBREAK_TAG_RETURN_BLOCK) { cibase = mrb->c->cibase; dst = top_proc(mrb, proc); } CHECKPOINT_MAIN(RBREAK_TAG_RETURN_BLOCK) { UNWIND_ENSURE(mrb, ci, pc, RBREAK_TAG_RETURN_BLOCK, proc, v); } CHECKPOINT_END(RBREAK_TAG_RETURN_BLOCK); ci = cipop(mrb); pc = ci->pc; } proc = ci->proc; mrb->exc = NULL; /* clear break object */ break; } /* fallthrough */ case OP_R_NORMAL: NORMAL_RETURN: if (ci == mrb->c->cibase) { struct mrb_context *c; c = mrb->c; if (!c->prev) { /* toplevel return */ regs[irep->nlocals] = v; goto CHECKPOINT_LABEL_MAKE(RBREAK_TAG_STOP); } if (!c->vmexec && c->prev->ci == c->prev->cibase) { mrb_value exc = 
mrb_exc_new_lit(mrb, E_FIBER_ERROR, "double resume"); mrb_exc_set(mrb, exc); goto L_RAISE; } CHECKPOINT_RESTORE(RBREAK_TAG_RETURN_TOPLEVEL) { c = mrb->c; } CHECKPOINT_MAIN(RBREAK_TAG_RETURN_TOPLEVEL) { UNWIND_ENSURE(mrb, ci, pc, RBREAK_TAG_RETURN_TOPLEVEL, proc, v); } CHECKPOINT_END(RBREAK_TAG_RETURN_TOPLEVEL); /* automatic yield at the end */ c->status = MRB_FIBER_TERMINATED; mrb->c = c->prev; mrb->c->status = MRB_FIBER_RUNNING; c->prev = NULL; if (c->vmexec) { mrb_gc_arena_restore(mrb, ai); c->vmexec = FALSE; mrb->jmp = prev_jmp; return v; } ci = mrb->c->ci; } CHECKPOINT_RESTORE(RBREAK_TAG_RETURN) { /* do nothing */ } CHECKPOINT_MAIN(RBREAK_TAG_RETURN) { UNWIND_ENSURE(mrb, ci, pc, RBREAK_TAG_RETURN, proc, v); } CHECKPOINT_END(RBREAK_TAG_RETURN); mrb->exc = NULL; /* clear break object */ break; case OP_R_BREAK: if (MRB_PROC_STRICT_P(proc)) goto NORMAL_RETURN; if (MRB_PROC_ORPHAN_P(proc)) { mrb_value exc; L_BREAK_ERROR: exc = mrb_exc_new_lit(mrb, E_LOCALJUMP_ERROR, "break from proc-closure"); mrb_exc_set(mrb, exc); goto L_RAISE; } if (!MRB_PROC_ENV_P(proc) || !MRB_ENV_ONSTACK_P(MRB_PROC_ENV(proc))) { goto L_BREAK_ERROR; } else { struct REnv *e = MRB_PROC_ENV(proc); if (e->cxt != mrb->c) { goto L_BREAK_ERROR; } } CHECKPOINT_RESTORE(RBREAK_TAG_BREAK) { /* do nothing */ } CHECKPOINT_MAIN(RBREAK_TAG_BREAK) { UNWIND_ENSURE(mrb, ci, pc, RBREAK_TAG_BREAK, proc, v); } CHECKPOINT_END(RBREAK_TAG_BREAK); /* break from fiber block */ if (ci == mrb->c->cibase && ci->pc) { struct mrb_context *c = mrb->c; mrb->c = c->prev; c->prev = NULL; ci = mrb->c->ci; } if (ci->cci > CINFO_NONE) { ci = cipop(mrb); mrb->exc = (struct RObject*)break_new(mrb, RBREAK_TAG_BREAK, proc, v); mrb_gc_arena_restore(mrb, ai); mrb->c->vmexec = FALSE; mrb->jmp = prev_jmp; MRB_THROW(prev_jmp); } if (FALSE) { struct RBreak *brk; L_BREAK: brk = (struct RBreak*)mrb->exc; proc = mrb_break_proc_get(brk); v = mrb_break_value_get(brk); ci = mrb->c->ci; switch (mrb_break_tag_get(brk)) { #define DISPATCH_CHECKPOINTS(n, i) case n: goto CHECKPOINT_LABEL_MAKE(n); RBREAK_TAG_FOREACH(DISPATCH_CHECKPOINTS) #undef DISPATCH_CHECKPOINTS default: mrb_assert(!"wrong break tag"); } } while (mrb->c->cibase < ci && ci[-1].proc != proc->upper) { if (ci[-1].cci == CINFO_SKIP) { goto L_BREAK_ERROR; } CHECKPOINT_RESTORE(RBREAK_TAG_BREAK_UPPER) { /* do nothing */ } CHECKPOINT_MAIN(RBREAK_TAG_BREAK_UPPER) { UNWIND_ENSURE(mrb, ci, pc, RBREAK_TAG_BREAK_UPPER, proc, v); } CHECKPOINT_END(RBREAK_TAG_BREAK_UPPER); ci = cipop(mrb); pc = ci->pc; } CHECKPOINT_RESTORE(RBREAK_TAG_BREAK_INTARGET) { /* do nothing */ } CHECKPOINT_MAIN(RBREAK_TAG_BREAK_INTARGET) { UNWIND_ENSURE(mrb, ci, pc, RBREAK_TAG_BREAK_INTARGET, proc, v); } CHECKPOINT_END(RBREAK_TAG_BREAK_INTARGET); if (ci == mrb->c->cibase) { goto L_BREAK_ERROR; } mrb->exc = NULL; /* clear break object */ break; default: /* cannot happen */ break; } mrb_assert(ci == mrb->c->ci); mrb_assert(mrb->exc == NULL); if (mrb->c->vmexec && !mrb_vm_ci_target_class(ci)) { mrb_gc_arena_restore(mrb, ai); mrb->c->vmexec = FALSE; mrb->jmp = prev_jmp; return v; } acc = ci->cci; ci = cipop(mrb); if (acc == CINFO_SKIP || acc == CINFO_DIRECT) { mrb_gc_arena_restore(mrb, ai); mrb->jmp = prev_jmp; return v; } pc = ci->pc; DEBUG(fprintf(stderr, "from :%s\n", mrb_sym_name(mrb, ci->mid))); proc = ci->proc; irep = proc->body.irep; pool = irep->pool; syms = irep->syms; ci[1].stack[0] = v; mrb_gc_arena_restore(mrb, ai); } JUMP; } CASE(OP_BLKPUSH, BS) { int m1 = (b>>11)&0x3f; int r = (b>>10)&0x1; int m2 = (b>>5)&0x1f; int kd = (b>>4)&0x1; int lv = 
(b>>0)&0xf; mrb_value *stack; if (lv == 0) stack = regs + 1; else { struct REnv *e = uvenv(mrb, lv-1); if (!e || (!MRB_ENV_ONSTACK_P(e) && e->mid == 0) || MRB_ENV_LEN(e) <= m1+r+m2+1) { localjump_error(mrb, LOCALJUMP_ERROR_YIELD); goto L_RAISE; } stack = e->stack + 1; } if (mrb_nil_p(stack[m1+r+m2+kd])) { localjump_error(mrb, LOCALJUMP_ERROR_YIELD); goto L_RAISE; } regs[a] = stack[m1+r+m2+kd]; NEXT; } #if !defined(MRB_USE_BIGINT) || defined(MRB_INT32) L_INT_OVERFLOW: { mrb_value exc = mrb_exc_new_lit(mrb, E_RANGE_ERROR, "integer overflow"); mrb_exc_set(mrb, exc); } goto L_RAISE; #endif #define TYPES2(a,b) ((((uint16_t)(a))<<8)|(((uint16_t)(b))&0xff)) #define OP_MATH(op_name) \ /* need to check if op is overridden */ \ switch (TYPES2(mrb_type(regs[a]),mrb_type(regs[a+1]))) { \ OP_MATH_CASE_INTEGER(op_name); \ OP_MATH_CASE_FLOAT(op_name, integer, float); \ OP_MATH_CASE_FLOAT(op_name, float, integer); \ OP_MATH_CASE_FLOAT(op_name, float, float); \ OP_MATH_CASE_STRING_##op_name(); \ default: \ mid = MRB_OPSYM(op_name); \ goto L_SEND_SYM; \ } \ NEXT; #define OP_MATH_CASE_INTEGER(op_name) \ case TYPES2(MRB_TT_INTEGER, MRB_TT_INTEGER): \ { \ mrb_int x = mrb_integer(regs[a]), y = mrb_integer(regs[a+1]), z; \ if (mrb_int_##op_name##_overflow(x, y, &z)) { \ OP_MATH_OVERFLOW_INT(op_name,x,y); \ } \ else \ SET_INT_VALUE(mrb,regs[a], z); \ } \ break #ifdef MRB_NO_FLOAT #define OP_MATH_CASE_FLOAT(op_name, t1, t2) (void)0 #else #define OP_MATH_CASE_FLOAT(op_name, t1, t2) \ case TYPES2(OP_MATH_TT_##t1, OP_MATH_TT_##t2): \ { \ mrb_float z = mrb_##t1(regs[a]) OP_MATH_OP_##op_name mrb_##t2(regs[a+1]); \ SET_FLOAT_VALUE(mrb, regs[a], z); \ } \ break #endif #ifdef MRB_USE_BIGINT #define OP_MATH_OVERFLOW_INT(op,x,y) regs[a] = mrb_bint_##op##_ii(mrb,x,y) #else #define OP_MATH_OVERFLOW_INT(op,x,y) goto L_INT_OVERFLOW #endif #define OP_MATH_CASE_STRING_add() \ case TYPES2(MRB_TT_STRING, MRB_TT_STRING): \ regs[a] = mrb_str_plus(mrb, regs[a], regs[a+1]); \ mrb_gc_arena_restore(mrb, ai); \ break #define OP_MATH_CASE_STRING_sub() (void)0 #define OP_MATH_CASE_STRING_mul() (void)0 #define OP_MATH_OP_add + #define OP_MATH_OP_sub - #define OP_MATH_OP_mul * #define OP_MATH_TT_integer MRB_TT_INTEGER #define OP_MATH_TT_float MRB_TT_FLOAT CASE(OP_ADD, B) { OP_MATH(add); } CASE(OP_SUB, B) { OP_MATH(sub); } CASE(OP_MUL, B) { OP_MATH(mul); } CASE(OP_DIV, B) { #ifndef MRB_NO_FLOAT mrb_float x, y, f; #endif /* need to check if op is overridden */ switch (TYPES2(mrb_type(regs[a]),mrb_type(regs[a+1]))) { case TYPES2(MRB_TT_INTEGER,MRB_TT_INTEGER): { mrb_int x = mrb_integer(regs[a]); mrb_int y = mrb_integer(regs[a+1]); mrb_int div = mrb_div_int(mrb, x, y); SET_INT_VALUE(mrb, regs[a], div); } NEXT; #ifndef MRB_NO_FLOAT case TYPES2(MRB_TT_INTEGER,MRB_TT_FLOAT): x = (mrb_float)mrb_integer(regs[a]); y = mrb_float(regs[a+1]); break; case TYPES2(MRB_TT_FLOAT,MRB_TT_INTEGER): x = mrb_float(regs[a]); y = (mrb_float)mrb_integer(regs[a+1]); break; case TYPES2(MRB_TT_FLOAT,MRB_TT_FLOAT): x = mrb_float(regs[a]); y = mrb_float(regs[a+1]); break; #endif default: mid = MRB_OPSYM(div); goto L_SEND_SYM; } #ifndef MRB_NO_FLOAT f = mrb_div_float(x, y); SET_FLOAT_VALUE(mrb, regs[a], f); #endif NEXT; } #define OP_MATHI(op_name) \ /* need to check if op is overridden */ \ switch (mrb_type(regs[a])) { \ OP_MATHI_CASE_INTEGER(op_name); \ OP_MATHI_CASE_FLOAT(op_name); \ default: \ SET_INT_VALUE(mrb,regs[a+1], b); \ mid = MRB_OPSYM(op_name); \ goto L_SEND_SYM; \ } \ NEXT; #define OP_MATHI_CASE_INTEGER(op_name) \ case MRB_TT_INTEGER: \ { \ mrb_int x = 
mrb_integer(regs[a]), y = (mrb_int)b, z; \ if (mrb_int_##op_name##_overflow(x, y, &z)) { \ OP_MATH_OVERFLOW_INT(op_name,x,y); \ } \ else \ SET_INT_VALUE(mrb,regs[a], z); \ } \ break #ifdef MRB_NO_FLOAT #define OP_MATHI_CASE_FLOAT(op_name) (void)0 #else #define OP_MATHI_CASE_FLOAT(op_name) \ case MRB_TT_FLOAT: \ { \ mrb_float z = mrb_float(regs[a]) OP_MATH_OP_##op_name b; \ SET_FLOAT_VALUE(mrb, regs[a], z); \ } \ break #endif CASE(OP_ADDI, BB) { OP_MATHI(add); } CASE(OP_SUBI, BB) { OP_MATHI(sub); } #define OP_CMP_BODY(op,v1,v2) (v1(regs[a]) op v2(regs[a+1])) #ifdef MRB_NO_FLOAT #define OP_CMP(op,sym) do {\ int result;\ /* need to check if - is overridden */\ switch (TYPES2(mrb_type(regs[a]),mrb_type(regs[a+1]))) {\ case TYPES2(MRB_TT_INTEGER,MRB_TT_INTEGER):\ result = OP_CMP_BODY(op,mrb_fixnum,mrb_fixnum);\ break;\ default:\ mid = MRB_OPSYM(sym);\ goto L_SEND_SYM;\ }\ if (result) {\ SET_TRUE_VALUE(regs[a]);\ }\ else {\ SET_FALSE_VALUE(regs[a]);\ }\ } while(0) #else #define OP_CMP(op, sym) do {\ int result;\ /* need to check if - is overridden */\ switch (TYPES2(mrb_type(regs[a]),mrb_type(regs[a+1]))) {\ case TYPES2(MRB_TT_INTEGER,MRB_TT_INTEGER):\ result = OP_CMP_BODY(op,mrb_fixnum,mrb_fixnum);\ break;\ case TYPES2(MRB_TT_INTEGER,MRB_TT_FLOAT):\ result = OP_CMP_BODY(op,mrb_fixnum,mrb_float);\ break;\ case TYPES2(MRB_TT_FLOAT,MRB_TT_INTEGER):\ result = OP_CMP_BODY(op,mrb_float,mrb_fixnum);\ break;\ case TYPES2(MRB_TT_FLOAT,MRB_TT_FLOAT):\ result = OP_CMP_BODY(op,mrb_float,mrb_float);\ break;\ default:\ mid = MRB_OPSYM(sym);\ goto L_SEND_SYM;\ }\ if (result) {\ SET_TRUE_VALUE(regs[a]);\ }\ else {\ SET_FALSE_VALUE(regs[a]);\ }\ } while(0) #endif CASE(OP_EQ, B) { if (mrb_obj_eq(mrb, regs[a], regs[a+1])) { SET_TRUE_VALUE(regs[a]); } else { OP_CMP(==,eq); } NEXT; } CASE(OP_LT, B) { OP_CMP(<,lt); NEXT; } CASE(OP_LE, B) { OP_CMP(<=,le); NEXT; } CASE(OP_GT, B) { OP_CMP(>,gt); NEXT; } CASE(OP_GE, B) { OP_CMP(>=,ge); NEXT; } CASE(OP_ARRAY, BB) { regs[a] = mrb_ary_new_from_values(mrb, b, &regs[a]); mrb_gc_arena_restore(mrb, ai); NEXT; } CASE(OP_ARRAY2, BBB) { regs[a] = mrb_ary_new_from_values(mrb, c, &regs[b]); mrb_gc_arena_restore(mrb, ai); NEXT; } CASE(OP_ARYCAT, B) { mrb_value splat = mrb_ary_splat(mrb, regs[a+1]); if (mrb_nil_p(regs[a])) { regs[a] = splat; } else { mrb_assert(mrb_array_p(regs[a])); mrb_ary_concat(mrb, regs[a], splat); } mrb_gc_arena_restore(mrb, ai); NEXT; } CASE(OP_ARYPUSH, BB) { mrb_assert(mrb_array_p(regs[a])); for (mrb_int i=0; i<b; i++) { mrb_ary_push(mrb, regs[a], regs[a+i+1]); } NEXT; } CASE(OP_ARYDUP, B) { mrb_value ary = regs[a]; if (mrb_array_p(ary)) { ary = mrb_ary_new_from_values(mrb, RARRAY_LEN(ary), RARRAY_PTR(ary)); } else { ary = mrb_ary_new_from_values(mrb, 1, &ary); } regs[a] = ary; NEXT; } CASE(OP_AREF, BBB) { mrb_value v = regs[b]; if (!mrb_array_p(v)) { if (c == 0) { regs[a] = v; } else { SET_NIL_VALUE(regs[a]); } } else { v = mrb_ary_ref(mrb, v, c); regs[a] = v; } NEXT; } CASE(OP_ASET, BBB) { mrb_assert(mrb_array_p(regs[a])); mrb_ary_set(mrb, regs[b], c, regs[a]); NEXT; } CASE(OP_APOST, BBB) { mrb_value v = regs[a]; int pre = b; int post = c; struct RArray *ary; int len, idx; if (!mrb_array_p(v)) { v = mrb_ary_new_from_values(mrb, 1, &regs[a]); } ary = mrb_ary_ptr(v); len = (int)ARY_LEN(ary); if (len > pre + post) { v = mrb_ary_new_from_values(mrb, len - pre - post, ARY_PTR(ary)+pre); regs[a++] = v; while (post--) { regs[a++] = ARY_PTR(ary)[len-post-1]; } } else { v = mrb_ary_new_capa(mrb, 0); regs[a++] = v; for (idx=0; idx+pre<len; idx++) { regs[a+idx] = 
ARY_PTR(ary)[pre+idx]; } while (idx < post) { SET_NIL_VALUE(regs[a+idx]); idx++; } } mrb_gc_arena_restore(mrb, ai); NEXT; } CASE(OP_INTERN, B) { mrb_assert(mrb_string_p(regs[a])); mrb_sym sym = mrb_intern_str(mrb, regs[a]); regs[a] = mrb_symbol_value(sym); NEXT; } CASE(OP_SYMBOL, BB) { size_t len; mrb_sym sym; mrb_assert((pool[b].tt&IREP_TT_NFLAG)==0); len = pool[b].tt >> 2; if (pool[b].tt & IREP_TT_SFLAG) { sym = mrb_intern_static(mrb, pool[b].u.str, len); } else { sym = mrb_intern(mrb, pool[b].u.str, len); } regs[a] = mrb_symbol_value(sym); NEXT; } CASE(OP_STRING, BB) { mrb_int len; mrb_assert((pool[b].tt&IREP_TT_NFLAG)==0); len = pool[b].tt >> 2; if (pool[b].tt & IREP_TT_SFLAG) { regs[a] = mrb_str_new_static(mrb, pool[b].u.str, len); } else { regs[a] = mrb_str_new(mrb, pool[b].u.str, len); } mrb_gc_arena_restore(mrb, ai); NEXT; } CASE(OP_STRCAT, B) { mrb_assert(mrb_string_p(regs[a])); mrb_str_concat(mrb, regs[a], regs[a+1]); NEXT; } CASE(OP_HASH, BB) { mrb_value hash = mrb_hash_new_capa(mrb, b); int i; int lim = a+b*2; for (i=a; i<lim; i+=2) { mrb_hash_set(mrb, hash, regs[i], regs[i+1]); } regs[a] = hash; mrb_gc_arena_restore(mrb, ai); NEXT; } CASE(OP_HASHADD, BB) { mrb_value hash; int i; int lim = a+b*2+1; hash = regs[a]; mrb_ensure_hash_type(mrb, hash); for (i=a+1; i<lim; i+=2) { mrb_hash_set(mrb, hash, regs[i], regs[i+1]); } mrb_gc_arena_restore(mrb, ai); NEXT; } CASE(OP_HASHCAT, B) { mrb_value hash = regs[a]; mrb_assert(mrb_hash_p(hash)); mrb_hash_merge(mrb, hash, regs[a+1]); mrb_gc_arena_restore(mrb, ai); NEXT; } CASE(OP_LAMBDA, BB) c = OP_L_LAMBDA; L_MAKE_LAMBDA: { struct RProc *p; const mrb_irep *nirep = irep->reps[b]; if (c & OP_L_CAPTURE) { p = mrb_closure_new(mrb, nirep); } else { p = mrb_proc_new(mrb, nirep); p->flags |= MRB_PROC_SCOPE; } if (c & OP_L_STRICT) p->flags |= MRB_PROC_STRICT; regs[a] = mrb_obj_value(p); mrb_gc_arena_restore(mrb, ai); NEXT; } CASE(OP_BLOCK, BB) { c = OP_L_BLOCK; goto L_MAKE_LAMBDA; } CASE(OP_METHOD, BB) { c = OP_L_METHOD; goto L_MAKE_LAMBDA; } CASE(OP_RANGE_INC, B) { mrb_value v = mrb_range_new(mrb, regs[a], regs[a+1], FALSE); regs[a] = v; mrb_gc_arena_restore(mrb, ai); NEXT; } CASE(OP_RANGE_EXC, B) { mrb_value v = mrb_range_new(mrb, regs[a], regs[a+1], TRUE); regs[a] = v; mrb_gc_arena_restore(mrb, ai); NEXT; } CASE(OP_OCLASS, B) { regs[a] = mrb_obj_value(mrb->object_class); NEXT; } CASE(OP_CLASS, BB) { struct RClass *c = 0, *baseclass; mrb_value base, super; mrb_sym id = syms[b]; base = regs[a]; super = regs[a+1]; if (mrb_nil_p(base)) { baseclass = MRB_PROC_TARGET_CLASS(mrb->c->ci->proc); if (!baseclass) baseclass = mrb->object_class; base = mrb_obj_value(baseclass); } c = mrb_vm_define_class(mrb, base, super, id); regs[a] = mrb_obj_value(c); mrb_gc_arena_restore(mrb, ai); NEXT; } CASE(OP_MODULE, BB) { struct RClass *cls = 0, *baseclass; mrb_value base; mrb_sym id = syms[b]; base = regs[a]; if (mrb_nil_p(base)) { baseclass = MRB_PROC_TARGET_CLASS(mrb->c->ci->proc); if (!baseclass) baseclass = mrb->object_class; base = mrb_obj_value(baseclass); } cls = mrb_vm_define_module(mrb, base, id); regs[a] = mrb_obj_value(cls); mrb_gc_arena_restore(mrb, ai); NEXT; } CASE(OP_EXEC, BB) { mrb_value recv = regs[a]; struct RProc *p; const mrb_irep *nirep = irep->reps[b]; /* prepare closure */ p = mrb_proc_new(mrb, nirep); p->c = NULL; mrb_field_write_barrier(mrb, (struct RBasic*)p, (struct RBasic*)proc); MRB_PROC_SET_TARGET_CLASS(p, mrb_class_ptr(recv)); p->flags |= MRB_PROC_SCOPE; /* prepare call stack */ cipush(mrb, a, 0, mrb_class_ptr(recv), p, 0, 0); irep = 
p->body.irep; pool = irep->pool; syms = irep->syms; mrb_stack_extend(mrb, irep->nregs); stack_clear(regs+1, irep->nregs-1); pc = irep->iseq; JUMP; } CASE(OP_DEF, BB) { struct RClass *target = mrb_class_ptr(regs[a]); struct RProc *p = mrb_proc_ptr(regs[a+1]); mrb_method_t m; mrb_sym mid = syms[b]; MRB_METHOD_FROM_PROC(m, p); mrb_define_method_raw(mrb, target, mid, m); mrb_method_added(mrb, target, mid); mrb_gc_arena_restore(mrb, ai); regs[a] = mrb_symbol_value(mid); NEXT; } CASE(OP_SCLASS, B) { regs[a] = mrb_singleton_class(mrb, regs[a]); mrb_gc_arena_restore(mrb, ai); NEXT; } CASE(OP_TCLASS, B) { struct RClass *target = check_target_class(mrb); if (!target) goto L_RAISE; regs[a] = mrb_obj_value(target); NEXT; } CASE(OP_ALIAS, BB) { struct RClass *target = check_target_class(mrb); if (!target) goto L_RAISE; mrb_alias_method(mrb, target, syms[a], syms[b]); mrb_method_added(mrb, target, syms[a]); NEXT; } CASE(OP_UNDEF, B) { struct RClass *target = check_target_class(mrb); if (!target) goto L_RAISE; mrb_undef_method_id(mrb, target, syms[a]); NEXT; } CASE(OP_DEBUG, Z) { FETCH_BBB(); #ifdef MRB_USE_DEBUG_HOOK mrb->debug_op_hook(mrb, irep, pc, regs); #else #ifndef MRB_NO_STDIO printf("OP_DEBUG %d %d %d\n", a, b, c); #else abort(); #endif #endif NEXT; } CASE(OP_ERR, B) { size_t len = pool[a].tt >> 2; mrb_value exc; mrb_assert((pool[a].tt&IREP_TT_NFLAG)==0); exc = mrb_exc_new(mrb, E_LOCALJUMP_ERROR, pool[a].u.str, len); mrb_exc_set(mrb, exc); goto L_RAISE; } CASE(OP_EXT1, Z) { insn = READ_B(); switch (insn) { #define OPCODE(insn,ops) case OP_ ## insn: FETCH_ ## ops ## _1(); mrb->c->ci->pc = pc; goto L_OP_ ## insn ## _BODY; #include "mruby/ops.h" #undef OPCODE } pc--; NEXT; } CASE(OP_EXT2, Z) { insn = READ_B(); switch (insn) { #define OPCODE(insn,ops) case OP_ ## insn: FETCH_ ## ops ## _2(); mrb->c->ci->pc = pc; goto L_OP_ ## insn ## _BODY; #include "mruby/ops.h" #undef OPCODE } pc--; NEXT; } CASE(OP_EXT3, Z) { uint8_t insn = READ_B(); switch (insn) { #define OPCODE(insn,ops) case OP_ ## insn: FETCH_ ## ops ## _3(); mrb->c->ci->pc = pc; goto L_OP_ ## insn ## _BODY; #include "mruby/ops.h" #undef OPCODE } pc--; NEXT; } CASE(OP_STOP, Z) { /* stop VM */ CHECKPOINT_RESTORE(RBREAK_TAG_STOP) { /* do nothing */ } CHECKPOINT_MAIN(RBREAK_TAG_STOP) { UNWIND_ENSURE(mrb, mrb->c->ci, pc, RBREAK_TAG_STOP, proc, mrb_nil_value()); } CHECKPOINT_END(RBREAK_TAG_STOP); L_STOP: mrb->jmp = prev_jmp; if (mrb->exc) { mrb_assert(mrb->exc->tt == MRB_TT_EXCEPTION); return mrb_obj_value(mrb->exc); } return regs[irep->nlocals]; } } END_DISPATCH; #undef regs } MRB_CATCH(&c_jmp) { mrb_callinfo *ci = mrb->c->ci; while (ci > mrb->c->cibase && ci->cci == CINFO_DIRECT) { ci = cipop(mrb); } exc_catched = TRUE; pc = ci->pc; goto RETRY_TRY_BLOCK; } MRB_END_EXC(&c_jmp); }
104703994590688087058145131124808583742
vm.c
126976644189338180144758453935885521875
CWE-703
CVE-2022-1427
Out-of-bounds Read in mrb_obj_is_kind_of in the GitHub repository mruby/mruby prior to 3.2. # Impact: Possible arbitrary code execution if exploited.
https://nvd.nist.gov/vuln/detail/CVE-2022-1427
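Note on the fix above: per the commit message ("vm.c: check if target_class is NULL (when prepended)") and the fixed OP_SUPER handler in the patched function, the patch re-resolves the target class from the call info when it is prepended (or a module) and rejects the call unless a proper include-class is found, before anything is handed to mrb_obj_is_kind_of. The following is a minimal, self-contained C sketch of that guard using hypothetical stand-in types, not mruby's real structures.

#include <stdio.h>
#include <stddef.h>

/* Hypothetical stand-ins for mruby's class machinery, for illustration only. */
typedef enum { TT_CLASS, TT_MODULE, TT_ICLASS } ClassType;
typedef struct Class { ClassType tt; int prepended; } Class;

/* Models mrb_vm_ci_target_class(): the per-call target class may be absent. */
static Class *ci_target_class(Class *ci_class) { return ci_class; }

/* The pattern from the fix: when the method's target class is prepended
   (or is a module), re-resolve it from the call info and bail out unless
   a proper include-class (ICLASS) is present, instead of passing an
   unexpected pointer on to the kind_of check. */
static int super_target_ok(Class *target, Class *ci_class)
{
    if (target->prepended || target->tt == TT_MODULE) {
        target = ci_target_class(ci_class);
        if (!target || target->tt != TT_ICLASS)
            return 0;              /* the real VM raises a TypeError here */
    }
    return 1;
}

int main(void)
{
    Class plain   = { TT_CLASS,  0 };
    Class prepped = { TT_CLASS,  1 };
    Class iclass  = { TT_ICLASS, 0 };

    printf("plain class:               %d\n", super_target_ok(&plain, NULL));
    printf("prepended, no ci class:    %d\n", super_target_ok(&prepped, NULL));
    printf("prepended, iclass present: %d\n", super_target_ok(&prepped, &iclass));
    return 0;
}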
195,742
gpac
37592ad86c6ca934d34740012213e467acc4a3b0
https://github.com/gpac/gpac
https://github.com/gpac/gpac/commit/37592ad86c6ca934d34740012213e467acc4a3b0
fixed #2163
1
static GF_Err gf_isom_parse_movie_boxes_internal(GF_ISOFile *mov, u32 *boxType, u64 *bytesMissing, Bool progressive_mode) { GF_Box *a; u64 totSize, mdat_end=0; GF_Err e = GF_OK; #ifndef GPAC_DISABLE_ISOM_FRAGMENTS if (mov->single_moof_mode && mov->single_moof_state == 2) { return e; } /*restart from where we stopped last*/ totSize = mov->current_top_box_start; if (mov->bytes_removed) { assert(totSize >= mov->bytes_removed); totSize -= mov->bytes_removed; } gf_bs_seek(mov->movieFileMap->bs, totSize); #endif /*while we have some data, parse our boxes*/ while (gf_bs_available(mov->movieFileMap->bs)) { *bytesMissing = 0; #ifndef GPAC_DISABLE_ISOM_FRAGMENTS mov->current_top_box_start = gf_bs_get_position(mov->movieFileMap->bs) + mov->bytes_removed; GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[iso file] Parsing a top-level box at position %d\n", mov->current_top_box_start)); #endif e = gf_isom_parse_root_box(&a, mov->movieFileMap->bs, boxType, bytesMissing, progressive_mode); if (e >= 0) { } else if (e == GF_ISOM_INCOMPLETE_FILE) { /*our mdat is uncomplete, only valid for READ ONLY files...*/ if (mov->openMode != GF_ISOM_OPEN_READ) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Incomplete MDAT while file is not read-only\n")); return GF_ISOM_INVALID_FILE; } if ((mov->openMode == GF_ISOM_OPEN_READ) && !progressive_mode) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Incomplete file while reading for dump - aborting parsing\n")); break; } return e; } else { return e; } switch (a->type) { /*MOOV box*/ case GF_ISOM_BOX_TYPE_MOOV: if (mov->moov) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Duplicate MOOV detected!\n")); gf_isom_box_del(a); return GF_ISOM_INVALID_FILE; } mov->moov = (GF_MovieBox *)a; mov->original_moov_offset = mov->current_top_box_start; /*set our pointer to the movie*/ mov->moov->mov = mov; #ifndef GPAC_DISABLE_ISOM_FRAGMENTS if (mov->moov->mvex) mov->moov->mvex->mov = mov; #ifdef GF_ENABLE_CTRN if (! 
(mov->FragmentsFlags & GF_ISOM_FRAG_READ_DEBUG)) { gf_isom_setup_traf_inheritance(mov); } #endif #endif e = gf_list_add(mov->TopBoxes, a); if (e) return e; totSize += a->size; if (!mov->moov->mvhd) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Missing MovieHeaderBox\n")); return GF_ISOM_INVALID_FILE; } if (mov->meta) { gf_isom_meta_restore_items_ref(mov, mov->meta); } //dump senc info in dump mode if (mov->FragmentsFlags & GF_ISOM_FRAG_READ_DEBUG) { u32 k; for (k=0; k<gf_list_count(mov->moov->trackList); k++) { GF_TrackBox *trak = (GF_TrackBox *)gf_list_get(mov->moov->trackList, k); if (trak->sample_encryption) { e = senc_Parse(mov->movieFileMap->bs, trak, NULL, trak->sample_encryption); if (e) return e; } } } else { u32 k; for (k=0; k<gf_list_count(mov->moov->trackList); k++) { GF_TrackBox *trak = (GF_TrackBox *)gf_list_get(mov->moov->trackList, k); if (trak->Media->information->sampleTable->sampleGroups) { convert_compact_sample_groups(trak->Media->information->sampleTable->child_boxes, trak->Media->information->sampleTable->sampleGroups); } } } if (mdat_end && mov->signal_frag_bounds && !(mov->FragmentsFlags & GF_ISOM_FRAG_READ_DEBUG) ) { gf_isom_push_mdat_end(mov, mdat_end); mdat_end=0; } break; /*META box*/ case GF_ISOM_BOX_TYPE_META: if (mov->meta) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Duplicate META detected!\n")); gf_isom_box_del(a); return GF_ISOM_INVALID_FILE; } mov->meta = (GF_MetaBox *)a; mov->original_meta_offset = mov->current_top_box_start; e = gf_list_add(mov->TopBoxes, a); if (e) { return e; } totSize += a->size; gf_isom_meta_restore_items_ref(mov, mov->meta); break; /*we only keep the MDAT in READ for dump purposes*/ case GF_ISOM_BOX_TYPE_MDAT: if (!mov->first_data_toplevel_offset) { mov->first_data_toplevel_offset = mov->current_top_box_start; mov->first_data_toplevel_size = a->size; } totSize += a->size; #ifndef GPAC_DISABLE_ISOM_FRAGMENTS if (mov->emsgs) { gf_isom_box_array_del(mov->emsgs); mov->emsgs = NULL; } #endif if (mov->openMode == GF_ISOM_OPEN_READ) { if (!mov->mdat) { mov->mdat = (GF_MediaDataBox *) a; e = gf_list_add(mov->TopBoxes, mov->mdat); if (e) { return e; } } #ifndef GPAC_DISABLE_ISOM_FRAGMENTS else if (mov->FragmentsFlags & GF_ISOM_FRAG_READ_DEBUG) gf_list_add(mov->TopBoxes, a); #endif else gf_isom_box_del(a); //in other modes we don't care if (mov->signal_frag_bounds && !(mov->FragmentsFlags & GF_ISOM_FRAG_READ_DEBUG) ) { mdat_end = gf_bs_get_position(mov->movieFileMap->bs); if (mov->moov) { gf_isom_push_mdat_end(mov, mdat_end); mdat_end=0; } } } /*if we don't have any MDAT yet, create one (edit-write mode) We only work with one mdat, but we're puting it at the place of the first mdat found when opening a file for editing*/ else if (!mov->mdat && (mov->openMode != GF_ISOM_OPEN_READ) && (mov->openMode != GF_ISOM_OPEN_KEEP_FRAGMENTS)) { gf_isom_box_del(a); mov->mdat = (GF_MediaDataBox *) gf_isom_box_new(GF_ISOM_BOX_TYPE_MDAT); if (!mov->mdat) return GF_OUT_OF_MEM; e = gf_list_add(mov->TopBoxes, mov->mdat); if (e) { return e; } } else { gf_isom_box_del(a); } break; case GF_ISOM_BOX_TYPE_FTYP: /*ONE AND ONLY ONE FTYP*/ if (mov->brand) { gf_isom_box_del(a); GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Duplicate 'ftyp' detected!\n")); return GF_ISOM_INVALID_FILE; } mov->brand = (GF_FileTypeBox *)a; totSize += a->size; e = gf_list_add(mov->TopBoxes, a); if (e) return e; break; case GF_ISOM_BOX_TYPE_OTYP: /*ONE AND ONLY ONE FTYP*/ if (mov->otyp) { gf_isom_box_del(a); GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Duplicate 
'otyp' detected!\n")); return GF_ISOM_INVALID_FILE; } if (mov->FragmentsFlags & GF_ISOM_FRAG_READ_DEBUG) { mov->otyp = (GF_Box *)a; totSize += a->size; e = gf_list_add(mov->TopBoxes, a); if (e) return e; } else { GF_FileTypeBox *brand = (GF_FileTypeBox *) gf_isom_box_find_child(a->child_boxes, GF_ISOM_BOX_TYPE_FTYP); if (brand) { s32 pos; gf_list_del_item(a->child_boxes, brand); pos = gf_list_del_item(mov->TopBoxes, mov->brand); gf_isom_box_del((GF_Box *) mov->brand); mov->brand = brand; if (pos<0) pos=0; gf_list_insert(mov->TopBoxes, brand, pos); } gf_isom_box_del(a); } break; case GF_ISOM_BOX_TYPE_PDIN: /*ONE AND ONLY ONE PDIN*/ if (mov->pdin) { gf_isom_box_del(a); GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Duplicate 'pdin'' detected!\n")); return GF_ISOM_INVALID_FILE; } mov->pdin = (GF_ProgressiveDownloadBox *) a; totSize += a->size; e = gf_list_add(mov->TopBoxes, a); if (e) return e; break; #ifndef GPAC_DISABLE_ISOM_FRAGMENTS case GF_ISOM_BOX_TYPE_STYP: { u32 brand = ((GF_FileTypeBox *)a)->majorBrand; switch (brand) { case GF_ISOM_BRAND_SISX: case GF_ISOM_BRAND_RISX: case GF_ISOM_BRAND_SSSS: mov->is_index_segment = GF_TRUE; break; default: break; } } /*fall-through*/ case GF_ISOM_BOX_TYPE_SIDX: case GF_ISOM_BOX_TYPE_SSIX: if (mov->moov && !mov->first_data_toplevel_offset) { mov->first_data_toplevel_offset = mov->current_top_box_start; mov->first_data_toplevel_size = a->size; } totSize += a->size; if (mov->FragmentsFlags & GF_ISOM_FRAG_READ_DEBUG) { e = gf_list_add(mov->TopBoxes, a); if (e) return e; } else if (mov->signal_frag_bounds && !(mov->FragmentsFlags & GF_ISOM_FRAG_READ_DEBUG) && (mov->openMode!=GF_ISOM_OPEN_KEEP_FRAGMENTS) ) { if (a->type==GF_ISOM_BOX_TYPE_SIDX) { if (mov->root_sidx) gf_isom_box_del( (GF_Box *) mov->root_sidx); mov->root_sidx = (GF_SegmentIndexBox *) a; mov->sidx_start_offset = mov->current_top_box_start; mov->sidx_end_offset = gf_bs_get_position(mov->movieFileMap->bs); } else if (a->type==GF_ISOM_BOX_TYPE_STYP) { mov->styp_start_offset = mov->current_top_box_start; if (mov->seg_styp) gf_isom_box_del(mov->seg_styp); mov->seg_styp = a; } else if (a->type==GF_ISOM_BOX_TYPE_SSIX) { if (mov->seg_ssix) gf_isom_box_del(mov->seg_ssix); mov->seg_ssix = a; } else { gf_isom_box_del(a); } gf_isom_push_mdat_end(mov, mov->current_top_box_start); } else if (!mov->NextMoofNumber && (a->type==GF_ISOM_BOX_TYPE_SIDX)) { if (mov->main_sidx) gf_isom_box_del( (GF_Box *) mov->main_sidx); mov->main_sidx = (GF_SegmentIndexBox *) a; mov->main_sidx_end_pos = mov->current_top_box_start + a->size; } else { gf_isom_box_del(a); } break; case GF_ISOM_BOX_TYPE_MOOF: //no support for inplace rewrite for fragmented files gf_isom_disable_inplace_rewrite(mov); if (!mov->moov) { GF_LOG(mov->moof ? GF_LOG_DEBUG : GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] Movie fragment but no moov (yet) - possibly broken parsing!\n")); } if (mov->single_moof_mode) { mov->single_moof_state++; if (mov->single_moof_state > 1) { gf_isom_box_del(a); return GF_OK; } } ((GF_MovieFragmentBox *)a)->mov = mov; totSize += a->size; mov->moof = (GF_MovieFragmentBox *) a; /*some smooth streaming streams contain a SDTP under the TRAF: this is incorrect, convert it*/ FixTrackID(mov); if (! 
(mov->FragmentsFlags & GF_ISOM_FRAG_READ_DEBUG)) { FixSDTPInTRAF(mov->moof); } else { u32 k; for (k=0; k<gf_list_count(mov->moof->TrackList); k++) { GF_TrackFragmentBox *traf = (GF_TrackFragmentBox *)gf_list_get(mov->moof->TrackList, k); if (traf->sampleGroups) { convert_compact_sample_groups(traf->child_boxes, traf->sampleGroups); } } } /*read & debug: store at root level*/ if (mov->FragmentsFlags & GF_ISOM_FRAG_READ_DEBUG) { u32 k; gf_list_add(mov->TopBoxes, a); /*also update pointers to trex for debug*/ if (mov->moov) { for (k=0; k<gf_list_count(mov->moof->TrackList); k++) { GF_TrackFragmentBox *traf = gf_list_get(mov->moof->TrackList, k); if (traf->tfhd && mov->moov->mvex && mov->moov->mvex->TrackExList) { GF_TrackBox *trak = gf_isom_get_track_from_id(mov->moov, traf->tfhd->trackID); u32 j=0; while ((traf->trex = (GF_TrackExtendsBox*)gf_list_enum(mov->moov->mvex->TrackExList, &j))) { if (traf->trex->trackID == traf->tfhd->trackID) { if (!traf->trex->track) traf->trex->track = trak; break; } traf->trex = NULL; } } //we should only parse senc/psec when no saiz/saio is present, otherwise we fetch the info directly if (traf->trex && traf->tfhd && traf->trex->track && traf->sample_encryption) { GF_TrackBox *trak = GetTrackbyID(mov->moov, traf->tfhd->trackID); if (trak) { trak->current_traf_stsd_idx = traf->tfhd->sample_desc_index ? traf->tfhd->sample_desc_index : traf->trex->def_sample_desc_index; e = senc_Parse(mov->movieFileMap->bs, trak, traf, traf->sample_encryption); if (e) return e; trak->current_traf_stsd_idx = 0; } } } } else { for (k=0; k<gf_list_count(mov->moof->TrackList); k++) { GF_TrackFragmentBox *traf = gf_list_get(mov->moof->TrackList, k); if (traf->sample_encryption) { e = senc_Parse(mov->movieFileMap->bs, NULL, traf, traf->sample_encryption); if (e) return e; } } } } else if (mov->openMode==GF_ISOM_OPEN_KEEP_FRAGMENTS) { mov->NextMoofNumber = mov->moof->mfhd->sequence_number+1; mov->moof = NULL; gf_isom_box_del(a); } else { /*merge all info*/ e = MergeFragment((GF_MovieFragmentBox *)a, mov); gf_isom_box_del(a); if (e) return e; } //done with moov if (mov->root_sidx) { gf_isom_box_del((GF_Box *) mov->root_sidx); mov->root_sidx = NULL; } if (mov->root_ssix) { gf_isom_box_del(mov->seg_ssix); mov->root_ssix = NULL; } if (mov->seg_styp) { gf_isom_box_del(mov->seg_styp); mov->seg_styp = NULL; } mov->sidx_start_offset = 0; mov->sidx_end_offset = 0; mov->styp_start_offset = 0; break; #endif case GF_ISOM_BOX_TYPE_UNKNOWN: { GF_UnknownBox *box = (GF_UnknownBox*)a; if (box->original_4cc == GF_ISOM_BOX_TYPE_JP) { u8 *c = (u8 *) box->data; if ((box->dataSize==4) && (GF_4CC(c[0],c[1],c[2],c[3])==(u32)0x0D0A870A)) mov->is_jp2 = 1; gf_isom_box_del(a); } else { e = gf_list_add(mov->TopBoxes, a); if (e) return e; } } break; case GF_ISOM_BOX_TYPE_PRFT: #ifndef GPAC_DISABLE_ISOM_FRAGMENTS if (!(mov->FragmentsFlags & GF_ISOM_FRAG_READ_DEBUG)) { //keep the last one read if (mov->last_producer_ref_time) gf_isom_box_del(a); else mov->last_producer_ref_time = (GF_ProducerReferenceTimeBox *)a; break; } #endif //fallthrough case GF_ISOM_BOX_TYPE_EMSG: #ifndef GPAC_DISABLE_ISOM_FRAGMENTS if (! (mov->FragmentsFlags & GF_ISOM_FRAG_READ_DEBUG)) { if (!mov->emsgs) mov->emsgs = gf_list_new(); gf_list_add(mov->emsgs, a); break; } #endif case GF_ISOM_BOX_TYPE_MFRA: case GF_ISOM_BOX_TYPE_MFRO: //only keep for dump mode, otherwise we ignore these boxes and we don't want to carry them over in non-fragmented file if (! 
(mov->FragmentsFlags & GF_ISOM_FRAG_READ_DEBUG)) { totSize += a->size; gf_isom_box_del(a); break; } default: totSize += a->size; e = gf_list_add(mov->TopBoxes, a); if (e) return e; break; } #ifndef GPAC_DISABLE_ISOM_FRAGMENTS /*remember where we left, in case we append an entire number of movie fragments*/ mov->current_top_box_start = gf_bs_get_position(mov->movieFileMap->bs) + mov->bytes_removed; #endif } /*we need at least moov or meta*/ if (!mov->moov && !mov->meta #ifndef GPAC_DISABLE_ISOM_FRAGMENTS && !mov->moof && !mov->is_index_segment #endif ) { return GF_ISOM_INCOMPLETE_FILE; } /*we MUST have movie header*/ if (!gf_opts_get_bool("core", "no-check")) { if (mov->moov && !mov->moov->mvhd) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Missing MVHD in MOOV!\n")); return GF_ISOM_INVALID_FILE; } /*we MUST have meta handler*/ if (mov->meta && !mov->meta->handler) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Missing handler in META!\n")); return GF_ISOM_INVALID_FILE; } } #ifndef GPAC_DISABLE_ISOM_WRITE if (mov->moov) { /*set the default interleaving time*/ mov->interleavingTime = mov->moov->mvhd->timeScale; #ifndef GPAC_DISABLE_ISOM_FRAGMENTS /*in edit mode with successfully loaded fragments, delete all fragment signaling since file is no longer fragmented*/ if ((mov->openMode > GF_ISOM_OPEN_READ) && (mov->openMode != GF_ISOM_OPEN_KEEP_FRAGMENTS) && mov->moov->mvex) { gf_isom_box_del_parent(&mov->moov->child_boxes, (GF_Box *)mov->moov->mvex); mov->moov->mvex = NULL; } #endif } //create a default mdat if none was found if (!mov->mdat && (mov->openMode != GF_ISOM_OPEN_READ) && (mov->openMode != GF_ISOM_OPEN_KEEP_FRAGMENTS)) { mov->mdat = (GF_MediaDataBox *) gf_isom_box_new(GF_ISOM_BOX_TYPE_MDAT); if (!mov->mdat) return GF_OUT_OF_MEM; e = gf_list_add(mov->TopBoxes, mov->mdat); if (e) return e; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ return GF_OK; }
68912157747726016692935177449045975431
None
CWE-476
CVE-2022-29340
GPAC 2.1-DEV-rev87-g053aae8-master has a Null Pointer Dereference vulnerability in gf_isom_parse_movie_boxes_internal due to improper return value handling of GF_SKIP_BOX, which causes a Denial of Service. This vulnerability was fixed in commit 37592ad.
https://nvd.nist.gov/vuln/detail/CVE-2022-29340
232,329
gpac
37592ad86c6ca934d34740012213e467acc4a3b0
https://github.com/gpac/gpac
https://github.com/gpac/gpac/commit/37592ad86c6ca934d34740012213e467acc4a3b0
fixed #2163
0
static GF_Err gf_isom_parse_movie_boxes_internal(GF_ISOFile *mov, u32 *boxType, u64 *bytesMissing, Bool progressive_mode) { GF_Box *a; u64 totSize, mdat_end=0; GF_Err e = GF_OK; #ifndef GPAC_DISABLE_ISOM_FRAGMENTS if (mov->single_moof_mode && mov->single_moof_state == 2) { return e; } /*restart from where we stopped last*/ totSize = mov->current_top_box_start; if (mov->bytes_removed) { assert(totSize >= mov->bytes_removed); totSize -= mov->bytes_removed; } gf_bs_seek(mov->movieFileMap->bs, totSize); #endif /*while we have some data, parse our boxes*/ while (gf_bs_available(mov->movieFileMap->bs)) { *bytesMissing = 0; #ifndef GPAC_DISABLE_ISOM_FRAGMENTS mov->current_top_box_start = gf_bs_get_position(mov->movieFileMap->bs) + mov->bytes_removed; GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[iso file] Parsing a top-level box at position %d\n", mov->current_top_box_start)); #endif e = gf_isom_parse_root_box(&a, mov->movieFileMap->bs, boxType, bytesMissing, progressive_mode); if (e >= 0) { //safety check, should never happen if (!a) return GF_ISOM_INVALID_FILE; } else if (e == GF_ISOM_INCOMPLETE_FILE) { /*our mdat is uncomplete, only valid for READ ONLY files...*/ if (mov->openMode != GF_ISOM_OPEN_READ) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Incomplete MDAT while file is not read-only\n")); return GF_ISOM_INVALID_FILE; } if ((mov->openMode == GF_ISOM_OPEN_READ) && !progressive_mode) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Incomplete file while reading for dump - aborting parsing\n")); break; } return e; } else { return e; } switch (a->type) { /*MOOV box*/ case GF_ISOM_BOX_TYPE_MOOV: if (mov->moov) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Duplicate MOOV detected!\n")); gf_isom_box_del(a); return GF_ISOM_INVALID_FILE; } mov->moov = (GF_MovieBox *)a; mov->original_moov_offset = mov->current_top_box_start; /*set our pointer to the movie*/ mov->moov->mov = mov; #ifndef GPAC_DISABLE_ISOM_FRAGMENTS if (mov->moov->mvex) mov->moov->mvex->mov = mov; #ifdef GF_ENABLE_CTRN if (! 
(mov->FragmentsFlags & GF_ISOM_FRAG_READ_DEBUG)) { gf_isom_setup_traf_inheritance(mov); } #endif #endif e = gf_list_add(mov->TopBoxes, a); if (e) return e; totSize += a->size; if (!mov->moov->mvhd) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Missing MovieHeaderBox\n")); return GF_ISOM_INVALID_FILE; } if (mov->meta) { gf_isom_meta_restore_items_ref(mov, mov->meta); } //dump senc info in dump mode if (mov->FragmentsFlags & GF_ISOM_FRAG_READ_DEBUG) { u32 k; for (k=0; k<gf_list_count(mov->moov->trackList); k++) { GF_TrackBox *trak = (GF_TrackBox *)gf_list_get(mov->moov->trackList, k); if (trak->sample_encryption) { e = senc_Parse(mov->movieFileMap->bs, trak, NULL, trak->sample_encryption); if (e) return e; } } } else { u32 k; for (k=0; k<gf_list_count(mov->moov->trackList); k++) { GF_TrackBox *trak = (GF_TrackBox *)gf_list_get(mov->moov->trackList, k); if (trak->Media->information->sampleTable->sampleGroups) { convert_compact_sample_groups(trak->Media->information->sampleTable->child_boxes, trak->Media->information->sampleTable->sampleGroups); } } } if (mdat_end && mov->signal_frag_bounds && !(mov->FragmentsFlags & GF_ISOM_FRAG_READ_DEBUG) ) { gf_isom_push_mdat_end(mov, mdat_end); mdat_end=0; } break; /*META box*/ case GF_ISOM_BOX_TYPE_META: if (mov->meta) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Duplicate META detected!\n")); gf_isom_box_del(a); return GF_ISOM_INVALID_FILE; } mov->meta = (GF_MetaBox *)a; mov->original_meta_offset = mov->current_top_box_start; e = gf_list_add(mov->TopBoxes, a); if (e) { return e; } totSize += a->size; gf_isom_meta_restore_items_ref(mov, mov->meta); break; /*we only keep the MDAT in READ for dump purposes*/ case GF_ISOM_BOX_TYPE_MDAT: if (!mov->first_data_toplevel_offset) { mov->first_data_toplevel_offset = mov->current_top_box_start; mov->first_data_toplevel_size = a->size; } totSize += a->size; #ifndef GPAC_DISABLE_ISOM_FRAGMENTS if (mov->emsgs) { gf_isom_box_array_del(mov->emsgs); mov->emsgs = NULL; } #endif if (mov->openMode == GF_ISOM_OPEN_READ) { if (!mov->mdat) { mov->mdat = (GF_MediaDataBox *) a; e = gf_list_add(mov->TopBoxes, mov->mdat); if (e) { return e; } } #ifndef GPAC_DISABLE_ISOM_FRAGMENTS else if (mov->FragmentsFlags & GF_ISOM_FRAG_READ_DEBUG) gf_list_add(mov->TopBoxes, a); #endif else gf_isom_box_del(a); //in other modes we don't care if (mov->signal_frag_bounds && !(mov->FragmentsFlags & GF_ISOM_FRAG_READ_DEBUG) ) { mdat_end = gf_bs_get_position(mov->movieFileMap->bs); if (mov->moov) { gf_isom_push_mdat_end(mov, mdat_end); mdat_end=0; } } } /*if we don't have any MDAT yet, create one (edit-write mode) We only work with one mdat, but we're puting it at the place of the first mdat found when opening a file for editing*/ else if (!mov->mdat && (mov->openMode != GF_ISOM_OPEN_READ) && (mov->openMode != GF_ISOM_OPEN_KEEP_FRAGMENTS)) { gf_isom_box_del(a); mov->mdat = (GF_MediaDataBox *) gf_isom_box_new(GF_ISOM_BOX_TYPE_MDAT); if (!mov->mdat) return GF_OUT_OF_MEM; e = gf_list_add(mov->TopBoxes, mov->mdat); if (e) { return e; } } else { gf_isom_box_del(a); } break; case GF_ISOM_BOX_TYPE_FTYP: /*ONE AND ONLY ONE FTYP*/ if (mov->brand) { gf_isom_box_del(a); GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Duplicate 'ftyp' detected!\n")); return GF_ISOM_INVALID_FILE; } mov->brand = (GF_FileTypeBox *)a; totSize += a->size; e = gf_list_add(mov->TopBoxes, a); if (e) return e; break; case GF_ISOM_BOX_TYPE_OTYP: /*ONE AND ONLY ONE FTYP*/ if (mov->otyp) { gf_isom_box_del(a); GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Duplicate 
'otyp' detected!\n")); return GF_ISOM_INVALID_FILE; } if (mov->FragmentsFlags & GF_ISOM_FRAG_READ_DEBUG) { mov->otyp = (GF_Box *)a; totSize += a->size; e = gf_list_add(mov->TopBoxes, a); if (e) return e; } else { GF_FileTypeBox *brand = (GF_FileTypeBox *) gf_isom_box_find_child(a->child_boxes, GF_ISOM_BOX_TYPE_FTYP); if (brand) { s32 pos; gf_list_del_item(a->child_boxes, brand); pos = gf_list_del_item(mov->TopBoxes, mov->brand); gf_isom_box_del((GF_Box *) mov->brand); mov->brand = brand; if (pos<0) pos=0; gf_list_insert(mov->TopBoxes, brand, pos); } gf_isom_box_del(a); } break; case GF_ISOM_BOX_TYPE_PDIN: /*ONE AND ONLY ONE PDIN*/ if (mov->pdin) { gf_isom_box_del(a); GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Duplicate 'pdin'' detected!\n")); return GF_ISOM_INVALID_FILE; } mov->pdin = (GF_ProgressiveDownloadBox *) a; totSize += a->size; e = gf_list_add(mov->TopBoxes, a); if (e) return e; break; #ifndef GPAC_DISABLE_ISOM_FRAGMENTS case GF_ISOM_BOX_TYPE_STYP: { u32 brand = ((GF_FileTypeBox *)a)->majorBrand; switch (brand) { case GF_ISOM_BRAND_SISX: case GF_ISOM_BRAND_RISX: case GF_ISOM_BRAND_SSSS: mov->is_index_segment = GF_TRUE; break; default: break; } } /*fall-through*/ case GF_ISOM_BOX_TYPE_SIDX: case GF_ISOM_BOX_TYPE_SSIX: if (mov->moov && !mov->first_data_toplevel_offset) { mov->first_data_toplevel_offset = mov->current_top_box_start; mov->first_data_toplevel_size = a->size; } totSize += a->size; if (mov->FragmentsFlags & GF_ISOM_FRAG_READ_DEBUG) { e = gf_list_add(mov->TopBoxes, a); if (e) return e; } else if (mov->signal_frag_bounds && !(mov->FragmentsFlags & GF_ISOM_FRAG_READ_DEBUG) && (mov->openMode!=GF_ISOM_OPEN_KEEP_FRAGMENTS) ) { if (a->type==GF_ISOM_BOX_TYPE_SIDX) { if (mov->root_sidx) gf_isom_box_del( (GF_Box *) mov->root_sidx); mov->root_sidx = (GF_SegmentIndexBox *) a; mov->sidx_start_offset = mov->current_top_box_start; mov->sidx_end_offset = gf_bs_get_position(mov->movieFileMap->bs); } else if (a->type==GF_ISOM_BOX_TYPE_STYP) { mov->styp_start_offset = mov->current_top_box_start; if (mov->seg_styp) gf_isom_box_del(mov->seg_styp); mov->seg_styp = a; } else if (a->type==GF_ISOM_BOX_TYPE_SSIX) { if (mov->seg_ssix) gf_isom_box_del(mov->seg_ssix); mov->seg_ssix = a; } else { gf_isom_box_del(a); } gf_isom_push_mdat_end(mov, mov->current_top_box_start); } else if (!mov->NextMoofNumber && (a->type==GF_ISOM_BOX_TYPE_SIDX)) { if (mov->main_sidx) gf_isom_box_del( (GF_Box *) mov->main_sidx); mov->main_sidx = (GF_SegmentIndexBox *) a; mov->main_sidx_end_pos = mov->current_top_box_start + a->size; } else { gf_isom_box_del(a); } break; case GF_ISOM_BOX_TYPE_MOOF: //no support for inplace rewrite for fragmented files gf_isom_disable_inplace_rewrite(mov); if (!mov->moov) { GF_LOG(mov->moof ? GF_LOG_DEBUG : GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] Movie fragment but no moov (yet) - possibly broken parsing!\n")); } if (mov->single_moof_mode) { mov->single_moof_state++; if (mov->single_moof_state > 1) { gf_isom_box_del(a); return GF_OK; } } ((GF_MovieFragmentBox *)a)->mov = mov; totSize += a->size; mov->moof = (GF_MovieFragmentBox *) a; /*some smooth streaming streams contain a SDTP under the TRAF: this is incorrect, convert it*/ FixTrackID(mov); if (! 
(mov->FragmentsFlags & GF_ISOM_FRAG_READ_DEBUG)) { FixSDTPInTRAF(mov->moof); } else { u32 k; for (k=0; k<gf_list_count(mov->moof->TrackList); k++) { GF_TrackFragmentBox *traf = (GF_TrackFragmentBox *)gf_list_get(mov->moof->TrackList, k); if (traf->sampleGroups) { convert_compact_sample_groups(traf->child_boxes, traf->sampleGroups); } } } /*read & debug: store at root level*/ if (mov->FragmentsFlags & GF_ISOM_FRAG_READ_DEBUG) { u32 k; gf_list_add(mov->TopBoxes, a); /*also update pointers to trex for debug*/ if (mov->moov) { for (k=0; k<gf_list_count(mov->moof->TrackList); k++) { GF_TrackFragmentBox *traf = gf_list_get(mov->moof->TrackList, k); if (traf->tfhd && mov->moov->mvex && mov->moov->mvex->TrackExList) { GF_TrackBox *trak = gf_isom_get_track_from_id(mov->moov, traf->tfhd->trackID); u32 j=0; while ((traf->trex = (GF_TrackExtendsBox*)gf_list_enum(mov->moov->mvex->TrackExList, &j))) { if (traf->trex->trackID == traf->tfhd->trackID) { if (!traf->trex->track) traf->trex->track = trak; break; } traf->trex = NULL; } } //we should only parse senc/psec when no saiz/saio is present, otherwise we fetch the info directly if (traf->trex && traf->tfhd && traf->trex->track && traf->sample_encryption) { GF_TrackBox *trak = GetTrackbyID(mov->moov, traf->tfhd->trackID); if (trak) { trak->current_traf_stsd_idx = traf->tfhd->sample_desc_index ? traf->tfhd->sample_desc_index : traf->trex->def_sample_desc_index; e = senc_Parse(mov->movieFileMap->bs, trak, traf, traf->sample_encryption); if (e) return e; trak->current_traf_stsd_idx = 0; } } } } else { for (k=0; k<gf_list_count(mov->moof->TrackList); k++) { GF_TrackFragmentBox *traf = gf_list_get(mov->moof->TrackList, k); if (traf->sample_encryption) { e = senc_Parse(mov->movieFileMap->bs, NULL, traf, traf->sample_encryption); if (e) return e; } } } } else if (mov->openMode==GF_ISOM_OPEN_KEEP_FRAGMENTS) { mov->NextMoofNumber = mov->moof->mfhd->sequence_number+1; mov->moof = NULL; gf_isom_box_del(a); } else { /*merge all info*/ e = MergeFragment((GF_MovieFragmentBox *)a, mov); gf_isom_box_del(a); if (e) return e; } //done with moov if (mov->root_sidx) { gf_isom_box_del((GF_Box *) mov->root_sidx); mov->root_sidx = NULL; } if (mov->root_ssix) { gf_isom_box_del(mov->seg_ssix); mov->root_ssix = NULL; } if (mov->seg_styp) { gf_isom_box_del(mov->seg_styp); mov->seg_styp = NULL; } mov->sidx_start_offset = 0; mov->sidx_end_offset = 0; mov->styp_start_offset = 0; break; #endif case GF_ISOM_BOX_TYPE_UNKNOWN: { GF_UnknownBox *box = (GF_UnknownBox*)a; if (box->original_4cc == GF_ISOM_BOX_TYPE_JP) { u8 *c = (u8 *) box->data; if ((box->dataSize==4) && (GF_4CC(c[0],c[1],c[2],c[3])==(u32)0x0D0A870A)) mov->is_jp2 = 1; gf_isom_box_del(a); } else { e = gf_list_add(mov->TopBoxes, a); if (e) return e; } } break; case GF_ISOM_BOX_TYPE_PRFT: #ifndef GPAC_DISABLE_ISOM_FRAGMENTS if (!(mov->FragmentsFlags & GF_ISOM_FRAG_READ_DEBUG)) { //keep the last one read if (mov->last_producer_ref_time) gf_isom_box_del(a); else mov->last_producer_ref_time = (GF_ProducerReferenceTimeBox *)a; break; } #endif //fallthrough case GF_ISOM_BOX_TYPE_EMSG: #ifndef GPAC_DISABLE_ISOM_FRAGMENTS if (! (mov->FragmentsFlags & GF_ISOM_FRAG_READ_DEBUG)) { if (!mov->emsgs) mov->emsgs = gf_list_new(); gf_list_add(mov->emsgs, a); break; } #endif case GF_ISOM_BOX_TYPE_MFRA: case GF_ISOM_BOX_TYPE_MFRO: //only keep for dump mode, otherwise we ignore these boxes and we don't want to carry them over in non-fragmented file if (! 
(mov->FragmentsFlags & GF_ISOM_FRAG_READ_DEBUG)) { totSize += a->size; gf_isom_box_del(a); break; } default: totSize += a->size; e = gf_list_add(mov->TopBoxes, a); if (e) return e; break; } #ifndef GPAC_DISABLE_ISOM_FRAGMENTS /*remember where we left, in case we append an entire number of movie fragments*/ mov->current_top_box_start = gf_bs_get_position(mov->movieFileMap->bs) + mov->bytes_removed; #endif } /*we need at least moov or meta*/ if (!mov->moov && !mov->meta #ifndef GPAC_DISABLE_ISOM_FRAGMENTS && !mov->moof && !mov->is_index_segment #endif ) { return GF_ISOM_INCOMPLETE_FILE; } /*we MUST have movie header*/ if (!gf_opts_get_bool("core", "no-check")) { if (mov->moov && !mov->moov->mvhd) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Missing MVHD in MOOV!\n")); return GF_ISOM_INVALID_FILE; } /*we MUST have meta handler*/ if (mov->meta && !mov->meta->handler) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Missing handler in META!\n")); return GF_ISOM_INVALID_FILE; } } #ifndef GPAC_DISABLE_ISOM_WRITE if (mov->moov) { /*set the default interleaving time*/ mov->interleavingTime = mov->moov->mvhd->timeScale; #ifndef GPAC_DISABLE_ISOM_FRAGMENTS /*in edit mode with successfully loaded fragments, delete all fragment signaling since file is no longer fragmented*/ if ((mov->openMode > GF_ISOM_OPEN_READ) && (mov->openMode != GF_ISOM_OPEN_KEEP_FRAGMENTS) && mov->moov->mvex) { gf_isom_box_del_parent(&mov->moov->child_boxes, (GF_Box *)mov->moov->mvex); mov->moov->mvex = NULL; } #endif } //create a default mdat if none was found if (!mov->mdat && (mov->openMode != GF_ISOM_OPEN_READ) && (mov->openMode != GF_ISOM_OPEN_KEEP_FRAGMENTS)) { mov->mdat = (GF_MediaDataBox *) gf_isom_box_new(GF_ISOM_BOX_TYPE_MDAT); if (!mov->mdat) return GF_OUT_OF_MEM; e = gf_list_add(mov->TopBoxes, mov->mdat); if (e) return e; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ return GF_OK; }
33398552270127430256413281511092874838
None
CWE-476
CVE-2022-29340
GPAC 2.1-DEV-rev87-g053aae8-master has a Null Pointer Dereference vulnerability in gf_isom_parse_movie_boxes_internal due to improper return value handling of GF_SKIP_BOX, which causes a Denial of Service. This vulnerability was fixed in commit 37592ad.
https://nvd.nist.gov/vuln/detail/CVE-2022-29340
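The fixed version of gf_isom_parse_movie_boxes_internal above adds a safety check right after gf_isom_parse_root_box() returns: on success it verifies that the out box pointer is non-NULL before the switch dereferences it, which is the GF_SKIP_BOX case the CVE describes. Below is a minimal standalone sketch of that pattern in plain C; parse_top_box() and box_t are hypothetical stand-ins, not GPAC's API.

/* Minimal sketch of the null-check pattern used by the fixed parser above.
 * A parse routine that can "succeed" while skipping a box forces the caller
 * to test the out pointer before dereferencing it. */
#include <stdio.h>
#include <stdlib.h>

typedef struct { unsigned type; size_t size; } box_t;

/* Returns 0 on success; may leave *out NULL when the box was skipped. */
static int parse_top_box(const unsigned char *buf, size_t len, box_t **out)
{
    *out = NULL;
    if (len < 8) return -1;            /* truncated header */
    if (buf[4] == 'f' && buf[5] == 'r' && buf[6] == 'e' && buf[7] == 'e')
        return 0;                      /* skipped box: success, *out stays NULL */
    *out = malloc(sizeof(box_t));
    if (!*out) return -1;
    (*out)->size = ((size_t)buf[0] << 24) | ((size_t)buf[1] << 16) |
                   ((size_t)buf[2] << 8) | buf[3];
    (*out)->type = ((unsigned)buf[4] << 24) | ((unsigned)buf[5] << 16) |
                   ((unsigned)buf[6] << 8) | buf[7];
    return 0;
}

int main(void)
{
    const unsigned char skipped[8] = { 0, 0, 0, 8, 'f', 'r', 'e', 'e' };
    box_t *a = NULL;
    if (parse_top_box(skipped, sizeof(skipped), &a) == 0) {
        if (!a) {                      /* the check the fix adds */
            puts("box skipped - nothing to dereference");
            return 0;
        }
        printf("box type 0x%08x, size %zu\n", a->type, a->size);
        free(a);
    }
    return 0;
}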
195,752
tensorflow
02cc160e29d20631de3859c6653184e3f876b9d7
https://github.com/tensorflow/tensorflow
https://github.com/tensorflow/tensorflow/commit/02cc160e29d20631de3859c6653184e3f876b9d7
Prevent nullptr deref in SparseTensorSliceDataset The arguments must determine a valid sparse tensor. This means that when indices are empty then the values must be empty too (and the reverse). Also added test, by modifying existing test with empty sparse tensor to now run with an invalid sparse tensor input. PiperOrigin-RevId: 388562757 Change-Id: Id8b54cd7c2316025b4f9a77292c8fb5344d17609
1
void MakeDataset(OpKernelContext* ctx, DatasetBase** output) override { // Create a new SparseTensorSliceDatasetOp::Dataset, insert it in // the step container, and return it as the output. const Tensor* indices; OP_REQUIRES_OK(ctx, ctx->input("indices", &indices)); const Tensor* values; OP_REQUIRES_OK(ctx, ctx->input("values", &values)); const Tensor* dense_shape; OP_REQUIRES_OK(ctx, ctx->input("dense_shape", &dense_shape)); OP_REQUIRES(ctx, TensorShapeUtils::IsMatrix(indices->shape()), errors::InvalidArgument( "Input indices should be a matrix but received shape ", indices->shape().DebugString())); OP_REQUIRES(ctx, TensorShapeUtils::IsVector(values->shape()), errors::InvalidArgument( "Input values should be a vector but received shape ", indices->shape().DebugString())); OP_REQUIRES(ctx, TensorShapeUtils::IsVector(dense_shape->shape()), errors::InvalidArgument( "Input shape should be a vector but received shape ", dense_shape->shape().DebugString())); // We currently ensure that `sparse_tensor` is ordered in the // batch dimension. // TODO(mrry): Investigate ways to avoid this unconditional check // if we can be sure that the sparse tensor was produced in an // appropriate order (e.g. by `tf.parse_example()` or a Dataset // that batches elements into rows of a SparseTensor). int64_t previous_batch_index = -1; for (int64_t i = 0; i < indices->dim_size(0); ++i) { int64_t next_batch_index = indices->matrix<int64>()(i, 0); OP_REQUIRES( ctx, next_batch_index >= previous_batch_index, errors::Unimplemented("The SparseTensor must be ordered in the batch " "dimension; handling arbitrarily ordered input " "is not currently supported.")); previous_batch_index = next_batch_index; } gtl::InlinedVector<int64, 8> std_order(dense_shape->NumElements(), 0); sparse::SparseTensor tensor; OP_REQUIRES_OK( ctx, sparse::SparseTensor::Create( *indices, *values, TensorShape(dense_shape->vec<int64>()), std_order, &tensor)); *output = new Dataset<T>(ctx, std::move(tensor)); }
111818826187244494245403789873500831419
sparse_tensor_slice_dataset_op.cc
152047584060469134260687844063366554733
CWE-476
CVE-2021-37647
TensorFlow is an end-to-end open source platform for machine learning. When a user does not supply arguments that determine a valid sparse tensor, `tf.raw_ops.SparseTensorSliceDataset` implementation can be made to dereference a null pointer. The [implementation](https://github.com/tensorflow/tensorflow/blob/8d72537c6abf5a44103b57b9c2e22c14f5f49698/tensorflow/core/kernels/data/sparse_tensor_slice_dataset_op.cc#L240-L251) has some argument validation but fails to consider the case when either `indices` or `values` are provided for an empty sparse tensor when the other is not. If `indices` is empty, then [code that performs validation](https://github.com/tensorflow/tensorflow/blob/8d72537c6abf5a44103b57b9c2e22c14f5f49698/tensorflow/core/kernels/data/sparse_tensor_slice_dataset_op.cc#L260-L261) (i.e., checking that the indices are monotonically increasing) results in a null pointer dereference. If `indices` as provided by the user is empty, then `indices` in the C++ code above is backed by an empty `std::vector`, hence calling `indices->dim_size(0)` results in null pointer dereferencing (same as calling `std::vector::at()` on an empty vector). We have patched the issue in GitHub commit 02cc160e29d20631de3859c6653184e3f876b9d7. The fix will be included in TensorFlow 2.6.0. We will also cherrypick this commit on TensorFlow 2.5.1, TensorFlow 2.4.3, and TensorFlow 2.3.4, as these are also affected and still in supported range.
https://nvd.nist.gov/vuln/detail/CVE-2021-37647
232,405
tensorflow
02cc160e29d20631de3859c6653184e3f876b9d7
https://github.com/tensorflow/tensorflow
https://github.com/tensorflow/tensorflow/commit/02cc160e29d20631de3859c6653184e3f876b9d7
Prevent nullptr deref in SparseTensorSliceDataset The arguments must determine a valid sparse tensor. This means that when indices are empty then the values must be empty too (and the reverse). Also added test, by modifying existing test with empty sparse tensor to now run with an invalid sparse tensor input. PiperOrigin-RevId: 388562757 Change-Id: Id8b54cd7c2316025b4f9a77292c8fb5344d17609
0
void MakeDataset(OpKernelContext* ctx, DatasetBase** output) override { // Create a new SparseTensorSliceDatasetOp::Dataset, insert it in // the step container, and return it as the output. const Tensor* indices; OP_REQUIRES_OK(ctx, ctx->input("indices", &indices)); const Tensor* values; OP_REQUIRES_OK(ctx, ctx->input("values", &values)); const Tensor* dense_shape; OP_REQUIRES_OK(ctx, ctx->input("dense_shape", &dense_shape)); OP_REQUIRES(ctx, TensorShapeUtils::IsMatrix(indices->shape()), errors::InvalidArgument( "Input indices should be a matrix but received shape ", indices->shape().DebugString())); const auto num_indices = indices->NumElements(); const auto num_values = values->NumElements(); if (num_indices == 0 || num_values == 0) { OP_REQUIRES(ctx, num_indices == num_values, errors::InvalidArgument( "If indices or values are empty, the other one must also " "be. Got indices of shape ", indices->shape().DebugString(), " and values of shape ", values->shape().DebugString())); } OP_REQUIRES(ctx, TensorShapeUtils::IsVector(values->shape()), errors::InvalidArgument( "Input values should be a vector but received shape ", indices->shape().DebugString())); OP_REQUIRES(ctx, TensorShapeUtils::IsVector(dense_shape->shape()), errors::InvalidArgument( "Input shape should be a vector but received shape ", dense_shape->shape().DebugString())); // We currently ensure that `sparse_tensor` is ordered in the // batch dimension. // TODO(mrry): Investigate ways to avoid this unconditional check // if we can be sure that the sparse tensor was produced in an // appropriate order (e.g. by `tf.parse_example()` or a Dataset // that batches elements into rows of a SparseTensor). int64_t previous_batch_index = -1; for (int64_t i = 0; i < indices->dim_size(0); ++i) { int64_t next_batch_index = indices->matrix<int64>()(i, 0); OP_REQUIRES( ctx, next_batch_index >= previous_batch_index, errors::Unimplemented("The SparseTensor must be ordered in the batch " "dimension; handling arbitrarily ordered input " "is not currently supported.")); previous_batch_index = next_batch_index; } gtl::InlinedVector<int64, 8> std_order(dense_shape->NumElements(), 0); sparse::SparseTensor tensor; OP_REQUIRES_OK( ctx, sparse::SparseTensor::Create( *indices, *values, TensorShape(dense_shape->vec<int64>()), std_order, &tensor)); *output = new Dataset<T>(ctx, std::move(tensor)); }
126239557450594194918340916106898261954
sparse_tensor_slice_dataset_op.cc
188414793374606504588915566744198162485
CWE-476
CVE-2021-37647
TensorFlow is an end-to-end open source platform for machine learning. When a user does not supply arguments that determine a valid sparse tensor, `tf.raw_ops.SparseTensorSliceDataset` implementation can be made to dereference a null pointer. The [implementation](https://github.com/tensorflow/tensorflow/blob/8d72537c6abf5a44103b57b9c2e22c14f5f49698/tensorflow/core/kernels/data/sparse_tensor_slice_dataset_op.cc#L240-L251) has some argument validation but fails to consider the case when either `indices` or `values` are provided for an empty sparse tensor when the other is not. If `indices` is empty, then [code that performs validation](https://github.com/tensorflow/tensorflow/blob/8d72537c6abf5a44103b57b9c2e22c14f5f49698/tensorflow/core/kernels/data/sparse_tensor_slice_dataset_op.cc#L260-L261) (i.e., checking that the indices are monotonically increasing) results in a null pointer dereference. If `indices` as provided by the user is empty, then `indices` in the C++ code above is backed by an empty `std::vector`, hence calling `indices->dim_size(0)` results in null pointer dereferencing (same as calling `std::vector::at()` on an empty vector). We have patched the issue in GitHub commit 02cc160e29d20631de3859c6653184e3f876b9d7. The fix will be included in TensorFlow 2.6.0. We will also cherrypick this commit on TensorFlow 2.5.1, TensorFlow 2.4.3, and TensorFlow 2.3.4, as these are also affected and still in supported range.
https://nvd.nist.gov/vuln/detail/CVE-2021-37647
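The patched MakeDataset above inserts a consistency check before any iteration over the indices: if either indices or values is empty, the other must be empty too, otherwise the op is rejected with InvalidArgument. A minimal plain-C sketch of the same check, using hypothetical names rather than the TensorFlow kernel API:

/* Standalone sketch of the added validation: an "empty on one side only"
 * sparse tensor is rejected up front instead of being dereferenced later. */
#include <stdio.h>
#include <stddef.h>

static int validate_sparse_inputs(size_t num_indices, size_t num_values)
{
    if (num_indices == 0 || num_values == 0) {
        if (num_indices != num_values) {
            fprintf(stderr,
                    "invalid sparse tensor: %zu indices but %zu values\n",
                    num_indices, num_values);
            return -1;
        }
    }
    return 0;
}

int main(void)
{
    /* empty indices with non-empty values: rejected instead of crashing */
    if (validate_sparse_inputs(0, 3) != 0) puts("rejected");
    if (validate_sparse_inputs(0, 0) == 0) puts("empty sparse tensor accepted");
    return 0;
}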
195,800
deark
62acb7753b0e3c0d3ab3c15057b0a65222313334
https://github.com/jsummers/deark
https://github.com/jsummers/deark/commit/62acb7753b0e3c0d3ab3c15057b0a65222313334
pict,macrsrc: Fixed a bug that could cause division by 0 Found by F. Çelik.
1
void fmtutil_macbitmap_read_pixmap_only_fields(deark *c, dbuf *f, struct fmtutil_macbitmap_info *bi, i64 pos) { i64 pixmap_version; i64 pack_size; i64 plane_bytes; i64 n; de_dbg(c, "additional PixMap header fields, at %d", (int)pos); de_dbg_indent(c, 1); pixmap_version = dbuf_getu16be(f, pos+0); de_dbg(c, "pixmap version: %d", (int)pixmap_version); bi->packing_type = dbuf_getu16be(f, pos+2); de_dbg(c, "packing type: %d", (int)bi->packing_type); pack_size = dbuf_getu32be(f, pos+4); de_dbg(c, "pixel data length: %d", (int)pack_size); bi->hdpi = pict_read_fixed(f, pos+8); bi->vdpi = pict_read_fixed(f, pos+12); de_dbg(c, "dpi: %.2f"DE_CHAR_TIMES"%.2f", bi->hdpi, bi->vdpi); bi->pixeltype = dbuf_getu16be(f, pos+16); bi->pixelsize = dbuf_getu16be(f, pos+18); bi->cmpcount = dbuf_getu16be(f, pos+20); bi->cmpsize = dbuf_getu16be(f, pos+22); de_dbg(c, "pixel type=%d, bits/pixel=%d, components/pixel=%d, bits/comp=%d", (int)bi->pixeltype, (int)bi->pixelsize, (int)bi->cmpcount, (int)bi->cmpsize); bi->pdwidth = (bi->rowbytes*8)/bi->pixelsize; if(bi->pdwidth < bi->npwidth) { bi->pdwidth = bi->npwidth; } plane_bytes = dbuf_getu32be(f, pos+24); de_dbg(c, "plane bytes: %d", (int)plane_bytes); bi->pmTable = (u32)dbuf_getu32be(f, pos+28); de_dbg(c, "pmTable: 0x%08x", (unsigned int)bi->pmTable); n = dbuf_getu32be(f, pos+32); de_dbg(c, "pmReserved: 0x%08x", (unsigned int)n); de_dbg_indent(c, -1); }
203544519943268578056087775697493086183
fmtutil.c
198892381443353894699781903058114971913
CWE-369
CVE-2021-28856
In Deark before v1.5.8, a specially crafted input file can cause a division by zero in src/fmtutil.c because of the value of pixelsize.
https://nvd.nist.gov/vuln/detail/CVE-2021-28856
233,862
deark
62acb7753b0e3c0d3ab3c15057b0a65222313334
https://github.com/jsummers/deark
https://github.com/jsummers/deark/commit/62acb7753b0e3c0d3ab3c15057b0a65222313334
pict,macrsrc: Fixed a bug that could cause division by 0 Found by F. Çelik.
0
void fmtutil_macbitmap_read_pixmap_only_fields(deark *c, dbuf *f, struct fmtutil_macbitmap_info *bi, i64 pos) { i64 pixmap_version; i64 pack_size; i64 plane_bytes; i64 n; de_dbg(c, "additional PixMap header fields, at %d", (int)pos); de_dbg_indent(c, 1); pixmap_version = dbuf_getu16be(f, pos+0); de_dbg(c, "pixmap version: %d", (int)pixmap_version); bi->packing_type = dbuf_getu16be(f, pos+2); de_dbg(c, "packing type: %d", (int)bi->packing_type); pack_size = dbuf_getu32be(f, pos+4); de_dbg(c, "pixel data length: %d", (int)pack_size); bi->hdpi = pict_read_fixed(f, pos+8); bi->vdpi = pict_read_fixed(f, pos+12); de_dbg(c, "dpi: %.2f"DE_CHAR_TIMES"%.2f", bi->hdpi, bi->vdpi); bi->pixeltype = dbuf_getu16be(f, pos+16); bi->pixelsize = dbuf_getu16be(f, pos+18); bi->cmpcount = dbuf_getu16be(f, pos+20); bi->cmpsize = dbuf_getu16be(f, pos+22); de_dbg(c, "pixel type=%d, bits/pixel=%d, components/pixel=%d, bits/comp=%d", (int)bi->pixeltype, (int)bi->pixelsize, (int)bi->cmpcount, (int)bi->cmpsize); if(bi->pixelsize>0) { bi->pdwidth = (bi->rowbytes*8)/bi->pixelsize; } if(bi->pdwidth < bi->npwidth) { bi->pdwidth = bi->npwidth; } plane_bytes = dbuf_getu32be(f, pos+24); de_dbg(c, "plane bytes: %d", (int)plane_bytes); bi->pmTable = (u32)dbuf_getu32be(f, pos+28); de_dbg(c, "pmTable: 0x%08x", (unsigned int)bi->pmTable); n = dbuf_getu32be(f, pos+32); de_dbg(c, "pmReserved: 0x%08x", (unsigned int)n); de_dbg_indent(c, -1); }
165750695801886950946994956858448129756
fmtutil.c
134744298134309373748233562166931104409
CWE-369
CVE-2021-28856
In Deark before v1.5.8, a specially crafted input file can cause a division by zero in src/fmtutil.c because of the value of pixelsize.
https://nvd.nist.gov/vuln/detail/CVE-2021-28856
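The only behavioral change in the fixed fmtutil_macbitmap_read_pixmap_only_fields() above is that the rowbytes*8/pixelsize division is guarded by pixelsize>0. A small self-contained sketch of that guard, with an illustrative struct rather than deark's fmtutil_macbitmap_info:

/* Sketch of the division-by-zero guard: pdwidth is only derived from
 * rowbytes/pixelsize when pixelsize is non-zero. */
#include <stdio.h>
#include <stdint.h>

struct bitmap_info { int64_t rowbytes, pixelsize, npwidth, pdwidth; };

static void derive_pdwidth(struct bitmap_info *bi)
{
    if (bi->pixelsize > 0)                 /* the guard the fix adds */
        bi->pdwidth = (bi->rowbytes * 8) / bi->pixelsize;
    if (bi->pdwidth < bi->npwidth)
        bi->pdwidth = bi->npwidth;         /* fall back to nominal width */
}

int main(void)
{
    struct bitmap_info bi = { .rowbytes = 128, .pixelsize = 0,
                              .npwidth = 100, .pdwidth = 0 };
    derive_pdwidth(&bi);                   /* no division by zero */
    printf("pdwidth = %lld\n", (long long)bi.pdwidth);
    return 0;
}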
195,965
tensorflow
30721cf564cb029d34535446d6a5a6357bebc8e7
https://github.com/tensorflow/tensorflow
https://github.com/tensorflow/tensorflow/commit/30721cf564cb029d34535446d6a5a6357bebc8e7
Fix tf.raw_ops.EditDistance vulnerability with negative indices. Check that indices are non-negative. Fix several identical code sites. Clean up grammar in error message. PiperOrigin-RevId: 445442017
1
void Compute(OpKernelContext* ctx) override { const Tensor* hypothesis_indices; const Tensor* hypothesis_values; const Tensor* hypothesis_shape; const Tensor* truth_indices; const Tensor* truth_values; const Tensor* truth_shape; OP_REQUIRES_OK(ctx, ctx->input("hypothesis_indices", &hypothesis_indices)); OP_REQUIRES_OK(ctx, ctx->input("hypothesis_values", &hypothesis_values)); OP_REQUIRES_OK(ctx, ctx->input("hypothesis_shape", &hypothesis_shape)); OP_REQUIRES_OK(ctx, ctx->input("truth_indices", &truth_indices)); OP_REQUIRES_OK(ctx, ctx->input("truth_values", &truth_values)); OP_REQUIRES_OK(ctx, ctx->input("truth_shape", &truth_shape)); OP_REQUIRES_OK( ctx, ValidateShapes(ctx, *hypothesis_indices, *hypothesis_values, *hypothesis_shape, *truth_indices, *truth_values, *truth_shape)); TensorShape hypothesis_st_shape; OP_REQUIRES_OK(ctx, TensorShapeUtils::MakeShape( hypothesis_shape->vec<int64_t>().data(), hypothesis_shape->NumElements(), &hypothesis_st_shape)); TensorShape truth_st_shape; OP_REQUIRES_OK(ctx, TensorShapeUtils::MakeShape( truth_shape->vec<int64_t>().data(), truth_shape->NumElements(), &truth_st_shape)); // Assume indices are sorted in row-major order. std::vector<int64_t> sorted_order(truth_st_shape.dims()); std::iota(sorted_order.begin(), sorted_order.end(), 0); sparse::SparseTensor hypothesis; OP_REQUIRES_OK(ctx, sparse::SparseTensor::Create( *hypothesis_indices, *hypothesis_values, hypothesis_st_shape, sorted_order, &hypothesis)); sparse::SparseTensor truth; OP_REQUIRES_OK(ctx, sparse::SparseTensor::Create( *truth_indices, *truth_values, truth_st_shape, sorted_order, &truth)); // Group dims 0, 1, ..., RANK - 1. The very last dim is assumed // to store the variable length sequences. std::vector<int64_t> group_dims(truth_st_shape.dims() - 1); std::iota(group_dims.begin(), group_dims.end(), 0); TensorShape output_shape; for (int d = 0; d < static_cast<int>(group_dims.size()); ++d) { output_shape.AddDim(std::max(hypothesis_st_shape.dim_size(d), truth_st_shape.dim_size(d))); } const auto output_elements = output_shape.num_elements(); OP_REQUIRES( ctx, output_elements > 0, errors::InvalidArgument("Got output shape ", output_shape.DebugString(), " which has 0 elements")); Tensor* output = nullptr; OP_REQUIRES_OK(ctx, ctx->allocate_output("output", output_shape, &output)); auto output_t = output->flat<float>(); output_t.setZero(); std::vector<int64_t> output_strides(output_shape.dims()); output_strides[output_shape.dims() - 1] = 1; for (int d = output_shape.dims() - 2; d >= 0; --d) { output_strides[d] = output_strides[d + 1] * output_shape.dim_size(d + 1); } auto hypothesis_grouper = hypothesis.group(group_dims); auto truth_grouper = truth.group(group_dims); auto hypothesis_iter = hypothesis_grouper.begin(); auto truth_iter = truth_grouper.begin(); auto cmp = std::equal_to<T>(); while (hypothesis_iter != hypothesis_grouper.end() && truth_iter != truth_grouper.end()) { sparse::Group truth_i = *truth_iter; sparse::Group hypothesis_j = *hypothesis_iter; std::vector<int64_t> g_truth = truth_i.group(); std::vector<int64_t> g_hypothesis = hypothesis_j.group(); auto truth_seq = truth_i.values<T>(); auto hypothesis_seq = hypothesis_j.values<T>(); if (g_truth == g_hypothesis) { auto loc = std::inner_product(g_truth.begin(), g_truth.end(), output_strides.begin(), int64_t{0}); OP_REQUIRES( ctx, loc < output_elements, errors::Internal("Got an inner product ", loc, " which would require in writing to outside of " "the buffer for the output tensor (max elements ", output_elements, ")")); 
output_t(loc) = gtl::LevenshteinDistance<T>(truth_seq, hypothesis_seq, cmp); if (normalize_) output_t(loc) /= truth_seq.size(); ++hypothesis_iter; ++truth_iter; } else if (g_truth > g_hypothesis) { // zero-length truth auto loc = std::inner_product(g_hypothesis.begin(), g_hypothesis.end(), output_strides.begin(), int64_t{0}); OP_REQUIRES( ctx, loc < output_elements, errors::Internal("Got an inner product ", loc, " which would require in writing to outside of " "the buffer for the output tensor (max elements ", output_elements, ")")); output_t(loc) = hypothesis_seq.size(); if (normalize_ && output_t(loc) != 0.0f) { output_t(loc) = std::numeric_limits<float>::infinity(); } ++hypothesis_iter; } else { // zero-length hypothesis auto loc = std::inner_product(g_truth.begin(), g_truth.end(), output_strides.begin(), int64_t{0}); OP_REQUIRES( ctx, loc < output_elements, errors::Internal("Got an inner product ", loc, " which would require in writing to outside of " "the buffer for the output tensor (max elements ", output_elements, ")")); output_t(loc) = (normalize_) ? 1.0 : truth_seq.size(); ++truth_iter; } } while (hypothesis_iter != hypothesis_grouper.end()) { // zero-length truths sparse::Group hypothesis_j = *hypothesis_iter; std::vector<int64_t> g_hypothesis = hypothesis_j.group(); auto hypothesis_seq = hypothesis_j.values<T>(); auto loc = std::inner_product(g_hypothesis.begin(), g_hypothesis.end(), output_strides.begin(), int64_t{0}); OP_REQUIRES( ctx, loc < output_elements, errors::Internal("Got an inner product ", loc, " which would require in writing to outside of the " "buffer for the output tensor (max elements ", output_elements, ")")); output_t(loc) = hypothesis_seq.size(); if (normalize_ && output_t(loc) != 0.0f) { output_t(loc) = std::numeric_limits<float>::infinity(); } ++hypothesis_iter; } while (truth_iter != truth_grouper.end()) { // missing hypotheses sparse::Group truth_i = *truth_iter; std::vector<int64_t> g_truth = truth_i.group(); auto truth_seq = truth_i.values<T>(); auto loc = std::inner_product(g_truth.begin(), g_truth.end(), output_strides.begin(), int64_t{0}); OP_REQUIRES( ctx, loc < output_elements, errors::Internal("Got an inner product ", loc, " which would require in writing to outside of the " "buffer for the output tensor (max elements ", output_elements, ")")); output_t(loc) = (normalize_) ? 1.0 : truth_seq.size(); ++truth_iter; } }
330908344605810468129440704571471984591
edit_distance_op.cc
218278977682570302037113861275408263141
CWE-787
CVE-2022-29208
TensorFlow is an open source platform for machine learning. Prior to versions 2.9.0, 2.8.1, 2.7.2, and 2.6.4, the implementation of `tf.raw_ops.EditDistance` has incomplete validation. Users can pass negative values to cause a segmentation fault based denial of service. In multiple places throughout the code, one may compute an index for a write operation. However, the existing validation only checks against the upper bound of the array. Hence, it is possible to write before the array by massaging the input to generate negative values for `loc`. Versions 2.9.0, 2.8.1, 2.7.2, and 2.6.4 contain a patch for this issue.
https://nvd.nist.gov/vuln/detail/CVE-2022-29208
235,765
tensorflow
30721cf564cb029d34535446d6a5a6357bebc8e7
https://github.com/tensorflow/tensorflow
https://github.com/tensorflow/tensorflow/commit/30721cf564cb029d34535446d6a5a6357bebc8e7
Fix tf.raw_ops.EditDistance vulnerability with negative indices. Check that indices are non-negative. Fix several identical code sites. Clean up grammar in error message. PiperOrigin-RevId: 445442017
0
void Compute(OpKernelContext* ctx) override { const Tensor* hypothesis_indices; const Tensor* hypothesis_values; const Tensor* hypothesis_shape; const Tensor* truth_indices; const Tensor* truth_values; const Tensor* truth_shape; OP_REQUIRES_OK(ctx, ctx->input("hypothesis_indices", &hypothesis_indices)); OP_REQUIRES_OK(ctx, ctx->input("hypothesis_values", &hypothesis_values)); OP_REQUIRES_OK(ctx, ctx->input("hypothesis_shape", &hypothesis_shape)); OP_REQUIRES_OK(ctx, ctx->input("truth_indices", &truth_indices)); OP_REQUIRES_OK(ctx, ctx->input("truth_values", &truth_values)); OP_REQUIRES_OK(ctx, ctx->input("truth_shape", &truth_shape)); OP_REQUIRES_OK( ctx, ValidateShapes(ctx, *hypothesis_indices, *hypothesis_values, *hypothesis_shape, *truth_indices, *truth_values, *truth_shape)); TensorShape hypothesis_st_shape; OP_REQUIRES_OK(ctx, TensorShapeUtils::MakeShape( hypothesis_shape->vec<int64_t>().data(), hypothesis_shape->NumElements(), &hypothesis_st_shape)); TensorShape truth_st_shape; OP_REQUIRES_OK(ctx, TensorShapeUtils::MakeShape( truth_shape->vec<int64_t>().data(), truth_shape->NumElements(), &truth_st_shape)); // Assume indices are sorted in row-major order. std::vector<int64_t> sorted_order(truth_st_shape.dims()); std::iota(sorted_order.begin(), sorted_order.end(), 0); sparse::SparseTensor hypothesis; OP_REQUIRES_OK(ctx, sparse::SparseTensor::Create( *hypothesis_indices, *hypothesis_values, hypothesis_st_shape, sorted_order, &hypothesis)); sparse::SparseTensor truth; OP_REQUIRES_OK(ctx, sparse::SparseTensor::Create( *truth_indices, *truth_values, truth_st_shape, sorted_order, &truth)); // Group dims 0, 1, ..., RANK - 1. The very last dim is assumed // to store the variable length sequences. std::vector<int64_t> group_dims(truth_st_shape.dims() - 1); std::iota(group_dims.begin(), group_dims.end(), 0); TensorShape output_shape; for (int d = 0; d < static_cast<int>(group_dims.size()); ++d) { output_shape.AddDim(std::max(hypothesis_st_shape.dim_size(d), truth_st_shape.dim_size(d))); } const auto output_elements = output_shape.num_elements(); OP_REQUIRES( ctx, output_elements > 0, errors::InvalidArgument("Got output shape ", output_shape.DebugString(), " which has 0 elements")); Tensor* output = nullptr; OP_REQUIRES_OK(ctx, ctx->allocate_output("output", output_shape, &output)); auto output_t = output->flat<float>(); output_t.setZero(); std::vector<int64_t> output_strides(output_shape.dims()); output_strides[output_shape.dims() - 1] = 1; for (int d = output_shape.dims() - 2; d >= 0; --d) { output_strides[d] = output_strides[d + 1] * output_shape.dim_size(d + 1); } auto hypothesis_grouper = hypothesis.group(group_dims); auto truth_grouper = truth.group(group_dims); auto hypothesis_iter = hypothesis_grouper.begin(); auto truth_iter = truth_grouper.begin(); auto cmp = std::equal_to<T>(); while (hypothesis_iter != hypothesis_grouper.end() && truth_iter != truth_grouper.end()) { sparse::Group truth_i = *truth_iter; sparse::Group hypothesis_j = *hypothesis_iter; std::vector<int64_t> g_truth = truth_i.group(); std::vector<int64_t> g_hypothesis = hypothesis_j.group(); auto truth_seq = truth_i.values<T>(); auto hypothesis_seq = hypothesis_j.values<T>(); if (g_truth == g_hypothesis) { auto loc = std::inner_product(g_truth.begin(), g_truth.end(), output_strides.begin(), int64_t{0}); OP_REQUIRES( ctx, 0 <= loc && loc < output_elements, errors::Internal("Got an inner product ", loc, " which would require writing to outside of " "the buffer for the output tensor (max elements ", output_elements, ")")); 
output_t(loc) = gtl::LevenshteinDistance<T>(truth_seq, hypothesis_seq, cmp); if (normalize_) output_t(loc) /= truth_seq.size(); ++hypothesis_iter; ++truth_iter; } else if (g_truth > g_hypothesis) { // zero-length truth auto loc = std::inner_product(g_hypothesis.begin(), g_hypothesis.end(), output_strides.begin(), int64_t{0}); OP_REQUIRES( ctx, 0 <= loc && loc < output_elements, errors::Internal("Got an inner product ", loc, " which would require writing to outside of " "the buffer for the output tensor (max elements ", output_elements, ")")); output_t(loc) = hypothesis_seq.size(); if (normalize_ && output_t(loc) != 0.0f) { output_t(loc) = std::numeric_limits<float>::infinity(); } ++hypothesis_iter; } else { // zero-length hypothesis auto loc = std::inner_product(g_truth.begin(), g_truth.end(), output_strides.begin(), int64_t{0}); OP_REQUIRES( ctx, 0 <= loc && loc < output_elements, errors::Internal("Got an inner product ", loc, " which would require writing to outside of " "the buffer for the output tensor (max elements ", output_elements, ")")); output_t(loc) = (normalize_) ? 1.0 : truth_seq.size(); ++truth_iter; } } while (hypothesis_iter != hypothesis_grouper.end()) { // zero-length truths sparse::Group hypothesis_j = *hypothesis_iter; std::vector<int64_t> g_hypothesis = hypothesis_j.group(); auto hypothesis_seq = hypothesis_j.values<T>(); auto loc = std::inner_product(g_hypothesis.begin(), g_hypothesis.end(), output_strides.begin(), int64_t{0}); OP_REQUIRES( ctx, 0 <= loc && loc < output_elements, errors::Internal("Got an inner product ", loc, " which would require writing to outside of the " "buffer for the output tensor (max elements ", output_elements, ")")); output_t(loc) = hypothesis_seq.size(); if (normalize_ && output_t(loc) != 0.0f) { output_t(loc) = std::numeric_limits<float>::infinity(); } ++hypothesis_iter; } while (truth_iter != truth_grouper.end()) { // missing hypotheses sparse::Group truth_i = *truth_iter; std::vector<int64_t> g_truth = truth_i.group(); auto truth_seq = truth_i.values<T>(); auto loc = std::inner_product(g_truth.begin(), g_truth.end(), output_strides.begin(), int64_t{0}); OP_REQUIRES( ctx, 0 <= loc && loc < output_elements, errors::Internal("Got an inner product ", loc, " which would require writing to outside of the " "buffer for the output tensor (max elements ", output_elements, ")")); output_t(loc) = (normalize_) ? 1.0 : truth_seq.size(); ++truth_iter; } }
248166369480919859740493813092777127281
edit_distance_op.cc
224900034498319197270351200055296266799
CWE-787
CVE-2022-29208
TensorFlow is an open source platform for machine learning. Prior to versions 2.9.0, 2.8.1, 2.7.2, and 2.6.4, the implementation of `tf.raw_ops.EditDistance` has incomplete validation. Users can pass negative values to cause a segmentation fault based denial of service. In multiple places throughout the code, one may compute an index for a write operation. However, the existing validation only checks against the upper bound of the array. Hence, it is possible to write before the array by massaging the input to generate negative values for `loc`. Versions 2.9.0, 2.8.1, 2.7.2, and 2.6.4 contain a patch for this issue.
https://nvd.nist.gov/vuln/detail/CVE-2022-29208
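The fix shown above tightens every OP_REQUIRES on loc from loc < output_elements to 0 <= loc && loc < output_elements, since an inner product of user-controlled group indices and strides can be negative and would otherwise write before the output buffer. A hedged plain-C sketch of that two-sided check; names and types are illustrative, not the TensorFlow kernel:

/* The index into the flat output is an inner product of indices and
 * strides; checking only the upper bound is not enough. */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

static int64_t inner_product(const int64_t *idx, const int64_t *strides, size_t n)
{
    int64_t loc = 0;
    for (size_t i = 0; i < n; i++) loc += idx[i] * strides[i];
    return loc;
}

static int checked_write(float *out, int64_t n_elems,
                         const int64_t *idx, const int64_t *strides,
                         size_t rank, float v)
{
    int64_t loc = inner_product(idx, strides, rank);
    if (loc < 0 || loc >= n_elems) {       /* reject out-of-range, incl. negative */
        fprintf(stderr, "index %lld out of range [0, %lld)\n",
                (long long)loc, (long long)n_elems);
        return -1;
    }
    out[loc] = v;
    return 0;
}

int main(void)
{
    float out[6] = { 0 };
    int64_t strides[2] = { 3, 1 };
    int64_t bad[2]  = { -1, 0 };           /* would write before the buffer */
    int64_t good[2] = { 1, 2 };
    checked_write(out, 6, bad, strides, 2, 1.0f);   /* rejected */
    checked_write(out, 6, good, strides, 2, 1.0f);  /* writes out[5] */
    printf("out[5] = %g\n", out[5]);
    return 0;
}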
195,984
gpac
3dbe11b37d65c8472faf0654410068e5500b3adb
https://github.com/gpac/gpac
https://github.com/gpac/gpac/commit/3dbe11b37d65c8472faf0654410068e5500b3adb
fixed #2175
1
GF_Err diST_box_read(GF_Box *s, GF_BitStream *bs) { u32 i; char str[1024]; GF_DIMSScriptTypesBox *p = (GF_DIMSScriptTypesBox *)s; i=0; str[0]=0; while (1) { str[i] = gf_bs_read_u8(bs); if (!str[i]) break; i++; } ISOM_DECREASE_SIZE(p, i); p->content_script_types = gf_strdup(str); return GF_OK; }
337508066102203205232219987774332438264
box_code_3gpp.c
236995747067078276861335410375287788449
CWE-703
CVE-2022-1441
MP4Box is a component of GPAC-2.0.0, which is a widely-used third-party package on RPM Fusion. When MP4Box tries to parse an MP4 file, it calls the function `diST_box_read()` to read from the video. In this function, it allocates a buffer `str` with fixed length. However, the content read from `bs` is controllable by the user, and so is its length, which causes a buffer overflow.
https://nvd.nist.gov/vuln/detail/CVE-2022-1441
236,125
gpac
3dbe11b37d65c8472faf0654410068e5500b3adb
https://github.com/gpac/gpac
https://github.com/gpac/gpac/commit/3dbe11b37d65c8472faf0654410068e5500b3adb
fixed #2175
0
GF_Err diST_box_read(GF_Box *s, GF_BitStream *bs) { GF_DIMSScriptTypesBox *p = (GF_DIMSScriptTypesBox *)s; p->content_script_types = gf_malloc(sizeof(char) * (s->size+1)); if (!p->content_script_types) return GF_OUT_OF_MEM; gf_bs_read_data(bs, p->content_script_types, s->size); p->content_script_types[s->size] = 0; return GF_OK; }
296645158113148498272091333023062986276
box_code_3gpp.c
53964259103061248299506412136196602333
CWE-703
CVE-2022-1441
MP4Box is a component of GPAC-2.0.0, which is a widely-used third-party package on RPM Fusion. When MP4Box tries to parse an MP4 file, it calls the function `diST_box_read()` to read from the video. In this function, it allocates a buffer `str` with fixed length. However, the content read from `bs` is controllable by the user, and so is its length, which causes a buffer overflow.
https://nvd.nist.gov/vuln/detail/CVE-2022-1441
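The fixed diST_box_read() above no longer copies an attacker-controlled, NUL-terminated string into a fixed 1024-byte stack array; it allocates s->size+1 bytes and reads exactly s->size bytes. The sketch below shows the same bounded-read pattern in plain C; read_bytes() is a hypothetical stand-in for the bitstream reader, not GPAC's real gf_bs_* API.

/* Allocate according to the declared box size and read a bounded amount,
 * instead of scanning for a terminator into a fixed buffer. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void read_bytes(const unsigned char **src, char *dst, size_t n)
{
    memcpy(dst, *src, n);   /* copy exactly n bytes, then advance the cursor */
    *src += n;
}

static char *read_script_types(const unsigned char **bs, size_t box_size)
{
    char *s = malloc(box_size + 1);
    if (!s) return NULL;
    read_bytes(bs, s, box_size);   /* bounded by the declared box size */
    s[box_size] = 0;               /* always NUL-terminated */
    return s;
}

int main(void)
{
    const unsigned char payload[] = "application/ecmascript";
    const unsigned char *bs = payload;
    char *types = read_script_types(&bs, sizeof(payload) - 1);
    if (types) { puts(types); free(types); }
    return 0;
}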
196,276
lsquic
a74702c630e108125e71898398737baec8f02238
https://github.com/litespeedtech/lsquic
https://github.com/litespeedtech/lsquic/commit/a74702c630e108125e71898398737baec8f02238
Release 3.1.0
1
lsquic_qeh_settings (struct qpack_enc_hdl *qeh, unsigned max_table_size, unsigned dyn_table_size, unsigned max_risked_streams, int server) { enum lsqpack_enc_opts enc_opts; assert(qeh->qeh_flags & QEH_INITIALIZED); if (qeh->qeh_flags & QEH_HAVE_SETTINGS) { LSQ_WARN("settings already set"); return -1; } enc_opts = LSQPACK_ENC_OPT_STAGE_2 | (server ? LSQPACK_ENC_OPT_SERVER : 0); qeh->qeh_tsu_sz = sizeof(qeh->qeh_tsu_buf); if (0 != lsqpack_enc_init(&qeh->qeh_encoder, (void *) qeh->qeh_conn, max_table_size, dyn_table_size, max_risked_streams, enc_opts, qeh->qeh_tsu_buf, &qeh->qeh_tsu_sz)) { LSQ_INFO("could not initialize QPACK encoder"); return -1; } LSQ_DEBUG("%zu-byte post-init TSU", qeh->qeh_tsu_sz); qeh->qeh_flags |= QEH_HAVE_SETTINGS; qeh->qeh_max_prefix_size = lsqpack_enc_header_block_prefix_size(&qeh->qeh_encoder); LSQ_DEBUG("have settings: max table size=%u; dyn table size=%u; max risked " "streams=%u", max_table_size, dyn_table_size, max_risked_streams); if (qeh->qeh_enc_sm_out) qeh_begin_out(qeh); return 0; }
304358665951404548699605657299704903588
None
CWE-269
CVE-2022-30592
liblsquic/lsquic_qenc_hdl.c in LiteSpeed QUIC (aka LSQUIC) before 3.1.0 mishandles MAX_TABLE_CAPACITY.
https://nvd.nist.gov/vuln/detail/CVE-2022-30592
237,885
lsquic
a74702c630e108125e71898398737baec8f02238
https://github.com/litespeedtech/lsquic
https://github.com/litespeedtech/lsquic/commit/a74702c630e108125e71898398737baec8f02238
Release 3.1.0
0
lsquic_qeh_settings (struct qpack_enc_hdl *qeh, unsigned max_table_size, unsigned dyn_table_size, unsigned max_risked_streams, int server) { enum lsqpack_enc_opts enc_opts; assert(qeh->qeh_flags & QEH_INITIALIZED); if (qeh->qeh_flags & QEH_HAVE_SETTINGS) { LSQ_WARN("settings already set"); return -1; } enc_opts = LSQPACK_ENC_OPT_STAGE_2 | (server ? LSQPACK_ENC_OPT_SERVER : 0); qeh->qeh_tsu_sz = sizeof(qeh->qeh_tsu_buf); if (QENC_MIN_DYN_TABLE_SIZE > dyn_table_size) dyn_table_size = 0; if (0 != lsqpack_enc_init(&qeh->qeh_encoder, (void *) qeh->qeh_conn, max_table_size, dyn_table_size, max_risked_streams, enc_opts, qeh->qeh_tsu_buf, &qeh->qeh_tsu_sz)) { LSQ_INFO("could not initialize QPACK encoder"); return -1; } LSQ_DEBUG("%zu-byte post-init TSU", qeh->qeh_tsu_sz); qeh->qeh_flags |= QEH_HAVE_SETTINGS; qeh->qeh_max_prefix_size = lsqpack_enc_header_block_prefix_size(&qeh->qeh_encoder); LSQ_DEBUG("have settings: max table size=%u; dyn table size=%u; max risked " "streams=%u", max_table_size, dyn_table_size, max_risked_streams); if (qeh->qeh_enc_sm_out) qeh_begin_out(qeh); return 0; }
213495580933926260913554982368628219722
None
CWE-269
CVE-2022-30592
liblsquic/lsquic_qenc_hdl.c in LiteSpeed QUIC (aka LSQUIC) before 3.1.0 mishandles MAX_TABLE_CAPACITY.
https://nvd.nist.gov/vuln/detail/CVE-2022-30592
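The 3.1.0 fix above adds one line before lsqpack_enc_init(): a dynamic table size below QENC_MIN_DYN_TABLE_SIZE is clamped to 0, so the QPACK encoder runs without a dynamic table instead of being initialized with an unusable capacity. A minimal sketch of that clamp follows; the constant's value here is an assumption for illustration, not lsquic's actual definition.

/* Clamp a too-small requested dynamic table capacity to zero. */
#include <stdio.h>

#define QENC_MIN_DYN_TABLE_SIZE 32u   /* illustrative value, not lsquic's */

static unsigned clamp_dyn_table_size(unsigned requested)
{
    if (requested < QENC_MIN_DYN_TABLE_SIZE)
        return 0;                      /* too small to be usable: disable */
    return requested;
}

int main(void)
{
    printf("%u -> %u\n", 5u, clamp_dyn_table_size(5u));       /* clamped to 0 */
    printf("%u -> %u\n", 4096u, clamp_dyn_table_size(4096u)); /* unchanged */
    return 0;
}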
196,316
barebox
0a9f9a7410681e55362f8311537ebc7be9ad0fbe
https://github.com/saschahauer/barebox
https://github.com/saschahauer/barebox/commit/0a9f9a7410681e55362f8311537ebc7be9ad0fbe
crypto: digest: use crypto_memneq() When verifying a digest it is important not to leak timing information through memcmp(). Use crypto_memneq() instead. Signed-off-by: Sascha Hauer <s.hauer@pengutronix.de>
1
int digest_generic_verify(struct digest *d, const unsigned char *md) { int ret; int len = digest_length(d); unsigned char *tmp; tmp = xmalloc(len); ret = digest_final(d, tmp); if (ret) goto end; ret = memcmp(md, tmp, len); ret = ret ? -EINVAL : 0; end: free(tmp); return ret; }
71480685616976545176363965575731858659
digest.c
309636649404648894565051311749383985179
CWE-200
CVE-2021-37847
crypto/digest.c in Pengutronix barebox through 2021.07.0 leaks timing information because memcmp is used during digest verification.
https://nvd.nist.gov/vuln/detail/CVE-2021-37847
238,322
barebox
0a9f9a7410681e55362f8311537ebc7be9ad0fbe
https://github.com/saschahauer/barebox
https://github.com/saschahauer/barebox/commit/0a9f9a7410681e55362f8311537ebc7be9ad0fbe
crypto: digest: use crypto_memneq() When verifying a digest it is important not to leak timing information through memcmp(). Use crypto_memneq() instead. Signed-off-by: Sascha Hauer <s.hauer@pengutronix.de>
0
int digest_generic_verify(struct digest *d, const unsigned char *md) { int ret; int len = digest_length(d); unsigned char *tmp; tmp = xmalloc(len); ret = digest_final(d, tmp); if (ret) goto end; if (crypto_memneq(md, tmp, len)) ret = -EINVAL; else ret = 0; end: free(tmp); return ret; }
320239189684468072245266956547158344510
digest.c
195235961590850585727206932057971391683
CWE-200
CVE-2021-37847
crypto/digest.c in Pengutronix barebox through 2021.07.0 leaks timing information because memcmp is used during digest verification.
https://nvd.nist.gov/vuln/detail/CVE-2021-37847
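The barebox fix replaces memcmp() with crypto_memneq() so that digest verification does not return early at the first mismatching byte and therefore does not leak timing information. The sketch below is a generic constant-time comparison illustrating the idea; it is not barebox's crypto_memneq() implementation.

/* Constant-time inequality check: always touches every byte, never
 * early-exits on the first difference. */
#include <stdio.h>
#include <stddef.h>

/* Returns non-zero if the buffers differ over len bytes. */
static int ct_memneq(const unsigned char *a, const unsigned char *b, size_t len)
{
    unsigned char diff = 0;
    for (size_t i = 0; i < len; i++)
        diff |= a[i] ^ b[i];           /* accumulate differences */
    return diff != 0;
}

int main(void)
{
    const unsigned char expected[4] = { 0xde, 0xad, 0xbe, 0xef };
    const unsigned char forged[4]   = { 0xde, 0xad, 0x00, 0x00 };
    printf("differ: %d\n", ct_memneq(expected, forged, 4));   /* 1 */
    printf("differ: %d\n", ct_memneq(expected, expected, 4)); /* 0 */
    return 0;
}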
196,328
vim
409510c588b1eec1ae33511ae97a21eb8e110895
https://github.com/vim/vim
https://github.com/vim/vim/commit/409510c588b1eec1ae33511ae97a21eb8e110895
patch 8.2.5050: using freed memory when searching for pattern in path Problem: Using freed memory when searching for pattern in path. Solution: Make a copy of the line.
1
find_pattern_in_path( char_u *ptr, // pointer to search pattern int dir UNUSED, // direction of expansion int len, // length of search pattern int whole, // match whole words only int skip_comments, // don't match inside comments int type, // Type of search; are we looking for a type? // a macro? long count, int action, // What to do when we find it linenr_T start_lnum, // first line to start searching linenr_T end_lnum) // last line for searching { SearchedFile *files; // Stack of included files SearchedFile *bigger; // When we need more space int max_path_depth = 50; long match_count = 1; char_u *pat; char_u *new_fname; char_u *curr_fname = curbuf->b_fname; char_u *prev_fname = NULL; linenr_T lnum; int depth; int depth_displayed; // For type==CHECK_PATH int old_files; int already_searched; char_u *file_line; char_u *line; char_u *p; char_u save_char; int define_matched; regmatch_T regmatch; regmatch_T incl_regmatch; regmatch_T def_regmatch; int matched = FALSE; int did_show = FALSE; int found = FALSE; int i; char_u *already = NULL; char_u *startp = NULL; char_u *inc_opt = NULL; #if defined(FEAT_QUICKFIX) win_T *curwin_save = NULL; #endif regmatch.regprog = NULL; incl_regmatch.regprog = NULL; def_regmatch.regprog = NULL; file_line = alloc(LSIZE); if (file_line == NULL) return; if (type != CHECK_PATH && type != FIND_DEFINE // when CONT_SOL is set compare "ptr" with the beginning of the // line is faster than quote_meta/regcomp/regexec "ptr" -- Acevedo && !compl_status_sol()) { pat = alloc(len + 5); if (pat == NULL) goto fpip_end; sprintf((char *)pat, whole ? "\\<%.*s\\>" : "%.*s", len, ptr); // ignore case according to p_ic, p_scs and pat regmatch.rm_ic = ignorecase(pat); regmatch.regprog = vim_regcomp(pat, magic_isset() ? RE_MAGIC : 0); vim_free(pat); if (regmatch.regprog == NULL) goto fpip_end; } inc_opt = (*curbuf->b_p_inc == NUL) ? p_inc : curbuf->b_p_inc; if (*inc_opt != NUL) { incl_regmatch.regprog = vim_regcomp(inc_opt, magic_isset() ? RE_MAGIC : 0); if (incl_regmatch.regprog == NULL) goto fpip_end; incl_regmatch.rm_ic = FALSE; // don't ignore case in incl. pat. } if (type == FIND_DEFINE && (*curbuf->b_p_def != NUL || *p_def != NUL)) { def_regmatch.regprog = vim_regcomp(*curbuf->b_p_def == NUL ? p_def : curbuf->b_p_def, magic_isset() ? RE_MAGIC : 0); if (def_regmatch.regprog == NULL) goto fpip_end; def_regmatch.rm_ic = FALSE; // don't ignore case in define pat. } files = lalloc_clear(max_path_depth * sizeof(SearchedFile), TRUE); if (files == NULL) goto fpip_end; old_files = max_path_depth; depth = depth_displayed = -1; lnum = start_lnum; if (end_lnum > curbuf->b_ml.ml_line_count) end_lnum = curbuf->b_ml.ml_line_count; if (lnum > end_lnum) // do at least one line lnum = end_lnum; line = ml_get(lnum); for (;;) { if (incl_regmatch.regprog != NULL && vim_regexec(&incl_regmatch, line, (colnr_T)0)) { char_u *p_fname = (curr_fname == curbuf->b_fname) ? curbuf->b_ffname : curr_fname; if (inc_opt != NULL && strstr((char *)inc_opt, "\\zs") != NULL) // Use text from '\zs' to '\ze' (or end) of 'include'. new_fname = find_file_name_in_path(incl_regmatch.startp[0], (int)(incl_regmatch.endp[0] - incl_regmatch.startp[0]), FNAME_EXP|FNAME_INCL|FNAME_REL, 1L, p_fname); else // Use text after match with 'include'. 
new_fname = file_name_in_line(incl_regmatch.endp[0], 0, FNAME_EXP|FNAME_INCL|FNAME_REL, 1L, p_fname, NULL); already_searched = FALSE; if (new_fname != NULL) { // Check whether we have already searched in this file for (i = 0;; i++) { if (i == depth + 1) i = old_files; if (i == max_path_depth) break; if (fullpathcmp(new_fname, files[i].name, TRUE, TRUE) & FPC_SAME) { if (type != CHECK_PATH && action == ACTION_SHOW_ALL && files[i].matched) { msg_putchar('\n'); // cursor below last one if (!got_int) // don't display if 'q' // typed at "--more--" // message { msg_home_replace_hl(new_fname); msg_puts(_(" (includes previously listed match)")); prev_fname = NULL; } } VIM_CLEAR(new_fname); already_searched = TRUE; break; } } } if (type == CHECK_PATH && (action == ACTION_SHOW_ALL || (new_fname == NULL && !already_searched))) { if (did_show) msg_putchar('\n'); // cursor below last one else { gotocmdline(TRUE); // cursor at status line msg_puts_title(_("--- Included files ")); if (action != ACTION_SHOW_ALL) msg_puts_title(_("not found ")); msg_puts_title(_("in path ---\n")); } did_show = TRUE; while (depth_displayed < depth && !got_int) { ++depth_displayed; for (i = 0; i < depth_displayed; i++) msg_puts(" "); msg_home_replace(files[depth_displayed].name); msg_puts(" -->\n"); } if (!got_int) // don't display if 'q' typed // for "--more--" message { for (i = 0; i <= depth_displayed; i++) msg_puts(" "); if (new_fname != NULL) { // using "new_fname" is more reliable, e.g., when // 'includeexpr' is set. msg_outtrans_attr(new_fname, HL_ATTR(HLF_D)); } else { /* * Isolate the file name. * Include the surrounding "" or <> if present. */ if (inc_opt != NULL && strstr((char *)inc_opt, "\\zs") != NULL) { // pattern contains \zs, use the match p = incl_regmatch.startp[0]; i = (int)(incl_regmatch.endp[0] - incl_regmatch.startp[0]); } else { // find the file name after the end of the match for (p = incl_regmatch.endp[0]; *p && !vim_isfilec(*p); p++) ; for (i = 0; vim_isfilec(p[i]); i++) ; } if (i == 0) { // Nothing found, use the rest of the line. p = incl_regmatch.endp[0]; i = (int)STRLEN(p); } // Avoid checking before the start of the line, can // happen if \zs appears in the regexp. else if (p > line) { if (p[-1] == '"' || p[-1] == '<') { --p; ++i; } if (p[i] == '"' || p[i] == '>') ++i; } save_char = p[i]; p[i] = NUL; msg_outtrans_attr(p, HL_ATTR(HLF_D)); p[i] = save_char; } if (new_fname == NULL && action == ACTION_SHOW_ALL) { if (already_searched) msg_puts(_(" (Already listed)")); else msg_puts(_(" NOT FOUND")); } } out_flush(); // output each line directly } if (new_fname != NULL) { // Push the new file onto the file stack if (depth + 1 == old_files) { bigger = ALLOC_MULT(SearchedFile, max_path_depth * 2); if (bigger != NULL) { for (i = 0; i <= depth; i++) bigger[i] = files[i]; for (i = depth + 1; i < old_files + max_path_depth; i++) { bigger[i].fp = NULL; bigger[i].name = NULL; bigger[i].lnum = 0; bigger[i].matched = FALSE; } for (i = old_files; i < max_path_depth; i++) bigger[i + max_path_depth] = files[i]; old_files += max_path_depth; max_path_depth *= 2; vim_free(files); files = bigger; } } if ((files[depth + 1].fp = mch_fopen((char *)new_fname, "r")) == NULL) vim_free(new_fname); else { if (++depth == old_files) { /* * lalloc() for 'bigger' must have failed above. We * will forget one of our already visited files now. 
*/ vim_free(files[old_files].name); ++old_files; } files[depth].name = curr_fname = new_fname; files[depth].lnum = 0; files[depth].matched = FALSE; if (action == ACTION_EXPAND) { msg_hist_off = TRUE; // reset in msg_trunc_attr() vim_snprintf((char*)IObuff, IOSIZE, _("Scanning included file: %s"), (char *)new_fname); msg_trunc_attr((char *)IObuff, TRUE, HL_ATTR(HLF_R)); } else if (p_verbose >= 5) { verbose_enter(); smsg(_("Searching included file %s"), (char *)new_fname); verbose_leave(); } } } } else { /* * Check if the line is a define (type == FIND_DEFINE) */ p = line; search_line: define_matched = FALSE; if (def_regmatch.regprog != NULL && vim_regexec(&def_regmatch, line, (colnr_T)0)) { /* * Pattern must be first identifier after 'define', so skip * to that position before checking for match of pattern. Also * don't let it match beyond the end of this identifier. */ p = def_regmatch.endp[0]; while (*p && !vim_iswordc(*p)) p++; define_matched = TRUE; } /* * Look for a match. Don't do this if we are looking for a * define and this line didn't match define_prog above. */ if (def_regmatch.regprog == NULL || define_matched) { if (define_matched || compl_status_sol()) { // compare the first "len" chars from "ptr" startp = skipwhite(p); if (p_ic) matched = !MB_STRNICMP(startp, ptr, len); else matched = !STRNCMP(startp, ptr, len); if (matched && define_matched && whole && vim_iswordc(startp[len])) matched = FALSE; } else if (regmatch.regprog != NULL && vim_regexec(&regmatch, line, (colnr_T)(p - line))) { matched = TRUE; startp = regmatch.startp[0]; /* * Check if the line is not a comment line (unless we are * looking for a define). A line starting with "# define" * is not considered to be a comment line. */ if (!define_matched && skip_comments) { if ((*line != '#' || STRNCMP(skipwhite(line + 1), "define", 6) != 0) && get_leader_len(line, NULL, FALSE, TRUE)) matched = FALSE; /* * Also check for a "/ *" or "/ /" before the match. * Skips lines like "int backwards; / * normal index * * /" when looking for "normal". * Note: Doesn't skip "/ *" in comments. */ p = skipwhite(line); if (matched || (p[0] == '/' && p[1] == '*') || p[0] == '*') for (p = line; *p && p < startp; ++p) { if (matched && p[0] == '/' && (p[1] == '*' || p[1] == '/')) { matched = FALSE; // After "//" all text is comment if (p[1] == '/') break; ++p; } else if (!matched && p[0] == '*' && p[1] == '/') { // Can find match after "* /". matched = TRUE; ++p; } } } } } } if (matched) { if (action == ACTION_EXPAND) { int cont_s_ipos = FALSE; int add_r; char_u *aux; if (depth == -1 && lnum == curwin->w_cursor.lnum) break; found = TRUE; aux = p = startp; if (compl_status_adding()) { p += ins_compl_len(); if (vim_iswordp(p)) goto exit_matched; p = find_word_start(p); } p = find_word_end(p); i = (int)(p - aux); if (compl_status_adding() && i == ins_compl_len()) { // IOSIZE > compl_length, so the STRNCPY works STRNCPY(IObuff, aux, i); // Get the next line: when "depth" < 0 from the current // buffer, otherwise from the included file. Jump to // exit_matched when past the last line. 
if (depth < 0) { if (lnum >= end_lnum) goto exit_matched; line = ml_get(++lnum); } else if (vim_fgets(line = file_line, LSIZE, files[depth].fp)) goto exit_matched; // we read a line, set "already" to check this "line" later // if depth >= 0 we'll increase files[depth].lnum far // below -- Acevedo already = aux = p = skipwhite(line); p = find_word_start(p); p = find_word_end(p); if (p > aux) { if (*aux != ')' && IObuff[i-1] != TAB) { if (IObuff[i-1] != ' ') IObuff[i++] = ' '; // IObuf =~ "\(\k\|\i\).* ", thus i >= 2 if (p_js && (IObuff[i-2] == '.' || (vim_strchr(p_cpo, CPO_JOINSP) == NULL && (IObuff[i-2] == '?' || IObuff[i-2] == '!')))) IObuff[i++] = ' '; } // copy as much as possible of the new word if (p - aux >= IOSIZE - i) p = aux + IOSIZE - i - 1; STRNCPY(IObuff + i, aux, p - aux); i += (int)(p - aux); cont_s_ipos = TRUE; } IObuff[i] = NUL; aux = IObuff; if (i == ins_compl_len()) goto exit_matched; } add_r = ins_compl_add_infercase(aux, i, p_ic, curr_fname == curbuf->b_fname ? NULL : curr_fname, dir, cont_s_ipos); if (add_r == OK) // if dir was BACKWARD then honor it just once dir = FORWARD; else if (add_r == FAIL) break; } else if (action == ACTION_SHOW_ALL) { found = TRUE; if (!did_show) gotocmdline(TRUE); // cursor at status line if (curr_fname != prev_fname) { if (did_show) msg_putchar('\n'); // cursor below last one if (!got_int) // don't display if 'q' typed // at "--more--" message msg_home_replace_hl(curr_fname); prev_fname = curr_fname; } did_show = TRUE; if (!got_int) show_pat_in_path(line, type, TRUE, action, (depth == -1) ? NULL : files[depth].fp, (depth == -1) ? &lnum : &files[depth].lnum, match_count++); // Set matched flag for this file and all the ones that // include it for (i = 0; i <= depth; ++i) files[i].matched = TRUE; } else if (--count <= 0) { found = TRUE; if (depth == -1 && lnum == curwin->w_cursor.lnum #if defined(FEAT_QUICKFIX) && g_do_tagpreview == 0 #endif ) emsg(_(e_match_is_on_current_line)); else if (action == ACTION_SHOW) { show_pat_in_path(line, type, did_show, action, (depth == -1) ? NULL : files[depth].fp, (depth == -1) ? 
&lnum : &files[depth].lnum, 1L); did_show = TRUE; } else { #ifdef FEAT_GUI need_mouse_correct = TRUE; #endif #if defined(FEAT_QUICKFIX) // ":psearch" uses the preview window if (g_do_tagpreview != 0) { curwin_save = curwin; prepare_tagpreview(TRUE, TRUE, FALSE); } #endif if (action == ACTION_SPLIT) { if (win_split(0, 0) == FAIL) break; RESET_BINDING(curwin); } if (depth == -1) { // match in current file #if defined(FEAT_QUICKFIX) if (g_do_tagpreview != 0) { if (!win_valid(curwin_save)) break; if (!GETFILE_SUCCESS(getfile( curwin_save->w_buffer->b_fnum, NULL, NULL, TRUE, lnum, FALSE))) break; // failed to jump to file } else #endif setpcmark(); curwin->w_cursor.lnum = lnum; check_cursor(); } else { if (!GETFILE_SUCCESS(getfile( 0, files[depth].name, NULL, TRUE, files[depth].lnum, FALSE))) break; // failed to jump to file // autocommands may have changed the lnum, we don't // want that here curwin->w_cursor.lnum = files[depth].lnum; } } if (action != ACTION_SHOW) { curwin->w_cursor.col = (colnr_T)(startp - line); curwin->w_set_curswant = TRUE; } #if defined(FEAT_QUICKFIX) if (g_do_tagpreview != 0 && curwin != curwin_save && win_valid(curwin_save)) { // Return cursor to where we were validate_cursor(); redraw_later(VALID); win_enter(curwin_save, TRUE); } # ifdef FEAT_PROP_POPUP else if (WIN_IS_POPUP(curwin)) // can't keep focus in popup window win_enter(firstwin, TRUE); # endif #endif break; } exit_matched: matched = FALSE; // look for other matches in the rest of the line if we // are not at the end of it already if (def_regmatch.regprog == NULL && action == ACTION_EXPAND && !compl_status_sol() && *startp != NUL && *(p = startp + mb_ptr2len(startp)) != NUL) goto search_line; } line_breakcheck(); if (action == ACTION_EXPAND) ins_compl_check_keys(30, FALSE); if (got_int || ins_compl_interrupted()) break; /* * Read the next line. When reading an included file and encountering * end-of-file, close the file and continue in the file that included * it. */ while (depth >= 0 && !already && vim_fgets(line = file_line, LSIZE, files[depth].fp)) { fclose(files[depth].fp); --old_files; files[old_files].name = files[depth].name; files[old_files].matched = files[depth].matched; --depth; curr_fname = (depth == -1) ? curbuf->b_fname : files[depth].name; if (depth < depth_displayed) depth_displayed = depth; } if (depth >= 0) // we could read the line { files[depth].lnum++; // Remove any CR and LF from the line. i = (int)STRLEN(line); if (i > 0 && line[i - 1] == '\n') line[--i] = NUL; if (i > 0 && line[i - 1] == '\r') line[--i] = NUL; } else if (!already) { if (++lnum > end_lnum) break; line = ml_get(lnum); } already = NULL; } // End of big for (;;) loop. // Close any files that are still open. for (i = 0; i <= depth; i++) { fclose(files[i].fp); vim_free(files[i].name); } for (i = old_files; i < max_path_depth; i++) vim_free(files[i].name); vim_free(files); if (type == CHECK_PATH) { if (!did_show) { if (action != ACTION_SHOW_ALL) msg(_("All included files were found")); else msg(_("No included files")); } } else if (!found && action != ACTION_EXPAND) { if (got_int || ins_compl_interrupted()) emsg(_(e_interrupted)); else if (type == FIND_DEFINE) emsg(_(e_couldnt_find_definition)); else emsg(_(e_couldnt_find_pattern)); } if (action == ACTION_SHOW || action == ACTION_SHOW_ALL) msg_end(); fpip_end: vim_free(file_line); vim_regfree(regmatch.regprog); vim_regfree(incl_regmatch.regprog); vim_regfree(def_regmatch.regprog); }
123923862521809134964983633516065480238
search.c
229512534202460810065416633781657256150
CWE-416
CVE-2022-1968
Use After Free in GitHub repository vim/vim prior to 8.2.
https://nvd.nist.gov/vuln/detail/CVE-2022-1968
238,803
vim
409510c588b1eec1ae33511ae97a21eb8e110895
https://github.com/vim/vim
https://github.com/vim/vim/commit/409510c588b1eec1ae33511ae97a21eb8e110895
patch 8.2.5050: using freed memory when searching for pattern in path Problem: Using freed memory when searching for pattern in path. Solution: Make a copy of the line.
0
find_pattern_in_path( char_u *ptr, // pointer to search pattern int dir UNUSED, // direction of expansion int len, // length of search pattern int whole, // match whole words only int skip_comments, // don't match inside comments int type, // Type of search; are we looking for a type? // a macro? long count, int action, // What to do when we find it linenr_T start_lnum, // first line to start searching linenr_T end_lnum) // last line for searching { SearchedFile *files; // Stack of included files SearchedFile *bigger; // When we need more space int max_path_depth = 50; long match_count = 1; char_u *pat; char_u *new_fname; char_u *curr_fname = curbuf->b_fname; char_u *prev_fname = NULL; linenr_T lnum; int depth; int depth_displayed; // For type==CHECK_PATH int old_files; int already_searched; char_u *file_line; char_u *line; char_u *p; char_u save_char; int define_matched; regmatch_T regmatch; regmatch_T incl_regmatch; regmatch_T def_regmatch; int matched = FALSE; int did_show = FALSE; int found = FALSE; int i; char_u *already = NULL; char_u *startp = NULL; char_u *inc_opt = NULL; #if defined(FEAT_QUICKFIX) win_T *curwin_save = NULL; #endif regmatch.regprog = NULL; incl_regmatch.regprog = NULL; def_regmatch.regprog = NULL; file_line = alloc(LSIZE); if (file_line == NULL) return; if (type != CHECK_PATH && type != FIND_DEFINE // when CONT_SOL is set compare "ptr" with the beginning of the // line is faster than quote_meta/regcomp/regexec "ptr" -- Acevedo && !compl_status_sol()) { pat = alloc(len + 5); if (pat == NULL) goto fpip_end; sprintf((char *)pat, whole ? "\\<%.*s\\>" : "%.*s", len, ptr); // ignore case according to p_ic, p_scs and pat regmatch.rm_ic = ignorecase(pat); regmatch.regprog = vim_regcomp(pat, magic_isset() ? RE_MAGIC : 0); vim_free(pat); if (regmatch.regprog == NULL) goto fpip_end; } inc_opt = (*curbuf->b_p_inc == NUL) ? p_inc : curbuf->b_p_inc; if (*inc_opt != NUL) { incl_regmatch.regprog = vim_regcomp(inc_opt, magic_isset() ? RE_MAGIC : 0); if (incl_regmatch.regprog == NULL) goto fpip_end; incl_regmatch.rm_ic = FALSE; // don't ignore case in incl. pat. } if (type == FIND_DEFINE && (*curbuf->b_p_def != NUL || *p_def != NUL)) { def_regmatch.regprog = vim_regcomp(*curbuf->b_p_def == NUL ? p_def : curbuf->b_p_def, magic_isset() ? RE_MAGIC : 0); if (def_regmatch.regprog == NULL) goto fpip_end; def_regmatch.rm_ic = FALSE; // don't ignore case in define pat. } files = lalloc_clear(max_path_depth * sizeof(SearchedFile), TRUE); if (files == NULL) goto fpip_end; old_files = max_path_depth; depth = depth_displayed = -1; lnum = start_lnum; if (end_lnum > curbuf->b_ml.ml_line_count) end_lnum = curbuf->b_ml.ml_line_count; if (lnum > end_lnum) // do at least one line lnum = end_lnum; line = get_line_and_copy(lnum, file_line); for (;;) { if (incl_regmatch.regprog != NULL && vim_regexec(&incl_regmatch, line, (colnr_T)0)) { char_u *p_fname = (curr_fname == curbuf->b_fname) ? curbuf->b_ffname : curr_fname; if (inc_opt != NULL && strstr((char *)inc_opt, "\\zs") != NULL) // Use text from '\zs' to '\ze' (or end) of 'include'. new_fname = find_file_name_in_path(incl_regmatch.startp[0], (int)(incl_regmatch.endp[0] - incl_regmatch.startp[0]), FNAME_EXP|FNAME_INCL|FNAME_REL, 1L, p_fname); else // Use text after match with 'include'. 
new_fname = file_name_in_line(incl_regmatch.endp[0], 0, FNAME_EXP|FNAME_INCL|FNAME_REL, 1L, p_fname, NULL); already_searched = FALSE; if (new_fname != NULL) { // Check whether we have already searched in this file for (i = 0;; i++) { if (i == depth + 1) i = old_files; if (i == max_path_depth) break; if (fullpathcmp(new_fname, files[i].name, TRUE, TRUE) & FPC_SAME) { if (type != CHECK_PATH && action == ACTION_SHOW_ALL && files[i].matched) { msg_putchar('\n'); // cursor below last one if (!got_int) // don't display if 'q' // typed at "--more--" // message { msg_home_replace_hl(new_fname); msg_puts(_(" (includes previously listed match)")); prev_fname = NULL; } } VIM_CLEAR(new_fname); already_searched = TRUE; break; } } } if (type == CHECK_PATH && (action == ACTION_SHOW_ALL || (new_fname == NULL && !already_searched))) { if (did_show) msg_putchar('\n'); // cursor below last one else { gotocmdline(TRUE); // cursor at status line msg_puts_title(_("--- Included files ")); if (action != ACTION_SHOW_ALL) msg_puts_title(_("not found ")); msg_puts_title(_("in path ---\n")); } did_show = TRUE; while (depth_displayed < depth && !got_int) { ++depth_displayed; for (i = 0; i < depth_displayed; i++) msg_puts(" "); msg_home_replace(files[depth_displayed].name); msg_puts(" -->\n"); } if (!got_int) // don't display if 'q' typed // for "--more--" message { for (i = 0; i <= depth_displayed; i++) msg_puts(" "); if (new_fname != NULL) { // using "new_fname" is more reliable, e.g., when // 'includeexpr' is set. msg_outtrans_attr(new_fname, HL_ATTR(HLF_D)); } else { /* * Isolate the file name. * Include the surrounding "" or <> if present. */ if (inc_opt != NULL && strstr((char *)inc_opt, "\\zs") != NULL) { // pattern contains \zs, use the match p = incl_regmatch.startp[0]; i = (int)(incl_regmatch.endp[0] - incl_regmatch.startp[0]); } else { // find the file name after the end of the match for (p = incl_regmatch.endp[0]; *p && !vim_isfilec(*p); p++) ; for (i = 0; vim_isfilec(p[i]); i++) ; } if (i == 0) { // Nothing found, use the rest of the line. p = incl_regmatch.endp[0]; i = (int)STRLEN(p); } // Avoid checking before the start of the line, can // happen if \zs appears in the regexp. else if (p > line) { if (p[-1] == '"' || p[-1] == '<') { --p; ++i; } if (p[i] == '"' || p[i] == '>') ++i; } save_char = p[i]; p[i] = NUL; msg_outtrans_attr(p, HL_ATTR(HLF_D)); p[i] = save_char; } if (new_fname == NULL && action == ACTION_SHOW_ALL) { if (already_searched) msg_puts(_(" (Already listed)")); else msg_puts(_(" NOT FOUND")); } } out_flush(); // output each line directly } if (new_fname != NULL) { // Push the new file onto the file stack if (depth + 1 == old_files) { bigger = ALLOC_MULT(SearchedFile, max_path_depth * 2); if (bigger != NULL) { for (i = 0; i <= depth; i++) bigger[i] = files[i]; for (i = depth + 1; i < old_files + max_path_depth; i++) { bigger[i].fp = NULL; bigger[i].name = NULL; bigger[i].lnum = 0; bigger[i].matched = FALSE; } for (i = old_files; i < max_path_depth; i++) bigger[i + max_path_depth] = files[i]; old_files += max_path_depth; max_path_depth *= 2; vim_free(files); files = bigger; } } if ((files[depth + 1].fp = mch_fopen((char *)new_fname, "r")) == NULL) vim_free(new_fname); else { if (++depth == old_files) { /* * lalloc() for 'bigger' must have failed above. We * will forget one of our already visited files now. 
*/ vim_free(files[old_files].name); ++old_files; } files[depth].name = curr_fname = new_fname; files[depth].lnum = 0; files[depth].matched = FALSE; if (action == ACTION_EXPAND) { msg_hist_off = TRUE; // reset in msg_trunc_attr() vim_snprintf((char*)IObuff, IOSIZE, _("Scanning included file: %s"), (char *)new_fname); msg_trunc_attr((char *)IObuff, TRUE, HL_ATTR(HLF_R)); } else if (p_verbose >= 5) { verbose_enter(); smsg(_("Searching included file %s"), (char *)new_fname); verbose_leave(); } } } } else { /* * Check if the line is a define (type == FIND_DEFINE) */ p = line; search_line: define_matched = FALSE; if (def_regmatch.regprog != NULL && vim_regexec(&def_regmatch, line, (colnr_T)0)) { /* * Pattern must be first identifier after 'define', so skip * to that position before checking for match of pattern. Also * don't let it match beyond the end of this identifier. */ p = def_regmatch.endp[0]; while (*p && !vim_iswordc(*p)) p++; define_matched = TRUE; } /* * Look for a match. Don't do this if we are looking for a * define and this line didn't match define_prog above. */ if (def_regmatch.regprog == NULL || define_matched) { if (define_matched || compl_status_sol()) { // compare the first "len" chars from "ptr" startp = skipwhite(p); if (p_ic) matched = !MB_STRNICMP(startp, ptr, len); else matched = !STRNCMP(startp, ptr, len); if (matched && define_matched && whole && vim_iswordc(startp[len])) matched = FALSE; } else if (regmatch.regprog != NULL && vim_regexec(&regmatch, line, (colnr_T)(p - line))) { matched = TRUE; startp = regmatch.startp[0]; /* * Check if the line is not a comment line (unless we are * looking for a define). A line starting with "# define" * is not considered to be a comment line. */ if (!define_matched && skip_comments) { if ((*line != '#' || STRNCMP(skipwhite(line + 1), "define", 6) != 0) && get_leader_len(line, NULL, FALSE, TRUE)) matched = FALSE; /* * Also check for a "/ *" or "/ /" before the match. * Skips lines like "int backwards; / * normal index * * /" when looking for "normal". * Note: Doesn't skip "/ *" in comments. */ p = skipwhite(line); if (matched || (p[0] == '/' && p[1] == '*') || p[0] == '*') for (p = line; *p && p < startp; ++p) { if (matched && p[0] == '/' && (p[1] == '*' || p[1] == '/')) { matched = FALSE; // After "//" all text is comment if (p[1] == '/') break; ++p; } else if (!matched && p[0] == '*' && p[1] == '/') { // Can find match after "* /". matched = TRUE; ++p; } } } } } } if (matched) { if (action == ACTION_EXPAND) { int cont_s_ipos = FALSE; int add_r; char_u *aux; if (depth == -1 && lnum == curwin->w_cursor.lnum) break; found = TRUE; aux = p = startp; if (compl_status_adding()) { p += ins_compl_len(); if (vim_iswordp(p)) goto exit_matched; p = find_word_start(p); } p = find_word_end(p); i = (int)(p - aux); if (compl_status_adding() && i == ins_compl_len()) { // IOSIZE > compl_length, so the STRNCPY works STRNCPY(IObuff, aux, i); // Get the next line: when "depth" < 0 from the current // buffer, otherwise from the included file. Jump to // exit_matched when past the last line. 
if (depth < 0) { if (lnum >= end_lnum) goto exit_matched; line = get_line_and_copy(++lnum, file_line); } else if (vim_fgets(line = file_line, LSIZE, files[depth].fp)) goto exit_matched; // we read a line, set "already" to check this "line" later // if depth >= 0 we'll increase files[depth].lnum far // below -- Acevedo already = aux = p = skipwhite(line); p = find_word_start(p); p = find_word_end(p); if (p > aux) { if (*aux != ')' && IObuff[i-1] != TAB) { if (IObuff[i-1] != ' ') IObuff[i++] = ' '; // IObuf =~ "\(\k\|\i\).* ", thus i >= 2 if (p_js && (IObuff[i-2] == '.' || (vim_strchr(p_cpo, CPO_JOINSP) == NULL && (IObuff[i-2] == '?' || IObuff[i-2] == '!')))) IObuff[i++] = ' '; } // copy as much as possible of the new word if (p - aux >= IOSIZE - i) p = aux + IOSIZE - i - 1; STRNCPY(IObuff + i, aux, p - aux); i += (int)(p - aux); cont_s_ipos = TRUE; } IObuff[i] = NUL; aux = IObuff; if (i == ins_compl_len()) goto exit_matched; } add_r = ins_compl_add_infercase(aux, i, p_ic, curr_fname == curbuf->b_fname ? NULL : curr_fname, dir, cont_s_ipos); if (add_r == OK) // if dir was BACKWARD then honor it just once dir = FORWARD; else if (add_r == FAIL) break; } else if (action == ACTION_SHOW_ALL) { found = TRUE; if (!did_show) gotocmdline(TRUE); // cursor at status line if (curr_fname != prev_fname) { if (did_show) msg_putchar('\n'); // cursor below last one if (!got_int) // don't display if 'q' typed // at "--more--" message msg_home_replace_hl(curr_fname); prev_fname = curr_fname; } did_show = TRUE; if (!got_int) show_pat_in_path(line, type, TRUE, action, (depth == -1) ? NULL : files[depth].fp, (depth == -1) ? &lnum : &files[depth].lnum, match_count++); // Set matched flag for this file and all the ones that // include it for (i = 0; i <= depth; ++i) files[i].matched = TRUE; } else if (--count <= 0) { found = TRUE; if (depth == -1 && lnum == curwin->w_cursor.lnum #if defined(FEAT_QUICKFIX) && g_do_tagpreview == 0 #endif ) emsg(_(e_match_is_on_current_line)); else if (action == ACTION_SHOW) { show_pat_in_path(line, type, did_show, action, (depth == -1) ? NULL : files[depth].fp, (depth == -1) ? 
&lnum : &files[depth].lnum, 1L); did_show = TRUE; } else { #ifdef FEAT_GUI need_mouse_correct = TRUE; #endif #if defined(FEAT_QUICKFIX) // ":psearch" uses the preview window if (g_do_tagpreview != 0) { curwin_save = curwin; prepare_tagpreview(TRUE, TRUE, FALSE); } #endif if (action == ACTION_SPLIT) { if (win_split(0, 0) == FAIL) break; RESET_BINDING(curwin); } if (depth == -1) { // match in current file #if defined(FEAT_QUICKFIX) if (g_do_tagpreview != 0) { if (!win_valid(curwin_save)) break; if (!GETFILE_SUCCESS(getfile( curwin_save->w_buffer->b_fnum, NULL, NULL, TRUE, lnum, FALSE))) break; // failed to jump to file } else #endif setpcmark(); curwin->w_cursor.lnum = lnum; check_cursor(); } else { if (!GETFILE_SUCCESS(getfile( 0, files[depth].name, NULL, TRUE, files[depth].lnum, FALSE))) break; // failed to jump to file // autocommands may have changed the lnum, we don't // want that here curwin->w_cursor.lnum = files[depth].lnum; } } if (action != ACTION_SHOW) { curwin->w_cursor.col = (colnr_T)(startp - line); curwin->w_set_curswant = TRUE; } #if defined(FEAT_QUICKFIX) if (g_do_tagpreview != 0 && curwin != curwin_save && win_valid(curwin_save)) { // Return cursor to where we were validate_cursor(); redraw_later(VALID); win_enter(curwin_save, TRUE); } # ifdef FEAT_PROP_POPUP else if (WIN_IS_POPUP(curwin)) // can't keep focus in popup window win_enter(firstwin, TRUE); # endif #endif break; } exit_matched: matched = FALSE; // look for other matches in the rest of the line if we // are not at the end of it already if (def_regmatch.regprog == NULL && action == ACTION_EXPAND && !compl_status_sol() && *startp != NUL && *(p = startp + mb_ptr2len(startp)) != NUL) goto search_line; } line_breakcheck(); if (action == ACTION_EXPAND) ins_compl_check_keys(30, FALSE); if (got_int || ins_compl_interrupted()) break; /* * Read the next line. When reading an included file and encountering * end-of-file, close the file and continue in the file that included * it. */ while (depth >= 0 && !already && vim_fgets(line = file_line, LSIZE, files[depth].fp)) { fclose(files[depth].fp); --old_files; files[old_files].name = files[depth].name; files[old_files].matched = files[depth].matched; --depth; curr_fname = (depth == -1) ? curbuf->b_fname : files[depth].name; if (depth < depth_displayed) depth_displayed = depth; } if (depth >= 0) // we could read the line { files[depth].lnum++; // Remove any CR and LF from the line. i = (int)STRLEN(line); if (i > 0 && line[i - 1] == '\n') line[--i] = NUL; if (i > 0 && line[i - 1] == '\r') line[--i] = NUL; } else if (!already) { if (++lnum > end_lnum) break; line = get_line_and_copy(lnum, file_line); } already = NULL; } // End of big for (;;) loop. // Close any files that are still open. for (i = 0; i <= depth; i++) { fclose(files[i].fp); vim_free(files[i].name); } for (i = old_files; i < max_path_depth; i++) vim_free(files[i].name); vim_free(files); if (type == CHECK_PATH) { if (!did_show) { if (action != ACTION_SHOW_ALL) msg(_("All included files were found")); else msg(_("No included files")); } } else if (!found && action != ACTION_EXPAND) { if (got_int || ins_compl_interrupted()) emsg(_(e_interrupted)); else if (type == FIND_DEFINE) emsg(_(e_couldnt_find_definition)); else emsg(_(e_couldnt_find_pattern)); } if (action == ACTION_SHOW || action == ACTION_SHOW_ALL) msg_end(); fpip_end: vim_free(file_line); vim_regfree(regmatch.regprog); vim_regfree(incl_regmatch.regprog); vim_regfree(def_regmatch.regprog); }
332560855806865581562523893033870446906
search.c
30662263767422374466151794140989574879
CWE-416
CVE-2022-1968
Use After Free in GitHub repository vim/vim prior to 8.2.
https://nvd.nist.gov/vuln/detail/CVE-2022-1968
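The patch above replaces direct ml_get() calls with get_line_and_copy(), so the scanned line lives in a buffer owned by find_pattern_in_path() rather than in memory that can be freed while the search runs. A minimal, hypothetical C++ sketch of that copy-before-reuse pattern follows; the helper names are illustrative and are not Vim's API.

#include <cstddef>
#include <cstdio>

static char internal_cache[256];

// Stand-in for ml_get(): returns a pointer into an internal cache that later
// calls may overwrite or free, so the pointer must not be held across them.
static const char *get_buffer_line(int lnum) {
    std::snprintf(internal_cache, sizeof(internal_cache), "line %d of the buffer", lnum);
    return internal_cache;
}

// Safe variant, analogous in spirit to get_line_and_copy() in the patch:
// the caller supplies its own buffer, so the text survives whatever runs next.
static char *copy_buffer_line(int lnum, char *buf, std::size_t bufsize) {
    std::snprintf(buf, bufsize, "%s", get_buffer_line(lnum));
    return buf;
}

int main() {
    char file_line[256];
    char *line = copy_buffer_line(1, file_line, sizeof(file_line));
    get_buffer_line(2);            // would invalidate a raw get_buffer_line() pointer
    std::printf("%s\n", line);     // still prints "line 1 of the buffer"
    return 0;
}

Copying costs one extra memmove per scanned line but closes the use-after-free window entirely, which is the trade the patch makes.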
196,578
vim
44db8213d38c39877d2148eff6a72f4beccfb94e
https://github.com/vim/vim
https://github.com/vim/vim/commit/44db8213d38c39877d2148eff6a72f4beccfb94e
patch 8.2.4219: reading before the start of the line Problem: Reading before the start of the line. Solution: Check boundary before trying to read the character.
1
yank_copy_line(struct block_def *bd, long y_idx, int exclude_trailing_space) { char_u *pnew; if (exclude_trailing_space) bd->endspaces = 0; if ((pnew = alloc(bd->startspaces + bd->endspaces + bd->textlen + 1)) == NULL) return FAIL; y_current->y_array[y_idx] = pnew; vim_memset(pnew, ' ', (size_t)bd->startspaces); pnew += bd->startspaces; mch_memmove(pnew, bd->textstart, (size_t)bd->textlen); pnew += bd->textlen; vim_memset(pnew, ' ', (size_t)bd->endspaces); pnew += bd->endspaces; if (exclude_trailing_space) { int s = bd->textlen + bd->endspaces; while (VIM_ISWHITE(*(bd->textstart + s - 1)) && s > 0) { s = s - (*mb_head_off)(bd->textstart, bd->textstart + s - 1) - 1; pnew--; } } *pnew = NUL; return OK; }
329046653104270093704788517563971777510
register.c
197045442768005159362544298682780980387
CWE-787
CVE-2022-0407
Heap-based Buffer Overflow in GitHub repository vim/vim prior to 8.2.
https://nvd.nist.gov/vuln/detail/CVE-2022-0407
240,276
vim
44db8213d38c39877d2148eff6a72f4beccfb94e
https://github.com/vim/vim
https://github.com/vim/vim/commit/44db8213d38c39877d2148eff6a72f4beccfb94e
patch 8.2.4219: reading before the start of the line Problem: Reading before the start of the line. Solution: Check boundary before trying to read the character.
0
yank_copy_line(struct block_def *bd, long y_idx, int exclude_trailing_space) { char_u *pnew; if (exclude_trailing_space) bd->endspaces = 0; if ((pnew = alloc(bd->startspaces + bd->endspaces + bd->textlen + 1)) == NULL) return FAIL; y_current->y_array[y_idx] = pnew; vim_memset(pnew, ' ', (size_t)bd->startspaces); pnew += bd->startspaces; mch_memmove(pnew, bd->textstart, (size_t)bd->textlen); pnew += bd->textlen; vim_memset(pnew, ' ', (size_t)bd->endspaces); pnew += bd->endspaces; if (exclude_trailing_space) { int s = bd->textlen + bd->endspaces; while (s > 0 && VIM_ISWHITE(*(bd->textstart + s - 1))) { s = s - (*mb_head_off)(bd->textstart, bd->textstart + s - 1) - 1; pnew--; } } *pnew = NUL; return OK; }
215111577434116975174543951046273639484
register.c
303069020104340496076924952267660178460
CWE-787
CVE-2022-0407
Heap-based Buffer Overflow in GitHub repository vim/vim prior to 8.2.
https://nvd.nist.gov/vuln/detail/CVE-2022-0407
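The patched yank_copy_line() swaps the operands of the loop condition so the bound "s > 0" is tested before bd->textstart[s - 1] is read, relying on short-circuit evaluation to avoid reading one byte before the start of the buffer. A self-contained sketch of the same ordering, with hypothetical names:

#include <cctype>
#include <cstddef>
#include <cstdio>
#include <cstring>

// Trim trailing whitespace from buf[0..len): test "len > 0" *before*
// dereferencing buf[len - 1], so we never read before the buffer start.
static std::size_t trim_trailing_ws(const char *buf, std::size_t len) {
    while (len > 0 && std::isspace(static_cast<unsigned char>(buf[len - 1])))
        --len;
    return len;
}

int main() {
    const char *s = "block text   ";
    std::printf("kept %zu of %zu chars\n", trim_trailing_ws(s, std::strlen(s)), std::strlen(s));
    std::printf("empty input keeps %zu chars\n", trim_trailing_ws("", 0));  // safe: no read at buf[-1]
    return 0;
}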
196,629
tensorflow
579261dcd446385831fe4f7457d802a59685121d
https://github.com/tensorflow/tensorflow
https://github.com/tensorflow/tensorflow/commit/579261dcd446385831fe4f7457d802a59685121d
Fix crash in MatrixSolve when inputs have different batch dimensions. Before, the process would crash or certain elements would be silently ignored. Now an InvalidArgument is raised. PiperOrigin-RevId: 384844020 Change-Id: Iba44417e383bdd0e1abc4012bfca83b2377dd335
1
void ComputeAsync(OpKernelContext* context, DoneCallback done) final { const Tensor& input = context->input(0); const Tensor& rhs = context->input(1); const int ndims = input.dims(); const int64 n = input.dim_size(ndims - 1); const int64 nrhs = rhs.dim_size(ndims - 1); // Validate inputs. OP_REQUIRES_ASYNC( context, ndims >= 2, errors::InvalidArgument("Input must have rank >= 2, got ", ndims), done); OP_REQUIRES_ASYNC(context, rhs.dims() == ndims, errors::InvalidArgument( "Input and right-hand side must have same rank, got ", ndims, " != ", rhs.dims()), done); OP_REQUIRES_ASYNC( context, input.dim_size(ndims - 2) == n, errors::InvalidArgument("Input matrices must be squares, got", input.dim_size(ndims - 2), " != ", n), done); OP_REQUIRES_ASYNC(context, rhs.dim_size(ndims - 2) == n, errors::InvalidArgument( "Input matrix and right-hand side must have the " "same number of rows, got", n, " != ", rhs.dim_size(ndims - 2)), done); // Allocate output. Tensor* output; OP_REQUIRES_OK_ASYNC( context, context->forward_input_or_allocate_output({1}, 0, rhs.shape(), &output), done); // To be consistent with the MatrixInverse op, we define the solution for // an empty set of equations as the empty matrix. if (input.NumElements() == 0 || rhs.NumElements() == 0) { done(); return; } // TODO(rmlarsen): Convert to std::make_unique when available. std::unique_ptr<CudaSolver> solver(new CudaSolver(context)); // Make a copy of the input for the factorization step, or, if adjoint_ is // false, try to reuse the input buffer if this op owns it exclusively. Tensor input_copy; const GPUDevice& device = context->eigen_device<GPUDevice>(); if (adjoint_) { // For the adjoint case, it is simpler to always make a transposed copy up // front. OP_REQUIRES_OK_ASYNC( context, solver->allocate_scoped_tensor(DataTypeToEnum<Scalar>::value, input.shape(), &input_copy), done); OP_REQUIRES_OK_ASYNC(context, DoMatrixTranspose(device, input, &input_copy), done); } else { OP_REQUIRES_OK_ASYNC( context, solver->forward_input_or_allocate_scoped_tensor( {0}, DataTypeToEnum<Scalar>::value, input.shape(), &input_copy), done); if (!input.SharesBufferWith(input_copy)) { device.memcpy(input_copy.flat<Scalar>().data(), input.flat<Scalar>().data(), input.NumElements() * sizeof(Scalar)); } } auto input_copy_reshaped = input_copy.template flat_inner_dims<Scalar, 3>(); const int64 batch_size = input_copy_reshaped.dimension(0); // Allocate pivots on the device. Tensor pivots; OP_REQUIRES_OK_ASYNC( context, solver->allocate_scoped_tensor(DataTypeToEnum<int>::value, TensorShape{batch_size, n}, &pivots), done); auto pivots_mat = pivots.template matrix<int>(); // 1. Compute the partially pivoted LU factorization(s) of the // matrix/matrices. std::vector<DeviceLapackInfo> dev_info; auto input_copy_ptrs = solver->GetScratchSpace<uint8>( sizeof(Scalar*) * batch_size, "input_copt_ptrs", /* on_host */ true); const int kMaxMatrixSizeToBatchSizeRatio = 128; const bool use_batched_solver = n <= kMaxMatrixSizeToBatchSizeRatio * batch_size; if (use_batched_solver) { // For small matrices or large batch sizes, we use the batched interface // from cuBlas. 
const Scalar** input_copy_ptrs_base = reinterpret_cast<const Scalar**>(input_copy_ptrs.mutable_data()); for (int batch = 0; batch < batch_size; ++batch) { input_copy_ptrs_base[batch] = &input_copy_reshaped(batch, 0, 0); } dev_info.push_back( solver->GetDeviceLapackInfo(batch_size, "getrfBatched")); OP_REQUIRES_OK_ASYNC( context, solver->GetrfBatched(n, input_copy_ptrs_base, n, pivots_mat.data(), &dev_info.back(), batch_size), done); } else { // For small batch sizes or large matrices, we use the non-batched // interface from cuSolver, which is much faster for large matrices. dev_info.push_back(solver->GetDeviceLapackInfo(batch_size, "getrf")); for (int batch = 0; batch < batch_size; ++batch) { OP_REQUIRES_OK_ASYNC( context, solver->Getrf(n, n, &input_copy_reshaped(batch, 0, 0), n, &pivots_mat(batch, 0), &dev_info.back()(batch)), done); } } // 2. Make a transposed copy of the right-hand sides. This is necessary // because cuBLAS assumes column-major storage while TensorFlow TF uses // row-major. TensorShape transposed_rhs_shape(rhs.shape()); transposed_rhs_shape.RemoveLastDims(2); transposed_rhs_shape.AddDim(nrhs); transposed_rhs_shape.AddDim(n); Tensor transposed_rhs; OP_REQUIRES_OK_ASYNC( context, solver->allocate_scoped_tensor(DataTypeToEnum<Scalar>::value, transposed_rhs_shape, &transposed_rhs), done); if (nrhs > 1) { OP_REQUIRES_OK_ASYNC( context, DoMatrixTranspose(device, rhs, &transposed_rhs), done); } else { device.memcpy(transposed_rhs.flat<Scalar>().data(), rhs.flat<Scalar>().data(), rhs.NumElements() * sizeof(Scalar)); } // 3. Solve op(A) X = B (in column major form). // We use a trick here: If adjoint_ is true, we converted A to column major // form above. If adjoint is false then I leave A in row-major form and use // trans_a = CUBLAS_OP_T to effectively transform it to column-major on the // fly. (This means that we actually use the LU-factorization of A^T in that // case, but that is equally good for solving AX=B). This way we save an // explicit transpose in the more common case of adjoint_ == false. auto input_copy_ptr_array = solver->GetScratchSpace<uint8>( sizeof(Scalar*) * batch_size, "input_copy_ptr_array", /* on_host */ true); auto transposed_rhs_ptr_array = solver->GetScratchSpace<uint8>( sizeof(Scalar*) * batch_size, "transposed_rhs_ptr_array", /* on_host */ true); auto transposed_rhs_reshaped = transposed_rhs.template flat_inner_dims<Scalar, 3>(); if (use_batched_solver) { const Scalar** input_copy_ptrs_base = reinterpret_cast<const Scalar**>(input_copy_ptr_array.mutable_data()); const Scalar** transposed_rhs_ptrs_base = reinterpret_cast<const Scalar**>( transposed_rhs_ptr_array.mutable_data()); for (int batch = 0; batch < batch_size; ++batch) { input_copy_ptrs_base[batch] = &input_copy_reshaped(batch, 0, 0); transposed_rhs_ptrs_base[batch] = &transposed_rhs_reshaped(batch, 0, 0); } int host_info = 0; OP_REQUIRES_OK_ASYNC( context, solver->GetrsBatched(adjoint_ ? CUBLAS_OP_C : CUBLAS_OP_T, n, nrhs, input_copy_ptrs_base, n, pivots_mat.data(), transposed_rhs_ptrs_base, n, &host_info, batch_size), done); OP_REQUIRES_ASYNC( context, host_info == 0, errors::InvalidArgument("The ", -host_info, "'th argument to cublas*getrsBatched had " "an illegal value."), done); } else { dev_info.push_back(solver->GetDeviceLapackInfo(batch_size, "getrs")); for (int batch = 0; batch < batch_size; ++batch) { OP_REQUIRES_OK_ASYNC( context, solver->Getrs(adjoint_ ? 
CUBLAS_OP_C : CUBLAS_OP_T, n, nrhs, &input_copy_reshaped(batch, 0, 0), n, &pivots_mat(batch, 0), &transposed_rhs_reshaped(batch, 0, 0), n, &dev_info.back()(batch)), done); } } // 4. Transpose X to get the final result in row-major form. if (nrhs > 1) { OP_REQUIRES_OK_ASYNC( context, DoMatrixTranspose(device, transposed_rhs, output), done); } else { device.memcpy(output->flat<Scalar>().data(), transposed_rhs.flat<Scalar>().data(), transposed_rhs.NumElements() * sizeof(Scalar)); } // Callback for checking info after kernels finish. Also capture the // temporary Tensors/ScratchSpace so they don't get deallocated before the // kernels run. TODO(rmlarsen): Use move capture once C++14 becomes // available. auto info_checker = [context, done, dev_info]( const Status& status, const std::vector<HostLapackInfo>& host_infos) { if (!status.ok() && errors::IsInvalidArgument(status) && !host_infos.empty()) { for (int i = 0; i < host_infos[0].size(); ++i) { // Match the CPU error message for singular matrices. Otherwise // just print the original error message from the status below. OP_REQUIRES_ASYNC(context, host_infos[0].data()[i] <= 0, errors::InvalidArgument(kErrMsg), done); } } OP_REQUIRES_OK_ASYNC(context, status, done); done(); }; CudaSolver::CheckLapackInfoAndDeleteSolverAsync(std::move(solver), dev_info, std::move(info_checker)); }
232512673394609281083836207268567643755
matrix_solve_op.cc
18056043033202767652193305242094140715
CWE-354
CVE-2021-41206
TensorFlow is an open source platform for machine learning. In affected versions several TensorFlow operations are missing validation for the shapes of the tensor arguments involved in the call. Depending on the API, this can result in undefined behavior and segfault or `CHECK`-fail related crashes but in some scenarios writes and reads from heap populated arrays are also possible. We have discovered these issues internally via tooling while working on improving/testing GPU op determinism. As such, we don't have reproducers and there will be multiple fixes for these issues. These fixes will be included in TensorFlow 2.7.0. We will also cherrypick these commits on TensorFlow 2.6.1, TensorFlow 2.5.2, and TensorFlow 2.4.4, as these are also affected and still in supported range.
https://nvd.nist.gov/vuln/detail/CVE-2021-41206
241,369
tensorflow
579261dcd446385831fe4f7457d802a59685121d
https://github.com/tensorflow/tensorflow
https://github.com/tensorflow/tensorflow/commit/579261dcd446385831fe4f7457d802a59685121d
Fix crash in MatrixSolve when inputs have different batch dimensions. Before, the process would crash or certain elements would be silently ignored. Now an InvalidArgument is raised. PiperOrigin-RevId: 384844020 Change-Id: Iba44417e383bdd0e1abc4012bfca83b2377dd335
0
void ComputeAsync(OpKernelContext* context, DoneCallback done) final { const Tensor& input = context->input(0); const Tensor& rhs = context->input(1); const int ndims = input.dims(); const int64 n = input.dim_size(ndims - 1); const int64 nrhs = rhs.dim_size(ndims - 1); // Validate inputs. OP_REQUIRES_ASYNC( context, ndims >= 2, errors::InvalidArgument("Input must have rank >= 2, got ", ndims), done); OP_REQUIRES_ASYNC(context, rhs.dims() == ndims, errors::InvalidArgument( "Input and right-hand side must have same rank, got ", ndims, " != ", rhs.dims()), done); OP_REQUIRES_ASYNC( context, input.dim_size(ndims - 2) == n, errors::InvalidArgument("Input matrices must be squares, got ", input.dim_size(ndims - 2), " != ", n), done); OP_REQUIRES_ASYNC(context, rhs.dim_size(ndims - 2) == n, errors::InvalidArgument( "Input matrix and right-hand side must have the " "same number of rows, got ", n, " != ", rhs.dim_size(ndims - 2)), done); for (int dim = 0; dim < ndims - 2; dim++) { OP_REQUIRES_ASYNC( context, input.dim_size(dim) == rhs.dim_size(dim), errors::InvalidArgument( "All input tensors must have the same outer dimensions."), done); } // Allocate output. Tensor* output; OP_REQUIRES_OK_ASYNC( context, context->forward_input_or_allocate_output({1}, 0, rhs.shape(), &output), done); // To be consistent with the MatrixInverse op, we define the solution for // an empty set of equations as the empty matrix. if (input.NumElements() == 0 || rhs.NumElements() == 0) { done(); return; } // TODO(rmlarsen): Convert to std::make_unique when available. std::unique_ptr<CudaSolver> solver(new CudaSolver(context)); // Make a copy of the input for the factorization step, or, if adjoint_ is // false, try to reuse the input buffer if this op owns it exclusively. Tensor input_copy; const GPUDevice& device = context->eigen_device<GPUDevice>(); if (adjoint_) { // For the adjoint case, it is simpler to always make a transposed copy up // front. OP_REQUIRES_OK_ASYNC( context, solver->allocate_scoped_tensor(DataTypeToEnum<Scalar>::value, input.shape(), &input_copy), done); OP_REQUIRES_OK_ASYNC(context, DoMatrixTranspose(device, input, &input_copy), done); } else { OP_REQUIRES_OK_ASYNC( context, solver->forward_input_or_allocate_scoped_tensor( {0}, DataTypeToEnum<Scalar>::value, input.shape(), &input_copy), done); if (!input.SharesBufferWith(input_copy)) { device.memcpy(input_copy.flat<Scalar>().data(), input.flat<Scalar>().data(), input.NumElements() * sizeof(Scalar)); } } auto input_copy_reshaped = input_copy.template flat_inner_dims<Scalar, 3>(); const int64 batch_size = input_copy_reshaped.dimension(0); // Allocate pivots on the device. Tensor pivots; OP_REQUIRES_OK_ASYNC( context, solver->allocate_scoped_tensor(DataTypeToEnum<int>::value, TensorShape{batch_size, n}, &pivots), done); auto pivots_mat = pivots.template matrix<int>(); // 1. Compute the partially pivoted LU factorization(s) of the // matrix/matrices. std::vector<DeviceLapackInfo> dev_info; auto input_copy_ptrs = solver->GetScratchSpace<uint8>( sizeof(Scalar*) * batch_size, "input_copt_ptrs", /* on_host */ true); const int kMaxMatrixSizeToBatchSizeRatio = 128; const bool use_batched_solver = n <= kMaxMatrixSizeToBatchSizeRatio * batch_size; if (use_batched_solver) { // For small matrices or large batch sizes, we use the batched interface // from cuBlas. 
const Scalar** input_copy_ptrs_base = reinterpret_cast<const Scalar**>(input_copy_ptrs.mutable_data()); for (int batch = 0; batch < batch_size; ++batch) { input_copy_ptrs_base[batch] = &input_copy_reshaped(batch, 0, 0); } dev_info.push_back( solver->GetDeviceLapackInfo(batch_size, "getrfBatched")); OP_REQUIRES_OK_ASYNC( context, solver->GetrfBatched(n, input_copy_ptrs_base, n, pivots_mat.data(), &dev_info.back(), batch_size), done); } else { // For small batch sizes or large matrices, we use the non-batched // interface from cuSolver, which is much faster for large matrices. dev_info.push_back(solver->GetDeviceLapackInfo(batch_size, "getrf")); for (int batch = 0; batch < batch_size; ++batch) { OP_REQUIRES_OK_ASYNC( context, solver->Getrf(n, n, &input_copy_reshaped(batch, 0, 0), n, &pivots_mat(batch, 0), &dev_info.back()(batch)), done); } } // 2. Make a transposed copy of the right-hand sides. This is necessary // because cuBLAS assumes column-major storage while TensorFlow TF uses // row-major. TensorShape transposed_rhs_shape(rhs.shape()); transposed_rhs_shape.RemoveLastDims(2); transposed_rhs_shape.AddDim(nrhs); transposed_rhs_shape.AddDim(n); Tensor transposed_rhs; OP_REQUIRES_OK_ASYNC( context, solver->allocate_scoped_tensor(DataTypeToEnum<Scalar>::value, transposed_rhs_shape, &transposed_rhs), done); if (nrhs > 1) { OP_REQUIRES_OK_ASYNC( context, DoMatrixTranspose(device, rhs, &transposed_rhs), done); } else { device.memcpy(transposed_rhs.flat<Scalar>().data(), rhs.flat<Scalar>().data(), rhs.NumElements() * sizeof(Scalar)); } // 3. Solve op(A) X = B (in column major form). // We use a trick here: If adjoint_ is true, we converted A to column major // form above. If adjoint is false then I leave A in row-major form and use // trans_a = CUBLAS_OP_T to effectively transform it to column-major on the // fly. (This means that we actually use the LU-factorization of A^T in that // case, but that is equally good for solving AX=B). This way we save an // explicit transpose in the more common case of adjoint_ == false. auto input_copy_ptr_array = solver->GetScratchSpace<uint8>( sizeof(Scalar*) * batch_size, "input_copy_ptr_array", /* on_host */ true); auto transposed_rhs_ptr_array = solver->GetScratchSpace<uint8>( sizeof(Scalar*) * batch_size, "transposed_rhs_ptr_array", /* on_host */ true); auto transposed_rhs_reshaped = transposed_rhs.template flat_inner_dims<Scalar, 3>(); if (use_batched_solver) { const Scalar** input_copy_ptrs_base = reinterpret_cast<const Scalar**>(input_copy_ptr_array.mutable_data()); const Scalar** transposed_rhs_ptrs_base = reinterpret_cast<const Scalar**>( transposed_rhs_ptr_array.mutable_data()); for (int batch = 0; batch < batch_size; ++batch) { input_copy_ptrs_base[batch] = &input_copy_reshaped(batch, 0, 0); transposed_rhs_ptrs_base[batch] = &transposed_rhs_reshaped(batch, 0, 0); } int host_info = 0; OP_REQUIRES_OK_ASYNC( context, solver->GetrsBatched(adjoint_ ? CUBLAS_OP_C : CUBLAS_OP_T, n, nrhs, input_copy_ptrs_base, n, pivots_mat.data(), transposed_rhs_ptrs_base, n, &host_info, batch_size), done); OP_REQUIRES_ASYNC( context, host_info == 0, errors::InvalidArgument("The ", -host_info, "'th argument to cublas*getrsBatched had " "an illegal value."), done); } else { dev_info.push_back(solver->GetDeviceLapackInfo(batch_size, "getrs")); for (int batch = 0; batch < batch_size; ++batch) { OP_REQUIRES_OK_ASYNC( context, solver->Getrs(adjoint_ ? 
CUBLAS_OP_C : CUBLAS_OP_T, n, nrhs, &input_copy_reshaped(batch, 0, 0), n, &pivots_mat(batch, 0), &transposed_rhs_reshaped(batch, 0, 0), n, &dev_info.back()(batch)), done); } } // 4. Transpose X to get the final result in row-major form. if (nrhs > 1) { OP_REQUIRES_OK_ASYNC( context, DoMatrixTranspose(device, transposed_rhs, output), done); } else { device.memcpy(output->flat<Scalar>().data(), transposed_rhs.flat<Scalar>().data(), transposed_rhs.NumElements() * sizeof(Scalar)); } // Callback for checking info after kernels finish. Also capture the // temporary Tensors/ScratchSpace so they don't get deallocated before the // kernels run. TODO(rmlarsen): Use move capture once C++14 becomes // available. auto info_checker = [context, done, dev_info]( const Status& status, const std::vector<HostLapackInfo>& host_infos) { if (!status.ok() && errors::IsInvalidArgument(status) && !host_infos.empty()) { for (int i = 0; i < host_infos[0].size(); ++i) { // Match the CPU error message for singular matrices. Otherwise // just print the original error message from the status below. OP_REQUIRES_ASYNC(context, host_infos[0].data()[i] <= 0, errors::InvalidArgument(kErrMsg), done); } } OP_REQUIRES_OK_ASYNC(context, status, done); done(); }; CudaSolver::CheckLapackInfoAndDeleteSolverAsync(std::move(solver), dev_info, std::move(info_checker)); }
213027909579375149223707117272594449042
matrix_solve_op.cc
56729187938743390978402639550963026335
CWE-354
CVE-2021-41206
TensorFlow is an open source platform for machine learning. In affected versions several TensorFlow operations are missing validation for the shapes of the tensor arguments involved in the call. Depending on the API, this can result in undefined behavior and segfault or `CHECK`-fail related crashes but in some scenarios writes and reads from heap populated arrays are also possible. We have discovered these issues internally via tooling while working on improving/testing GPU op determinism. As such, we don't have reproducers and there will be multiple fixes for these issues. These fixes will be included in TensorFlow 2.7.0. We will also cherrypick these commits on TensorFlow 2.6.1, TensorFlow 2.5.2, and TensorFlow 2.4.4, as these are also affected and still in supported range.
https://nvd.nist.gov/vuln/detail/CVE-2021-41206
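The fixed kernel adds a loop over the outer dimensions to confirm that input and rhs share the same batch shape before any batched cuBLAS/cuSolver call. A rough, framework-free sketch of that check, with plain std::vector<int64_t> shapes standing in for TensorShape:

#include <cstdint>
#include <cstdio>
#include <vector>

// Returns true when every outer (batch) dimension of the two shapes matches.
// The last two dimensions are the matrix dimensions and are checked separately,
// mirroring the "dim < ndims - 2" loop added by the patch.
static bool same_batch_dims(const std::vector<int64_t> &lhs,
                            const std::vector<int64_t> &rhs) {
    if (lhs.size() != rhs.size() || lhs.size() < 2) return false;
    for (std::size_t dim = 0; dim + 2 < lhs.size(); ++dim)
        if (lhs[dim] != rhs[dim]) return false;
    return true;
}

int main() {
    std::vector<int64_t> a = {4, 3, 3};   // batch of 4 square 3x3 matrices
    std::vector<int64_t> b = {2, 3, 1};   // batch of 2 right-hand sides: mismatch
    std::printf("%s\n", same_batch_dims(a, b) ? "ok" : "batch dimensions differ");
    return 0;
}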
196,705
tensorflow
11ced8467eccad9c7cb94867708be8fa5c66c730
https://github.com/tensorflow/tensorflow
https://github.com/tensorflow/tensorflow/commit/11ced8467eccad9c7cb94867708be8fa5c66c730
Fix UB in SparseTensorDenseAdd Added more input validation to avoid nullptr dereferencing and array index out of bounds issues. PiperOrigin-RevId: 446192704
1
Status ValidateInputs(const Tensor *a_indices, const Tensor *a_values, const Tensor *a_shape, const Tensor *b) { if (!TensorShapeUtils::IsMatrix(a_indices->shape())) { return errors::InvalidArgument( "Input a_indices should be a matrix but received shape: ", a_indices->shape().DebugString()); } if (!TensorShapeUtils::IsVector(a_values->shape()) || !TensorShapeUtils::IsVector(a_shape->shape())) { return errors::InvalidArgument( "Inputs a_values and a_shape should be vectors " "but received shapes: ", a_values->shape().DebugString(), " and ", a_shape->shape().DebugString()); } if (a_shape->NumElements() != b->dims()) { return errors::InvalidArgument( "Two operands have different ranks; received: ", a_shape->NumElements(), " and ", b->dims()); } const auto a_shape_flat = a_shape->flat<Index>(); for (int i = 0; i < b->dims(); ++i) { if (a_shape_flat(i) != b->dim_size(i)) { return errors::InvalidArgument( "Dimension ", i, " does not equal (no broadcasting is supported): sparse side ", a_shape_flat(i), " vs dense side ", b->dim_size(i)); } } return Status::OK(); }
308425823880781073775676879611190785715
sparse_tensor_dense_add_op.cc
91198918327439956509177796541242214319
CWE-20
CVE-2022-29206
TensorFlow is an open source platform for machine learning. Prior to versions 2.9.0, 2.8.1, 2.7.2, and 2.6.4, the implementation of `tf.raw_ops.SparseTensorDenseAdd` does not fully validate the input arguments. In this case, a reference gets bound to a `nullptr` during kernel execution. This is undefined behavior. Versions 2.9.0, 2.8.1, 2.7.2, and 2.6.4 contain a patch for this issue.
https://nvd.nist.gov/vuln/detail/CVE-2022-29206
242,926
tensorflow
11ced8467eccad9c7cb94867708be8fa5c66c730
https://github.com/tensorflow/tensorflow
https://github.com/tensorflow/tensorflow/commit/11ced8467eccad9c7cb94867708be8fa5c66c730
Fix UB in SparseTensorDenseAdd Added more input validation to avoid nullptr dereferencing and array index out of bounds issues. PiperOrigin-RevId: 446192704
0
Status ValidateInputs(const Tensor *a_indices, const Tensor *a_values, const Tensor *a_shape, const Tensor *b) { if (!TensorShapeUtils::IsMatrix(a_indices->shape())) { return errors::InvalidArgument( "Input a_indices should be a matrix but received shape: ", a_indices->shape().DebugString()); } if (!TensorShapeUtils::IsVector(a_values->shape()) || !TensorShapeUtils::IsVector(a_shape->shape())) { return errors::InvalidArgument( "Inputs a_values and a_shape should be vectors " "but received shapes: ", a_values->shape().DebugString(), " and ", a_shape->shape().DebugString()); } int64_t nnz = a_indices->dim_size(0); int64_t ndims = a_indices->dim_size(1); if (a_values->dim_size(0) != nnz) { return errors::InvalidArgument("Dimensions ", nnz, " and ", a_values->dim_size(0), " are not compatible"); } if (a_shape->dim_size(0) != ndims) { return errors::InvalidArgument("Dimensions ", ndims, " and ", a_shape->dim_size(0), " are not compatible"); } if (a_shape->NumElements() != b->dims()) { return errors::InvalidArgument( "Two operands have different ranks; received: ", a_shape->NumElements(), " and ", b->dims()); } const auto a_shape_flat = a_shape->flat<Index>(); for (int i = 0; i < b->dims(); ++i) { if (a_shape_flat(i) != b->dim_size(i)) { return errors::InvalidArgument( "Dimension ", i, " does not equal (no broadcasting is supported): sparse side ", a_shape_flat(i), " vs dense side ", b->dim_size(i)); } } // Check for invalid indices. const auto a_indices_mat = a_indices->flat_inner_dims<Index>(); for (int64_t zidx = 0; zidx < nnz; ++zidx) { for (int64_t didx = 0; didx < ndims; ++didx) { const Index idx = a_indices_mat(zidx, didx); if (idx < 0 || idx >= a_shape_flat(didx)) { return errors::InvalidArgument( "Sparse tensor has an invalid index on dimension ", didx, ": " "a_indices(", zidx, ",", didx, ") = ", idx, ", dense tensor shape: ", a_shape_flat); } } } return Status::OK(); }
310798775905628265586866839597720584207
sparse_tensor_dense_add_op.cc
106434963445193170812694157920438487376
CWE-20
CVE-2022-29206
TensorFlow is an open source platform for machine learning. Prior to versions 2.9.0, 2.8.1, 2.7.2, and 2.6.4, the implementation of `tf.raw_ops.SparseTensorDenseAdd` does not fully validate the input arguments. In this case, a reference gets bound to a `nullptr` during kernel execution. This is undefined behavior. Versions 2.9.0, 2.8.1, 2.7.2, and 2.6.4 contain a patch for this issue.
https://nvd.nist.gov/vuln/detail/CVE-2022-29206
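The patch adds two kinds of validation: the a_values and a_shape extents must agree with a_indices, and every sparse coordinate must lie inside the dense shape before it is used as an array index. A stand-alone sketch of the coordinate check, using plain vectors instead of Tensor:

#include <cstdint>
#include <cstdio>
#include <vector>

// "indices" is an nnz x ndims matrix stored row-major; "shape" has ndims entries.
// Returns false if the extents disagree or any coordinate is negative or
// >= the corresponding dense extent.
static bool indices_in_bounds(const std::vector<int64_t> &indices,
                              int64_t nnz, int64_t ndims,
                              const std::vector<int64_t> &shape) {
    if (static_cast<int64_t>(shape.size()) != ndims ||
        static_cast<int64_t>(indices.size()) != nnz * ndims)
        return false;
    for (int64_t z = 0; z < nnz; ++z)
        for (int64_t d = 0; d < ndims; ++d) {
            const int64_t idx = indices[z * ndims + d];
            if (idx < 0 || idx >= shape[d]) return false;
        }
    return true;
}

int main() {
    std::vector<int64_t> shape = {2, 3};
    std::vector<int64_t> good = {0, 0, 1, 2};   // two entries, both in range
    std::vector<int64_t> bad  = {0, 0, 1, 3};   // second entry exceeds dimension 1
    std::printf("good: %d, bad: %d\n",
                indices_in_bounds(good, 2, 2, shape),
                indices_in_bounds(bad, 2, 2, shape));
    return 0;
}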
196,860
gpac
a51f951b878c2b73c1d8e2f1518c7cdc5fb82c3f
https://github.com/gpac/gpac
https://github.com/gpac/gpac/commit/a51f951b878c2b73c1d8e2f1518c7cdc5fb82c3f
fixed #1782 (fuzz)
1
GF_Err afra_box_read(GF_Box *s, GF_BitStream *bs) { unsigned int i; GF_AdobeFragRandomAccessBox *ptr = (GF_AdobeFragRandomAccessBox *)s; ISOM_DECREASE_SIZE(ptr, 9) ptr->long_ids = gf_bs_read_int(bs, 1); ptr->long_offsets = gf_bs_read_int(bs, 1); ptr->global_entries = gf_bs_read_int(bs, 1); ptr->reserved = gf_bs_read_int(bs, 5); ptr->time_scale = gf_bs_read_u32(bs); ptr->entry_count = gf_bs_read_u32(bs); if (ptr->size / ( (ptr->long_offsets ? 16 : 12) ) < ptr->entry_count) return GF_ISOM_INVALID_FILE; for (i=0; i<ptr->entry_count; i++) { GF_AfraEntry *ae = gf_malloc(sizeof(GF_AfraEntry)); if (!ae) return GF_OUT_OF_MEM; ISOM_DECREASE_SIZE(ptr, 8) ae->time = gf_bs_read_u64(bs); if (ptr->long_offsets) { ISOM_DECREASE_SIZE(ptr, 8) ae->offset = gf_bs_read_u64(bs); } else { ISOM_DECREASE_SIZE(ptr, 4) ae->offset = gf_bs_read_u32(bs); } gf_list_insert(ptr->local_access_entries, ae, i); } if (ptr->global_entries) { ISOM_DECREASE_SIZE(ptr, 4) ptr->global_entry_count = gf_bs_read_u32(bs); for (i=0; i<ptr->global_entry_count; i++) { GF_GlobalAfraEntry *ae = gf_malloc(sizeof(GF_GlobalAfraEntry)); if (!ae) return GF_OUT_OF_MEM; ISOM_DECREASE_SIZE(ptr, 8) ae->time = gf_bs_read_u64(bs); if (ptr->long_ids) { ISOM_DECREASE_SIZE(ptr, 8) ae->segment = gf_bs_read_u32(bs); ae->fragment = gf_bs_read_u32(bs); } else { ISOM_DECREASE_SIZE(ptr, 4) ae->segment = gf_bs_read_u16(bs); ae->fragment = gf_bs_read_u16(bs); } if (ptr->long_offsets) { ISOM_DECREASE_SIZE(ptr, 16) ae->afra_offset = gf_bs_read_u64(bs); ae->offset_from_afra = gf_bs_read_u64(bs); } else { ISOM_DECREASE_SIZE(ptr, 8) ae->afra_offset = gf_bs_read_u32(bs); ae->offset_from_afra = gf_bs_read_u32(bs); } gf_list_insert(ptr->global_access_entries, ae, i); } } return GF_OK; }
312927211426500504617752335989791880756
None
CWE-787
CVE-2021-33361
Memory leak in the afra_box_read function in MP4Box in GPAC 1.0.1 allows attackers to read memory via a crafted file.
https://nvd.nist.gov/vuln/detail/CVE-2021-33361
246,693
gpac
a51f951b878c2b73c1d8e2f1518c7cdc5fb82c3f
https://github.com/gpac/gpac
https://github.com/gpac/gpac/commit/a51f951b878c2b73c1d8e2f1518c7cdc5fb82c3f
fixed #1782 (fuzz)
0
GF_Err afra_box_read(GF_Box *s, GF_BitStream *bs) { unsigned int i; GF_AdobeFragRandomAccessBox *ptr = (GF_AdobeFragRandomAccessBox *)s; ISOM_DECREASE_SIZE(ptr, 9) ptr->long_ids = gf_bs_read_int(bs, 1); ptr->long_offsets = gf_bs_read_int(bs, 1); ptr->global_entries = gf_bs_read_int(bs, 1); ptr->reserved = gf_bs_read_int(bs, 5); ptr->time_scale = gf_bs_read_u32(bs); ptr->entry_count = gf_bs_read_u32(bs); if (ptr->size / ( (ptr->long_offsets ? 16 : 12) ) < ptr->entry_count) return GF_ISOM_INVALID_FILE; for (i=0; i<ptr->entry_count; i++) { GF_AfraEntry *ae = gf_malloc(sizeof(GF_AfraEntry)); if (!ae) return GF_OUT_OF_MEM; gf_list_insert(ptr->local_access_entries, ae, i); ISOM_DECREASE_SIZE(ptr, 8) ae->time = gf_bs_read_u64(bs); if (ptr->long_offsets) { ISOM_DECREASE_SIZE(ptr, 8) ae->offset = gf_bs_read_u64(bs); } else { ISOM_DECREASE_SIZE(ptr, 4) ae->offset = gf_bs_read_u32(bs); } } if (ptr->global_entries) { ISOM_DECREASE_SIZE(ptr, 4) ptr->global_entry_count = gf_bs_read_u32(bs); for (i=0; i<ptr->global_entry_count; i++) { GF_GlobalAfraEntry *ae = gf_malloc(sizeof(GF_GlobalAfraEntry)); if (!ae) return GF_OUT_OF_MEM; gf_list_insert(ptr->global_access_entries, ae, i); ISOM_DECREASE_SIZE(ptr, 8) ae->time = gf_bs_read_u64(bs); if (ptr->long_ids) { ISOM_DECREASE_SIZE(ptr, 8) ae->segment = gf_bs_read_u32(bs); ae->fragment = gf_bs_read_u32(bs); } else { ISOM_DECREASE_SIZE(ptr, 4) ae->segment = gf_bs_read_u16(bs); ae->fragment = gf_bs_read_u16(bs); } if (ptr->long_offsets) { ISOM_DECREASE_SIZE(ptr, 16) ae->afra_offset = gf_bs_read_u64(bs); ae->offset_from_afra = gf_bs_read_u64(bs); } else { ISOM_DECREASE_SIZE(ptr, 8) ae->afra_offset = gf_bs_read_u32(bs); ae->offset_from_afra = gf_bs_read_u32(bs); } } } return GF_OK; }
301022642870722199968022360468016167215
None
CWE-787
CVE-2021-33361
Memory leak in the afra_box_read function in MP4Box in GPAC 1.0.1 allows attackers to read memory via a crafted file.
https://nvd.nist.gov/vuln/detail/CVE-2021-33361
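The fix moves gf_list_insert() ahead of the size checks, so each freshly allocated entry is owned by the box's list before any early return can happen and is released together with the box instead of leaking. A small C++ sketch of that register-before-failure ownership pattern; the types and helpers here are made up for illustration:

#include <cstdio>
#include <memory>
#include <vector>

struct Entry { unsigned long long time = 0; };

// Stand-in for a parse step that can fail partway through (e.g. truncated input).
static bool read_entry_fields(Entry &e, bool simulate_truncated_input) {
    if (simulate_truncated_input) return false;
    e.time = 42;
    return true;
}

static bool parse_entries(std::vector<std::unique_ptr<Entry>> &out, int count, bool truncated) {
    for (int i = 0; i < count; ++i) {
        auto entry = std::make_unique<Entry>();
        // Hand the entry to the container *before* anything that can return early,
        // so an error path cannot leak it: the container's destructor cleans it up.
        out.push_back(std::move(entry));
        if (!read_entry_fields(*out.back(), truncated))
            return false;                         // no leak: "out" still owns the entry
    }
    return true;
}

int main() {
    std::vector<std::unique_ptr<Entry>> entries;
    std::printf("parse ok: %d, entries kept: %zu\n",
                parse_entries(entries, 3, /*truncated=*/true), entries.size());
    return 0;
}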
196,893
envoy
e9f936d85dc1edc34fabd0a1725ec180f2316353
https://github.com/istio/envoy
https://github.com/envoyproxy/envoy/commit/e9f936d85dc1edc34fabd0a1725ec180f2316353
CVE-2022-21654 tls allows re-use when some cert validation settings have changed Signed-off-by: Yan Avlasov <yavlasov@google.com>
1
void DefaultCertValidator::updateDigestForSessionId(bssl::ScopedEVP_MD_CTX& md, uint8_t hash_buffer[EVP_MAX_MD_SIZE], unsigned hash_length) { int rc; // Hash all the settings that affect whether the server will allow/accept // the client connection. This ensures that the client is always validated against // the correct settings, even if session resumption across different listeners // is enabled. if (ca_cert_ != nullptr) { rc = X509_digest(ca_cert_.get(), EVP_sha256(), hash_buffer, &hash_length); RELEASE_ASSERT(rc == 1, Utility::getLastCryptoError().value_or("")); RELEASE_ASSERT(hash_length == SHA256_DIGEST_LENGTH, fmt::format("invalid SHA256 hash length {}", hash_length)); rc = EVP_DigestUpdate(md.get(), hash_buffer, hash_length); RELEASE_ASSERT(rc == 1, Utility::getLastCryptoError().value_or("")); } for (const auto& hash : verify_certificate_hash_list_) { rc = EVP_DigestUpdate(md.get(), hash.data(), hash.size() * sizeof(std::remove_reference<decltype(hash)>::type::value_type)); RELEASE_ASSERT(rc == 1, Utility::getLastCryptoError().value_or("")); } for (const auto& hash : verify_certificate_spki_list_) { rc = EVP_DigestUpdate(md.get(), hash.data(), hash.size() * sizeof(std::remove_reference<decltype(hash)>::type::value_type)); RELEASE_ASSERT(rc == 1, Utility::getLastCryptoError().value_or("")); } }
140411723524972581248505271605198365196
default_validator.cc
12469299547216327731154844869138538271
CWE-362
CVE-2022-21654
Envoy is an open source edge and service proxy, designed for cloud-native applications. Envoy's tls allows re-use when some cert validation settings have changed from their default configuration. The only workaround for this issue is to ensure that default tls settings are used. Users are advised to upgrade.
https://nvd.nist.gov/vuln/detail/CVE-2022-21654
247,550
envoy
e9f936d85dc1edc34fabd0a1725ec180f2316353
https://github.com/istio/envoy
https://github.com/envoyproxy/envoy/commit/e9f936d85dc1edc34fabd0a1725ec180f2316353
CVE-2022-21654 tls allows re-use when some cert validation settings have changed Signed-off-by: Yan Avlasov <yavlasov@google.com>
0
void DefaultCertValidator::updateDigestForSessionId(bssl::ScopedEVP_MD_CTX& md, uint8_t hash_buffer[EVP_MAX_MD_SIZE], unsigned hash_length) { int rc; // Hash all the settings that affect whether the server will allow/accept // the client connection. This ensures that the client is always validated against // the correct settings, even if session resumption across different listeners // is enabled. if (ca_cert_ != nullptr) { rc = X509_digest(ca_cert_.get(), EVP_sha256(), hash_buffer, &hash_length); RELEASE_ASSERT(rc == 1, Utility::getLastCryptoError().value_or("")); RELEASE_ASSERT(hash_length == SHA256_DIGEST_LENGTH, fmt::format("invalid SHA256 hash length {}", hash_length)); rc = EVP_DigestUpdate(md.get(), hash_buffer, hash_length); RELEASE_ASSERT(rc == 1, Utility::getLastCryptoError().value_or("")); } for (const auto& hash : verify_certificate_hash_list_) { rc = EVP_DigestUpdate(md.get(), hash.data(), hash.size() * sizeof(std::remove_reference<decltype(hash)>::type::value_type)); RELEASE_ASSERT(rc == 1, Utility::getLastCryptoError().value_or("")); } for (const auto& hash : verify_certificate_spki_list_) { rc = EVP_DigestUpdate(md.get(), hash.data(), hash.size() * sizeof(std::remove_reference<decltype(hash)>::type::value_type)); RELEASE_ASSERT(rc == 1, Utility::getLastCryptoError().value_or("")); } rc = EVP_DigestUpdate(md.get(), &verify_trusted_ca_, sizeof(verify_trusted_ca_)); RELEASE_ASSERT(rc == 1, Utility::getLastCryptoError().value_or("")); if (config_ != nullptr) { for (const auto& matcher : config_->subjectAltNameMatchers()) { size_t hash = MessageUtil::hash(matcher); rc = EVP_DigestUpdate(md.get(), &hash, sizeof(hash)); RELEASE_ASSERT(rc == 1, Utility::getLastCryptoError().value_or("")); } const std::string& crl = config_->certificateRevocationList(); if (!crl.empty()) { rc = EVP_DigestUpdate(md.get(), crl.data(), crl.length()); RELEASE_ASSERT(rc == 1, Utility::getLastCryptoError().value_or("")); } bool allow_expired = config_->allowExpiredCertificate(); rc = EVP_DigestUpdate(md.get(), &allow_expired, sizeof(allow_expired)); RELEASE_ASSERT(rc == 1, Utility::getLastCryptoError().value_or("")); auto trust_chain_verification = config_->trustChainVerification(); rc = EVP_DigestUpdate(md.get(), &trust_chain_verification, sizeof(trust_chain_verification)); RELEASE_ASSERT(rc == 1, Utility::getLastCryptoError().value_or("")); auto only_leaf_crl = config_->onlyVerifyLeafCertificateCrl(); rc = EVP_DigestUpdate(md.get(), &only_leaf_crl, sizeof(only_leaf_crl)); RELEASE_ASSERT(rc == 1, Utility::getLastCryptoError().value_or("")); } }
325686322374262257116614301181972043862
default_validator.cc
261316308158712382898480668520478060002
CWE-362
CVE-2022-21654
Envoy is an open-source edge and service proxy designed for cloud-native applications. Envoy's TLS allows session re-use when some certificate-validation settings have changed from their default configuration. The only workaround for this issue is to ensure that default TLS settings are used. Users are advised to upgrade.
https://nvd.nist.gov/vuln/detail/CVE-2022-21654
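The patched updateDigestForSessionId above differs from the vulnerable version by folding every remaining certificate-validation setting (trusted-CA flag, SAN matchers, CRL, expired-certificate allowance, trust-chain verification mode, leaf-only CRL checking) into the digest that keys TLS session resumption, so a session negotiated under one validation configuration can no longer be resumed under another. The following is only a minimal, self-contained sketch of that idea against plain OpenSSL; the ValidationSettings struct and sessionContextFor name are hypothetical and are not Envoy APIs.

#include <openssl/evp.h>
#include <openssl/sha.h>

#include <array>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <string>
#include <vector>

struct ValidationSettings {            // hypothetical settings bundle
  std::string ca_pem;                  // trusted CA bundle
  std::string crl_pem;                 // certificate revocation list
  std::vector<std::string> san_matchers;
  bool allow_expired = false;
  bool only_verify_leaf_crl = false;
};

// Derive a resumption context by hashing every setting that influences
// whether a client certificate is accepted. Changing any setting changes
// the context, which prevents cross-configuration session re-use.
std::array<uint8_t, SHA256_DIGEST_LENGTH>
sessionContextFor(const ValidationSettings& s) {
  EVP_MD_CTX* md = EVP_MD_CTX_new();
  assert(md != nullptr);
  int rc = EVP_DigestInit_ex(md, EVP_sha256(), nullptr);
  assert(rc == 1);
  auto update = [&](const void* data, size_t len) {
    rc = EVP_DigestUpdate(md, data, len);
    assert(rc == 1);
  };
  update(s.ca_pem.data(), s.ca_pem.size());
  update(s.crl_pem.data(), s.crl_pem.size());
  for (const auto& m : s.san_matchers) update(m.data(), m.size());
  update(&s.allow_expired, sizeof(s.allow_expired));
  update(&s.only_verify_leaf_crl, sizeof(s.only_verify_leaf_crl));

  std::array<uint8_t, SHA256_DIGEST_LENGTH> out{};
  unsigned out_len = 0;
  rc = EVP_DigestFinal_ex(md, out.data(), &out_len);
  assert(rc == 1 && out_len == SHA256_DIGEST_LENGTH);
  EVP_MD_CTX_free(md);
  return out;
}

int main() {
  ValidationSettings a, b;
  b.allow_expired = true;              // one setting differs
  // Different settings -> different resumption contexts -> no cross re-use.
  return sessionContextFor(a) == sessionContextFor(b) ? 1 : 0;
}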
197,111
tinyexr
a685e3332f61cd4e59324bf3f669d36973d64270
https://github.com/syoyo/tinyexr
https://github.com/syoyo/tinyexr/commit/a685e3332f61cd4e59324bf3f669d36973d64270
Make line_no with a too-large value (2**20) invalid. Fixes #124
1
static int DecodeChunk(EXRImage *exr_image, const EXRHeader *exr_header, const std::vector<tinyexr::tinyexr_uint64> &offsets, const unsigned char *head, const size_t size, std::string *err) { int num_channels = exr_header->num_channels; int num_scanline_blocks = 1; if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) { num_scanline_blocks = 16; } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) { num_scanline_blocks = 32; } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) { num_scanline_blocks = 16; } int data_width = exr_header->data_window[2] - exr_header->data_window[0] + 1; int data_height = exr_header->data_window[3] - exr_header->data_window[1] + 1; if ((data_width < 0) || (data_height < 0)) { if (err) { std::stringstream ss; ss << "Invalid data width or data height: " << data_width << ", " << data_height << std::endl; (*err) += ss.str(); } return TINYEXR_ERROR_INVALID_DATA; } // Do not allow too large data_width and data_height. header invalid? { const int threshold = 1024 * 8192; // heuristics if ((data_width > threshold) || (data_height > threshold)) { if (err) { std::stringstream ss; ss << "data_with or data_height too large. data_width: " << data_width << ", " << "data_height = " << data_height << std::endl; (*err) += ss.str(); } return TINYEXR_ERROR_INVALID_DATA; } } size_t num_blocks = offsets.size(); std::vector<size_t> channel_offset_list; int pixel_data_size = 0; size_t channel_offset = 0; if (!tinyexr::ComputeChannelLayout(&channel_offset_list, &pixel_data_size, &channel_offset, num_channels, exr_header->channels)) { if (err) { (*err) += "Failed to compute channel layout.\n"; } return TINYEXR_ERROR_INVALID_DATA; } bool invalid_data = false; // TODO(LTE): Use atomic lock for MT safety. if (exr_header->tiled) { // value check if (exr_header->tile_size_x < 0) { if (err) { std::stringstream ss; ss << "Invalid tile size x : " << exr_header->tile_size_x << "\n"; (*err) += ss.str(); } return TINYEXR_ERROR_INVALID_HEADER; } if (exr_header->tile_size_y < 0) { if (err) { std::stringstream ss; ss << "Invalid tile size y : " << exr_header->tile_size_y << "\n"; (*err) += ss.str(); } return TINYEXR_ERROR_INVALID_HEADER; } size_t num_tiles = offsets.size(); // = # of blocks exr_image->tiles = static_cast<EXRTile *>( calloc(sizeof(EXRTile), static_cast<size_t>(num_tiles))); for (size_t tile_idx = 0; tile_idx < num_tiles; tile_idx++) { // Allocate memory for each tile. 
exr_image->tiles[tile_idx].images = tinyexr::AllocateImage( num_channels, exr_header->channels, exr_header->requested_pixel_types, exr_header->tile_size_x, exr_header->tile_size_y); // 16 byte: tile coordinates // 4 byte : data size // ~ : data(uncompressed or compressed) if (offsets[tile_idx] + sizeof(int) * 5 > size) { if (err) { (*err) += "Insufficient data size.\n"; } return TINYEXR_ERROR_INVALID_DATA; } size_t data_size = size_t(size - (offsets[tile_idx] + sizeof(int) * 5)); const unsigned char *data_ptr = reinterpret_cast<const unsigned char *>(head + offsets[tile_idx]); int tile_coordinates[4]; memcpy(tile_coordinates, data_ptr, sizeof(int) * 4); tinyexr::swap4(reinterpret_cast<unsigned int *>(&tile_coordinates[0])); tinyexr::swap4(reinterpret_cast<unsigned int *>(&tile_coordinates[1])); tinyexr::swap4(reinterpret_cast<unsigned int *>(&tile_coordinates[2])); tinyexr::swap4(reinterpret_cast<unsigned int *>(&tile_coordinates[3])); // @todo{ LoD } if (tile_coordinates[2] != 0) { return TINYEXR_ERROR_UNSUPPORTED_FEATURE; } if (tile_coordinates[3] != 0) { return TINYEXR_ERROR_UNSUPPORTED_FEATURE; } int data_len; memcpy(&data_len, data_ptr + 16, sizeof(int)); // 16 = sizeof(tile_coordinates) tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len)); if (data_len < 4 || size_t(data_len) > data_size) { if (err) { (*err) += "Insufficient data length.\n"; } return TINYEXR_ERROR_INVALID_DATA; } // Move to data addr: 20 = 16 + 4; data_ptr += 20; tinyexr::DecodeTiledPixelData( exr_image->tiles[tile_idx].images, &(exr_image->tiles[tile_idx].width), &(exr_image->tiles[tile_idx].height), exr_header->requested_pixel_types, data_ptr, static_cast<size_t>(data_len), exr_header->compression_type, exr_header->line_order, data_width, data_height, tile_coordinates[0], tile_coordinates[1], exr_header->tile_size_x, exr_header->tile_size_y, static_cast<size_t>(pixel_data_size), static_cast<size_t>(exr_header->num_custom_attributes), exr_header->custom_attributes, static_cast<size_t>(exr_header->num_channels), exr_header->channels, channel_offset_list); exr_image->tiles[tile_idx].offset_x = tile_coordinates[0]; exr_image->tiles[tile_idx].offset_y = tile_coordinates[1]; exr_image->tiles[tile_idx].level_x = tile_coordinates[2]; exr_image->tiles[tile_idx].level_y = tile_coordinates[3]; exr_image->num_tiles = static_cast<int>(num_tiles); } } else { // scanline format // Don't allow too large image(256GB * pixel_data_size or more). Workaround // for #104. size_t total_data_len = size_t(data_width) * size_t(data_height) * size_t(num_channels); const bool total_data_len_overflown = sizeof(void*) == 8 ? 
(total_data_len >= 0x4000000000) : false; if ((total_data_len == 0) || total_data_len_overflown ) { if (err) { std::stringstream ss; ss << "Image data size is zero or too large: width = " << data_width << ", height = " << data_height << ", channels = " << num_channels << std::endl; (*err) += ss.str(); } return TINYEXR_ERROR_INVALID_DATA; } exr_image->images = tinyexr::AllocateImage( num_channels, exr_header->channels, exr_header->requested_pixel_types, data_width, data_height); #ifdef _OPENMP #pragma omp parallel for #endif for (int y = 0; y < static_cast<int>(num_blocks); y++) { size_t y_idx = static_cast<size_t>(y); if (offsets[y_idx] + sizeof(int) * 2 > size) { invalid_data = true; } else { // 4 byte: scan line // 4 byte: data size // ~ : pixel data(uncompressed or compressed) size_t data_size = size_t(size - (offsets[y_idx] + sizeof(int) * 2)); const unsigned char *data_ptr = reinterpret_cast<const unsigned char *>(head + offsets[y_idx]); int line_no; memcpy(&line_no, data_ptr, sizeof(int)); int data_len; memcpy(&data_len, data_ptr + 4, sizeof(int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&line_no)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len)); if (size_t(data_len) > data_size) { invalid_data = true; } else if (data_len == 0) { // TODO(syoyo): May be ok to raise the threshold for example `data_len // < 4` invalid_data = true; } else { // line_no may be negative. int end_line_no = (std::min)(line_no + num_scanline_blocks, (exr_header->data_window[3] + 1)); int num_lines = end_line_no - line_no; if (num_lines <= 0) { invalid_data = true; } else { // Move to data addr: 8 = 4 + 4; data_ptr += 8; // Adjust line_no with data_window.bmin.y // overflow check tinyexr_int64 lno = static_cast<tinyexr_int64>(line_no) - static_cast<tinyexr_int64>(exr_header->data_window[1]); if (lno > std::numeric_limits<int>::max()) { line_no = -1; // invalid } else if (lno < -std::numeric_limits<int>::max()) { line_no = -1; // invalid } else { line_no -= exr_header->data_window[1]; } if (line_no < 0) { invalid_data = true; } else { if (!tinyexr::DecodePixelData( exr_image->images, exr_header->requested_pixel_types, data_ptr, static_cast<size_t>(data_len), exr_header->compression_type, exr_header->line_order, data_width, data_height, data_width, y, line_no, num_lines, static_cast<size_t>(pixel_data_size), static_cast<size_t>(exr_header->num_custom_attributes), exr_header->custom_attributes, static_cast<size_t>(exr_header->num_channels), exr_header->channels, channel_offset_list)) { invalid_data = true; } } } } } } // omp parallel } if (invalid_data) { if (err) { std::stringstream ss; (*err) += "Invalid data found when decoding pixels.\n"; } return TINYEXR_ERROR_INVALID_DATA; } // Overwrite `pixel_type` with `requested_pixel_type`. { for (int c = 0; c < exr_header->num_channels; c++) { exr_header->pixel_types[c] = exr_header->requested_pixel_types[c]; } } { exr_image->num_channels = num_channels; exr_image->width = data_width; exr_image->height = data_height; } return TINYEXR_SUCCESS; }
204576660378471471312302041175468111939
tinyexr.h
28581937103314011160798682220091501322
CWE-20
CVE-2020-19490
tinyexr 0.9.5 has an integer overflow overwrite in tinyexr::DecodePixelData in tinyexr.h, related to OpenEXR code.
https://nvd.nist.gov/vuln/detail/CVE-2020-19490
252,437
tinyexr
a685e3332f61cd4e59324bf3f669d36973d64270
https://github.com/syoyo/tinyexr
https://github.com/syoyo/tinyexr/commit/a685e3332f61cd4e59324bf3f669d36973d64270
Make line_no with a too-large value (2**20) invalid. Fixes #124
0
static int DecodeChunk(EXRImage *exr_image, const EXRHeader *exr_header, const std::vector<tinyexr::tinyexr_uint64> &offsets, const unsigned char *head, const size_t size, std::string *err) { int num_channels = exr_header->num_channels; int num_scanline_blocks = 1; if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) { num_scanline_blocks = 16; } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) { num_scanline_blocks = 32; } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) { num_scanline_blocks = 16; } int data_width = exr_header->data_window[2] - exr_header->data_window[0] + 1; int data_height = exr_header->data_window[3] - exr_header->data_window[1] + 1; if ((data_width < 0) || (data_height < 0)) { if (err) { std::stringstream ss; ss << "Invalid data width or data height: " << data_width << ", " << data_height << std::endl; (*err) += ss.str(); } return TINYEXR_ERROR_INVALID_DATA; } // Do not allow too large data_width and data_height. header invalid? { const int threshold = 1024 * 8192; // heuristics if ((data_width > threshold) || (data_height > threshold)) { if (err) { std::stringstream ss; ss << "data_with or data_height too large. data_width: " << data_width << ", " << "data_height = " << data_height << std::endl; (*err) += ss.str(); } return TINYEXR_ERROR_INVALID_DATA; } } size_t num_blocks = offsets.size(); std::vector<size_t> channel_offset_list; int pixel_data_size = 0; size_t channel_offset = 0; if (!tinyexr::ComputeChannelLayout(&channel_offset_list, &pixel_data_size, &channel_offset, num_channels, exr_header->channels)) { if (err) { (*err) += "Failed to compute channel layout.\n"; } return TINYEXR_ERROR_INVALID_DATA; } bool invalid_data = false; // TODO(LTE): Use atomic lock for MT safety. if (exr_header->tiled) { // value check if (exr_header->tile_size_x < 0) { if (err) { std::stringstream ss; ss << "Invalid tile size x : " << exr_header->tile_size_x << "\n"; (*err) += ss.str(); } return TINYEXR_ERROR_INVALID_HEADER; } if (exr_header->tile_size_y < 0) { if (err) { std::stringstream ss; ss << "Invalid tile size y : " << exr_header->tile_size_y << "\n"; (*err) += ss.str(); } return TINYEXR_ERROR_INVALID_HEADER; } size_t num_tiles = offsets.size(); // = # of blocks exr_image->tiles = static_cast<EXRTile *>( calloc(sizeof(EXRTile), static_cast<size_t>(num_tiles))); for (size_t tile_idx = 0; tile_idx < num_tiles; tile_idx++) { // Allocate memory for each tile. 
exr_image->tiles[tile_idx].images = tinyexr::AllocateImage( num_channels, exr_header->channels, exr_header->requested_pixel_types, exr_header->tile_size_x, exr_header->tile_size_y); // 16 byte: tile coordinates // 4 byte : data size // ~ : data(uncompressed or compressed) if (offsets[tile_idx] + sizeof(int) * 5 > size) { if (err) { (*err) += "Insufficient data size.\n"; } return TINYEXR_ERROR_INVALID_DATA; } size_t data_size = size_t(size - (offsets[tile_idx] + sizeof(int) * 5)); const unsigned char *data_ptr = reinterpret_cast<const unsigned char *>(head + offsets[tile_idx]); int tile_coordinates[4]; memcpy(tile_coordinates, data_ptr, sizeof(int) * 4); tinyexr::swap4(reinterpret_cast<unsigned int *>(&tile_coordinates[0])); tinyexr::swap4(reinterpret_cast<unsigned int *>(&tile_coordinates[1])); tinyexr::swap4(reinterpret_cast<unsigned int *>(&tile_coordinates[2])); tinyexr::swap4(reinterpret_cast<unsigned int *>(&tile_coordinates[3])); // @todo{ LoD } if (tile_coordinates[2] != 0) { return TINYEXR_ERROR_UNSUPPORTED_FEATURE; } if (tile_coordinates[3] != 0) { return TINYEXR_ERROR_UNSUPPORTED_FEATURE; } int data_len; memcpy(&data_len, data_ptr + 16, sizeof(int)); // 16 = sizeof(tile_coordinates) tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len)); if (data_len < 4 || size_t(data_len) > data_size) { if (err) { (*err) += "Insufficient data length.\n"; } return TINYEXR_ERROR_INVALID_DATA; } // Move to data addr: 20 = 16 + 4; data_ptr += 20; tinyexr::DecodeTiledPixelData( exr_image->tiles[tile_idx].images, &(exr_image->tiles[tile_idx].width), &(exr_image->tiles[tile_idx].height), exr_header->requested_pixel_types, data_ptr, static_cast<size_t>(data_len), exr_header->compression_type, exr_header->line_order, data_width, data_height, tile_coordinates[0], tile_coordinates[1], exr_header->tile_size_x, exr_header->tile_size_y, static_cast<size_t>(pixel_data_size), static_cast<size_t>(exr_header->num_custom_attributes), exr_header->custom_attributes, static_cast<size_t>(exr_header->num_channels), exr_header->channels, channel_offset_list); exr_image->tiles[tile_idx].offset_x = tile_coordinates[0]; exr_image->tiles[tile_idx].offset_y = tile_coordinates[1]; exr_image->tiles[tile_idx].level_x = tile_coordinates[2]; exr_image->tiles[tile_idx].level_y = tile_coordinates[3]; exr_image->num_tiles = static_cast<int>(num_tiles); } } else { // scanline format // Don't allow too large image(256GB * pixel_data_size or more). Workaround // for #104. size_t total_data_len = size_t(data_width) * size_t(data_height) * size_t(num_channels); const bool total_data_len_overflown = sizeof(void*) == 8 ? 
(total_data_len >= 0x4000000000) : false; if ((total_data_len == 0) || total_data_len_overflown ) { if (err) { std::stringstream ss; ss << "Image data size is zero or too large: width = " << data_width << ", height = " << data_height << ", channels = " << num_channels << std::endl; (*err) += ss.str(); } return TINYEXR_ERROR_INVALID_DATA; } exr_image->images = tinyexr::AllocateImage( num_channels, exr_header->channels, exr_header->requested_pixel_types, data_width, data_height); #ifdef _OPENMP #pragma omp parallel for #endif for (int y = 0; y < static_cast<int>(num_blocks); y++) { size_t y_idx = static_cast<size_t>(y); if (offsets[y_idx] + sizeof(int) * 2 > size) { invalid_data = true; } else { // 4 byte: scan line // 4 byte: data size // ~ : pixel data(uncompressed or compressed) size_t data_size = size_t(size - (offsets[y_idx] + sizeof(int) * 2)); const unsigned char *data_ptr = reinterpret_cast<const unsigned char *>(head + offsets[y_idx]); int line_no; memcpy(&line_no, data_ptr, sizeof(int)); int data_len; memcpy(&data_len, data_ptr + 4, sizeof(int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&line_no)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len)); if (size_t(data_len) > data_size) { invalid_data = true; } else if ((line_no > (2 << 20)) || (line_no < -(2 << 20))) { // Too large value. Assume this is invalid // 2**20 = 1048576 = heuristic value. invalid_data = true; } else if (data_len == 0) { // TODO(syoyo): May be ok to raise the threshold for example `data_len // < 4` invalid_data = true; } else { // line_no may be negative. int end_line_no = (std::min)(line_no + num_scanline_blocks, (exr_header->data_window[3] + 1)); int num_lines = end_line_no - line_no; if (num_lines <= 0) { invalid_data = true; } else { // Move to data addr: 8 = 4 + 4; data_ptr += 8; // Adjust line_no with data_window.bmin.y // overflow check tinyexr_int64 lno = static_cast<tinyexr_int64>(line_no) - static_cast<tinyexr_int64>(exr_header->data_window[1]); if (lno > std::numeric_limits<int>::max()) { line_no = -1; // invalid } else if (lno < -std::numeric_limits<int>::max()) { line_no = -1; // invalid } else { line_no -= exr_header->data_window[1]; } if (line_no < 0) { invalid_data = true; } else { if (!tinyexr::DecodePixelData( exr_image->images, exr_header->requested_pixel_types, data_ptr, static_cast<size_t>(data_len), exr_header->compression_type, exr_header->line_order, data_width, data_height, data_width, y, line_no, num_lines, static_cast<size_t>(pixel_data_size), static_cast<size_t>(exr_header->num_custom_attributes), exr_header->custom_attributes, static_cast<size_t>(exr_header->num_channels), exr_header->channels, channel_offset_list)) { invalid_data = true; } } } } } } // omp parallel } if (invalid_data) { if (err) { std::stringstream ss; (*err) += "Invalid data found when decoding pixels.\n"; } return TINYEXR_ERROR_INVALID_DATA; } // Overwrite `pixel_type` with `requested_pixel_type`. { for (int c = 0; c < exr_header->num_channels; c++) { exr_header->pixel_types[c] = exr_header->requested_pixel_types[c]; } } { exr_image->num_channels = num_channels; exr_image->width = data_width; exr_image->height = data_height; } return TINYEXR_SUCCESS; }
151063885227112731774220545683650954280
tinyexr.h
117099710183052888477970990927407043374
CWE-20
CVE-2020-19490
tinyexr 0.9.5 has an integer overflow overwrite in tinyexr::DecodePixelData in tinyexr.h, related to OpenEXR code.
https://nvd.nist.gov/vuln/detail/CVE-2020-19490
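The only change between the vulnerable and patched DecodeChunk above is the added range check on the line_no value read from each scanline block: values beyond a heuristic bound (around a million scanlines) are rejected before they can be used to index pixel rows, which is what the commit message refers to. Below is a minimal, self-contained sketch of that guard; the helper name readScanlineNumber and the exact bound are illustrative assumptions, not tinyexr code.

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstring>

// Returns true when the scanline header looks sane; on success line_no_out
// receives the decoded scanline number.
bool readScanlineNumber(const unsigned char* block, size_t block_size,
                        int32_t* line_no_out) {
  if (block_size < sizeof(int32_t)) {
    return false;  // not even room for the 4-byte scanline number
  }
  int32_t line_no;
  std::memcpy(&line_no, block, sizeof(line_no));
  // Heuristic bound, mirroring the patch: a scanline number far outside any
  // plausible image height is treated as malformed input instead of being
  // used to index pixel buffers.
  const int32_t kMaxPlausibleLine = 1 << 20;
  if (line_no > kMaxPlausibleLine || line_no < -kMaxPlausibleLine) {
    return false;
  }
  *line_no_out = line_no;
  return true;
}

int main() {
  unsigned char bogus[8] = {0};
  int32_t huge = 0x7fffffff;            // attacker-controlled value
  std::memcpy(bogus, &huge, sizeof(huge));
  int32_t line_no = 0;
  std::printf("accepted: %d\n",
              readScanlineNumber(bogus, sizeof(bogus), &line_no));
  return 0;
}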
197,128
mruby
f72315575f78a9a773adbce0ee7d3ec33434cb76
https://github.com/mruby/mruby
https://github.com/mruby/mruby/commit/f72315575f78a9a773adbce0ee7d3ec33434cb76
codegen.c: fix an argument generation bug in array assignment.
1
gen_assignment(codegen_scope *s, node *tree, node *rhs, int sp, int val) { int idx; int type = nint(tree->car); switch (type) { case NODE_GVAR: case NODE_ARG: case NODE_LVAR: case NODE_IVAR: case NODE_CVAR: case NODE_CONST: case NODE_NIL: case NODE_MASGN: if (rhs) { codegen(s, rhs, VAL); pop(); sp = cursp(); } break; case NODE_COLON2: case NODE_CALL: case NODE_SCALL: /* keep evaluation order */ break; case NODE_NVAR: codegen_error(s, "Can't assign to numbered parameter"); break; default: codegen_error(s, "unknown lhs"); break; } tree = tree->cdr; switch (type) { case NODE_GVAR: gen_setxv(s, OP_SETGV, sp, nsym(tree), val); break; case NODE_ARG: case NODE_LVAR: idx = lv_idx(s, nsym(tree)); if (idx > 0) { if (idx != sp) { gen_move(s, idx, sp, val); } break; } else { /* upvar */ gen_setupvar(s, sp, nsym(tree)); } break; case NODE_IVAR: gen_setxv(s, OP_SETIV, sp, nsym(tree), val); break; case NODE_CVAR: gen_setxv(s, OP_SETCV, sp, nsym(tree), val); break; case NODE_CONST: gen_setxv(s, OP_SETCONST, sp, nsym(tree), val); break; case NODE_COLON2: if (sp) { gen_move(s, cursp(), sp, 0); } sp = cursp(); push(); codegen(s, tree->car, VAL); if (rhs) { codegen(s, rhs, VAL); pop(); gen_move(s, sp, cursp(), 0); } pop_n(2); idx = new_sym(s, nsym(tree->cdr)); genop_2(s, OP_SETMCNST, sp, idx); break; case NODE_CALL: case NODE_SCALL: { int noself = 0, safe = (type == NODE_SCALL), skip = 0, top, call, n = 0; mrb_sym mid = nsym(tree->cdr->car); top = cursp(); if (val || sp == cursp()) { push(); /* room for retval */ } call = cursp(); if (!tree->car) { noself = 1; push(); } else { codegen(s, tree->car, VAL); /* receiver */ } if (safe) { int recv = cursp()-1; gen_move(s, cursp(), recv, 1); skip = genjmp2_0(s, OP_JMPNIL, cursp(), val); } tree = tree->cdr->cdr->car; if (tree) { if (tree->car) { /* positional arguments */ n = gen_values(s, tree->car, VAL, (tree->cdr->car)?13:14); if (n < 0) { /* variable length */ n = 15; push(); } } if (tree->cdr->car) { /* keyword arguments */ if (n == 14) { pop_n(n); genop_2(s, OP_ARRAY, cursp(), n); push(); n = 15; } gen_hash(s, tree->cdr->car->cdr, VAL, 0); if (n < 14) { n++; } else { pop_n(2); genop_2(s, OP_ARYPUSH, cursp(), 1); } push(); } } if (rhs) { codegen(s, rhs, VAL); pop(); } else { gen_move(s, cursp(), sp, 0); } if (val) { gen_move(s, top, cursp(), 1); } if (n < 14) { n++; } else { pop(); genop_2(s, OP_ARYPUSH, cursp(), 1); } s->sp = call; if (mid == MRB_OPSYM_2(s->mrb, aref) && n == 2) { genop_1(s, OP_SETIDX, cursp()); } else { genop_3(s, noself ? OP_SSEND : OP_SEND, cursp(), new_sym(s, attrsym(s, mid)), n); } if (safe) { dispatch(s, skip); } s->sp = top; } break; case NODE_MASGN: gen_vmassignment(s, tree->car, sp, val); break; /* splat without assignment */ case NODE_NIL: break; default: codegen_error(s, "unknown lhs"); break; } if (val) push(); }
236078569306136776334536654321578023921
codegen.c
69656694646846748382204460208931803734
CWE-125
CVE-2022-0717
Out-of-bounds Read in GitHub repository mruby/mruby prior to 3.2.
https://nvd.nist.gov/vuln/detail/CVE-2022-0717
253,515
mruby
f72315575f78a9a773adbce0ee7d3ec33434cb76
https://github.com/mruby/mruby
https://github.com/mruby/mruby/commit/f72315575f78a9a773adbce0ee7d3ec33434cb76
codegen.c: fix an argument generation bug in array assignment.
0
gen_assignment(codegen_scope *s, node *tree, node *rhs, int sp, int val) { int idx; int type = nint(tree->car); switch (type) { case NODE_GVAR: case NODE_ARG: case NODE_LVAR: case NODE_IVAR: case NODE_CVAR: case NODE_CONST: case NODE_NIL: case NODE_MASGN: if (rhs) { codegen(s, rhs, VAL); pop(); sp = cursp(); } break; case NODE_COLON2: case NODE_CALL: case NODE_SCALL: /* keep evaluation order */ break; case NODE_NVAR: codegen_error(s, "Can't assign to numbered parameter"); break; default: codegen_error(s, "unknown lhs"); break; } tree = tree->cdr; switch (type) { case NODE_GVAR: gen_setxv(s, OP_SETGV, sp, nsym(tree), val); break; case NODE_ARG: case NODE_LVAR: idx = lv_idx(s, nsym(tree)); if (idx > 0) { if (idx != sp) { gen_move(s, idx, sp, val); } break; } else { /* upvar */ gen_setupvar(s, sp, nsym(tree)); } break; case NODE_IVAR: gen_setxv(s, OP_SETIV, sp, nsym(tree), val); break; case NODE_CVAR: gen_setxv(s, OP_SETCV, sp, nsym(tree), val); break; case NODE_CONST: gen_setxv(s, OP_SETCONST, sp, nsym(tree), val); break; case NODE_COLON2: if (sp) { gen_move(s, cursp(), sp, 0); } sp = cursp(); push(); codegen(s, tree->car, VAL); if (rhs) { codegen(s, rhs, VAL); pop(); gen_move(s, sp, cursp(), 0); } pop_n(2); idx = new_sym(s, nsym(tree->cdr)); genop_2(s, OP_SETMCNST, sp, idx); break; case NODE_CALL: case NODE_SCALL: { int noself = 0, safe = (type == NODE_SCALL), skip = 0, top, call, n = 0; mrb_sym mid = nsym(tree->cdr->car); top = cursp(); if (val || sp == cursp()) { push(); /* room for retval */ } call = cursp(); if (!tree->car) { noself = 1; push(); } else { codegen(s, tree->car, VAL); /* receiver */ } if (safe) { int recv = cursp()-1; gen_move(s, cursp(), recv, 1); skip = genjmp2_0(s, OP_JMPNIL, cursp(), val); } tree = tree->cdr->cdr->car; if (tree) { if (tree->car) { /* positional arguments */ n = gen_values(s, tree->car, VAL, (tree->cdr->car)?13:14); if (n < 0) { /* variable length */ n = 15; push(); } } if (tree->cdr->car) { /* keyword arguments */ if (n == 14) { pop_n(n); genop_2(s, OP_ARRAY, cursp(), n); push(); n = 15; } gen_hash(s, tree->cdr->car->cdr, VAL, 0); if (n < 14) { n++; } else { pop_n(2); genop_2(s, OP_ARYPUSH, cursp(), 1); } push(); } } if (rhs) { codegen(s, rhs, VAL); pop(); } else { gen_move(s, cursp(), sp, 0); } if (val) { gen_move(s, top, cursp(), 1); } if (n < 15) { n++; if (n == 15) { pop_n(14); genop_2(s, OP_ARRAY, cursp(), 15); } } else { pop(); genop_2(s, OP_ARYPUSH, cursp(), 1); } s->sp = call; if (mid == MRB_OPSYM_2(s->mrb, aref) && n == 2) { genop_1(s, OP_SETIDX, cursp()); } else { genop_3(s, noself ? OP_SSEND : OP_SEND, cursp(), new_sym(s, attrsym(s, mid)), n); } if (safe) { dispatch(s, skip); } s->sp = top; } break; case NODE_MASGN: gen_vmassignment(s, tree->car, sp, val); break; /* splat without assignment */ case NODE_NIL: break; default: codegen_error(s, "unknown lhs"); break; } if (val) push(); }
24141483316666647152319474982009169945
None
CWE-125
CVE-2022-0717
Out-of-bounds Read in GitHub repository mruby/mruby prior to 3.2.
https://nvd.nist.gov/vuln/detail/CVE-2022-0717
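For the mruby pair, the patch changes how gen_assignment counts call arguments when appending the assignment's right-hand side: once the count reaches 15 (the bytecode's "arguments packed into a single array" sentinel), the 14 loose argument registers plus the RHS are first collapsed into one array register via OP_ARRAY. The vulnerable version bumped the count to 15 without packing, so the VM later read registers that did not hold the promised array, producing the out-of-bounds read. The toy model below only illustrates that invariant and is not mruby code; ToyStack, appendRhsArgument, and the array marker are invented for the sketch.

#include <cstdio>
#include <vector>

struct ToyStack {
  std::vector<int> regs;               // simulated VM registers
  void push(int v) { regs.push_back(v); }
  // Collapse the top `count` registers into one "array" register.
  void pack_array(int count) {
    regs.erase(regs.end() - count, regs.end());
    regs.push_back(/*array marker*/ -1);
  }
};

// Append one more argument (the assignment RHS) to a call that already has
// `n` arguments on the stack, keeping the n<=14 / n==15 invariant intact.
int appendRhsArgument(ToyStack& st, int n, int rhs) {
  st.push(rhs);
  if (n < 15) {
    n++;
    if (n == 15) {
      // Reached the variable-length form: the 15 loose values (14 earlier
      // arguments plus the RHS) must live in a single array register.
      st.pack_array(15);
    }
  } else {
    // Already in array form: fold the RHS into the existing array register
    // (stand-in for OP_ARYPUSH).
    st.regs.pop_back();
  }
  return n;
}

int main() {
  ToyStack st;
  for (int i = 0; i < 14; i++) st.push(i);   // 14 positional arguments
  int n = appendRhsArgument(st, 14, 99);     // adding the RHS must pack
  std::printf("n = %d, registers used = %zu\n", n, st.regs.size());
  return 0;
}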