\r
--------------------------\r
Changes in 1.9 (not yet released)\r
+- Fix CPLYMeshFileLoader checking for wrong vertex count when switching between 16/32 bit. Thanks @randomMesh for reporting.\r
- Fix bug that AnimatedMeshSceneNode ignored ReadOnlyMaterials flag when checking materials for transparent render passes.\r
- Unify checks if materials should use transparent render pass with new IVideoDriver::needsTransparentRenderPass function.\r
- Material.ZWriteEnable is now of type E_ZWRITE instead of bool. This allows now setting materials to always "on" independent of material type and transparency.\r
++i;\r
if ( mipSize.Width == 1 && mipSize.Height == 1 && i < mipLevel)\r
return 0;\r
- } \r
+ }\r
\r
return MipMapsData + dataSize;\r
}\r
- \r
+\r
return 0;\r
}\r
\r
return imageSize;\r
}\r
\r
+// Define to check for all compressed image formats cases in a switch\r
+#define IRR_CASE_IIMAGE_COMPRESSED_FORMAT\\r
+ case ECF_DXT1:\\r
+ case ECF_DXT2:\\r
+ case ECF_DXT3:\\r
+ case ECF_DXT4:\\r
+ case ECF_DXT5:\\r
+ case ECF_PVRTC_RGB2:\\r
+ case ECF_PVRTC_ARGB2:\\r
+ case ECF_PVRTC2_ARGB2:\\r
+ case ECF_PVRTC_RGB4:\\r
+ case ECF_PVRTC_ARGB4:\\r
+ case ECF_PVRTC2_ARGB4:\\r
+ case ECF_ETC1:\\r
+ case ECF_ETC2_RGB:\\r
+ case ECF_ETC2_ARGB:\r
+\r
//! check if this is compressed color format\r
static bool isCompressedFormat(const ECOLOR_FORMAT format)\r
{\r
switch(format)\r
{\r
- case ECF_DXT1:\r
- case ECF_DXT2:\r
- case ECF_DXT3:\r
- case ECF_DXT4:\r
- case ECF_DXT5:\r
- case ECF_PVRTC_RGB2:\r
- case ECF_PVRTC_ARGB2:\r
- case ECF_PVRTC2_ARGB2:\r
- case ECF_PVRTC_RGB4:\r
- case ECF_PVRTC_ARGB4:\r
- case ECF_PVRTC2_ARGB4:\r
- case ECF_ETC1:\r
- case ECF_ETC2_RGB:\r
- case ECF_ETC2_ARGB:\r
+ IRR_CASE_IIMAGE_COMPRESSED_FORMAT\r
return true;\r
default:\r
return false;\r
case ECF_R8G8B8:\r
convert_A1R5G5B5toR8G8B8(sP, sN, dP);\r
break;\r
+ IRR_CASE_IIMAGE_COMPRESSED_FORMAT\r
+ os::Printer::log("CColorConverter::convert_viaFormat method doesn't support compressed images.", ELL_WARNING);\r
+ break;\r
+#ifndef _DEBUG\r
default:\r
break;\r
+#endif\r
}\r
break;\r
case ECF_R5G6B5:\r
case ECF_R8G8B8:\r
convert_R5G6B5toR8G8B8(sP, sN, dP);\r
break;\r
+ IRR_CASE_IIMAGE_COMPRESSED_FORMAT\r
+ os::Printer::log("CColorConverter::convert_viaFormat method doesn't support compressed images.", ELL_WARNING);\r
+ break;\r
+#ifndef _DEBUG\r
default:\r
break;\r
+#endif\r
}\r
break;\r
case ECF_A8R8G8B8:\r
case ECF_R8G8B8:\r
convert_A8R8G8B8toR8G8B8(sP, sN, dP);\r
break;\r
+ IRR_CASE_IIMAGE_COMPRESSED_FORMAT\r
+ os::Printer::log("CColorConverter::convert_viaFormat method doesn't support compressed images.", ELL_WARNING);\r
+ break;\r
+#ifndef _DEBUG\r
default:\r
break;\r
+#endif\r
}\r
break;\r
case ECF_R8G8B8:\r
case ECF_R8G8B8:\r
convert_R8G8B8toR8G8B8(sP, sN, dP);\r
break;\r
+ IRR_CASE_IIMAGE_COMPRESSED_FORMAT\r
+ os::Printer::log("CColorConverter::convert_viaFormat method doesn't support compressed images.", ELL_WARNING);\r
+ break;\r
+#ifndef _DEBUG\r
default:\r
break;\r
+#endif\r
}\r
break;\r
+ IRR_CASE_IIMAGE_COMPRESSED_FORMAT\r
+ os::Printer::log("CColorConverter::convert_viaFormat method doesn't support compressed images.", ELL_WARNING);\r
+ break;\r
#ifndef _DEBUG\r
default:\r
break;\r
const io::path& name,\r
const ECOLOR_FORMAT format)\r
{\r
+ if ( IImage::isCompressedFormat(format) )\r
+ return 0;\r
+\r
CD3D9Texture* tex = new CD3D9Texture(this, size, name, ETT_2D, format);\r
if (tex)\r
{\r
ITexture* CD3D9Driver::addRenderTargetTextureCubemap(const irr::u32 sideLen,\r
const io::path& name, const ECOLOR_FORMAT format)\r
{\r
+ if ( IImage::isCompressedFormat(format) )\r
+ return 0;\r
+\r
CD3D9Texture* tex = new CD3D9Texture(this, core::dimension2d<u32>(sideLen, sideLen), name, ETT_CUBEMAP, format);\r
if (tex)\r
{\r
//! sets a pixel\r
void CImage::setPixel(u32 x, u32 y, const SColor &color, bool blend)\r
{\r
- if (IImage::isCompressedFormat(Format))\r
- {\r
- os::Printer::log("IImage::setPixel method doesn't work with compressed images.", ELL_WARNING);\r
- return;\r
- }\r
-\r
if (x >= Size.Width || y >= Size.Height)\r
return;\r
\r
u32 * dest = (u32*) (Data + ( y * Pitch ) + ( x << 2 ));\r
*dest = blend ? PixelBlend32 ( *dest, color.color ) : color.color;\r
} break;\r
+\r
+ IRR_CASE_IIMAGE_COMPRESSED_FORMAT\r
+ os::Printer::log("IImage::setPixel method doesn't work with compressed images.", ELL_WARNING);\r
+ return;\r
+\r
+ case ECF_UNKNOWN:\r
+ os::Printer::log("IImage::setPixel unknown format.", ELL_WARNING);\r
+ return;\r
+\r
default:\r
break;\r
}\r
//! returns a pixel\r
SColor CImage::getPixel(u32 x, u32 y) const\r
{\r
- if (IImage::isCompressedFormat(Format))\r
- {\r
- os::Printer::log("IImage::getPixel method doesn't work with compressed images.", ELL_WARNING);\r
- return SColor(0);\r
- }\r
-\r
if (x >= Size.Width || y >= Size.Height)\r
return SColor(0);\r
\r
u8* p = Data+(y*3)*Size.Width + (x*3);\r
return SColor(255,p[0],p[1],p[2]);\r
}\r
+\r
+ IRR_CASE_IIMAGE_COMPRESSED_FORMAT\r
+ os::Printer::log("IImage::getPixel method doesn't work with compressed images.", ELL_WARNING);\r
+ break;\r
+\r
+ case ECF_UNKNOWN:\r
+ os::Printer::log("IImage::getPixel unknown format.", ELL_WARNING);\r
+ break;\r
+\r
default:\r
break;\r
}\r
return;\r
}\r
\r
- if ( !Blit(BLITTER_TEXTURE, target, 0, &pos, this, 0, 0) \r
+ if ( !Blit(BLITTER_TEXTURE, target, 0, &pos, this, 0, 0)\r
&& target && pos.X == 0 && pos.Y == 0 &&\r
CColorConverter::canConvertFormat(Format, target->getColorFormat()) )\r
{\r
#include "IWriteFile.h"\r
#include "CColorConverter.h"\r
#include "irrString.h"\r
+#include "os.h"\r
\r
namespace irr\r
{\r
= CColorConverter::convert_R5G6B5toR8G8B8;\r
break;\r
default:\r
+ os::Printer::log("CImageWriterBMP does not support image format", ColorFormatNames[image->getColorFormat()], ELL_WARNING);\r
break;\r
}\r
\r
#include "IWriteFile.h"\r
#include "CImage.h"\r
#include "irrString.h"\r
+#include "os.h"\r
\r
#ifdef _IRR_COMPILE_WITH_LIBJPEG_\r
#include <stdio.h> // required for jpeglib.h\r
format = CColorConverter::convert_R5G6B5toR8G8B8;\r
break;\r
default:\r
+ os::Printer::log("writeJPEGFile does not support image format", ColorFormatNames[image->getColorFormat()], ELL_WARNING);\r
break;\r
}\r
\r
break;\r
// TODO: Error handling in case of unsupported color format\r
default:\r
- break;\r
+ os::Printer::log("CImageWriterPNG does not support image format", ColorFormatNames[image->getColorFormat()], ELL_WARNING);\r
+ png_destroy_write_struct(&png_ptr, &info_ptr);\r
+ delete [] tmpImage;\r
+ return false;\r
}\r
\r
// Create array of pointers to rows in image data\r
#include "IWriteFile.h"\r
#include "CColorConverter.h"\r
#include "irrString.h"\r
+#include "os.h"\r
\r
namespace irr\r
{\r
imageHeader.ImageDescriptor |= 0;\r
break;\r
default:\r
+ os::Printer::log("CImageWriterTGA does not support image format", ColorFormatNames[image->getColorFormat()], ELL_WARNING);\r
break;\r
}\r
\r
AssignedDepth = false;\r
AssignedStencil = false;\r
}\r
+#ifdef _DEBUG\r
+ Driver->testGLError(__LINE__);\r
+#endif\r
\r
RequestDepthStencilUpdate = false;\r
}\r
statesCache.IsCached = false;\r
\r
#ifdef GL_VERSION_2_1\r
- if (Version >= 210)\r
+ if (Version >= 201)\r
{\r
if (!statesCache.IsCached || material.TextureLayer[i].LODBias != statesCache.LODBias)\r
{\r
ITexture* COpenGLDriver::addRenderTargetTexture(const core::dimension2d<u32>& size,\r
const io::path& name, const ECOLOR_FORMAT format)\r
{\r
+ if ( IImage::isCompressedFormat(format) )\r
+ return 0;\r
+\r
//disable mip-mapping\r
bool generateMipLevels = getTextureCreationFlag(ETCF_CREATE_MIP_MAPS);\r
setTextureCreationFlag(ETCF_CREATE_MIP_MAPS, false);\r
//! Creates a render target texture for a cubemap\r
ITexture* COpenGLDriver::addRenderTargetTextureCubemap(const irr::u32 sideLen, const io::path& name, const ECOLOR_FORMAT format)\r
{\r
+ if ( IImage::isCompressedFormat(format) )\r
+ return 0;\r
+\r
//disable mip-mapping\r
bool generateMipLevels = getTextureCreationFlag(ETCF_CREATE_MIP_MAPS);\r
setTextureCreationFlag(ETCF_CREATE_MIP_MAPS, false);\r
pixelType = GL_UNSIGNED_INT_8_8_8_8_REV;\r
break;\r
case ECF_DXT1:\r
- supported = true;\r
- internalFormat = GL_COMPRESSED_RGBA_S3TC_DXT1_EXT;\r
- pixelFormat = GL_BGRA_EXT;\r
- pixelType = GL_COMPRESSED_RGBA_S3TC_DXT1_EXT;\r
+ if (queryOpenGLFeature(COpenGLExtensionHandler::IRR_EXT_texture_compression_s3tc))\r
+ {\r
+ supported = true;\r
+ internalFormat = GL_COMPRESSED_RGBA_S3TC_DXT1_EXT;\r
+ pixelFormat = GL_BGRA_EXT;\r
+ pixelType = GL_COMPRESSED_RGBA_S3TC_DXT1_EXT;\r
+ }\r
break;\r
case ECF_DXT2:\r
case ECF_DXT3:\r
pGlGetUniformLocationARB(0), pGlGetUniformLocation(0),\r
pGlUniform1fvARB(0), pGlUniform2fvARB(0), pGlUniform3fvARB(0), pGlUniform4fvARB(0),\r
pGlUniform1ivARB(0), pGlUniform2ivARB(0), pGlUniform3ivARB(0), pGlUniform4ivARB(0),\r
- pGlUniformMatrix2fvARB(0), pGlUniformMatrix3fvARB(0), pGlUniformMatrix4fvARB(0),\r
+ pGlUniformMatrix2fvARB(0), pGlUniformMatrix2x3fv(0), pGlUniformMatrix2x4fv(0),\r
+ pGlUniformMatrix3x2fv(0), pGlUniformMatrix3fvARB(0), pGlUniformMatrix3x4fv(0),\r
+ pGlUniformMatrix4x2fv(0), pGlUniformMatrix4x3fv(0), pGlUniformMatrix4fvARB(0),\r
pGlGetActiveUniformARB(0), pGlGetActiveUniform(0),\r
pGlPointParameterfARB(0), pGlPointParameterfvARB(0),\r
pGlStencilFuncSeparate(0), pGlStencilOpSeparate(0),\r
pGlBlendFuncIndexedAMD(0), pGlBlendFunciARB(0), pGlBlendFuncSeparateIndexedAMD(0), pGlBlendFuncSeparateiARB(0),\r
pGlBlendEquationIndexedAMD(0), pGlBlendEquationiARB(0), pGlBlendEquationSeparateIndexedAMD(0), pGlBlendEquationSeparateiARB(0),\r
// DSA\r
- pGlTextureStorage2D(0), pGlTextureStorage3D(0), pGlTextureSubImage2D(0), pGlNamedFramebufferTexture(0),\r
+ pGlTextureStorage2D(0), pGlTextureStorage3D(0), pGlTextureSubImage2D(0), pGlGetTextureImage(0), pGlNamedFramebufferTexture(0),\r
pGlTextureParameteri(0), pGlCreateTextures(0), pGlCreateFramebuffers(0), pGlBindTextures(0), pGlGenerateTextureMipmap(0),\r
// DSA with EXT or functions to simulate it\r
- pGlTextureSubImage2DEXT(0), pGlTextureStorage2DEXT(0), pGlTexStorage2D(0), pGlTextureStorage3DEXT(0),\r
- pGlTexStorage3D(0), pGlNamedFramebufferTextureEXT(0), pGlFramebufferTexture(0), pGlGenerateTextureMipmapEXT(0)\r
+ pGlTextureStorage2DEXT(0), pGlTexStorage2D(0), pGlTextureStorage3DEXT(0), pGlTexStorage3D(0), pGlTextureSubImage2DEXT(0), pGlGetTextureImageEXT(0),\r
+ pGlNamedFramebufferTextureEXT(0), pGlFramebufferTexture(0), pGlGenerateTextureMipmapEXT(0)\r
#if defined(GLX_SGI_swap_control)\r
,pGlxSwapIntervalSGI(0)\r
#endif\r
pGlUniform3ivARB = (PFNGLUNIFORM3IVARBPROC) IRR_OGL_LOAD_EXTENSION("glUniform3ivARB");\r
pGlUniform4ivARB = (PFNGLUNIFORM4IVARBPROC) IRR_OGL_LOAD_EXTENSION("glUniform4ivARB");\r
pGlUniformMatrix2fvARB = (PFNGLUNIFORMMATRIX2FVARBPROC) IRR_OGL_LOAD_EXTENSION("glUniformMatrix2fvARB");\r
+ pGlUniformMatrix2x3fv = (PFNGLUNIFORMMATRIX2X3FVPROC) IRR_OGL_LOAD_EXTENSION("glUniformMatrix2x3fv");\r
+ pGlUniformMatrix2x4fv = (PFNGLUNIFORMMATRIX2X4FVPROC)IRR_OGL_LOAD_EXTENSION("glUniformMatrix2x4fv");\r
+ pGlUniformMatrix3x2fv = (PFNGLUNIFORMMATRIX3X2FVPROC)IRR_OGL_LOAD_EXTENSION("glUniformMatrix3x2fv");\r
pGlUniformMatrix3fvARB = (PFNGLUNIFORMMATRIX3FVARBPROC) IRR_OGL_LOAD_EXTENSION("glUniformMatrix3fvARB");\r
+ pGlUniformMatrix3x4fv = (PFNGLUNIFORMMATRIX3X4FVPROC)IRR_OGL_LOAD_EXTENSION("glUniformMatrix3x4fv");\r
+ pGlUniformMatrix4x2fv = (PFNGLUNIFORMMATRIX4X2FVPROC)IRR_OGL_LOAD_EXTENSION("glUniformMatrix4x2fv");\r
+ pGlUniformMatrix4x3fv = (PFNGLUNIFORMMATRIX4X3FVPROC)IRR_OGL_LOAD_EXTENSION("glUniformMatrix4x3fv");\r
pGlUniformMatrix4fvARB = (PFNGLUNIFORMMATRIX4FVARBPROC) IRR_OGL_LOAD_EXTENSION("glUniformMatrix4fvARB");\r
pGlGetActiveUniformARB = (PFNGLGETACTIVEUNIFORMARBPROC) IRR_OGL_LOAD_EXTENSION("glGetActiveUniformARB");\r
pGlGetActiveUniform = (PFNGLGETACTIVEUNIFORMPROC) IRR_OGL_LOAD_EXTENSION("glGetActiveUniform");\r
pGlBlendEquationSeparateIndexedAMD = (PFNGLBLENDEQUATIONSEPARATEINDEXEDAMDPROC) IRR_OGL_LOAD_EXTENSION("glBlendEquationSeparateIndexedAMD");\r
pGlBlendEquationSeparateiARB = (PFNGLBLENDEQUATIONSEPARATEIPROC) IRR_OGL_LOAD_EXTENSION("glBlendEquationSeparateiARB");\r
\r
- pGlTextureSubImage2D = (PFNGLTEXTURESUBIMAGE2DPROC)IRR_OGL_LOAD_EXTENSION("glTextureSubImage2D");\r
pGlTextureStorage2D = (PFNGLTEXTURESTORAGE2DPROC) IRR_OGL_LOAD_EXTENSION("glTextureStorage2D");\r
pGlTextureStorage3D = (PFNGLTEXTURESTORAGE3DPROC) IRR_OGL_LOAD_EXTENSION("glTextureStorage3D");\r
+ pGlTextureSubImage2D = (PFNGLTEXTURESUBIMAGE2DPROC)IRR_OGL_LOAD_EXTENSION("glTextureSubImage2D");\r
+ pGlGetTextureImage = (PFNGLGETTEXTUREIMAGEPROC)IRR_OGL_LOAD_EXTENSION("glGetTextureImage");\r
pGlNamedFramebufferTexture = (PFNGLNAMEDFRAMEBUFFERTEXTUREPROC) IRR_OGL_LOAD_EXTENSION("glNamedFramebufferTexture");\r
pGlTextureParameteri = (PFNGLTEXTUREPARAMETERIPROC) IRR_OGL_LOAD_EXTENSION("glTextureParameteri");\r
pGlCreateTextures = (PFNGLCREATETEXTURESPROC) IRR_OGL_LOAD_EXTENSION("glCreateTextures");\r
pGlBindTextures = (PFNGLBINDTEXTURESPROC) IRR_OGL_LOAD_EXTENSION("glBindTextures");\r
pGlGenerateTextureMipmap = (PFNGLGENERATETEXTUREMIPMAPPROC) IRR_OGL_LOAD_EXTENSION("glGenerateTextureMipmap");\r
//==============================\r
- pGlTextureSubImage2DEXT = (PFNGLTEXTURESUBIMAGE2DEXTPROC)IRR_OGL_LOAD_EXTENSION("glTextureSubImage2DEXT");\r
pGlTextureStorage2DEXT = (PFNGLTEXTURESTORAGE2DEXTPROC)IRR_OGL_LOAD_EXTENSION("glTextureStorage2DEXT");\r
pGlTexStorage2D = (PFNGLTEXSTORAGE2DPROC)IRR_OGL_LOAD_EXTENSION("glTexStorage2D");\r
pGlTextureStorage3DEXT = (PFNGLTEXTURESTORAGE3DEXTPROC)IRR_OGL_LOAD_EXTENSION("glTextureStorage3DEXT");\r
pGlTexStorage3D = (PFNGLTEXSTORAGE3DPROC)IRR_OGL_LOAD_EXTENSION("glTexStorage3D");\r
+ pGlTextureSubImage2DEXT = (PFNGLTEXTURESUBIMAGE2DEXTPROC)IRR_OGL_LOAD_EXTENSION("glTextureSubImage2DEXT");\r
+ pGlGetTextureImageEXT = (PFNGLGETTEXTUREIMAGEEXTPROC)IRR_OGL_LOAD_EXTENSION("glGetTextureImageEXT");\r
pGlNamedFramebufferTextureEXT = (PFNGLNAMEDFRAMEBUFFERTEXTUREEXTPROC)IRR_OGL_LOAD_EXTENSION("glNamedFramebufferTextureEXT");\r
pGlFramebufferTexture = (PFNGLFRAMEBUFFERTEXTUREPROC)IRR_OGL_LOAD_EXTENSION("glFramebufferTexture");\r
pGlActiveTexture = (PFNGLACTIVETEXTUREPROC)IRR_OGL_LOAD_EXTENSION("glActiveTexture");\r
#endif\r
OcclusionQuerySupport=false;\r
\r
- Feature.BlendOperation = (Version >= 140) ||\r
+ Feature.BlendOperation = (Version >= 104) ||\r
FeatureAvailable[IRR_EXT_blend_minmax] ||\r
FeatureAvailable[IRR_EXT_blend_subtract] ||\r
FeatureAvailable[IRR_EXT_blend_logic_op];\r
return FeatureAvailable[IRR_ARB_occlusion_query] && OcclusionQuerySupport;\r
case EVDF_POLYGON_OFFSET:\r
// both features supported with OpenGL 1.1\r
- return Version>=110;\r
+ return Version>=101;\r
case EVDF_BLEND_OPERATIONS:\r
return Feature.BlendOperation;\r
case EVDF_BLEND_SEPARATE:\r
- return (Version>=140) || FeatureAvailable[IRR_EXT_blend_func_separate];\r
+ return (Version>=104) || FeatureAvailable[IRR_EXT_blend_func_separate];\r
case EVDF_TEXTURE_MATRIX:\r
return true;\r
case EVDF_TEXTURE_COMPRESSED_DXT:\r
return FeatureAvailable[IRR_EXT_texture_compression_s3tc];\r
case EVDF_TEXTURE_CUBEMAP:\r
- return (Version >= 130) || FeatureAvailable[IRR_ARB_texture_cube_map] || FeatureAvailable[IRR_EXT_texture_cube_map];\r
+ return (Version >= 103) || FeatureAvailable[IRR_ARB_texture_cube_map] || FeatureAvailable[IRR_EXT_texture_cube_map];\r
case EVDF_TEXTURE_CUBEMAP_SEAMLESS:\r
return FeatureAvailable[IRR_ARB_seamless_cube_map];\r
case EVDF_DEPTH_CLAMP:\r
"GL_ARB_depth_buffer_float",\r
"GL_ARB_depth_clamp",\r
"GL_ARB_depth_texture",\r
+ "GL_ARB_direct_state_access",\r
"GL_ARB_draw_buffers",\r
"GL_ARB_draw_buffers_blend",\r
"GL_ARB_draw_elements_base_vertex",\r
IRR_ARB_depth_buffer_float,\r
IRR_ARB_depth_clamp,\r
IRR_ARB_depth_texture,\r
+ IRR_ARB_direct_state_access,\r
IRR_ARB_draw_buffers,\r
IRR_ARB_draw_buffers_blend,\r
IRR_ARB_draw_elements_base_vertex,\r
void extGlUniform3iv(GLint loc, GLsizei count, const GLint *v);\r
void extGlUniform4iv(GLint loc, GLsizei count, const GLint *v);\r
void extGlUniformMatrix2fv(GLint loc, GLsizei count, GLboolean transpose, const GLfloat *v);\r
+ void extGlUniformMatrix2x3fv(GLint loc, GLsizei count, GLboolean transpose, const GLfloat *v);\r
+ void extGlUniformMatrix2x4fv(GLint loc, GLsizei count, GLboolean transpose, const GLfloat *v);\r
+ void extGlUniformMatrix3x2fv(GLint loc, GLsizei count, GLboolean transpose, const GLfloat *v);\r
void extGlUniformMatrix3fv(GLint loc, GLsizei count, GLboolean transpose, const GLfloat *v);\r
+ void extGlUniformMatrix3x4fv(GLint loc, GLsizei count, GLboolean transpose, const GLfloat *v);\r
+ void extGlUniformMatrix4x2fv(GLint loc, GLsizei count, GLboolean transpose, const GLfloat *v);\r
+ void extGlUniformMatrix4x3fv(GLint loc, GLsizei count, GLboolean transpose, const GLfloat *v);\r
void extGlUniformMatrix4fv(GLint loc, GLsizei count, GLboolean transpose, const GLfloat *v);\r
void extGlGetActiveUniformARB(GLhandleARB program, GLuint index, GLsizei maxlength, GLsizei *length, GLint *size, GLenum *type, GLcharARB *name);\r
void extGlGetActiveUniform(GLuint program, GLuint index, GLsizei maxlength, GLsizei *length, GLint *size, GLenum *type, GLchar *name);\r
void extGlTextureSubImage2D(GLuint texture, GLenum target, GLint level, GLint xoffset, GLint yoffset, GLsizei width, GLsizei height, GLenum format, GLenum type, const void* pixels);\r
void extGlTextureStorage2D(GLuint texture, GLenum target, GLsizei levels, GLenum internalformat, GLsizei width, GLsizei height);\r
void extGlTextureStorage3D(GLuint texture, GLenum target, GLsizei levels, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth);\r
+ void extGlGetTextureImage(GLuint texture, GLenum target, GLint level, GLenum format, GLenum type, GLsizei bufSize, void* pixels);\r
void extGlNamedFramebufferTexture(GLuint framebuffer, GLenum attachment, GLuint texture, GLint level);\r
void extGlTextureParameteri(GLuint texture, GLenum pname, GLint param);\r
void extGlCreateTextures(GLenum target, GLsizei n, GLuint* textures);\r
PFNGLUNIFORM3IVARBPROC pGlUniform3ivARB;\r
PFNGLUNIFORM4IVARBPROC pGlUniform4ivARB;\r
PFNGLUNIFORMMATRIX2FVARBPROC pGlUniformMatrix2fvARB;\r
+ PFNGLUNIFORMMATRIX2X3FVPROC pGlUniformMatrix2x3fv;\r
+ PFNGLUNIFORMMATRIX2X4FVPROC pGlUniformMatrix2x4fv;\r
+ PFNGLUNIFORMMATRIX3X2FVPROC pGlUniformMatrix3x2fv;\r
PFNGLUNIFORMMATRIX3FVARBPROC pGlUniformMatrix3fvARB;\r
+ PFNGLUNIFORMMATRIX3X4FVPROC pGlUniformMatrix3x4fv;\r
+ PFNGLUNIFORMMATRIX4X2FVPROC pGlUniformMatrix4x2fv;\r
+ PFNGLUNIFORMMATRIX4X3FVPROC pGlUniformMatrix4x3fv;\r
PFNGLUNIFORMMATRIX4FVARBPROC pGlUniformMatrix4fvARB;\r
PFNGLGETACTIVEUNIFORMARBPROC pGlGetActiveUniformARB;\r
PFNGLGETACTIVEUNIFORMPROC pGlGetActiveUniform;\r
PFNGLTEXTURESTORAGE2DPROC pGlTextureStorage2D;\r
PFNGLTEXTURESTORAGE3DPROC pGlTextureStorage3D;\r
PFNGLTEXTURESUBIMAGE2DPROC pGlTextureSubImage2D;\r
+ PFNGLGETTEXTUREIMAGEPROC pGlGetTextureImage;\r
PFNGLNAMEDFRAMEBUFFERTEXTUREPROC pGlNamedFramebufferTexture;\r
PFNGLTEXTUREPARAMETERIPROC pGlTextureParameteri;\r
PFNGLCREATETEXTURESPROC pGlCreateTextures;\r
PFNGLBINDTEXTURESPROC pGlBindTextures;\r
PFNGLGENERATETEXTUREMIPMAPPROC pGlGenerateTextureMipmap;\r
// DSA with EXT or functions to simulate it\r
- PFNGLTEXTURESUBIMAGE2DEXTPROC pGlTextureSubImage2DEXT;\r
PFNGLTEXTURESTORAGE2DEXTPROC pGlTextureStorage2DEXT;\r
PFNGLTEXSTORAGE2DPROC pGlTexStorage2D;\r
PFNGLTEXTURESTORAGE3DEXTPROC pGlTextureStorage3DEXT;\r
PFNGLTEXSTORAGE3DPROC pGlTexStorage3D;\r
+ PFNGLTEXTURESUBIMAGE2DEXTPROC pGlTextureSubImage2DEXT;\r
+ PFNGLGETTEXTUREIMAGEEXTPROC pGlGetTextureImageEXT;\r
PFNGLNAMEDFRAMEBUFFERTEXTUREEXTPROC pGlNamedFramebufferTextureEXT;\r
PFNGLFRAMEBUFFERTEXTUREPROC pGlFramebufferTexture;\r
PFNGLGENERATETEXTUREMIPMAPEXTPROC pGlGenerateTextureMipmapEXT;\r
#endif\r
}\r
\r
+inline void COpenGLExtensionHandler::extGlUniformMatrix2x3fv(GLint loc, GLsizei count, GLboolean transpose, const GLfloat* v)\r
+{\r
+#ifdef _IRR_OPENGL_USE_EXTPOINTER_\r
+ if (pGlUniformMatrix2x3fv)\r
+ pGlUniformMatrix2x3fv(loc, count, transpose, v);\r
+ else\r
+ os::Printer::log("glUniformMatrix2x3fv not supported", ELL_ERROR);\r
+#endif\r
+}\r
+\r
+inline void COpenGLExtensionHandler::extGlUniformMatrix2x4fv(GLint loc, GLsizei count, GLboolean transpose, const GLfloat* v)\r
+{\r
+#ifdef _IRR_OPENGL_USE_EXTPOINTER_\r
+ if (pGlUniformMatrix2x4fv)\r
+ pGlUniformMatrix2x4fv(loc, count, transpose, v);\r
+ else\r
+ os::Printer::log("glUniformMatrix2x4fv not supported", ELL_ERROR);\r
+#endif\r
+}\r
+\r
+inline void COpenGLExtensionHandler::extGlUniformMatrix3x2fv(GLint loc, GLsizei count, GLboolean transpose, const GLfloat* v)\r
+{\r
+#ifdef _IRR_OPENGL_USE_EXTPOINTER_\r
+ if (pGlUniformMatrix3x2fv)\r
+ pGlUniformMatrix3x2fv(loc, count, transpose, v);\r
+ else\r
+ os::Printer::log("glUniformMatrix3x2fv not supported", ELL_ERROR);\r
+#endif\r
+}\r
+\r
inline void COpenGLExtensionHandler::extGlUniformMatrix3fv(GLint loc, GLsizei count, GLboolean transpose, const GLfloat *v)\r
{\r
#ifdef _IRR_OPENGL_USE_EXTPOINTER_\r
#endif\r
}\r
\r
+inline void COpenGLExtensionHandler::extGlUniformMatrix3x4fv(GLint loc, GLsizei count, GLboolean transpose, const GLfloat* v)\r
+{\r
+#ifdef _IRR_OPENGL_USE_EXTPOINTER_\r
+ if (pGlUniformMatrix3x4fv)\r
+ pGlUniformMatrix3x4fv(loc, count, transpose, v);\r
+ else\r
+ os::Printer::log("glUniformMatrix3x4fv not supported", ELL_ERROR);\r
+#endif\r
+}\r
+\r
+inline void COpenGLExtensionHandler::extGlUniformMatrix4x2fv(GLint loc, GLsizei count, GLboolean transpose, const GLfloat* v)\r
+{\r
+#ifdef _IRR_OPENGL_USE_EXTPOINTER_\r
+ if (pGlUniformMatrix4x2fv)\r
+ pGlUniformMatrix4x2fv(loc, count, transpose, v);\r
+ else\r
+ os::Printer::log("glUniformMatrix4x2fv not supported", ELL_ERROR);\r
+#endif\r
+}\r
+\r
+inline void COpenGLExtensionHandler::extGlUniformMatrix4x3fv(GLint loc, GLsizei count, GLboolean transpose, const GLfloat* v)\r
+{\r
+#ifdef _IRR_OPENGL_USE_EXTPOINTER_\r
+ if (pGlUniformMatrix4x3fv)\r
+ pGlUniformMatrix4x3fv(loc, count, transpose, v);\r
+ else\r
+ os::Printer::log("glUniformMatrix4x3fv not supported", ELL_ERROR);\r
+#endif\r
+}\r
+\r
inline void COpenGLExtensionHandler::extGlUniformMatrix4fv(GLint loc, GLsizei count, GLboolean transpose, const GLfloat *v)\r
{\r
#ifdef _IRR_OPENGL_USE_EXTPOINTER_\r
\r
inline void COpenGLExtensionHandler::extGlTextureSubImage2D(GLuint texture, GLenum target, GLint level, GLint xoffset, GLint yoffset, GLsizei width, GLsizei height, GLenum format, GLenum type, const void* pixels)\r
{\r
- if (Version>=450)\r
+ if (Version>=405 || FeatureAvailable[IRR_ARB_direct_state_access])\r
{\r
#ifdef _IRR_OPENGL_USE_EXTPOINTER_\r
if (pGlTextureSubImage2D)\r
\r
inline void COpenGLExtensionHandler::extGlTextureStorage2D(GLuint texture, GLenum target, GLsizei levels, GLenum internalformat, GLsizei width, GLsizei height)\r
{\r
- if (Version>=450)\r
+ if (Version>=405 || FeatureAvailable[IRR_ARB_direct_state_access])\r
{\r
#ifdef _IRR_OPENGL_USE_EXTPOINTER_\r
if (pGlTextureStorage2D)\r
\r
inline void COpenGLExtensionHandler::extGlTextureStorage3D(GLuint texture, GLenum target, GLsizei levels, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth)\r
{\r
- if (Version>=450)\r
+ if (Version>=405 || FeatureAvailable[IRR_ARB_direct_state_access])\r
{\r
#ifdef _IRR_OPENGL_USE_EXTPOINTER_\r
if (pGlTextureStorage3D)\r
}\r
}\r
\r
+inline void COpenGLExtensionHandler::extGlGetTextureImage(GLuint texture, GLenum target, GLint level, GLenum format, GLenum type, GLsizei bufSize, void* pixels)\r
+{\r
+ if (Version>=405 || FeatureAvailable[IRR_ARB_direct_state_access])\r
+ {\r
+#ifdef _IRR_OPENGL_USE_EXTPOINTER_\r
+ if (pGlGetTextureImage)\r
+ pGlGetTextureImage(texture,level,format,type,bufSize,pixels);\r
+#else\r
+ glGetTextureImage(texture,level,format,type,bufSize,pixels);\r
+#endif // _IRR_OPENGL_USE_EXTPOINTER_\r
+ }\r
+ else if (FeatureAvailable[IRR_EXT_direct_state_access])\r
+ {\r
+#ifdef _IRR_OPENGL_USE_EXTPOINTER_\r
+ if (pGlGetTextureImageEXT)\r
+ pGlGetTextureImageEXT(texture,target,level,format,type,pixels);\r
+#else\r
+ glGetTextureImageEXT(texture,target,level,format,type,pixels);\r
+#endif // _IRR_OPENGL_USE_EXTPOINTER_\r
+ }\r
+ else\r
+ {\r
+ GLint bound;\r
+ switch (target)\r
+ {\r
+ case GL_TEXTURE_2D_ARRAY:\r
+ glGetIntegerv(GL_TEXTURE_BINDING_2D_ARRAY, &bound);\r
+ break;\r
+ case GL_TEXTURE_3D:\r
+ glGetIntegerv(GL_TEXTURE_BINDING_3D, &bound);\r
+ break;\r
+ case GL_TEXTURE_CUBE_MAP_ARRAY:\r
+ glGetIntegerv(GL_TEXTURE_BINDING_CUBE_MAP_ARRAY, &bound);\r
+ break;\r
+ default:\r
+ return;\r
+ }\r
+ glBindTexture(target, texture);\r
+ glGetTexImage(target,level,format,type,pixels);\r
+ glBindTexture(target, bound);\r
+ }\r
+}\r
+\r
inline void COpenGLExtensionHandler::extGlNamedFramebufferTexture(GLuint framebuffer, GLenum attachment, GLuint texture, GLint level)\r
{\r
if (!needsDSAFramebufferHack)\r
{\r
- if (Version>=450)\r
+ if (Version>=405 || FeatureAvailable[IRR_ARB_direct_state_access])\r
{\r
pGlNamedFramebufferTexture(framebuffer, attachment, texture, level);\r
return;\r
\r
inline void COpenGLExtensionHandler::extGlCreateTextures(GLenum target, GLsizei n, GLuint* textures)\r
{\r
- if (Version>=450)\r
+ if (Version>=405)\r
{\r
#ifdef _IRR_OPENGL_USE_EXTPOINTER_\r
if (pGlCreateTextures)\r
{\r
if (!needsDSAFramebufferHack)\r
{\r
- if (Version>=450)\r
+ if (Version>=405)\r
{\r
pGlCreateFramebuffers(n, framebuffers);\r
return;\r
GL_TEXTURE_1D_ARRAY,GL_TEXTURE_2D_ARRAY,GL_TEXTURE_BUFFER, // GL 3.x\r
GL_TEXTURE_CUBE_MAP_ARRAY,GL_TEXTURE_2D_MULTISAMPLE,GL_TEXTURE_2D_MULTISAMPLE_ARRAY}; // GL 4.x\r
\r
- if (Version>=440||FeatureAvailable[IRR_ARB_multi_bind])\r
+ if (Version>=404||FeatureAvailable[IRR_ARB_multi_bind])\r
{\r
#ifdef _IRR_OPENGL_USE_EXTPOINTER_\r
if (pGlBindTextures)\r
\r
inline void COpenGLExtensionHandler::extGlGenerateTextureMipmap(GLuint texture, GLenum target)\r
{\r
- if (Version>=450)\r
+ if (Version>=405 || FeatureAvailable[IRR_ARB_direct_state_access])\r
{\r
#ifdef _IRR_OPENGL_USE_EXTPOINTER_\r
if (pGlGenerateTextureMipmap)\r
case GL_FLOAT_MAT2:\r
Driver->extGlUniformMatrix2fv(UniformInfo[index].location, count/4, false, floats);\r
break;\r
+ case GL_FLOAT_MAT2x3:\r
+ Driver->extGlUniformMatrix2x3fv(UniformInfo[index].location, count/6, false, floats);\r
+ break;\r
+ case GL_FLOAT_MAT2x4:\r
+ Driver->extGlUniformMatrix2x4fv(UniformInfo[index].location, count/8, false, floats);\r
+ break;\r
+ case GL_FLOAT_MAT3x2:\r
+ Driver->extGlUniformMatrix3x2fv(UniformInfo[index].location, count/6, false, floats);\r
+ break;\r
case GL_FLOAT_MAT3:\r
Driver->extGlUniformMatrix3fv(UniformInfo[index].location, count/9, false, floats);\r
break;\r
+ case GL_FLOAT_MAT3x4:\r
+ Driver->extGlUniformMatrix3x4fv(UniformInfo[index].location, count/12, false, floats);\r
+ break;\r
+ case GL_FLOAT_MAT4x2:\r
+ Driver->extGlUniformMatrix4x2fv(UniformInfo[index].location, count/8, false, floats);\r
+ break;\r
+ case GL_FLOAT_MAT4x3:\r
+ Driver->extGlUniformMatrix4x3fv(UniformInfo[index].location, count/12, false, floats);\r
+ break;\r
case GL_FLOAT_MAT4:\r
Driver->extGlUniformMatrix4fv(UniformInfo[index].location, count/16, false, floats);\r
break;\r
{\r
if(floats)\r
{\r
- const GLint id = static_cast<const GLint>(*floats);\r
+ const GLint id = static_cast<GLint>(*floats);\r
Driver->extGlUniform1iv(UniformInfo[index].location, 1, &id);\r
}\r
else\r
if (continueReading)\r
{\r
// create a mesh buffer\r
- CDynamicMeshBuffer *mb = new CDynamicMeshBuffer(video::EVT_STANDARD, vertCount > 65565 ? video::EIT_32BIT : video::EIT_16BIT);\r
+ CDynamicMeshBuffer *mb = new CDynamicMeshBuffer(video::EVT_STANDARD, vertCount > 65536 ? video::EIT_32BIT : video::EIT_16BIT);\r
mb->getVertexBuffer().reallocate(vertCount);\r
mb->getIndexBuffer().reallocate(vertCount);\r
mb->setHardwareMappingHint(EHM_STATIC);\r
aes_32t nr = (kp[45] ^ kp[52] ^ kp[53] ? kp[52] : 14);\r
\r
#ifdef AES_ERR_CHK\r
- if( (nr != 10 || !(kp[0] | kp[3] | kp[4])) \r
+ if( (nr != 10 || !(kp[0] | kp[3] | kp[4]))\r
&& (nr != 12 || !(kp[0] | kp[5] | kp[6]))\r
&& (nr != 14 || !(kp[0] | kp[7] | kp[8])) )\r
return aes_error;\r
round(fwd_rnd, b1, b0, kp + 1 * N_COLS);\r
round(fwd_rnd, b0, b1, kp + 2 * N_COLS);\r
kp += 2 * N_COLS;\r
+ /* Falls through. */\r
case 12:\r
round(fwd_rnd, b1, b0, kp + 1 * N_COLS);\r
round(fwd_rnd, b0, b1, kp + 2 * N_COLS);\r
kp += 2 * N_COLS;\r
+ /* Falls through. */\r
case 10:\r
round(fwd_rnd, b1, b0, kp + 1 * N_COLS);\r
round(fwd_rnd, b0, b1, kp + 2 * N_COLS);\r
const aes_32t *kp = cx->ks + nr * N_COLS;\r
\r
#ifdef AES_ERR_CHK\r
- if( (nr != 10 || !(cx->ks[0] | cx->ks[3] | cx->ks[4])) \r
+ if( (nr != 10 || !(cx->ks[0] | cx->ks[3] | cx->ks[4]))\r
&& (nr != 12 || !(cx->ks[0] | cx->ks[5] | cx->ks[6]))\r
&& (nr != 14 || !(cx->ks[0] | cx->ks[7] | cx->ks[8])) )\r
return aes_error;\r
round(inv_rnd, b1, b0, kp - 1 * N_COLS);\r
round(inv_rnd, b0, b1, kp - 2 * N_COLS);\r
kp -= 2 * N_COLS;\r
+ /* Falls through. */\r
case 12:\r
round(inv_rnd, b1, b0, kp - 1 * N_COLS);\r
round(inv_rnd, b0, b1, kp - 2 * N_COLS);\r
kp -= 2 * N_COLS;\r
+ /* Falls through. */\r
case 10:\r
round(inv_rnd, b1, b0, kp - 1 * N_COLS);\r
round(inv_rnd, b0, b1, kp - 2 * N_COLS);\r
\r
LICENSE TERMS\r
\r
- The free distribution and use of this software in both source and binary \r
+ The free distribution and use of this software in both source and binary\r
form is allowed (with or without changes) provided that:\r
\r
- 1. distributions of this source code include the above copyright \r
+ 1. distributions of this source code include the above copyright\r
notice, this list of conditions and the following disclaimer;\r
\r
2. distributions in binary form include the above copyright\r
notice, this list of conditions and the following disclaimer\r
in the documentation and/or other associated materials;\r
\r
- 3. the copyright holder's name is not used to endorse products \r
- built using this software without specific written permission. \r
+ 3. the copyright holder's name is not used to endorse products\r
+ built using this software without specific written permission.\r
\r
ALTERNATIVELY, provided that this notice is retained in full, this product\r
may be distributed under the terms of the GNU General Public License (GPL),\r
in which case the provisions of the GPL apply INSTEAD OF those given above.\r
- \r
+\r
DISCLAIMER\r
\r
This software is provided 'as is' with no explicit or implied warranties\r
- in respect of its properties, including, but not limited to, correctness \r
+ in respect of its properties, including, but not limited to, correctness\r
and/or fitness for purpose.\r
---------------------------------------------------------------------------\r
Issue Date: 26/08/2003\r
\r
This is a byte oriented version of SHA2 that operates on arrays of bytes\r
stored in memory. This code implements sha256, sha384 and sha512 but the\r
- latter two functions rely on efficient 64-bit integer operations that \r
+ latter two functions rely on efficient 64-bit integer operations that\r
may not be very efficient on 32-bit machines\r
\r
- The sha256 functions use a type 'sha256_ctx' to hold details of the \r
+ The sha256 functions use a type 'sha256_ctx' to hold details of the\r
current hash state and uses the following three calls:\r
\r
void sha256_begin(sha256_ctx ctx[1])\r
- void sha256_hash(const unsigned char data[], \r
+ void sha256_hash(const unsigned char data[],\r
unsigned long len, sha256_ctx ctx[1])\r
void sha256_end(unsigned char hval[], sha256_ctx ctx[1])\r
\r
- The first subroutine initialises a hash computation by setting up the \r
- context in the sha256_ctx context. The second subroutine hashes 8-bit \r
- bytes from array data[] into the hash state withinh sha256_ctx context, \r
- the number of bytes to be hashed being given by the the unsigned long \r
- integer len. The third subroutine completes the hash calculation and \r
+ The first subroutine initialises a hash computation by setting up the\r
+ context in the sha256_ctx context. The second subroutine hashes 8-bit\r
+	bytes from array data[] into the hash state within sha256_ctx context,
+	the number of bytes to be hashed being given by the unsigned long
+ integer len. The third subroutine completes the hash calculation and\r
places the resulting digest value in the array of 8-bit bytes hval[].\r
\r
The sha384 and sha512 functions are similar and use the interfaces:\r
\r
void sha384_begin(sha384_ctx ctx[1]);\r
- void sha384_hash(const unsigned char data[], \r
+ void sha384_hash(const unsigned char data[],\r
unsigned long len, sha384_ctx ctx[1]);\r
void sha384_end(unsigned char hval[], sha384_ctx ctx[1]);\r
\r
void sha512_begin(sha512_ctx ctx[1]);\r
- void sha512_hash(const unsigned char data[], \r
+ void sha512_hash(const unsigned char data[],\r
unsigned long len, sha512_ctx ctx[1]);\r
void sha512_end(unsigned char hval[], sha512_ctx ctx[1]);\r
\r
functions using a call with a hash length parameter as follows:\r
\r
int sha2_begin(unsigned long len, sha2_ctx ctx[1]);\r
- void sha2_hash(const unsigned char data[], \r
+ void sha2_hash(const unsigned char data[],\r
unsigned long len, sha2_ctx ctx[1]);\r
void sha2_end(unsigned char hval[], sha2_ctx ctx[1]);\r
\r
- My thanks to Erik Andersen <andersen@codepoet.org> for testing this code \r
+ My thanks to Erik Andersen <andersen@codepoet.org> for testing this code\r
on big-endian systems and for his assistance with corrections\r
*/\r
\r
/* BYTE ORDER IN 32-BIT WORDS\r
\r
To obtain the highest speed on processors with 32-bit words, this code\r
- needs to determine the byte order of the target machine. The following \r
- block of code is an attempt to capture the most obvious ways in which \r
- various environemnts define byte order. It may well fail, in which case \r
- the definitions will need to be set by editing at the points marked \r
- **** EDIT HERE IF NECESSARY **** below. My thanks to Peter Gutmann for \r
+ needs to determine the byte order of the target machine. The following\r
+ block of code is an attempt to capture the most obvious ways in which\r
+ various environments define byte order. It may well fail, in which case
+ the definitions will need to be set by editing at the points marked\r
+ **** EDIT HERE IF NECESSARY **** below. My thanks to Peter Gutmann for\r
some of these defines (from cryptlib).\r
*/\r
\r
#if defined(SWAP_BYTES)\r
#define bsw_32(p,n) { int _i = (n); while(_i--) p[_i] = bswap_32(p[_i]); }\r
#else\r
-#define bsw_32(p,n) \r
+#define bsw_32(p,n)\r
#endif\r
\r
/* SHA256 mixing function definitions */\r
\r
#endif\r
\r
-#define s256_0(x) (rotr32((x), 2) ^ rotr32((x), 13) ^ rotr32((x), 22)) \r
-#define s256_1(x) (rotr32((x), 6) ^ rotr32((x), 11) ^ rotr32((x), 25)) \r
-#define g256_0(x) (rotr32((x), 7) ^ rotr32((x), 18) ^ ((x) >> 3)) \r
-#define g256_1(x) (rotr32((x), 17) ^ rotr32((x), 19) ^ ((x) >> 10)) \r
+#define s256_0(x) (rotr32((x), 2) ^ rotr32((x), 13) ^ rotr32((x), 22))\r
+#define s256_1(x) (rotr32((x), 6) ^ rotr32((x), 11) ^ rotr32((x), 25))\r
+#define g256_0(x) (rotr32((x), 7) ^ rotr32((x), 18) ^ ((x) >> 3))\r
+#define g256_1(x) (rotr32((x), 17) ^ rotr32((x), 19) ^ ((x) >> 10))\r
\r
/* rotated SHA256 round definition. Rather than swapping variables as in */\r
/* FIPS-180, different variables are 'rotated' on each round, returning */\r
/* SHA256 mixing data */\r
\r
const sha2_32t k256[64] =\r
-{ n_u32(428a2f98), n_u32(71374491), n_u32(b5c0fbcf), n_u32(e9b5dba5), \r
- n_u32(3956c25b), n_u32(59f111f1), n_u32(923f82a4), n_u32(ab1c5ed5), \r
- n_u32(d807aa98), n_u32(12835b01), n_u32(243185be), n_u32(550c7dc3), \r
- n_u32(72be5d74), n_u32(80deb1fe), n_u32(9bdc06a7), n_u32(c19bf174), \r
- n_u32(e49b69c1), n_u32(efbe4786), n_u32(0fc19dc6), n_u32(240ca1cc), \r
- n_u32(2de92c6f), n_u32(4a7484aa), n_u32(5cb0a9dc), n_u32(76f988da), \r
- n_u32(983e5152), n_u32(a831c66d), n_u32(b00327c8), n_u32(bf597fc7), \r
- n_u32(c6e00bf3), n_u32(d5a79147), n_u32(06ca6351), n_u32(14292967), \r
- n_u32(27b70a85), n_u32(2e1b2138), n_u32(4d2c6dfc), n_u32(53380d13), \r
+{ n_u32(428a2f98), n_u32(71374491), n_u32(b5c0fbcf), n_u32(e9b5dba5),\r
+ n_u32(3956c25b), n_u32(59f111f1), n_u32(923f82a4), n_u32(ab1c5ed5),\r
+ n_u32(d807aa98), n_u32(12835b01), n_u32(243185be), n_u32(550c7dc3),\r
+ n_u32(72be5d74), n_u32(80deb1fe), n_u32(9bdc06a7), n_u32(c19bf174),\r
+ n_u32(e49b69c1), n_u32(efbe4786), n_u32(0fc19dc6), n_u32(240ca1cc),\r
+ n_u32(2de92c6f), n_u32(4a7484aa), n_u32(5cb0a9dc), n_u32(76f988da),\r
+ n_u32(983e5152), n_u32(a831c66d), n_u32(b00327c8), n_u32(bf597fc7),\r
+ n_u32(c6e00bf3), n_u32(d5a79147), n_u32(06ca6351), n_u32(14292967),\r
+ n_u32(27b70a85), n_u32(2e1b2138), n_u32(4d2c6dfc), n_u32(53380d13),\r
n_u32(650a7354), n_u32(766a0abb), n_u32(81c2c92e), n_u32(92722c85),\r
- n_u32(a2bfe8a1), n_u32(a81a664b), n_u32(c24b8b70), n_u32(c76c51a3), \r
- n_u32(d192e819), n_u32(d6990624), n_u32(f40e3585), n_u32(106aa070), \r
- n_u32(19a4c116), n_u32(1e376c08), n_u32(2748774c), n_u32(34b0bcb5), \r
- n_u32(391c0cb3), n_u32(4ed8aa4a), n_u32(5b9cca4f), n_u32(682e6ff3), \r
- n_u32(748f82ee), n_u32(78a5636f), n_u32(84c87814), n_u32(8cc70208), \r
+ n_u32(a2bfe8a1), n_u32(a81a664b), n_u32(c24b8b70), n_u32(c76c51a3),\r
+ n_u32(d192e819), n_u32(d6990624), n_u32(f40e3585), n_u32(106aa070),\r
+ n_u32(19a4c116), n_u32(1e376c08), n_u32(2748774c), n_u32(34b0bcb5),\r
+ n_u32(391c0cb3), n_u32(4ed8aa4a), n_u32(5b9cca4f), n_u32(682e6ff3),\r
+ n_u32(748f82ee), n_u32(78a5636f), n_u32(84c87814), n_u32(8cc70208),\r
n_u32(90befffa), n_u32(a4506ceb), n_u32(bef9a3f7), n_u32(c67178f2),\r
};\r
\r
/* and call the hash_compile function as required. */\r
\r
sha2_void sha256_hash(const unsigned char data[], unsigned long len, sha256_ctx ctx[1])\r
-{ sha2_32t pos = (sha2_32t)(ctx->count[0] & SHA256_MASK), \r
+{ sha2_32t pos = (sha2_32t)(ctx->count[0] & SHA256_MASK),\r
space = SHA256_BLOCK_SIZE - pos;\r
const unsigned char *sp = data;\r
\r
while(len >= space) /* tranfer whole blocks while possible */\r
{\r
memcpy(((unsigned char*)ctx->wbuf) + pos, sp, space);\r
- sp += space; len -= space; space = SHA256_BLOCK_SIZE; pos = 0; \r
+ sp += space; len -= space; space = SHA256_BLOCK_SIZE; pos = 0;\r
bsw_32(ctx->wbuf, SHA256_BLOCK_SIZE >> 2)\r
sha256_compile(ctx);\r
}\r
/* bytes in the buffer are now in an order in which references */\r
/* to 32-bit words will put bytes with lower addresses into the */\r
/* top of 32 bit words on BOTH big and little endian machines */\r
- \r
+\r
/* we now need to mask valid bytes and add the padding which is */\r
/* a single 1 bit and as many zero bits as necessary. */\r
ctx->wbuf[i >> 2] = (ctx->wbuf[i >> 2] & m1[i & 3]) | b1[i & 3];\r
else /* compute a word index for the empty buffer positions */\r
i = (i >> 2) + 1;\r
\r
- while(i < 14) /* and zero pad all but last two positions */ \r
+ while(i < 14) /* and zero pad all but last two positions */\r
ctx->wbuf[i++] = 0;\r
- \r
+\r
/* the following 32-bit length fields are assembled in the */\r
/* wrong byte order on little endian machines but this is */\r
/* corrected later since they are only ever used as 32-bit */\r
hval[i] = (unsigned char)(ctx->hash[i >> 2] >> (8 * (~i & 3)));\r
}\r
\r
-sha2_void sha256(unsigned char hval[], const unsigned char data[], unsigned long len) \r
+sha2_void sha256(unsigned char hval[], const unsigned char data[], unsigned long len)\r
{ sha256_ctx cx[1];\r
- \r
+\r
sha256_begin(cx); sha256_hash(data, len, cx); sha256_end(hval, cx);\r
}\r
\r
#if defined(SWAP_BYTES)\r
#define bsw_64(p,n) { int _i = (n); while(_i--) p[_i] = bswap_64(p[_i]); }\r
#else\r
-#define bsw_64(p,n) \r
+#define bsw_64(p,n)\r
#endif\r
\r
/* SHA512 mixing function definitions */\r
\r
-#define s512_0(x) (rotr64((x), 28) ^ rotr64((x), 34) ^ rotr64((x), 39)) \r
-#define s512_1(x) (rotr64((x), 14) ^ rotr64((x), 18) ^ rotr64((x), 41)) \r
-#define g512_0(x) (rotr64((x), 1) ^ rotr64((x), 8) ^ ((x) >> 7)) \r
-#define g512_1(x) (rotr64((x), 19) ^ rotr64((x), 61) ^ ((x) >> 6)) \r
+#define s512_0(x) (rotr64((x), 28) ^ rotr64((x), 34) ^ rotr64((x), 39))\r
+#define s512_1(x) (rotr64((x), 14) ^ rotr64((x), 18) ^ rotr64((x), 41))\r
+#define g512_0(x) (rotr64((x), 1) ^ rotr64((x), 8) ^ ((x) >> 7))\r
+#define g512_1(x) (rotr64((x), 19) ^ rotr64((x), 61) ^ ((x) >> 6))\r
\r
/* rotated SHA512 round definition. Rather than swapping variables as in */\r
/* FIPS-180, different variables are 'rotated' on each round, returning */\r
\r
/* SHA384/SHA512 mixing data */\r
\r
-const sha2_64t k512[80] = \r
+const sha2_64t k512[80] =\r
{\r
- n_u64(428a2f98d728ae22), n_u64(7137449123ef65cd), \r
+ n_u64(428a2f98d728ae22), n_u64(7137449123ef65cd),\r
n_u64(b5c0fbcfec4d3b2f), n_u64(e9b5dba58189dbbc),\r
n_u64(3956c25bf348b538), n_u64(59f111f1b605d019),\r
n_u64(923f82a4af194f9b), n_u64(ab1c5ed5da6d8118),\r
/* and little endian systems */\r
\r
sha2_void sha512_hash(const unsigned char data[], unsigned long len, sha512_ctx ctx[1])\r
-{ sha2_32t pos = (sha2_32t)(ctx->count[0] & SHA512_MASK), \r
+{ sha2_32t pos = (sha2_32t)(ctx->count[0] & SHA512_MASK),\r
space = SHA512_BLOCK_SIZE - pos;\r
const unsigned char *sp = data;\r
\r
while(len >= space) /* tranfer whole blocks while possible */\r
{\r
memcpy(((unsigned char*)ctx->wbuf) + pos, sp, space);\r
- sp += space; len -= space; space = SHA512_BLOCK_SIZE; pos = 0; \r
- bsw_64(ctx->wbuf, SHA512_BLOCK_SIZE >> 3); \r
+ sp += space; len -= space; space = SHA512_BLOCK_SIZE; pos = 0;\r
+ bsw_64(ctx->wbuf, SHA512_BLOCK_SIZE >> 3);\r
sha512_compile(ctx);\r
}\r
\r
\r
static sha2_64t m2[8] =\r
{\r
- n_u64(0000000000000000), n_u64(ff00000000000000), \r
+ n_u64(0000000000000000), n_u64(ff00000000000000),\r
n_u64(ffff000000000000), n_u64(ffffff0000000000),\r
n_u64(ffffffff00000000), n_u64(ffffffffff000000),\r
n_u64(ffffffffffff0000), n_u64(ffffffffffffff00)\r
\r
static sha2_64t b2[8] =\r
{\r
- n_u64(8000000000000000), n_u64(0080000000000000), \r
+ n_u64(8000000000000000), n_u64(0080000000000000),\r
n_u64(0000800000000000), n_u64(0000008000000000),\r
- n_u64(0000000080000000), n_u64(0000000000800000), \r
+ n_u64(0000000080000000), n_u64(0000000000800000),\r
n_u64(0000000000008000), n_u64(0000000000000080)\r
};\r
\r
/* bytes in the buffer are now in an order in which references */\r
/* to 64-bit words will put bytes with lower addresses into the */\r
/* top of 64 bit words on BOTH big and little endian machines */\r
- \r
+\r
/* we now need to mask valid bytes and add the padding which is */\r
/* a single 1 bit and as many zero bits as necessary. */\r
ctx->wbuf[i >> 3] = (ctx->wbuf[i >> 3] & m2[i & 7]) | b2[i & 7];\r
\r
while(i < 14)\r
ctx->wbuf[i++] = 0;\r
- \r
+\r
/* the following 64-bit length fields are assembled in the */\r
/* wrong byte order on little endian machines but this is */\r
/* corrected later since they are only ever used as 64-bit */\r
\r
/* SHA384 initialisation data */\r
\r
-const sha2_64t i384[80] = \r
+const sha2_64t i384[80] =\r
{\r
n_u64(cbbb9d5dc1059ed8), n_u64(629a292a367cd507),\r
n_u64(9159015a3070dd17), n_u64(152fecd8f70e5939),\r
\r
sha2_void sha384(unsigned char hval[], const unsigned char data[], unsigned long len)\r
{ sha384_ctx cx[1];\r
- \r
+\r
sha384_begin(cx); sha384_hash(data, len, cx); sha384_end(hval, cx);\r
}\r
\r
\r
/* SHA512 initialisation data */\r
\r
-const sha2_64t i512[80] = \r
+const sha2_64t i512[80] =\r
{\r
n_u64(6a09e667f3bcc908), n_u64(bb67ae8584caa73b),\r
n_u64(3c6ef372fe94f82b), n_u64(a54ff53a5f1d36f1),\r
sha_end(hval, ctx, SHA512_DIGEST_SIZE);\r
}\r
\r
-sha2_void sha512(unsigned char hval[], const unsigned char data[], unsigned long len) \r
+sha2_void sha512(unsigned char hval[], const unsigned char data[], unsigned long len)\r
{ sha512_ctx cx[1];\r
- \r
+\r
sha512_begin(cx); sha512_hash(data, len, cx); sha512_end(hval, cx);\r
}\r
\r
switch(len)\r
{\r
case 256: l = len >> 3;\r
+ /* Falls through. */\r
case 32: CTX_256(ctx)->count[0] = CTX_256(ctx)->count[1] = 0;\r
memcpy(CTX_256(ctx)->hash, i256, 32); break;\r
case 384: l = len >> 3;\r
+ /* Falls through. */\r
case 48: CTX_384(ctx)->count[0] = CTX_384(ctx)->count[1] = 0;\r
memcpy(CTX_384(ctx)->hash, i384, 64); break;\r
case 512: l = len >> 3;\r
+ /* Falls through. */\r
case 64: CTX_512(ctx)->count[0] = CTX_512(ctx)->count[1] = 0;\r
memcpy(CTX_512(ctx)->hash, i512, 64); break;\r
default: return SHA2_BAD;\r
}\r
- \r
+\r
ctx->sha2_len = l; return SHA2_GOOD;\r
}\r
\r