Custom OpenGL ES shader in GPUImage produces a black image
This is based on this other OpenGL ES image filter, which works:
uniform sampler2D texture;
uniform float amount;
uniform vec2 texSize;
varying vec2 texCoord;
void main() {
    vec4 color = texture2D(texture, texCoord);
    vec4 orig = color;
    /* High pass filter */
    vec4 highpass = color * 5.0;
    float dx = 1.0/texSize.x;
    float dy = 1.0/texSize.y;
    highpass += texture2D(texture, texCoord + vec2(-dx, -dy)) * -0.625;
    highpass += texture2D(texture, texCoord + vec2(dx, -dy)) * -0.625;
    highpass += texture2D(texture, texCoord + vec2(dx, dy)) * -0.625;
    highpass += texture2D(texture, texCoord + vec2(-dx, dy)) * -0.625;
    highpass += texture2D(texture, texCoord + vec2(-dx * 2.0, -dy * 2.0)) * -0.625;
    highpass += texture2D(texture, texCoord + vec2(dx * 2.0, -dy * 2.0)) * -0.625;
    highpass += texture2D(texture, texCoord + vec2(dx * 2.0, dy * 2.0)) * -0.625;
    highpass += texture2D(texture, texCoord + vec2(-dx * 2.0, dy * 2.0)) * -0.625;
    highpass.a = 1.0;
    /* Overlay blend */
    vec3 overlay = vec3(1.0);
    if (highpass.r <= 0.5) {
        overlay.r = 2.0 * color.r * highpass.r;
    } else {
        overlay.r = 1.0 - 2.0 * (1.0 - color.r) * (1.0 - highpass.r);
    }
    if (highpass.g <= 0.5) {
        overlay.g = 2.0 * color.g * highpass.g;
    } else {
        overlay.g = 1.0 - 2.0 * (1.0 - color.g) * (1.0 - highpass.g);
    }
    if (highpass.b <= 0.5) {
        overlay.b = 2.0 * color.b * highpass.b;
    } else {
        overlay.b = 1.0 - 2.0 * (1.0 - color.b) * (1.0 - highpass.b);
    }
    color.rgb = (overlay * 0.8) + (orig.rgb * 0.2);
    /* Desaturated hard light */
    vec3 desaturated = vec3(orig.r + orig.g + orig.b/3.0);
    if (desaturated.r <= 0.5) {
        color.rgb = 2.0 * color.rgb * desaturated;
    } else {
        color.rgb = vec3(1.0) - vec3(2.0) * (vec3(1.0) - color.rgb) * (vec3(1.0) - desaturated);
    }
    color = (orig * 0.6) + (color * 0.4);
    /* Add back some color */
    float average = (color.r + color.g + color.b)/3.0;
    color.rgb += (average - color.rgb) * (1.0 - 1.0/(1.001 - 0.45));
    gl_FragColor = (color * amount) + (orig * (1.0 - amount));
}
As per my question yesterday, I know to assign a precision to each float and vec. This time it compiles fine, but when I apply the filter in GPUImage (for example, by setting the value of clarity to 0.8), the image comes out black. My gut tells me it has something to do with the texture size, but without knowing how GPUImage handles that, I'm a bit stuck.
Here's my Objective-C implementation of how I'd like to do it:
.h
#import <GPUImage/GPUImage.h>
@interface GPUImageClarityFilter : GPUImageFilter
{
    GLint clarityUniform;
}
// Gives the image a gritty, surreal contrasty effect
// Value 0 to 1
@property (readwrite, nonatomic) GLfloat clarity;
@end
.m
#import "GPUImageClarityFilter.h"
#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE
NSString *const kGPUImageClarityFragmentShaderString = SHADER_STRING
(
uniform sampler2D inputImageTexture;
uniform lowp float clarity;
uniform highp vec2 textureSize;
varying highp vec2 textureCoordinate;
void main() {
    highp vec4 color = texture2D(inputImageTexture, textureCoordinate);
    highp vec4 orig = color;
    /* High pass filter */
    highp vec4 highpass = color * 5.0;
    highp float dx = 1.0/textureSize.x;
    highp float dy = 1.0/textureSize.y;
    highpass += texture2D(inputImageTexture, textureCoordinate + vec2(-dx, -dy)) * -0.625;
    highpass += texture2D(inputImageTexture, textureCoordinate + vec2(dx, -dy)) * -0.625;
    highpass += texture2D(inputImageTexture, textureCoordinate + vec2(dx, dy)) * -0.625;
    highpass += texture2D(inputImageTexture, textureCoordinate + vec2(-dx, dy)) * -0.625;
    highpass += texture2D(inputImageTexture, textureCoordinate + vec2(-dx * 2.0, -dy * 2.0)) * -0.625;
    highpass += texture2D(inputImageTexture, textureCoordinate + vec2(dx * 2.0, -dy * 2.0)) * -0.625;
    highpass += texture2D(inputImageTexture, textureCoordinate + vec2(dx * 2.0, dy * 2.0)) * -0.625;
    highpass += texture2D(inputImageTexture, textureCoordinate + vec2(-dx * 2.0, dy * 2.0)) * -0.625;
    highpass.a = 1.0;
    /* Overlay blend */
    highp vec3 overlay = vec3(1.0);
    if (highpass.r <= 0.5) {
        overlay.r = 2.0 * color.r * highpass.r;
    } else {
        overlay.r = 1.0 - 2.0 * (1.0 - color.r) * (1.0 - highpass.r);
    }
    if (highpass.g <= 0.5) {
        overlay.g = 2.0 * color.g * highpass.g;
    } else {
        overlay.g = 1.0 - 2.0 * (1.0 - color.g) * (1.0 - highpass.g);
    }
    if (highpass.b <= 0.5) {
        overlay.b = 2.0 * color.b * highpass.b;
    } else {
        overlay.b = 1.0 - 2.0 * (1.0 - color.b) * (1.0 - highpass.b);
    }
    color.rgb = (overlay * 0.8) + (orig.rgb * 0.2);
    /* Desaturated hard light */
    highp vec3 desaturated = vec3(orig.r + orig.g + orig.b/3.0);
    if (desaturated.r <= 0.5) {
        color.rgb = 2.0 * color.rgb * desaturated;
    } else {
        color.rgb = vec3(1.0) - vec3(2.0) * (vec3(1.0) - color.rgb) * (vec3(1.0) - desaturated);
    }
    color = (orig * 0.6) + (color * 0.4);
    /* Add back some color */
    highp float average = (color.r + color.g + color.b)/3.0;
    color.rgb += (average - color.rgb) * (1.0 - 1.0/(1.001 - 0.45));
    gl_FragColor = (color * clarity) + (orig * (1.0 - clarity));
}
);
#else
NSString *const kGPUImageClarityFragmentShaderString = SHADER_STRING
(
uniform sampler2D inputImageTexture;
uniform float clarity;
uniform vec2 textureSize;
varying vec2 textureCoordinate;
void main() {
    vec4 color = texture2D(inputImageTexture, textureCoordinate);
    vec4 orig = color;
    /* High pass filter */
    vec4 highpass = color * 5.0;
    float dx = 1.0/textureSize.x;
    float dy = 1.0/textureSize.y;
    highpass += texture2D(inputImageTexture, textureCoordinate + vec2(-dx, -dy)) * -0.625;
    highpass += texture2D(inputImageTexture, textureCoordinate + vec2(dx, -dy)) * -0.625;
    highpass += texture2D(inputImageTexture, textureCoordinate + vec2(dx, dy)) * -0.625;
    highpass += texture2D(inputImageTexture, textureCoordinate + vec2(-dx, dy)) * -0.625;
    highpass += texture2D(inputImageTexture, textureCoordinate + vec2(-dx * 2.0, -dy * 2.0)) * -0.625;
    highpass += texture2D(inputImageTexture, textureCoordinate + vec2(dx * 2.0, -dy * 2.0)) * -0.625;
    highpass += texture2D(inputImageTexture, textureCoordinate + vec2(dx * 2.0, dy * 2.0)) * -0.625;
    highpass += texture2D(inputImageTexture, textureCoordinate + vec2(-dx * 2.0, dy * 2.0)) * -0.625;
    highpass.a = 1.0;
    /* Overlay blend */
    vec3 overlay = vec3(1.0);
    if (highpass.r <= 0.5) {
        overlay.r = 2.0 * color.r * highpass.r;
    } else {
        overlay.r = 1.0 - 2.0 * (1.0 - color.r) * (1.0 - highpass.r);
    }
    if (highpass.g <= 0.5) {
        overlay.g = 2.0 * color.g * highpass.g;
    } else {
        overlay.g = 1.0 - 2.0 * (1.0 - color.g) * (1.0 - highpass.g);
    }
    if (highpass.b <= 0.5) {
        overlay.b = 2.0 * color.b * highpass.b;
    } else {
        overlay.b = 1.0 - 2.0 * (1.0 - color.b) * (1.0 - highpass.b);
    }
    color.rgb = (overlay * 0.8) + (orig.rgb * 0.2);
    /* Desaturated hard light */
    vec3 desaturated = vec3(orig.r + orig.g + orig.b/3.0);
    if (desaturated.r <= 0.5) {
        color.rgb = 2.0 * color.rgb * desaturated;
    } else {
        color.rgb = vec3(1.0) - vec3(2.0) * (vec3(1.0) - color.rgb) * (vec3(1.0) - desaturated);
    }
    color = (orig * 0.6) + (color * 0.4);
    /* Add back some color */
    float average = (color.r + color.g + color.b)/3.0;
    color.rgb += (average - color.rgb) * (1.0 - 1.0/(1.001 - 0.45));
    gl_FragColor = (color * clarity) + (orig * (1.0 - clarity));
}
);
#endif
@implementation GPUImageClarityFilter
@synthesize clarity = _clarity;
#pragma mark -
#pragma mark Initialization and teardown
- (id)init;
{
    if (!(self = [super initWithFragmentShaderFromString:kGPUImageClarityFragmentShaderString]))
    {
        return nil;
    }

    clarityUniform = [filterProgram uniformIndex:@"clarity"];
    self.clarity = 0.0;

    return self;
}
#pragma mark -
#pragma mark Accessors
- (void)setClarity:(GLfloat)clarity;
{
    _clarity = clarity;

    [self setFloat:_clarity forUniform:clarityUniform program:filterProgram];
}
@end
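And this is roughly how the filter gets applied (a minimal sketch — the source image and the capture calls here are placeholders rather than my exact code):

GPUImagePicture *stillImageSource = [[GPUImagePicture alloc] initWithImage:inputImage]; // inputImage: any UIImage
GPUImageClarityFilter *clarityFilter = [[GPUImageClarityFilter alloc] init];
clarityFilter.clarity = 0.8;

[stillImageSource addTarget:clarityFilter];
[clarityFilter useNextFrameForImageCapture];   // on older GPUImage versions this capture pair
[stillImageSource processImage];               // may instead be -imageFromCurrentlyProcessedOutput
UIImage *filteredImage = [clarityFilter imageFromCurrentFramebuffer];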
The other thing I thought of was using GPUImage's built-in low-pass and high-pass filters, but I have a feeling that would end up being a pretty clunky solution.
This is probably due to textureSize not being one of the standard uniforms that GPUImageFilter provides for you. inputImageTexture and textureCoordinate are standard inputs supplied by one of these filters, and it looks like you're providing the clarity uniform yourself.

Because textureSize is never set, it defaults to 0.0. Your 1.0/textureSize.x calculation then divides by zero, which tends to produce black frames in iOS fragment shaders.
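A quick way to confirm that diagnosis is to temporarily guard the division in the fragment shader (a debugging aid only, not a fix — the uniform still needs to be supplied with the real size):

// Temporary sanity check: clamp the texture size so an unset uniform (0.0)
// can't divide by zero. If the output stops being black, the uniform isn't being set.
highp vec2 guardedSize = max(textureSize, vec2(1.0, 1.0));
highp float dx = 1.0/guardedSize.x;
highp float dy = 1.0/guardedSize.y;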
You could calculate and supply that uniform yourself, or you could instead base your custom filter on GPUImage3x3TextureSamplingFilter. That filter base class feeds the result of 1.0/textureSize.x into the shader as the texelWidth uniform (along with a matching texelHeight for the vertical component), so you don't have to calculate it. It also calculates the texture coordinates of the eight surrounding pixels for you, so you can cut out four of the coordinate calculations above and turn them into non-dependent texture reads. You'd only need to compute the four coordinates based on 2 * texelWidth and 2 * texelHeight yourself to do the remaining four reads.
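For example, a sketch of what the iOS fragment shader could look like on top of that base class (the texelWidth / texelHeight uniforms and the *TextureCoordinate varyings are the names I believe GPUImage3x3TextureSamplingFilter's nearby-texel vertex shader declares — verify them against your copy of the framework; the later blend stages are elided here):

@interface GPUImageClarityFilter : GPUImage3x3TextureSamplingFilter
@property (readwrite, nonatomic) GLfloat clarity;
@end

NSString *const kGPUImageClarityFragmentShaderString = SHADER_STRING
(
 uniform sampler2D inputImageTexture;
 uniform lowp float clarity;
 uniform highp float texelWidth;   // set for you by the base class (1.0 / width)
 uniform highp float texelHeight;  // set for you by the base class (1.0 / height)

 varying highp vec2 textureCoordinate;
 varying highp vec2 topLeftTextureCoordinate;     // the four 1-texel corner coordinates are
 varying highp vec2 topRightTextureCoordinate;    // computed in the vertex shader, making
 varying highp vec2 bottomLeftTextureCoordinate;  // these samples non-dependent texture reads
 varying highp vec2 bottomRightTextureCoordinate;

 void main() {
     highp vec4 color = texture2D(inputImageTexture, textureCoordinate);
     highp vec4 orig = color;

     /* High pass filter: 1-texel corner reads use the precomputed varyings */
     highp vec4 highpass = color * 5.0;
     highpass += texture2D(inputImageTexture, topLeftTextureCoordinate) * -0.625;
     highpass += texture2D(inputImageTexture, topRightTextureCoordinate) * -0.625;
     highpass += texture2D(inputImageTexture, bottomLeftTextureCoordinate) * -0.625;
     highpass += texture2D(inputImageTexture, bottomRightTextureCoordinate) * -0.625;

     /* 2-texel corner reads are still computed here, from the texel size the base class supplies */
     highp float dx2 = 2.0 * texelWidth;
     highp float dy2 = 2.0 * texelHeight;
     highpass += texture2D(inputImageTexture, textureCoordinate + vec2(-dx2, -dy2)) * -0.625;
     highpass += texture2D(inputImageTexture, textureCoordinate + vec2(dx2, -dy2)) * -0.625;
     highpass += texture2D(inputImageTexture, textureCoordinate + vec2(dx2, dy2)) * -0.625;
     highpass += texture2D(inputImageTexture, textureCoordinate + vec2(-dx2, dy2)) * -0.625;
     highpass.a = 1.0;

     /* ... overlay blend, desaturated hard light, and color add-back as in the original ... */

     gl_FragColor = (color * clarity) + (orig * (1.0 - clarity));
 }
);

As far as I can tell, the -init shown earlier can stay the same apart from the superclass, since GPUImage3x3TextureSamplingFilter also accepts a custom fragment shader through initWithFragmentShaderFromString:.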
In fact, you could probably break this operation into multiple passes to save on calculations: do a small box blur, then the overlay blend, then the final stage of this filter. That could speed this up even further.
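Very roughly, the wiring for such a multi-pass version could look like this (GPUImageBoxBlurFilter and GPUImageOverlayBlendFilter are stock GPUImage filters; ClarityFinalStageFilter is a hypothetical filter that would hold the remaining hard-light and add-back stages, and the details of turning the blur output into the high-pass term are left out):

GPUImageBoxBlurFilter *smallBoxBlur = [[GPUImageBoxBlurFilter alloc] init];
GPUImageOverlayBlendFilter *overlayBlend = [[GPUImageOverlayBlendFilter alloc] init];
ClarityFinalStageFilter *finalStage = [[ClarityFinalStageFilter alloc] init]; // hypothetical

// stillImageSource is whatever source feeds the chain. The original image feeds
// the blend's first input and the blur; the blurred result becomes the blend's
// second input (input slots follow the order of the addTarget: calls).
[stillImageSource addTarget:overlayBlend];
[stillImageSource addTarget:smallBoxBlur];
[smallBoxBlur addTarget:overlayBlend];
[overlayBlend addTarget:finalStage];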
So you can override the - (void)setupFilterForSize:(CGSize)filterFrameSize method to set the width and height factors, the way GPUImageSharpenFilter does.
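In the filter as written, that would look something like the following (a sketch: textureSizeUniform is a new ivar cached the same way clarityUniform is, and it assumes GPUImageFilter's setSize:forUniform:program: helper; GPUImageSharpenFilter additionally swaps width and height when GPUImageRotationSwapsWidthAndHeight(inputRotation) is true, which is omitted here):

// In -init, alongside the clarity uniform:
//     textureSizeUniform = [filterProgram uniformIndex:@"textureSize"];

// GPUImageFilter calls this whenever the input size becomes known or changes,
// so textureSize no longer stays at its default of 0.0.
- (void)setupFilterForSize:(CGSize)filterFrameSize;
{
    [self setSize:filterFrameSize forUniform:textureSizeUniform program:filterProgram];
}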
Thanks Brad, I'll give it a shot. – brandonscript
Spent some time fiddling with it, but unfortunately I'm not familiar enough with writing shaders to figure out how to integrate the 3x3TextureSamplingFilter. Instead I just played with some hard-coded values for textureSize and landed on 320.0, which gives me a nice effect. – brandonscript
Damn, of course that only works when the input image is a particular size. – brandonscript